branch_name stringclasses 149 values | text stringlengths 23 89.3M | directory_id stringlengths 40 40 | languages listlengths 1 19 | num_files int64 1 11.8k | repo_language stringclasses 38 values | repo_name stringlengths 6 114 | revision_id stringlengths 40 40 | snapshot_id stringlengths 40 40 |
|---|---|---|---|---|---|---|---|---|
refs/heads/master | <repo_name>GabrielGM01/Jogos_python<file_sep>/jogos.py
# Launcher menu: asks which game to play and dispatches to the chosen
# module's jogar() entry point.
import jogo_numero_secreto
import jogo_da_forca

print("----------------------------------")
print("--------Escolha o seu jogo--------")
print("----------------------------------")
print("(1)Forca (2) Numero secreto ")
# NOTE: int() raises ValueError if the player types something non-numeric;
# any number other than 1 or 2 simply exits without starting a game.
jogo = int(input("Digite o numero do jogo "))
if(jogo == 1):
    print("Jogando o jogo da forca")
    jogo_da_forca.jogar()
elif(jogo ==2):
    print("Jogando o jogo do Numero secreto")
    jogo_numero_secreto.jogar()
<file_sep>/jogo_da_forca.py
import random
"""--------------------------------------------------"""
def jogar():
    """Run one round of hangman.

    Loads a random secret word, then loops reading letter guesses until the
    word is fully revealed (win) or six wrong guesses are used up (loss).
    """
    apresentacao_do_game()
    palavra_secreta = carregar_palavra_secreta()
    letras_acertadas = verificar_letras_acertadas(palavra_secreta)
    erros = 6
    # BUGFIX: dropped the unused `vitoria` flag and made the loop
    # condition explicit instead of relying on int truthiness.
    while erros > 0:
        print(letras_acertadas)
        tentativa = chute()
        if tentativa in palavra_secreta:
            marca_chute_correto(tentativa, letras_acertadas, palavra_secreta)
            if "_" not in letras_acertadas:
                print(letras_acertadas)
                print("A palavra secreta é {}".format(palavra_secreta))
                print("Voce venceu")
                break
        else:
            erros -= 1
            if erros == 0:
                print("A palavra secreta é {}".format(palavra_secreta))
                print("Voce perdeu")
                break
            print("Voce tem {} tentativas".format(erros))
"""--------------------------------------------------"""
def apresentacao_do_game():
    """Print the hangman welcome banner (three lines)."""
    borda = "-------------------------------"
    for linha in (borda, "Bem vindo ao jogo da forca", borda):
        print(linha)
"""--------------------------------------------------"""
def carregar_palavra_secreta():
    """Load palavras.txt and return one random word, uppercased.

    Returns:
        str: a randomly chosen word from the file, in upper case.

    Raises:
        FileNotFoundError: if palavras.txt does not exist.
        IndexError: if the file contains no non-blank lines.
    """
    # BUGFIX: use a context manager so the file is closed even if reading
    # throws; skip blank lines so an empty trailing line can't be chosen.
    with open("palavras.txt", "r", encoding="utf-8") as arquivo:
        palavras = [linha.strip() for linha in arquivo if linha.strip()]
    return random.choice(palavras).upper()
"""-------------------------------------------------"""
def verificar_letras_acertadas(palavra):
    """Return a fresh list with one "_" placeholder per letter of *palavra*."""
    return list("_" * len(palavra))
"""-------------------------------------------------"""
def chute():
    """Prompt the player for a letter; return it stripped and uppercased."""
    return input("Digite uma Letra:").strip().upper()
"""-------------------------------------------------"""
def marca_chute_correto(tentativa, letras_acertadas, palavra_secreta):
    """Reveal, in-place, every position of *palavra_secreta* equal to *tentativa*."""
    for posicao, letra in enumerate(palavra_secreta):
        if letra == tentativa:
            letras_acertadas[posicao] = letra
"""-------------------------------------------------"""
# Allow playing hangman directly by running this module as a script.
if (__name__== "__main__"):
    jogar()
<file_sep>/README.md
# Jogos_python
Jogos que eu criei enquanto estava aprendendo Lógica de programação.
<file_sep>/jogo_numero_secreto.py
import random
def jogar():
    """Run the secret-number guessing game.

    The player picks a difficulty (which fixes the number of rounds) and
    guesses a number from 1 to 100. Each wrong guess deducts points equal to
    the distance from the secret number.
    """
    print("---------------------------------------------")
    print("Bem vindo ao jogo do numero secreto")
    print("---------------------------------------------")
    # BUGFIX: was `numero_secreto = numero_secreto = random.randrange(1,101)`
    # (redundant double assignment).
    numero_secreto = random.randrange(1, 101)
    pontos = 1000
    print("dificuldade")
    print("(1) facil (2)medio (3)dificil")
    # BUGFIX: corrected the "deificuldade" typo in the prompt.
    dificuldade = int(input("digite a dificuldade: "))
    if dificuldade == 1:
        tentativas = 20
    elif dificuldade == 2:
        tentativas = 10
    else:
        tentativas = 5
    for rodada in range(1, tentativas + 1):
        print("rodada:{} tentativas:{}".format(rodada, tentativas))
        chutestr = input("digite um numero entre 1 e 100 ")
        print("voce digitou:{}".format(chutestr))
        chute = int(chutestr)
        if chute < 1 or chute > 100:
            print("voce precisa digitar um numero de 1 a 100")
            continue  # NOTE: an out-of-range guess still consumes a round
        if chute == numero_secreto:
            print("parabens voce acertou-pontos:{}".format(pontos))
            break
        # Removed the unused `acerto` flag; compare directly instead of
        # precomputing maior/menor every round.
        if chute > numero_secreto:
            print("o numero e maior do que o numero secreto")
        else:
            print("o numero e menor do que o numero secreto")
        pontos -= abs(numero_secreto - chute)
    print("fim de jogo")
# Allow playing the secret-number game directly as a script.
if (__name__== "__main__"):
    jogar()
| affa7b9b97768699bbdebdd4b84da876705528a2 | [
"Markdown",
"Python"
] | 4 | Python | GabrielGM01/Jogos_python | 276aaf2bbb29d450ed70419894bc02f578a0a0e5 | 1b74e14d3881bba405b4d34781f0de590b2294d0 |
refs/heads/master | <repo_name>dchathu30/SWProject<file_sep>/Software/UpdateStaff.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
public partial class UpdateStaff : Form
{
    // Connection string and connection shared by every query on this form.
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);
    DataTable dt;
    SqlDataAdapter da;

    public UpdateStaff()
    {
        InitializeComponent();
    }

    // Designer-wired handler; intentionally empty.
    private void btnadd_Click(object sender, EventArgs e)
    {
    }

    // Populate the staff-id combo box with every Staff_ID when the form loads.
    private void UpdateStaff_Load(object sender, EventArgs e)
    {
        try
        {
            con.Open();
            SqlCommand myCommand = new SqlCommand("Select Staff_ID from Staff", con);
            SqlDataReader myReader = myCommand.ExecuteReader();
            while (myReader.Read())
            {
                cmbID.Items.Add(myReader.GetValue(0).ToString());
            }
            myReader.Close();
            myReader.Dispose();
        }
        finally
        {
            // BUGFIX: the shared connection used to stay open if the query
            // threw, making every later con.Open() on this form fail.
            con.Close();
        }
    }

    // Load the selected staff member's record into the edit fields.
    private void cmbID_SelectedIndexChanged(object sender, EventArgs e)
    {
        try
        {
            con.Open();
            SqlCommand myCommand = new SqlCommand("Select * from Staff WHERE Staff_ID=@Staff_ID", con);
            myCommand.Parameters.AddWithValue("@Staff_ID", cmbID.SelectedItem.ToString());
            SqlDataReader myReader = myCommand.ExecuteReader();
            while (myReader.Read())
            {
                // NOTE(review): ordinals assume the Staff column order
                // (1=Status, 2=Title, 3=First, 4=Last, 6=Position, 7=Address,
                // 8=Contact, 9=Subject) — confirm against the table schema.
                if (myReader.GetValue(1).ToString().Trim() == "Acedemic")
                    cmbStatus.SelectedItem = "Acedemic";
                else
                    cmbStatus.SelectedItem = "Non Acedemic";
                cmbTitle.Text = myReader.GetValue(2).ToString();
                txtName.Text = myReader.GetValue(3).ToString();
                txtlname.Text = myReader.GetValue(4).ToString();
                txtpos.Text = myReader.GetValue(6).ToString();
                txtAddress.Text = myReader.GetValue(7).ToString();
                txtContactNo.Text = myReader.GetValue(8).ToString();
                txtSubject.Text = myReader.GetValue(9).ToString();
            }
            myReader.Close();
            myReader.Dispose();
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message);
        }
        finally
        {
            // BUGFIX: previously the connection leaked open when the query
            // threw (the catch showed the error but never called Close).
            con.Close();
        }
    }

    // Designer-wired handler; intentionally empty.
    private void button1_Click(object sender, EventArgs e)
    {
    }

    // Clear every input field back to its default state.
    public void Reset()
    {
        cmbID.Text = ""; cmbStatus.SelectedItem = "Acedemic";
        cmbTitle.Text = "";
        txtName.Text = "";
        txtlname.Text = "";
        txtpos.Text = "";
        txtAddress.Text = "";
        txtContactNo.Text = "";
        txtSubject.Text = "";
    }

    // Designer-wired handler; intentionally empty.
    private void button1_Click_1(object sender, EventArgs e)
    {
    }

    // Save the edited values back to the selected staff record.
    private void btnAddStaff_Click(object sender, EventArgs e)
    {
        try
        {
            con.Open();
            SqlCommand myCommand = new SqlCommand("UPDATE Staff SET Status=@Status,Title=@Title,First_Name=@First_Name,Last_Name=@Last_Name,Position=@Position,Address=@Address,Contact_No=@Contact_No,Subject=@Subject WHERE Staff_ID=@Staff_ID", con);
            myCommand.Parameters.AddWithValue("@Status", cmbStatus.Text.ToString());
            myCommand.Parameters.AddWithValue("@Staff_ID", cmbID.Text.ToString());
            myCommand.Parameters.AddWithValue("@Title", cmbTitle.Text.ToString());
            myCommand.Parameters.AddWithValue("@First_Name", txtName.Text.ToString());
            myCommand.Parameters.AddWithValue("@Last_Name", txtlname.Text.ToString());
            myCommand.Parameters.AddWithValue("@Position", txtpos.Text.ToString());
            myCommand.Parameters.AddWithValue("@Address", txtAddress.Text.ToString());
            myCommand.Parameters.AddWithValue("@Contact_No", txtContactNo.Text.ToString());
            myCommand.Parameters.AddWithValue("@Subject", txtSubject.Text.ToString());
            myCommand.ExecuteNonQuery();
        }
        finally
        {
            // BUGFIX: close the shared connection even when the update fails.
            con.Close();
        }
        MessageBox.Show("Successfully Updated!!!");
        Reset();
    }

    // Delete the selected staff record after confirmation.
    private void button3_Click(object sender, EventArgs e)
    {
        if (MessageBox.Show("Are you sure???", "Delete", MessageBoxButtons.YesNo, MessageBoxIcon.Question) == DialogResult.Yes)
        {
            try
            {
                con.Open();
                SqlCommand myCommand = new SqlCommand("DELETE FROM Staff WHERE Staff_ID=@Staff_ID", con);
                myCommand.Parameters.AddWithValue("@Staff_ID", cmbID.Text.ToString());
                myCommand.ExecuteNonQuery();
            }
            finally
            {
                con.Close();
            }
            MessageBox.Show("Record is deleted!!", "confirmation", MessageBoxButtons.OK, MessageBoxIcon.Information);
            Reset();
        }
    }

    // Close this window.
    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // On close, refresh the parent Staff window and bring it to the front.
    private void UpdateStaff_FormClosing(object sender, FormClosingEventArgs e)
    {
        Staff st = (Staff)Application.OpenForms["Staff"];
        if (st != null)
        {
            st.TopMost = true;
            st.LoadStaff();
        }
    }
}
}
<file_sep>/Software/UpdateStudent.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
public partial class UpdateStudent : Form
{
    // Shared connection for every query issued by this form.
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);

    public UpdateStudent()
    {
        InitializeComponent();
    }

    // Save the edited values back to the StudentDetails row keyed by RegNo
    // (comboBox2). All failures surface as a single generic "Input Error" box.
    private void btnUpdate_Click(object sender, EventArgs e)
    {
        try
        {
            con.Open();
            SqlCommand myCommand = new SqlCommand("UPDATE StudentDetails SET Title=@title,FirstName=@first_name,LastName=@last_name,religion=@religion,Language=@mother_language,PermanantAddress=@permenent_address,Gender=@gender,NIC_No=@nic_num,ContactNumber=@contact_num,DateofBirth=@date_of_birth,AcedemicCourse=@academic_course,AcedemicYear=@academic_year,Hostel=@hostel_facilities,IndoorGames=@indoor_game,Athletics=@athletics,MajorGames=@major_games,Cultural=@cultural,EmailAddress=@email_address,FacebookId=@facebook_id,Status=@status WHERE RegNo=@reg_no", con);
            myCommand.Parameters.AddWithValue("@title", comboBox7.Text.ToString());
            myCommand.Parameters.AddWithValue("@first_name", txtFirstName.Text.ToString());
            myCommand.Parameters.AddWithValue("@last_name", txtLastName.Text.ToString());
            myCommand.Parameters.AddWithValue("@religion", comboBox8.Text.ToString());
            myCommand.Parameters.AddWithValue("@mother_language", comboBox9.Text.ToString());
            myCommand.Parameters.AddWithValue("@reg_no", comboBox2.Text.ToString());
            myCommand.Parameters.AddWithValue("@permenent_address", txtAddress.Text.ToString());
            myCommand.Parameters.AddWithValue("@gender", comboBox10.Text.ToString());
            myCommand.Parameters.AddWithValue("@nic_num", txtNIC.Text.ToString());
            myCommand.Parameters.AddWithValue("@contact_num", txtContactNo.Text.ToString());
            myCommand.Parameters.AddWithValue("@date_of_birth", dateTimePicker1.Text.ToString());
            myCommand.Parameters.AddWithValue("@academic_course", comboBox11.Text.ToString());
            myCommand.Parameters.AddWithValue("@academic_year", comboBox12.Text.ToString());
            // NOTE(review): if NEITHER radio button is checked, the
            // @hostel_facilities parameter is never added and ExecuteNonQuery
            // throws — the user only sees the generic "Input Error" box.
            if (radioButton1.Checked)
                myCommand.Parameters.AddWithValue("@hostel_facilities", txtRoomNo.Text.ToString());
            else if (radioButton2.Checked)
                myCommand.Parameters.AddWithValue("@hostel_facilities", radioButton2.Text.ToString());
            myCommand.Parameters.AddWithValue("@indoor_game", richTextBox1.Text.ToString());
            myCommand.Parameters.AddWithValue("@athletics", richTextBox2.Text.ToString());
            myCommand.Parameters.AddWithValue("@major_games", richTextBox3.Text.ToString());
            myCommand.Parameters.AddWithValue("@cultural", richTextBox4.Text.ToString());
            myCommand.Parameters.AddWithValue("@email_address", textBox57.Text.ToString());
            myCommand.Parameters.AddWithValue("@facebook_id", textBox56.Text.ToString());
            myCommand.Parameters.AddWithValue("@status", comboBox1.Text.ToString());
            myCommand.ExecuteNonQuery();
            con.Close();
            MessageBox.Show("Successfully Updated!!!");
            Reset();
        }
        catch (Exception ex)
        // NOTE(review): on failure the shared connection is left open, so
        // later con.Open() calls will throw until the app restarts.
        { MessageBox.Show("Input Error", "Error message", MessageBoxButtons.OK, MessageBoxIcon.Error); }
    }

    // On load: label the tab pages and fill the RegNo combo box.
    private void UpdateStudent_Load(object sender, EventArgs e)
    {
        tabPage1.Text = "Page 1";
        tabPage2.Text = "Page 2";
        tabPage3.Text = "Page 3";
        tabPage5.Text = "Page 4";
        con.Open();
        SqlCommand cmd = new SqlCommand("Select RegNo from StudentDetails", con);
        SqlDataReader rdr = cmd.ExecuteReader();
        while (rdr.Read())
        {
            comboBox2.Items.Add(rdr.GetValue(0).ToString());
        }
        rdr.Close();
        rdr.Dispose();
        con.Close();
    }

    // When a RegNo is picked, load that student's row into the edit fields.
    // NOTE(review): the GetValue ordinals below assume the StudentDetails
    // column order — confirm against the schema. comboBox12 (academic year)
    // and comboBox10 (gender) both read ordinal 6, which looks like a
    // copy/paste bug; likewise comboBox2 is overwritten from ordinal 5
    // while it is the selector itself.
    private void comboBox2_SelectedIndexChanged(object sender, EventArgs e)
    {
        con.Open();
        SqlCommand cmd = new SqlCommand("SELECT*FROM StudentDetails WHERE RegNo=@reg_no", con);
        cmd.Parameters.AddWithValue("@reg_no", comboBox2.Text.ToString());
        SqlDataReader myReader = cmd.ExecuteReader();
        while (myReader.Read())
        {
            comboBox2.Text = myReader.GetValue(5).ToString();
            comboBox7.Text = myReader.GetValue(1).ToString();
            txtFirstName.Text = myReader.GetValue(2).ToString();
            txtLastName.Text = myReader.GetValue(3).ToString();
            comboBox8.Text = myReader.GetValue(18).ToString();
            comboBox9.Text = myReader.GetValue(17).ToString();
            comboBox10.Text = myReader.GetValue(6).ToString();
            txtAddress.Text = myReader.GetValue(4).ToString();
            txtNIC.Text = myReader.GetValue(7).ToString();
            txtContactNo.Text = myReader.GetValue(12).ToString();
            dateTimePicker1.Text = myReader.GetValue(9).ToString();
            comboBox12.Text = myReader.GetValue(6).ToString();
            comboBox11.Text = myReader.GetValue(10).ToString();
            // Hostel column (ordinal 22): "Yes" means the student has a room.
            if (myReader.GetValue(22).ToString() != "N/A" && myReader.GetValue(22).ToString()=="Yes")
            {
                radioButton1.Checked = true;
                lblRoomNo.Visible = true;
                txtRoomNo.Visible = true;
                //txtRoomNo.Text = myReader.GetValue(13).ToString();
            }
            else
                radioButton2.Checked = true;
            richTextBox1.Text = myReader.GetValue(23).ToString();
            richTextBox2.Text = myReader.GetValue(24).ToString();
            richTextBox3.Text = myReader.GetValue(25).ToString();
            richTextBox4.Text = myReader.GetValue(19).ToString();
            textBox57.Text = myReader.GetValue(14).ToString();
            textBox56.Text = myReader.GetValue(15).ToString();
        }
        myReader.Close();
        myReader.Dispose();
        con.Close();
    }

    // Clear every edit field back to its empty state.
    public void Reset()
    {
        comboBox2.Text="";
        comboBox7.Text = "";
        txtFirstName.Text = "";
        txtLastName.Text = "";
        comboBox8.Text = "";
        comboBox9.Text = "";
        comboBox10.Text = "";
        txtAddress.Text = "";
        txtNIC.Text = "";
        txtContactNo.Text = "";
        dateTimePicker1.Text = "";
        comboBox12.Text = "";
        comboBox11.Text = "";
        radioButton1.Checked = false;
        radioButton2.Checked = false;
        richTextBox1.Text = "";
        richTextBox2.Text = "";
        richTextBox3.Text = "";
        richTextBox4.Text = "";
        textBox57.Text = "";
        textBox56.Text = "";
    }

    // Close this window.
    private void button1_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // On close, bring the ReportStudents window (if open) back to the front.
    private void UpdateStudent_FormClosing(object sender, FormClosingEventArgs e)
    {
        ReportStudents rs = (ReportStudents)Application.OpenForms["ReportStudents"];
        if (rs != null)
        {
            rs.TopMost = true;
        }
    }

    // The three buttons below are per-tab "close" buttons; all just close.
    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    private void button3_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    private void button4_Click(object sender, EventArgs e)
    {
        this.Close();
    }
}
}
<file_sep>/Software/viewAccounts.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
public partial class viewAccounts : Form
{
    string conStr = Properties.Settings.Default.AccountDatabaseConnectionString;
    int maxRows;      // number of account rows loaded at startup
    DataTable dt;     // cached Accounts table
    int pos;          // index of the record currently displayed
    DataRow drow;     // row currently bound to the text boxes

    public viewAccounts()
    {
        InitializeComponent();
    }

    // Load every account into memory and show the first one.
    private void viewAccounts_Load(object sender, EventArgs e)
    {
        try
        {
            SqlConnection con = new SqlConnection(conStr);
            SqlDataAdapter da = new SqlDataAdapter("SELECT*FROM Accounts", con);
            dt = new DataTable();
            da.Fill(dt);
            maxRows = dt.Rows.Count;
            if (maxRows < 1)
            {
                MessageBox.Show("No Accounts Created!");
                this.Close();
                // BUGFIX: previously fell through to navAcData() on an empty
                // table, which threw and was swallowed by the empty catch.
                return;
            }
            navAcData();
        }
        catch (Exception ex)
        {
            // BUGFIX: errors were silently swallowed here; at least report them.
            MessageBox.Show(ex.Message);
        }
    }

    // Copy the row at `pos` into the display fields.
    // NOTE(review): ordinals 0-4 and 6 assume the Accounts column order
    // (name, number, type, initial balance, description, ..., available
    // balance) — confirm against the schema.
    private void navAcData()
    {
        drow = dt.Rows[pos];
        lblNumber.Text = (pos + 1).ToString() + " Of " + dt.Rows.Count.ToString();
        txtAcName.Text = drow.ItemArray.GetValue(0).ToString();
        txtAcNum.Text = drow.ItemArray.GetValue(1).ToString();
        txtAcType.Text = drow.ItemArray.GetValue(2).ToString();
        txtAcIniBal.Text = drow.ItemArray.GetValue(3).ToString();
        txtAcDisc.Text = drow.ItemArray.GetValue(4).ToString();
        txtAcAvailBal.Text = drow.ItemArray.GetValue(6).ToString();
    }

    // Designer-wired handlers; intentionally empty (navigation actually
    // happens in btnUpdate_Click / btnAddStaff_Click below).
    private void btnVwNxtAc_Click(object sender, EventArgs e)
    {
    }

    private void btnVwPrvAc_Click(object sender, EventArgs e)
    {
    }

    private void btnVwExit_Click(object sender, EventArgs e)
    {
    }

    // On close, refresh the main window and bring it to the front.
    private void viewAccounts_FormClosing(object sender, FormClosingEventArgs e)
    {
        Form1 mf = (Form1)Application.OpenForms["Form1"];
        if (mf != null)
        {
            mf.LoadAccounts();
            mf.LoadTransactions(mf.dtpSt.Value, mf.dtpEn.Value);
            mf.TopMost = true;
        }
    }

    // "Next" button (misleadingly named btnUpdate by the designer).
    private void btnUpdate_Click(object sender, EventArgs e)
    {
        if (pos != maxRows - 1)
        {
            pos++;
            navAcData();
        }
        else
            MessageBox.Show("No More Accounts");
    }

    // "Previous" button (misleadingly named btnAddStaff by the designer).
    private void btnAddStaff_Click(object sender, EventArgs e)
    {
        if (pos != 0)
        {
            pos--;
            navAcData();
        }
        else
            MessageBox.Show("No More Accounts");
    }

    // Close this window.
    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // "Last" button.
    private void button1_Click(object sender, EventArgs e)
    {
        pos = dt.Rows.Count - 1;
        // BUGFIX: the position changed but the display was never refreshed.
        navAcData();
    }

    // "First" button.
    private void btnAllDetails_Click(object sender, EventArgs e)
    {
        pos = 0;
        // BUGFIX: the position changed but the display was never refreshed.
        navAcData();
    }
}
}
<file_sep>/Software/ReportStudents.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
public partial class ReportStudents : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);
    SqlDataAdapter da;
    DataTable dt;   // all StudentDetails rows, loaded once at startup
    DataRow row;    // row currently displayed
    int pos = 0;    // index of the row currently displayed

    public ReportStudents()
    {
        InitializeComponent();
    }

    // Load every student record and display the first one.
    private void ReportStudents_Load(object sender, EventArgs e)
    {
        da = new SqlDataAdapter();
        da.SelectCommand = new SqlCommand("SELECT*FROM StudentDetails", con);
        dt = new DataTable();
        da.Fill(dt);
        // BUGFIX: navStudentDetails() indexed dt.Rows[pos] unconditionally,
        // crashing with IndexOutOfRangeException on an empty table.
        if (dt.Rows.Count > 0)
            navStudentDetails();
        else
            MessageBox.Show("No student records found.");
    }

    // Copy the row at `pos` into the display fields.
    // NOTE(review): the ordinals below assume the StudentDetails column
    // order — confirm against the schema before reordering columns.
    private void navStudentDetails()
    {
        row = dt.Rows[pos];
        txtTitle.Text = row.ItemArray.GetValue(1).ToString();
        txtFname.Text = row.ItemArray.GetValue(2).ToString();
        txtLname.Text = row.ItemArray.GetValue(3).ToString();
        txtPerAddress.Text = row.ItemArray.GetValue(4).ToString();
        txtRegNo.Text = row.ItemArray.GetValue(5).ToString();
        txtYear.Text = row.ItemArray.GetValue(6).ToString();
        txtNIC.Text = row.ItemArray.GetValue(7).ToString();
        txtGender.Text = row.ItemArray.GetValue(8).ToString();
        txtBday.Text = row.ItemArray.GetValue(9).ToString();
        txtCourse.Text = row.ItemArray.GetValue(10).ToString();
        txtStatus.Text = row.ItemArray.GetValue(11).ToString();
        txtContact.Text = row.ItemArray.GetValue(12).ToString();
        txtContact2.Text = row.ItemArray.GetValue(13).ToString();
        txtEmail.Text = row.ItemArray.GetValue(14).ToString();
        txtFb.Text = row.ItemArray.GetValue(15).ToString();
        txtDistrict.Text = row.ItemArray.GetValue(16).ToString();
        txtLanguage.Text = row.ItemArray.GetValue(17).ToString();
        txtReligion.Text = row.ItemArray.GetValue(18).ToString();
        txtCultural.Text = row.ItemArray.GetValue(19).ToString();
        txtSchool.Text = row.ItemArray.GetValue(20).ToString();
        txtALRes.Text = row.ItemArray.GetValue(21).ToString();
        txtHostel.Text = row.ItemArray.GetValue(22).ToString();
        txtIndorgame.Text = row.ItemArray.GetValue(23).ToString();
        txtAthletic.Text = row.ItemArray.GetValue(24).ToString();
        txtmajorGames.Text = row.ItemArray.GetValue(25).ToString();
        txtMedical.Text = row.ItemArray.GetValue(26).ToString();
    }

    // Close this window and re-focus any remaining ReportStudents instance.
    private void button1_Click(object sender, EventArgs e)
    {
        this.Close();
        ReportStudents rs = (ReportStudents)Application.OpenForms["ReportStudents"];
        if (rs != null)
        {
            rs.TopMost = true;
        }
    }

    private void form_FormClosed(object sender, FormClosedEventArgs e)
    {
        this.Close();
    }

    // Navigation buttons: first / previous / next / last, all guarded
    // against an empty table (BUGFIX) and against running off either end.
    private void btnFirst_Click(object sender, EventArgs e)
    {
        if (dt.Rows.Count == 0) return;
        pos = 0;
        navStudentDetails();
    }

    private void btnPrev_Click(object sender, EventArgs e)
    {
        if (pos != 0)
        {
            pos--;
            navStudentDetails();
        }
    }

    private void btnNext_Click(object sender, EventArgs e)
    {
        if (dt.Rows.Count != 0 && pos != dt.Rows.Count - 1)
        {
            pos++;
            navStudentDetails();
        }
    }

    private void btnLast_Click(object sender, EventArgs e)
    {
        if (dt.Rows.Count == 0) return;
        pos = dt.Rows.Count - 1;
        navStudentDetails();
    }

    // Drop TopMost once the window is focused so dialogs can appear above it.
    private void ReportStudents_Activated(object sender, EventArgs e)
    {
        this.TopMost = false;
    }
}
}
<file_sep>/Software/AddIncome.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
public partial class AddIncome : Form
{
    // Shared connection for every query issued by this form.
    static string conStr = Properties.Settings.Default.AccountDatabaseConnectionString;
    static SqlConnection con = new SqlConnection(conStr);

    public AddIncome()
    {
        InitializeComponent();
    }

    // Designer-wired handlers; intentionally empty (the real work happens
    // in btnAddStaff_Click / button1_Click / button2_Click below).
    private void btnCnclIncm_Click(object sender, EventArgs e)
    {
    }

    private void btnAddIncome_Click(object sender, EventArgs e)
    {
    }

    private void AddIncome_Load(object sender, EventArgs e)
    {
        LoadFields();
    }

    // Fill the category combo box with every income category.
    private void LoadFields()
    {
        con.Open();
        SqlDataAdapter da = new SqlDataAdapter("SELECT*FROM Categories WHERE CategoryType='Income'", con);
        DataTable dt = new DataTable();
        da.Fill(dt);
        foreach (DataRow row in dt.Rows)
        {
            cmbCategry.Items.Add(row["Category"].ToString());
        }
        da.Dispose();
        // Removed a dead second query ("SELECT UserName FROM Users") whose
        // result was filled into a table and never used.
        con.Close();
    }

    // On close, refresh the main window and bring it to the front.
    private void AddIncome_FormClosing(object sender, FormClosingEventArgs e)
    {
        Form1 mf = (Form1)Application.OpenForms["Form1"];
        if (mf != null)
        {
            mf.LoadTransactions(mf.dtpSt.Value, mf.dtpEn.Value);
            mf.LoadAccounts();
            mf.TopMost = true;
        }
    }

    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // "Add" button: credit the selected account and record the transaction.
    // NOTE(review): amounts are held as Int16, so anything over 32,767
    // overflows — confirm whether the Amount columns should be int/decimal.
    private void btnAddStaff_Click(object sender, EventArgs e)
    {
        con.Open();
        // Read the account's current balance and income totals.
        SqlDataAdapter uBalAd = new SqlDataAdapter();
        DataTable udt = new DataTable();
        uBalAd.SelectCommand = new SqlCommand("SELECT Availablebal,TotalIncome FROM Accounts WHERE AccountNumber=@SelectedAc", con);
        uBalAd.SelectCommand.Parameters.AddWithValue("@SelectedAc", Properties.Settings.Default.SelectedAcNum);
        uBalAd.Fill(udt);
        int availBal = Convert.ToInt16(udt.Rows[0].ItemArray[0].ToString());
        int newBal = availBal + Convert.ToInt16(txtAmount.Text.ToString());
        int curIncome = Convert.ToInt16(udt.Rows[0].ItemArray[1].ToString());
        int newIncome = curIncome + Convert.ToInt16(txtAmount.Text.ToString());
        // Apply the new balance and income to the account.
        SqlCommand cm = new SqlCommand("UPDATE Accounts SET AvailableBal=@newBal,TotalIncome=@newIncome WHERE AccountNumber=@SelectedAc", con);
        cm.Parameters.AddWithValue("@SelectedAc", Properties.Settings.Default.SelectedAcNum);
        cm.Parameters.AddWithValue("@newBal", newBal);
        cm.Parameters.AddWithValue("@newIncome", newIncome);
        cm.ExecuteNonQuery();
        // Record the income as a transaction row.
        string sql = "INSERT INTO Transactions(AccountNumber,Category,Date,UserName,TransType,Amount,TransDicsription) VALUES(@AcNum,@Category,@Date,@UserName,'Income',@Amount,@TransDisc)";
        SqlCommand cmd = new SqlCommand(sql, con);
        cmd.Parameters.AddWithValue("@AcNum", Properties.Settings.Default.SelectedAcNum);
        cmd.Parameters.AddWithValue("@Category", cmbCategry.Text);
        cmd.Parameters.AddWithValue("@Date", dtTransDate.Text);
        cmd.Parameters.AddWithValue("@UserName", Properties.Settings.Default.User);
        cmd.Parameters.AddWithValue("@Amount", Convert.ToInt16(txtAmount.Text));
        cmd.Parameters.AddWithValue("@TransDisc", txtTrnsDisc.Text);
        cmd.ExecuteNonQuery();
        con.Close();
        DialogResult dg = MessageBox.Show("Transaction Successfuly added.", "Succes", MessageBoxButtons.OK);
        if (dg == DialogResult.OK)
        {
            this.Close();
        }
    }

    // "Clear" button.
    private void button1_Click(object sender, EventArgs e)
    {
        clearFields();
    }

    private void clearFields()
    {
        dtTransDate.Value = DateTime.Today;
        cmbCategry.Text = string.Empty;
        txtAmount.Text = string.Empty;
        txtTrnsDisc.Text = string.Empty;
    }

    // Flag the amount box with a validation error when it is not a number.
    private void txtAmount_Validating(object sender, CancelEventArgs e)
    {
        string x = txtAmount.Text.ToString();
        if (check_number(x))
            errorProvider1.SetError(txtAmount, "only enter numbers");
        else
            errorProvider1.Clear();
    }

    // Return true when *s* is NOT a plain non-negative integer.
    public Boolean check_number(string s)
    {
        // BUGFIX: previously only letters were rejected, so input such as
        // "1.5", "-" or "#" passed validation and crashed Convert.ToInt16
        // later in btnAddStaff_Click. Reject anything that is not a digit,
        // and reject the empty string.
        if (s.Length == 0)
            return true;
        foreach (char c in s)
        {
            if (!char.IsDigit(c))
                return true;
        }
        return false;
    }
}
}
<file_sep>/Software/UpdateInventory.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
public partial class UpdateInventory : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);
    SqlDataAdapter da;
    DataTable dt;   // full Inventory table cached at load time

    public UpdateInventory()
    {
        InitializeComponent();
    }

    // Load every inventory item name into the combo box.
    private void UpdateInventory_Load(object sender, EventArgs e)
    {
        con.Open();
        da = new SqlDataAdapter();
        da.SelectCommand = new SqlCommand("SELECT*FROM Inventory", con);
        dt = new DataTable();
        da.Fill(dt);
        foreach (DataRow row in dt.Rows)
        {
            comboBox1.Items.Add(row.ItemArray[1].ToString());
        }
        con.Close();
    }

    // Show the counts of the selected item.
    // NOTE(review): txtNew mirrors ItemArray[2], the same ordinal as
    // CurrentTotal — that looks like it should be ordinal 3 (with Damaged
    // and Disposed shifted accordingly); confirm the Inventory schema.
    // Contains() also matches partial names — an exact comparison may be
    // intended.
    private void comboBox1_SelectedIndexChanged(object sender, EventArgs e)
    {
        foreach (DataRow row in dt.Rows)
        {
            if (row.ItemArray[1].ToString().Contains(comboBox1.Text.ToString()))
            {
                txtCurrentTotal.Text = row.ItemArray[2].ToString();
                txtNew.Text = row.ItemArray[2].ToString();
                txtDamaged.Text = row.ItemArray[3].ToString();
                txtDispoced.Text = row.ItemArray[4].ToString();
            }
        }
    }

    // Save the edited counts for the selected item.
    private void btnAdd_Click(object sender, EventArgs e)
    {
        con.Open();
        // BUGFIX: the UPDATE had no WHERE clause, so saving one item
        // silently overwrote the counts of EVERY row in Inventory.
        da.UpdateCommand = new SqlCommand("UPDATE Inventory SET CurrentTotal=@CrntTtl, New=@New, Damaged=@Dmgd, Disposed=@Disposd WHERE Item=@Item", con);
        da.UpdateCommand.Parameters.AddWithValue("@CrntTtl", Convert.ToInt16(txtCurrentTotal.Text));
        da.UpdateCommand.Parameters.AddWithValue("@New", Convert.ToInt16(txtNew.Text));
        da.UpdateCommand.Parameters.AddWithValue("@Dmgd", Convert.ToInt16(txtDamaged.Text));
        da.UpdateCommand.Parameters.AddWithValue("@Disposd", Convert.ToInt16(txtDispoced.Text));
        da.UpdateCommand.Parameters.AddWithValue("@Item", comboBox1.Text.ToString());
        da.UpdateCommand.ExecuteNonQuery();
        con.Close();
    }

    // Close this window.
    private void btnClose_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // Delete the selected inventory item after confirmation.
    private void button1_Click(object sender, EventArgs e)
    {
        if (MessageBox.Show("Are you sure???", "Delete", MessageBoxButtons.YesNo, MessageBoxIcon.Question) == DialogResult.Yes)
        {
            con.Open();
            SqlCommand myCommand = new SqlCommand("DELETE FROM Inventory WHERE Item=@Item", con);
            myCommand.Parameters.AddWithValue("@Item", comboBox1.Text.ToString());
            myCommand.ExecuteNonQuery();
            con.Close();
            MessageBox.Show("Record is deleted!!", "confirmation", MessageBoxButtons.OK, MessageBoxIcon.Information);
        }
    }

    // Designer-wired handler; intentionally empty.
    private void panel8_Paint(object sender, PaintEventArgs e)
    {
    }
}
}
<file_sep>/Software/AddCategories.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
public partial class AddCategories : Form
{
    static string conStr = Properties.Settings.Default.AccountDatabaseConnectionString;
    SqlConnection con = new SqlConnection(conStr);

    public AddCategories()
    {
        InitializeComponent();
    }

    // Insert the category currently typed into the form, then ask whether
    // to add another; closes the window if not.
    // (Extracted: both Add buttons previously duplicated this code verbatim.)
    private void InsertCategory()
    {
        con.Open();
        SqlCommand cmd = new SqlCommand("INSERT INTO Categories (Category,CategoryType) Values(@category,@cattype)", con);
        cmd.Parameters.AddWithValue("@category", txtCategory.Text.ToString());
        cmd.Parameters.AddWithValue("@cattype", cmbCategryType.Text.ToString());
        cmd.ExecuteNonQuery();
        con.Close();
        DialogResult dg = MessageBox.Show("Category Successfuly added. Add Another Category?", "Succes", MessageBoxButtons.YesNo);
        if (dg == DialogResult.No)
        {
            this.Close();
        }
    }

    private void btnAddIncome_Click(object sender, EventArgs e)
    {
        InsertCategory();
    }

    // Designer-wired handler; intentionally empty.
    private void btnCnclIncm_Click(object sender, EventArgs e)
    {
    }

    // On close, bring the main window back to the front.
    private void AddCategories_FormClosing(object sender, FormClosingEventArgs e)
    {
        Form1 mf = (Form1)Application.OpenForms["Form1"];
        if (mf != null)
        {
            mf.TopMost = true;
        }
    }

    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    private void btnAddStaff_Click(object sender, EventArgs e)
    {
        InsertCategory();
    }
}
}
<file_sep>/Software/Champion.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
public partial class Champion : Form
{
    public Champion()
    {
        InitializeComponent();
    }

    // When the year box loses focus, load that year's first places into the
    // grid and show the name that appears most often as the champion.
    private void txtyear_Leave(object sender, EventArgs e)
    {
        string champ = "";
        // NOTE(review): hard-coded AttachDbFilename path — move to settings.
        SqlConnection myConnection = new SqlConnection(@"Data Source=.\SQLEXPRESS;AttachDbFilename=C:\Users\admin\Desktop\Software\Software\Achievement.mdf;Integrated Security=True;User Instance=True");
        try
        {
            // BUGFIX (security): the year was concatenated into the SQL text,
            // which is a SQL-injection hole; bind it as a parameter instead.
            SqlCommand myCommand = new SqlCommand("SELECT First FROM Sports WHERE Year=@year", myConnection);
            myCommand.Parameters.AddWithValue("@year", txtyear.Text.Trim());
            SqlDataAdapter myAdapter = new SqlDataAdapter(myCommand);
            DataTable myTable = new DataTable(); // create a table
            myAdapter.Fill(myTable);             // fill the table via the adapter
            this.dataGridView1.DataSource = myTable;
            // For each row, count how many LATER rows carry the same value in
            // grid column 2; the most frequent value is the champion.
            // NOTE(review): the query selects a single column but the code
            // reads Cells[2] — presumably designer-defined columns shift the
            // bound data; confirm the grid layout.
            int x = 0;
            for (int i = 0; i < dataGridView1.Rows.Count - 1; i++)
            {
                int y = 0;
                for (int j = dataGridView1.Rows.Count - 1 - i; j > 0; j--)
                {
                    if (dataGridView1.Rows[i].Cells[2].Value.ToString() == dataGridView1.Rows[j].Cells[2].Value.ToString())
                        y++;
                }
                if (x < y)
                {
                    x = y;
                    champ = dataGridView1.Rows[i].Cells[2].Value.ToString();
                }
            }
            label2.Text = champ;
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message);
        }
    }
}
}
<file_sep>/Software/ManageUsers.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SqlClient;
using System.Security.Cryptography;
namespace Software
{
public partial class ManageUsers : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);

    public ManageUsers()
    {
        InitializeComponent();
    }

    // Create a new 'Admin' login with a hashed password.
    private void button2_Click(object sender, EventArgs e)
    {
        string user = txtUser.Text.ToString().Trim();
        string pass = txtPass.Text.ToString().Trim();
        string encrypted = hashPass(pass, user);
        try
        {
            con.Open();
            SqlCommand cmd = new SqlCommand("INSERT INTO Login (Username,Password,UserType) VALUES (@user,@passEnc,'Admin')", con);
            cmd.Parameters.AddWithValue("@user", user);
            cmd.Parameters.AddWithValue("@passEnc", encrypted);
            cmd.ExecuteNonQuery();
        }
        finally
        {
            // BUGFIX: keep the shared connection closed even when the
            // insert throws; it previously leaked open.
            con.Close();
        }
    }

    // Hash password+username with SHA-256 and return upper-case hex.
    // The username acts as a per-user salt so equal passwords differ.
    // NOTE(review): a random salt plus a slow KDF (e.g. PBKDF2/Rfc2898)
    // would be stronger — confirm before changing the stored hash format.
    private string hashPass(string pass, string user)
    {
        // BUGFIX: SHA256CryptoServiceProvider is IDisposable and was never
        // disposed; wrap it in a using block.
        using (SHA256 sha = new SHA256CryptoServiceProvider())
        {
            byte[] result = sha.ComputeHash(ASCIIEncoding.ASCII.GetBytes(pass + user));
            StringBuilder stb = new StringBuilder();
            for (int i = 0; i < result.Length; i++)
            {
                stb.Append(result[i].ToString("X2"));
            }
            return stb.ToString();
        }
    }
}
}
<file_sep>/Software/Schedule.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
public partial class Schedule : Form
{
// Connection string and connection shared by every query on this form.
static string conStr = Properties.Settings.Default.MainConString;
static SqlConnection con = new SqlConnection(conStr);
SqlDataAdapter da;  // adapter used by LoadSchedule
DataView dv;        // filterable view bound to dgvSch; textBox1_KeyUp filters it
// Flags read by UpdateSchedule — semantics not visible here; TODO confirm.
public static int butn,combo;
// Standard WinForms constructor: builds the designer-generated controls.
public Schedule()
{
    InitializeComponent();
}
// Designer-wired handler; intentionally empty.
private void button2_Click(object sender, EventArgs e)
{
}
// On load: default the end of the date range to one month ahead, then show
// the schedule for [dtpSt, dtpEn].
private void Schedule_Load(object sender, EventArgs e)
{
    dtpEn.Value = DateTime.Today.AddMonths(+1);
    if (Login.usr == 1)
    {
        // NOTE(review): role-specific behavior for user type 1 was never
        // implemented — confirm whether this branch is still needed.
    }
    else if (Login.usr == 2)
    {
        // NOTE(review): same — empty branch for user type 2.
    }
    LoadSchedule(dtpSt.Value,dtpEn.Value);
}
// Load all schedule rows whose Date falls in [dtSt, dtEn] into the grid
// and rebind the filterable DataView used by the search box.
public void LoadSchedule(DateTime dtSt, DateTime dtEn)
{
    textBox1.Text = "Start search with what you know...";
    try
    {
        con.Open();
        da = new SqlDataAdapter();
        da.SelectCommand = new SqlCommand("SELECT * FROM Schedule WHERE Date >=@dtSt AND Date <=@dtEn", con);
        da.SelectCommand.Parameters.AddWithValue("@dtSt", dtSt);
        da.SelectCommand.Parameters.AddWithValue("@dtEn", dtEn);
        DataTable dt = new DataTable();
        da.Fill(dt);
        // Removed a dead `new DataView()` that was immediately overwritten.
        dv = dt.DefaultView;
        dgvSch.DataSource = dv;
        dgvSch.Columns[0].HeaderText = "Schedule Id";
        dgvSch.Columns[0].Width = 100;
    }
    finally
    {
        // BUGFIX: the shared connection used to stay open if the query
        // threw, making every later con.Open() fail.
        con.Close();
    }
}
private void button3_Click(object sender, EventArgs e)
{
}
private void button4_Click(object sender, EventArgs e)
{
butn = 2;
combo = 1;
UpdateSchedule s = new UpdateSchedule(Convert.ToInt16(dgvSch.SelectedRows[0].Cells[0].Value));
s.Show();
}
private void textBox1_KeyUp(object sender, KeyEventArgs e)
{
string outInfo = "";
string[] keywords = textBox1.Text.Split(' ');
foreach (string word in keywords)
{
if (outInfo.Length == 0)
{
outInfo = "(Schedule LIKE '%" + word + "%' OR Remarks LIKE '%" + word + "%' OR Venue LIKE '%" + word + "%' )";
}
else
{
outInfo += " AND (Schedule LIKE '%" + word + "%' OR Remarks LIKE '%" + word + "%' OR Venue LIKE '%" + word + "%' )";
}
}
dv.RowFilter = outInfo;
}
private void button1_Click(object sender, EventArgs e)
{
this.Close();
}
private void btnUpdate_Click(object sender, EventArgs e)
{
if (dgvSch.SelectedRows.Count == 1)
{
UpdateSchedule r = (UpdateSchedule)Application.OpenForms["UpdateRecord"];
if(r!=null){
r.TopMost = true;
}
else
{
UpdateSchedule s = new UpdateSchedule(Convert.ToInt16(dgvSch.SelectedRows[0].Cells[0].Value));
s.Show();
}
}
else
{
MessageBox.Show("Please select an item to update");
}
}
private void btnAdd_Click(object sender, EventArgs e)
{
AddSchedule ads = (AddSchedule)Application.OpenForms["AddSchedule"];
if (ads != null)
{
ads.TopMost = true;
}
else
{
AddSchedule s = new AddSchedule();
s.Show();
}
}
private void button6_Click(object sender, EventArgs e)
{
butn = 2;
combo = 1;
UpdateSchedule s = new UpdateSchedule(Convert.ToInt16(dgvSch.SelectedRows[0].Cells[0].Value));
s.Show();
}
private void textBox1_Enter(object sender, EventArgs e)
{
textBox1.Text = string.Empty;
}
private void dtpEn_ValueChanged(object sender, EventArgs e)
{
}
private void btnFilter_Click(object sender, EventArgs e)
{
if((dtpEn.Value - dtpSt.Value).TotalDays<0)
{
MessageBox.Show("Set valid range !");
}
else
LoadSchedule(dtpSt.Value, dtpEn.Value);
}
private void dtpSt_ValueChanged(object sender, EventArgs e)
{
}
private void button2_Click_1(object sender, EventArgs e)
{
LoadSchedule(dtpSt.Value.AddYears(-10), dtpEn.Value.AddYears(+10));
}
private void Schedule_Activated(object sender, EventArgs e)
{
this.TopMost = false;
}
private void Schedule_FormClosing(object sender, FormClosingEventArgs e)
{
CoverPage cs = (CoverPage)Application.OpenForms["CoverPage"];
if (cs != null)
{
cs.TopMost = true;
}
}
}
}
<file_sep>/Software/Achivements.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
/// <summary>
/// Lists sports records; rows that hold the best record for their event are
/// highlighted with a user-selectable colour.
/// </summary>
public partial class Achivements : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);
    SqlDataAdapter da;
    DataView dv;    // filterable view bound to dgvAch
    Color rCol;     // highlight colour for best-record rows

    public Achivements()
    {
        InitializeComponent();
    }

    private void Record_Load(object sender, EventArgs e)
    {
        LoadRecords();
        rCol = Color.LightSalmon;
        button3.BackColor = Color.LightSalmon;
    }

    /// <summary>
    /// Loads all sports records, sorts by Year, and highlights rows whose
    /// Record equals the event's best (BstRecord).
    /// </summary>
    public void LoadRecords()
    {
        try
        {
            con.Open();
            da = new SqlDataAdapter("SELECT RecordId,Year,Event,First,Second,Third,Record,BstRecord FROM Sports", con);
            DataTable dt = new DataTable();
            da.Fill(dt);
            dv = dt.DefaultView;
            dv.Sort = "Year";
            dgvAch.DataSource = dv;
            dgvAch.Columns[0].Visible = false;   // hide the RecordId key column
            foreach (DataGridViewRow row in dgvAch.Rows)
            {
                // Cells[6] = Record, Cells[7] = BstRecord.
                if (Convert.ToInt16(row.Cells[6].Value) == Convert.ToInt16(row.Cells[7].Value))
                {
                    row.DefaultCellStyle.BackColor = rCol;
                }
            }
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message);
        }
        finally
        {
            // BUG FIX: Close() previously ran only on the success path, leaving
            // the shared connection open after a failed Fill.
            if (con.State == ConnectionState.Open)
                con.Close();
        }
    }

    private void btnAdd_Click(object sender, EventArgs e)
    {
        // Single-instance: focus an open AddRecord window, else create one.
        AddRecord ar = (AddRecord)Application.OpenForms["AddRecord"];
        if (ar != null)
        {
            ar.TopMost = true;
        }
        else
        {
            AddRecord s = new AddRecord();
            s.Show();
        }
    }

    private void Record_FormClosing(object sender, FormClosingEventArgs e)
    {
        // Bring the cover page back to the front when this window closes.
        CoverPage cs = (CoverPage)Application.OpenForms["CoverPage"];
        if (cs != null)
        {
            cs.TopMost = true;
        }
    }

    private void btnUpdate_Click_1(object sender, EventArgs e)
    {
        if (dgvAch.SelectedRows.Count == 1)
        {
            // BUG FIX: the lookup used OpenForms["Achievement"], which never
            // matches the UpdateRecord form, so duplicate windows could stack.
            // TODO(review): confirm the designer Name of the UpdateRecord form.
            UpdateRecord ur = (UpdateRecord)Application.OpenForms["UpdateRecord"];
            if (ur != null)
            {
                ur.TopMost = true;
            }
            else
            {
                UpdateRecord s = new UpdateRecord(Convert.ToInt16(dgvAch.SelectedRows[0].Cells[0].Value));
                s.Show();
            }
        }
        else
        {
            MessageBox.Show("Please select a row to update");
        }
    }

    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // Incremental keyword search over the loaded records.
    private void textBox1_KeyUp(object sender, KeyEventArgs e)
    {
        string outInfo = "";
        string[] keywords = textBox1.Text.Split(' ');
        foreach (string raw in keywords)
        {
            // BUG FIX: escape single quotes so RowFilter cannot throw on input.
            string word = raw.Replace("'", "''");
            if (outInfo.Length == 0)
            {
                outInfo = "(Event LIKE '%" + word + "%' OR Year LIKE '%" + word + "%' OR First LIKE '%" + word + "%'OR Second LIKE '%" + word + "%'OR Third LIKE '%" + word + "%' )";
            }
            else
            {
                outInfo += " AND (Event LIKE '%" + word + "%' OR Year LIKE '%" + word + "%' OR First LIKE '%" + word + "%'OR Second LIKE '%" + word + "%'OR Third LIKE '%" + word + "%' )";
            }
        }
        dv.RowFilter = outInfo;
    }

    private void Record_Activated(object sender, EventArgs e)
    {
        this.TopMost = false;
    }

    private void button3_Click(object sender, EventArgs e)
    {
        // Let the user pick the highlight colour; re-applied on next LoadRecords.
        ColorDialog cd = new ColorDialog();
        if (cd.ShowDialog() == DialogResult.OK)
        {
            button3.BackColor = cd.Color;
            rCol = cd.Color;
        }
    }

    private void button1_Click(object sender, EventArgs e)
    {
        // Single-instance: focus the best-records window, else create one.
        viewBestSprtRecords br = (viewBestSprtRecords)Application.OpenForms["viewBestSprtRecords"];
        if (br != null)
        {
            br.TopMost = true;
        }
        else
        {
            viewBestSprtRecords brn = new viewBestSprtRecords();
            brn.Show();
        }
    }
}
}
<file_sep>/Software/AddStaff.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
/// <summary>Data-entry window that inserts a new row into the Staff table.</summary>
public partial class AddStaff : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);

    public AddStaff()
    {
        InitializeComponent();
    }

    // Designer-wired handler; intentionally empty.
    private void button1_Click(object sender, EventArgs e)
    {
    }

    /// <summary>Clears every input field on the form.</summary>
    public void Reset()
    {
        cmbTitle.Text = "";
        txtName.Text = "";
        txtlname.Text = "";
        txtID.Text = "";
        txtpos.Text = "";
        txtAddress.Text = "";
        txtContactNo.Text = "";
        txtSubject.Text = "";
    }

    // Designer-wired handler; intentionally empty.
    private void button1_Click_1(object sender, EventArgs e)
    {
    }

    private void btnAddStaff_Click(object sender, EventArgs e)
    {
        // BUG FIX: SelectedItem is null when no title was chosen; this used to
        // surface as a raw NullReferenceException message box.
        if (cmbTitle.SelectedItem == null)
        {
            MessageBox.Show("Please select a title");
            return;
        }
        try
        {
            con.Open();
            // A plain SqlCommand replaces the misused SqlDataAdapter.SelectCommand.
            SqlCommand cmd = new SqlCommand("INSERT INTO Staff (Status,Title,First_Name,Last_Name,Staff_ID,Position,Address,Contact_No,Subject)VALUES (@Status,@Title,@First_Name,@Last_Name,@Staff_ID,@Position,@Address,@Contact_No,@Subject)", con);
            cmd.Parameters.AddWithValue("@Status", cmbStatus.Text.ToString());
            cmd.Parameters.AddWithValue("@Title", cmbTitle.SelectedItem.ToString());
            cmd.Parameters.AddWithValue("@First_Name", txtName.Text.ToString());
            cmd.Parameters.AddWithValue("@Last_Name", txtlname.Text.ToString());
            cmd.Parameters.AddWithValue("@Staff_ID", txtID.Text.ToString());
            cmd.Parameters.AddWithValue("@Position", txtpos.Text.ToString());
            cmd.Parameters.AddWithValue("@Address", txtAddress.Text.ToString());
            cmd.Parameters.AddWithValue("@Contact_No", txtContactNo.Text.ToString());
            cmd.Parameters.AddWithValue("@Subject", txtSubject.Text.ToString());
            cmd.ExecuteNonQuery();
            MessageBox.Show("Successfully added!!!");
            Reset();
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message);
        }
        finally
        {
            // BUG FIX: the shared connection stayed open when the INSERT threw,
            // so the next attempt failed with "connection already open".
            if (con.State == ConnectionState.Open)
                con.Close();
        }
    }

    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    private void AddStaff_Load(object sender, EventArgs e)
    {
        // NOTE(review): "Acedemic" is misspelled, but it must match the item
        // text defined in the designer's combo box - confirm before correcting.
        cmbStatus.SelectedItem = "Acedemic";
    }

    private void AddStaff_FormClosing(object sender, FormClosingEventArgs e)
    {
        // Refresh the staff list when returning to it.
        Staff st = (Staff)Application.OpenForms["Staff"];
        if (st != null)
        {
            st.TopMost = true;
            st.LoadStaff();
        }
    }
}
}
<file_sep>/Software/ExpenseStats.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
/// <summary>
/// Shows the expense transactions of the selected account for a date range,
/// with a per-category amount chart.
/// </summary>
public partial class ExpenseStats : Form
{
    BindingSource bs;   // binds the transaction table to dgvIncSum
    static string conStr = Properties.Settings.Default.AccountDatabaseConnectionString;
    public static SqlConnection con = new SqlConnection(conStr);
    DataTable dt;       // last loaded expense transactions

    public ExpenseStats()
    {
        InitializeComponent();
    }

    private void ExpenseStats_Load(object sender, EventArgs e)
    {
        // Default range: one month back .. today.
        dtpStExp.Value = DateTime.Today.AddMonths(-1);
        bs = new BindingSource();
        dgvIncSum.DataSource = bs;
        LoadTransactions(dtpStExp.Value, dtpEnExp.Value);
    }

    /// <summary>
    /// Loads expense transactions for the selected account in [stDt, enDt]
    /// into the grid and rebinds the category/amount chart.
    /// </summary>
    private void LoadTransactions(DateTime stDt, DateTime enDt)
    {
        lblAcNum.Text = Properties.Settings.Default.SelectedAcNum.ToString();
        try
        {
            con.Open();
            SqlDataAdapter da = new SqlDataAdapter();
            da.SelectCommand = new SqlCommand("SELECT Category,Date,UserName,TransDicsription,Amount FROM Transactions WHERE AccountNumber=@selectedAc AND TransType='Expense' AND Date >= @stDt AND Date <=@enDt", con);
            da.SelectCommand.Parameters.AddWithValue("@selectedAc", Properties.Settings.Default.SelectedAcNum);
            da.SelectCommand.Parameters.AddWithValue("@stDt", stDt);
            da.SelectCommand.Parameters.AddWithValue("@enDt", enDt);
            dt = new DataTable();
            da.Fill(dt);
            bs.DataSource = dt;
        }
        finally
        {
            // BUG FIX: the shared connection stayed open when Fill threw.
            if (con.State == ConnectionState.Open)
                con.Close();
        }
        // NOTE(review): DataView.ToTable(string) only names the copied table -
        // it does not project columns; the chart selects its members below.
        DataView dv = new DataView(dt);
        DataTable cdt = dv.ToTable("Category,Amount");
        chartExp.DataSource = cdt;
        chartExp.Series[0].XValueMember = "Category";
        chartExp.Series[0].YValueMembers = "Amount";
        chartExp.DataBind();
    }

    private void btnFilterExp_Click(object sender, EventArgs e)
    {
        LoadTransactions(dtpStExp.Value, dtpEnExp.Value);
    }

    private void ExpenseStats_FormClosing(object sender, FormClosingEventArgs e)
    {
        // Refresh the main window when this one closes.
        Form1 mf = (Form1)Application.OpenForms["Form1"];
        if (mf != null)
        {
            mf.LoadAccounts();
            mf.LoadTransactions(mf.dtpSt.Value, mf.dtpEn.Value);
            mf.TopMost = true;
        }
    }
}
}
<file_sep>/Software/Staff.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
/// <summary>
/// Lists staff members with incremental keyword search and buttons to open
/// add/update/report child windows.
/// </summary>
public partial class Staff : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);
    DataTable dt;
    SqlDataAdapter da;
    DataView dv;    // filterable view bound to staffsDataGridView

    public Staff()
    {
        InitializeComponent();
    }

    // Designer-wired handler; intentionally empty.
    private void btnUpdate_Click(object sender, EventArgs e)
    {
    }

    // Designer-wired handler; intentionally empty.
    private void btnAdd_Click(object sender, EventArgs e)
    {
    }

    private void Staff_Load(object sender, EventArgs e)
    {
        // Login.usr: 0 = Admin (full access), 1 = User, 2 = Viewer (read-only).
        if (Login.usr == 1)
        {
            btnUpdate.Visible = false;
        }
        else if (Login.usr == 2)
        {
            btnUpdate.Visible = false;
            btnAddStaff.Visible = false;
        }
        LoadStaff();
    }

    /// <summary>Reloads the staff grid from the database.</summary>
    public void LoadStaff()
    {
        // Typo fix: "waht" -> "what".
        textBox1.Text = "Start search with what you know...";
        try
        {
            con.Open();
            da = new SqlDataAdapter();
            da.SelectCommand = new SqlCommand("SELECT Title,First_Name,Last_Name,Staff_ID,Contact_No FROM Staff", con);
            dt = new DataTable();
            da.Fill(dt);
            dv = dt.DefaultView;
            staffsDataGridView.DataSource = dv;
            staffsDataGridView.Columns[0].Width = 50;
            staffsDataGridView.Columns[1].HeaderText = "First Name";
            staffsDataGridView.Columns[2].HeaderText = "Last Name";
            staffsDataGridView.Columns[3].HeaderText = "Staff Id";
            staffsDataGridView.Columns[4].HeaderText = "Contact Number";
        }
        finally
        {
            // BUG FIX: the shared connection stayed open when Fill threw.
            if (con.State == ConnectionState.Open)
                con.Close();
        }
    }

    // Designer-wired handler; intentionally empty.
    private void button2_Click(object sender, EventArgs e)
    {
    }

    // Closes this window when the child form it was attached to closes.
    private void form_FormClosed(object sender, FormClosedEventArgs e)
    {
        this.Close();
    }

    // Designer-wired handler; intentionally empty.
    private void button3_Click(object sender, EventArgs e)
    {
    }

    // Incremental keyword search: every word must match name or staff id.
    private void textBox1_KeyUp(object sender, KeyEventArgs e)
    {
        string outInfo = "";
        string[] keywords = textBox1.Text.Split(' ');
        foreach (string raw in keywords)
        {
            // Escape single quotes so RowFilter cannot throw on user input.
            string word = raw.Replace("'", "''");
            if (outInfo.Length == 0)
            {
                outInfo = "(First_Name LIKE '%" + word + "%' OR Last_Name LIKE '%" + word + "%' OR Staff_ID LIKE '%" + word + "%')";
            }
            else
            {
                // BUG FIX: this branch produced an invalid filter expression
                // ("... Last_Name Remarks LIKE ...", missing OR) that crashed
                // RowFilter as soon as a second word was typed.
                outInfo += " AND (First_Name LIKE '%" + word + "%' OR Last_Name LIKE '%" + word + "%' OR Staff_ID LIKE '%" + word + "%')";
            }
        }
        dv.RowFilter = outInfo;
    }

    private void button4_Click(object sender, EventArgs e)
    {
        ReportStaff s = new ReportStaff();
        s.Show();
    }

    private void button5_Click(object sender, EventArgs e)
    {
        AddStaff s = new AddStaff();
        s.Show();
    }

    private void button2_Click_1(object sender, EventArgs e)
    {
        UpdateStaff s = new UpdateStaff();
        s.Show();
    }

    private void button1_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // Clear the watermark text when the search box gains focus.
    private void textBox1_Enter(object sender, EventArgs e)
    {
        textBox1.Text = string.Empty;
    }

    private void Staff_FormClosing(object sender, FormClosingEventArgs e)
    {
        // Return to the cover page, creating it if it was closed.
        CoverPage cs = (CoverPage)Application.OpenForms["CoverPage"];
        if (cs != null)
        {
            cs.TopMost = true;
        }
        else
        {
            CoverPage csn = new CoverPage();
            csn.Show();
        }
    }

    private void Staff_Activated(object sender, EventArgs e)
    {
        this.TopMost = false;
    }
}
}
<file_sep>/Software/UpdateRecord.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
using System.Text.RegularExpressions;
namespace Software
{
/// <summary>
/// Edits one row of the Sports table and, when the entered record beats the
/// stored best for the event, updates the BestRecord table as well.
/// </summary>
public partial class UpdateRecord : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);
    int recordId;   // primary key of the Sports row being edited
    string Event;   // event name of that row, loaded in Achievement_Load
    int bstRec;     // best record resolved by UpdtRecord, written back to Sports

    public UpdateRecord(int Id)
    {
        InitializeComponent();
        this.recordId = Id;
    }

    /// <summary>Clears the editable fields.</summary>
    public void Reset()
    {
        txtFirst.Text = "";
        txtScnd.Text = "";
        txtThrd.Text = "";
        txtRecd.Text = "";
    }

    private void Achievement_Load(object sender, EventArgs e)
    {
        try
        {
            con.Open();
            SqlCommand myCommand = new SqlCommand("SELECT * FROM Sports WHERE RecordId=@rId", con);
            myCommand.Parameters.AddWithValue("@rId", recordId);
            SqlDataReader myReader = myCommand.ExecuteReader();
            while (myReader.Read())
            {
                // Column order: 1 = Event, 2 = Year, 3..5 = placings, 6 = Record.
                txtYear.Text = myReader.GetValue(2).ToString();
                txtEvent.Text = myReader.GetValue(1).ToString();
                txtFirst.Text = myReader.GetValue(3).ToString();
                txtScnd.Text = myReader.GetValue(4).ToString();
                txtThrd.Text = myReader.GetValue(5).ToString();
                txtRecd.Text = myReader.GetValue(6).ToString();
                Event = myReader.GetValue(1).ToString();
            }
            myReader.Close();
            myReader.Dispose();
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message);
        }
        finally
        {
            // BUG FIX: close even when the query throws; the shared connection
            // otherwise stayed open.
            if (con.State == ConnectionState.Open)
                con.Close();
        }
    }

    private void button1_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    /// <summary>
    /// Updates BestRecord if the value in txtRecd beats the stored best for
    /// this event and caches the resulting best in bstRec. Field events
    /// (jumps/throws) treat a larger value as better; other events a smaller one.
    /// </summary>
    private void UpdtRecord()
    {
        try
        {
            con.Open();
            SqlDataAdapter da = new SqlDataAdapter();
            da.SelectCommand = new SqlCommand("SELECT*FROM BestRecord WHERE Event=@event", con);
            da.SelectCommand.Parameters.AddWithValue("@event", Event);
            DataTable dt = new DataTable();
            da.Fill(dt);
            if (dt.Rows.Count > 0)
            {
                // NOTE(review): comparison is case-sensitive; confirm these
                // names match the Event values stored in the database.
                if (Event == "High Jump" || Event == "Long Jump" || Event == "Tripple jump" || Event == "disc throw" || Event == "shot put" || Event == "javelin throw")
                {
                    // Field event: a larger value beats the stored best.
                    if (Convert.ToInt32(dt.Rows[0].ItemArray[3].ToString()) < Convert.ToInt32(txtRecd.Text.ToString()))
                    {
                        da.UpdateCommand = new SqlCommand("UPDATE BestRecord SET Record=@Record,Owner=@Name,Year=@Year WHERE Event=@Event", con);
                        da.UpdateCommand.Parameters.AddWithValue("@Event", Event);
                        da.UpdateCommand.Parameters.AddWithValue("@Name", txtFirst.Text.ToString());
                        da.UpdateCommand.Parameters.AddWithValue("@Year", txtYear.Text.ToString());
                        da.UpdateCommand.Parameters.AddWithValue("@Record", txtRecd.Text.ToString());
                        da.UpdateCommand.ExecuteNonQuery();
                        bstRec = Convert.ToInt16(txtRecd.Text.ToString());
                    }
                    else
                    {
                        bstRec = Convert.ToInt16(dt.Rows[0].ItemArray[3].ToString());
                    }
                }
                else
                {
                    // Track event: a smaller (faster) value beats the stored best.
                    if (Convert.ToInt32(dt.Rows[0].ItemArray[3].ToString()) > Convert.ToInt32(txtRecd.Text.ToString()))
                    {
                        // BUG FIX: the SQL said "Year=@Yea" while the parameter
                        // was added as "@Year", so this UPDATE always threw.
                        da.UpdateCommand = new SqlCommand("UPDATE BestRecord SET Record=@Record,Owner=@Name,Year=@Year WHERE Event=@Event", con);
                        da.UpdateCommand.Parameters.AddWithValue("@Event", txtEvent.Text.ToString());
                        da.UpdateCommand.Parameters.AddWithValue("@Name", txtFirst.Text.ToString());
                        da.UpdateCommand.Parameters.AddWithValue("@Year", txtYear.Text.ToString());
                        da.UpdateCommand.Parameters.AddWithValue("@Record", txtRecd.Text.ToString());
                        da.UpdateCommand.ExecuteNonQuery();
                        bstRec = Convert.ToInt16(txtRecd.Text.ToString());
                    }
                    else
                    {
                        bstRec = Convert.ToInt16(dt.Rows[0].ItemArray[3].ToString());
                    }
                }
            }
            else
            {
                // No best record stored yet: the entered value becomes the best.
                bstRec = Convert.ToInt16(txtRecd.Text.ToString());
            }
        }
        finally
        {
            // BUG FIX: close even on failure so the caller can reopen the
            // shared connection. (The debug MessageBox.Show("first") leftover
            // was removed.)
            if (con.State == ConnectionState.Open)
                con.Close();
        }
        MessageBox.Show("Done");
    }

    private void Achievement_FormClosing(object sender, FormClosingEventArgs e)
    {
        // Refresh the records list on return. The list form was renamed from
        // "Record" to "Achivements"; try both names to be safe.
        Achivements rc = (Achivements)(Application.OpenForms["Achivements"] ?? Application.OpenForms["Record"]);
        if (rc != null)
        {
            rc.TopMost = true;
            rc.LoadRecords();
        }
    }

    private void btnAddStaff_Click_1(object sender, EventArgs e)
    {
        try
        {
            UpdtRecord();
            con.Open();
            SqlCommand myCommand = new SqlCommand("UPDATE Sports SET First=@First,Second=@Second,Third=@Third,Record=@Record,BstRecord=@bstRec WHERE RecordId=@rId", con);
            myCommand.Parameters.AddWithValue("@rId", recordId);
            // Normalized "@first" -> "@First" to match the SQL text exactly.
            myCommand.Parameters.AddWithValue("@First", txtFirst.Text.ToString());
            myCommand.Parameters.AddWithValue("@Second", txtScnd.Text.ToString());
            myCommand.Parameters.AddWithValue("@Third", txtThrd.Text.ToString());
            myCommand.Parameters.AddWithValue("@Record", txtRecd.Text.ToString());
            myCommand.Parameters.AddWithValue("@bstRec", bstRec);
            myCommand.ExecuteNonQuery();
            MessageBox.Show("Successfully Updated!!!");
            Reset();
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message);
        }
        finally
        {
            // BUG FIX: the shared connection stayed open when the UPDATE threw.
            if (con.State == ConnectionState.Open)
                con.Close();
        }
    }
}
}
<file_sep>/Software/Login.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
/// <summary>
/// Login window: the user picks a role via the picture boxes, then enters
/// credentials which are checked against the LoginRegisterform table.
/// </summary>
public partial class Login : Form
{
    // 0 = Admin, 1 = User, 2 = Viewer; read by other forms to gate buttons.
    public static int usr;
    string type;    // role selected via the picture boxes

    public Login()
    {
        InitializeComponent();
    }

    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    #region Login
    private void button1_Click(object sender, EventArgs e)
    {
        if (textBox1.Text == "")
        {
            MessageBox.Show("Please enter user name", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
            textBox1.Focus();
            return;
        }
        if (textBox2.Text == "")
        {
            MessageBox.Show("Please enter password", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
            textBox2.Focus();
            return;
        }
        try
        {
            // NOTE(review): connection string is hard-coded to a developer
            // machine path; it should come from Properties.Settings like the
            // other forms use.
            SqlConnection myConnection = new SqlConnection(@"Data Source=.\SQLEXPRESS;AttachDbFilename=C:\Users\admin\Desktop\Software\Software\Login.mdf;Integrated Security=True;User Instance=True");
            // SECURITY NOTE(review): this compares the password as plain text,
            // while admin registration stores a SHA-256 hash - confirm which
            // scheme the LoginRegisterform table actually uses.
            SqlCommand myCommand = new SqlCommand(@"SELECT Usertype,Username,Password FROM LoginRegisterform WHERE Usertype = @Usertype AND Username = @Username AND Password = @Password", myConnection);
            // BUG FIX: parameter names were inconsistent/corrupted
            // ("@<PASSWORD>" in the SQL vs "<PASSWORD>" as the parameter name,
            // "@username" vs "@Username"); they now match the query exactly.
            SqlParameter uType = new SqlParameter("@Usertype", SqlDbType.NChar);
            SqlParameter uName = new SqlParameter("@Username", SqlDbType.NChar);
            SqlParameter uPassword = new SqlParameter("@Password", SqlDbType.NChar);
            uType.Value = type;
            uName.Value = textBox1.Text.Trim();
            uPassword.Value = textBox2.Text.Trim();
            myCommand.Parameters.Add(uType);
            myCommand.Parameters.Add(uName);
            myCommand.Parameters.Add(uPassword);
            myCommand.Connection.Open();
            SqlDataReader myReader = myCommand.ExecuteReader(CommandBehavior.CloseConnection);
            if (myReader.Read() == true)
            {
                // Cosmetic progress animation before switching to the cover page.
                int i;
                ProgressBar1.Visible = true;
                ProgressBar1.Maximum = 5000;
                ProgressBar1.Minimum = 0;
                ProgressBar1.Value = 4;
                ProgressBar1.Step = 1;
                for (i = 0; i <= 5000; i++)
                {
                    ProgressBar1.PerformStep();
                }
                this.Hide();
                CoverPage s = new CoverPage();
                s.FormClosed += new FormClosedEventHandler(form_FormClosed);
                s.Show();
                this.Hide();
            }
            else
            {
                MessageBox.Show("Login is Failed...Try again !", "Login Denied", MessageBoxButtons.OK, MessageBoxIcon.Error);
                textBox1.Clear();
                textBox2.Clear();
                textBox1.Focus();
            }
            if (myConnection.State == ConnectionState.Open)
            {
                myConnection.Dispose();
            }
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message, "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
        }
    }
    #endregion

    // Close this (hidden) window when the cover page is closed.
    private void form_FormClosed(object sender, FormClosedEventArgs e)
    {
        this.Close();
    }

    // Hover feedback for the three role picture boxes.
    private void changestyle(object sender, EventArgs e)
    {
        pictureBox1.BorderStyle = BorderStyle.Fixed3D;
    }

    private void pictureBox1_MouseLeave(object sender, EventArgs e)
    {
        pictureBox1.BorderStyle = BorderStyle.None;
    }

    private void changestyle1(object sender, EventArgs e)
    {
        pictureBox2.BorderStyle = BorderStyle.Fixed3D;
    }

    private void pictureBox2_MouseLeave(object sender, EventArgs e)
    {
        pictureBox2.BorderStyle = BorderStyle.None;
    }

    private void changestyle2(object sender, EventArgs e)
    {
        pictureBox3.BorderStyle = BorderStyle.Fixed3D;
    }

    private void pictureBox3_MouseLeave(object sender, EventArgs e)
    {
        pictureBox3.BorderStyle = BorderStyle.None;
    }

    private void pictureBox1_Click(object sender, EventArgs e)
    {
        type = "Admin";
        usr = 0;
        visible();
    }

    private void pictureBox2_Click(object sender, EventArgs e)
    {
        type = "Viewer";
        usr = 2;
        visible();
    }

    private void pictureBox3_Click(object sender, EventArgs e)
    {
        type = "User";
        usr = 1;
        visible();
    }

    /// <summary>Reveals the credential controls once a role is chosen.</summary>
    public void visible()
    {
        pictureBox4.Visible = true;
        label2.Visible = true;
        label3.Visible = true;
        textBox1.Visible = true;
        textBox2.Visible = true;
        button1.Visible = true;
    }
}
}
<file_sep>/Software/AddAcount.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
/// <summary>Data-entry window that inserts a new row into the Accounts table.</summary>
public partial class AddAcount : Form
{
    string conStr = Properties.Settings.Default.AccountDatabaseConnectionString;

    public AddAcount()
    {
        InitializeComponent();
    }

    // Designer-wired handler; intentionally empty.
    protected void btnCancelAdd_Click(object sender, EventArgs e)
    {
    }

    // Designer-wired handler; intentionally empty.
    private void btnResetFields_Click(object sender, EventArgs e)
    {
    }

    /// <summary>Clears all account-entry fields.</summary>
    public void clearFields()
    {
        txtAdAcDisc.Clear();
        txtAdAcIniBal.Clear();
        txtAdAcName.Clear();
        txtAdAcNum.Clear();
        cmbAdAcType.Text = "Select Account Type";
    }

    // Designer-wired handler; intentionally empty.
    private void btnAddAc_Click(object sender, EventArgs e)
    {
    }

    private void AddAcount_FormClosing(object sender, FormClosingEventArgs e)
    {
        // Refresh the main window's data when this one closes.
        Form1 mf = (Form1)Application.OpenForms["Form1"];
        if (mf != null)
        {
            mf.LoadAccounts();
            mf.LoadTransactions(mf.dtpSt.Value, mf.dtpEn.Value);
            mf.TopMost = true;
        }
    }

    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    private void btnAddStaff_Click(object sender, EventArgs e)
    {
        SqlConnection con = new SqlConnection(conStr);
        try
        {
            con.Open();
            // AvailableBal starts at the initial balance; totals start at zero.
            string sql = "INSERT INTO Accounts (AccountName,AccountNumber,AccountType,InitialBalance,AccountDiscription,AvailableBal,TotalIncome,TotalExpense) VALUES (@ActName,@ActNum,@ActType,@IniBal,@ActDisc,@IniBal,0,0)";
            SqlCommand cmd = new SqlCommand(sql, con);
            cmd.Parameters.AddWithValue("@ActName", txtAdAcName.Text);
            // BUG FIX: Convert.ToInt64 on free text used to crash the app with
            // an unhandled FormatException; failures are now reported.
            cmd.Parameters.AddWithValue("@ActNum", Convert.ToInt64(txtAdAcNum.Text));
            cmd.Parameters.AddWithValue("@ActType", cmbAdAcType.Text);
            cmd.Parameters.AddWithValue("@IniBal", Convert.ToInt64(txtAdAcIniBal.Text));
            cmd.Parameters.AddWithValue("@ActDisc", txtAdAcDisc.Text);
            cmd.ExecuteNonQuery();
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message, "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
            return;
        }
        finally
        {
            if (con.State == ConnectionState.Open)
                con.Close();
        }
        DialogResult dg = MessageBox.Show("Account Successfully Added", "Success", MessageBoxButtons.OK);
        if (dg == DialogResult.OK)
        {
            this.Close();
        }
    }

    private void button1_Click(object sender, EventArgs e)
    {
        clearFields();
    }

    private void txtAdAcNum_Validating(object sender, CancelEventArgs e)
    {
        string x = txtAdAcNum.Text.ToString();
        if (check_number(x))
            errorProvider1.SetError(txtAdAcNum, "only enter numbers");
        else
            errorProvider1.Clear();
    }

    /// <summary>
    /// Returns true when <paramref name="s"/> contains any non-digit character,
    /// i.e. when the input is NOT a pure number.
    /// </summary>
    public Boolean check_number(string s)
    {
        // BUG FIX: only letters were rejected before, so input such as
        // "12.5!" slipped past the "only enter numbers" validation.
        foreach (char c in s)
        {
            if (!char.IsDigit(c))
            {
                return true;
            }
        }
        return false;
    }

    private void txtAdAcIniBal_Validating(object sender, CancelEventArgs e)
    {
        string x = txtAdAcIniBal.Text.ToString();
        if (check_number(x))
            errorProvider1.SetError(txtAdAcIniBal, "only enter numbers");
        else
            errorProvider1.Clear();
    }
}
}
<file_sep>/Software/Form1.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Data.SqlClient;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace Software
{
public partial class Form1 : Form
{
bool pnAcExpanded = false;
bool pnTrExpanded = false;
bool pnStatExpanded = false;
bool pnMnExpanded = false;
static string conStr = Properties.Settings.Default.AccountDatabaseConnectionString;
public static SqlConnection con = new SqlConnection(conStr);
DataTable dt;
DataTable dtInc;
DataTable dtExp;
BindingSource bsIncome;
BindingSource bsExpense;
public Form1()
{
InitializeComponent();
pnDtAccount.Visible = false;
pnDtTrans.Visible = false;
pnDtStat.Visible = false;
pnDtMang.Visible = false;
}
private void button5_Click(object sender, EventArgs e)
{
if (!pnAcExpanded)
{
pnAcExpanded = !pnAcExpanded;
pnDtAccount.Visible = true;
}
else
{
pnAcExpanded = !pnAcExpanded;
pnDtAccount.Visible = false;
}
}
private void button4_Click(object sender, EventArgs e)
{
if (!pnMnExpanded)
{
pnMnExpanded = !pnMnExpanded;
pnDtMang.Visible = true;
}
else
{
pnMnExpanded = !pnMnExpanded;
pnDtMang.Visible = false;
}
}
private void btnTrans_Click(object sender, EventArgs e)
{
if (!pnTrExpanded)
{
pnTrExpanded = !pnTrExpanded;
pnDtTrans.Visible = true;
}
else
{
pnTrExpanded = !pnTrExpanded;
pnDtTrans.Visible = false;
}
}
private void btnStat_Click(object sender, EventArgs e)
{
if (!pnStatExpanded)
{
pnStatExpanded = !pnStatExpanded;
pnDtStat.Visible = true;
}
else
{
pnStatExpanded = !pnStatExpanded;
pnDtStat.Visible = false;
}
}
private void button1_MouseEnter(object sender, EventArgs e)
{
btnAddAccount.ForeColor = Color.Gray;
}
private void button1_MouseLeave(object sender, EventArgs e)
{
btnAddAccount.ForeColor = Color.Black;
}
private void btnAddAccount_Click(object sender, EventArgs e)
{
AddAcount ac = (AddAcount)Application.OpenForms["AddAcount"];
if (ac != null)
{
ac.TopMost = true;
}
else
{
AddAcount adac = new AddAcount();
adac.Show();
}
}
private void button2_Click(object sender, EventArgs e)
{
viewAccounts va = (viewAccounts)Application.OpenForms["viewAccounts"];
if (va != null)
{
va.TopMost = true;
}
else
{
viewAccounts vac = new viewAccounts();
vac.Show();
}
}
private void Form1_Load(object sender, EventArgs e)
{
dtpSt.Value = DateTime.Today.AddMonths(-1);
LoadAccounts();
bsIncome = new BindingSource();
bsExpense = new BindingSource();
dgvIncm.DataSource = bsIncome;
dgvExpense.DataSource = bsExpense;
LoadTransactions(dtpSt.Value,dtpEn.Value);
}
public void LoadAccounts()
{
con.Open();
SqlDataAdapter da = new SqlDataAdapter("SELECT*FROM Accounts",con);
dt = new DataTable();
da.Fill(dt);
con.Close();
contextMenuStrip1.Items.Clear();
foreach (DataRow row in dt.Rows)
{
contextMenuStrip1.Items.Add(row["AccountName"].ToString());
}
dt.Clear();
if (contextMenuStrip1.Items.Count==1)
{
btnSelectAcount.Text = contextMenuStrip1.Items[0].Text;
Properties.Settings.Default.SelectedAc = contextMenuStrip1.Items[0].Text.Trim();
getSelectedAcNum();
}
else if (contextMenuStrip1.Items.Count > 1)
{
if(Properties.Settings.Default.SelectedAc==string.Empty)
{
btnSelectAcount.Text = Properties.Settings.Default.SelectedAc.ToString();
}
else
btnSelectAcount.Text = contextMenuStrip1.Items[0].Text;
}
else
{
btnSelectAcount.Text = "Select Acount";
}
if (contextMenuStrip1.Items.Count == 0)
{
lblInfoAc.Text = "No Accounts Available!";
lblInfoAc.Visible = true;
lbIncome.Visible = false;
label2.Visible = false;
}
else
{
lblInfoAc.Visible = false;
lbIncome.Visible = true;
label2.Visible = true;
}
}
public void getSelectedAcNum()
{
con.Open();
SqlDataAdapter daa = new SqlDataAdapter();
daa.SelectCommand = new SqlCommand("SELECT AccountNumber,TotalIncome,TotalExpense,AvailableBal From Accounts WHERE AccountName = @selectedAcName ", con);
daa.SelectCommand.Parameters.AddWithValue("@selectedAcName", btnSelectAcount.Text.ToString());
DataTable dtt = new DataTable();
daa.Fill(dtt);
con.Close();
Properties.Settings.Default.SelectedAcNum=Convert.ToInt64(dtt.Rows[0].ItemArray[0].ToString());
lbIncome.Text = dtt.Rows[0].ItemArray[3].ToString()+".00";
string[] xAxis = new string[2];
int[] yAxis=new int[2];
xAxis[0] = "Income";
xAxis[1] = "Expense";
yAxis[0] = Convert.ToInt16(dtt.Rows[0].ItemArray[1].ToString());
yAxis[1] = Convert.ToInt16(dtt.Rows[0].ItemArray[2].ToString());
chart1.Series[0].Points.DataBindXY(xAxis, yAxis);
}
private void button1_Click(object sender, EventArgs e)
{
contextMenuStrip1.Show(btnSelectAcount,new Point(0,btnSelectAcount.Height));
}
private void button6_Click(object sender, EventArgs e)
{
if (Properties.Settings.Default.SelectedAc.ToString().Trim() == string.Empty)
MessageBox.Show("No Account selected");
else
{
AddIncome ai = (AddIncome)Application.OpenForms["AddIncome"];
if (ai != null)
{
ai.TopMost = true;
}
else
{
AddIncome adinc = new AddIncome();
adinc.Show();
}
}
}
private void button7_Click(object sender, EventArgs e)
{
if (Properties.Settings.Default.SelectedAc.ToString().Trim() == string.Empty)
MessageBox.Show("No Account selected");
else
{
AddExpence ae = (AddExpence)Application.OpenForms["AddExpence"];
if (ae != null)
{
ae.TopMost = true;
}
else
{
AddExpence adex = new AddExpence();
adex.Show();
}
}
}
public void LoadTransactions(DateTime stDate,DateTime enDate)
{
con.Open();
SqlDataAdapter daInc = new SqlDataAdapter();
daInc.SelectCommand =new SqlCommand("SELECT Category,Date,TransDicsription,Amount FROM Transactions WHERE TransType='Income' AND AccountNumber=@selectedAc AND Date>=@stDt AND Date<=@enDt", con);
daInc.SelectCommand.Parameters.AddWithValue("@selectedAc", Properties.Settings.Default.SelectedAcNum);
daInc.SelectCommand.Parameters.AddWithValue("@stDt", stDate);
daInc.SelectCommand.Parameters.AddWithValue("@enDt", enDate);
dtInc = new DataTable();
daInc.Fill(dtInc);
bsIncome.DataSource = dtInc;
con.Close();
SqlDataAdapter daExp = new SqlDataAdapter();
daExp.SelectCommand = new SqlCommand("SELECT Category,Date,TransDicsription,Amount FROM Transactions WHERE TransType='Expense' AND AccountNumber=@selectedAc", con);
daExp.SelectCommand.Parameters.AddWithValue("@selectedAc", Properties.Settings.Default.SelectedAcNum);
dtExp = new DataTable();
daExp.Fill(dtExp);
bsExpense.DataSource = dtExp;
con.Open();
SqlDataAdapter dUp = new SqlDataAdapter();
dUp.SelectCommand=new SqlCommand("SELECT TotalIncome,TotalExpense,AvailableBal FROM Accounts WHERE AccountNumber=@selectedAcNum", con);
dUp.SelectCommand.Parameters.AddWithValue("@selectedAcNum", Properties.Settings.Default.SelectedAcNum);
DataTable dtU = new DataTable();
dUp.Fill(dtU);
con.Close();
if(dtU.Rows.Count>0)
{
lbIncome.Text = dtU.Rows[0].ItemArray[2].ToString() + ".00";
string[] xAxis = new string[2];
int[] yAxis = new int[2];
xAxis[0] = "Income";
xAxis[1] = "Expense";
yAxis[0] = Convert.ToInt16(dtU.Rows[0].ItemArray[0].ToString());
yAxis[1] = Convert.ToInt16(dtU.Rows[0].ItemArray[1].ToString());
chart1.Series[0].Points.DataBindXY(xAxis, yAxis);
}
}
private void contextMenuStrip1_ItemClicked_1(object sender, ToolStripItemClickedEventArgs e)
{
Properties.Settings.Default.SelectedAc = e.ClickedItem.Text;
btnSelectAcount.Text = Properties.Settings.Default.SelectedAc;
getSelectedAcNum();
}
private void dtpSt_ValueChanged(object sender, EventArgs e)
{
}
private void btnFilter_Click(object sender, EventArgs e)
{
LoadTransactions(dtpSt.Value, dtpEn.Value);
}
private void button9_Click(object sender, EventArgs e)
{
IncomeStats ist=(IncomeStats)Application.OpenForms["IncomeStats"];
if (ist != null)
{
ist.TopMost = true;
}
else
{
IncomeStats istat = new IncomeStats();
istat.Show();
}
}
private void button10_Click(object sender, EventArgs e)
{
ExpenseStats est = (ExpenseStats)Application.OpenForms["ExpenseStats"];
if (est != null)
{
est.TopMost = true;
}
else
{
ExpenseStats exStat = new ExpenseStats();
exStat.Show();
}
}
private void button11_Click(object sender, EventArgs e)
{
MnthlyAnalyze mst = (MnthlyAnalyze)Application.OpenForms["MnthlyAnalyze"];
if (mst != null)
{
mst.TopMost = true;
}
else
{
MnthlyAnalyze sStat = new MnthlyAnalyze();
sStat.Show();
}
}
private void button13_Click(object sender, EventArgs e)
{
    // Focus an already-open category editor, or create a new one.
    AddCategories existing = (AddCategories)Application.OpenForms["AddCategories"];
    if (existing == null)
    {
        new AddCategories().Show();
    }
    else
    {
        existing.TopMost = true;
    }
}
// When this window gains focus, drop TopMost so other windows may cover it
// again (sibling forms push themselves TopMost while open).
private void Form1_Activated(object sender, EventArgs e)
{
    this.TopMost = false;
}
private void Form1_FormClosing(object sender, FormClosingEventArgs e)
{
    // Hand focus back to the cover page, if one is still open.
    CoverPage cover = (CoverPage)Application.OpenForms["CoverPage"];
    if (cover == null)
    {
        return;
    }
    cover.TopMost = true;
}
}
}
<file_sep>/Software/Inventories.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
/// <summary>
/// Lists the Inventory table in a grid and launches the add / update dialogs.
/// </summary>
public partial class Inventories : Form
{
    // Connection shared by every query this form runs.
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);
    SqlDataAdapter da;
    public int a, b, c;

    public Inventories()
    {
        InitializeComponent();
    }

    private void Inventories_Load(object sender, EventArgs e)
    {
        LoadItem();
    }

    // Loads every Inventory row into the grid.
    private void LoadItem()
    {
        da = new SqlDataAdapter();
        da.SelectCommand = new SqlCommand("SELECT*FROM Inventory", con);
        DataTable dt = new DataTable();
        try
        {
            con.Open();
            da.Fill(dt);
        }
        finally
        {
            // BUG FIX: the shared connection used to stay open if Fill threw,
            // which made every later con.Open() fail with "already open".
            con.Close();
        }
        dataGridView1.DataSource = dt;
    }

    // Closes this window when a child form it spawned closes.
    private void form_FormClosed(object sender, FormClosedEventArgs e)
    {
        this.Close();
    }

    // The designer-wired handlers below are intentionally empty.
    private void button5_Click(object sender, EventArgs e)
    {
    }

    public void visible()
    {
    }

    public void Reset()
    {
    }

    private void button6_Click(object sender, EventArgs e)
    {
    }

    private void button7_Click(object sender, EventArgs e)
    {
    }

    private void Delete_Click(object sender, EventArgs e)
    {
    }

    private void button8_Click(object sender, EventArgs e)
    {
    }

    private void ovalShape1_MouseLeave(object sender, EventArgs e)
    {
    }

    private void ovalShape1_MouseHover(object sender, EventArgs e)
    {
        // NOTE(review): hovering shape 1 recolors shape 2 — presumably a
        // paired highlight; confirm against the designer layout.
        ovalShape2.BackColor = Color.Blue;
    }

    private void button1_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    private void btnUpdate_Click(object sender, EventArgs e)
    {
        // NOTE(review): despite the name, this opens the Add dialog — verify
        // the designer wiring of btnUpdate / btnAdd.
        AddInventory ai = new AddInventory();
        ai.Show();
    }

    private void button2_Click(object sender, EventArgs e)
    {
        UpdateInventory ui = new UpdateInventory();
        ui.Show();
    }

    private void btnAdd_Click(object sender, EventArgs e)
    {
        // NOTE(review): despite the name, this opens the Update dialog —
        // verify the designer wiring of btnUpdate / btnAdd.
        UpdateInventory ui = new UpdateInventory();
        ui.Show();
    }
}
}
<file_sep>/Software/AddSchedule.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
/// <summary>
/// Dialog for inserting one row into the Schedule table.
/// </summary>
public partial class AddSchedule : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);

    public AddSchedule()
    {
        InitializeComponent();
    }

    private void button1_Click(object sender, EventArgs e)
    {
    }

    // Clears every input so another schedule entry can be typed in.
    public void Reset()
    {
        dateTimePicker1.Text = "";
        textBox2.Text = "";
        radioButton1.Checked = false;
        radioButton2.Checked = false;
        textBox1.Text = "";
        textBox3.Text = "";
    }

    private void form_FormClosed(object sender, FormClosedEventArgs e)
    {
        this.Close();
    }

    private void button2_Click(object sender, EventArgs e)
    {
    }

    private void AddSchedule_Load(object sender, EventArgs e)
    {
    }

    private void button3_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // Validates the remark choice, then inserts one Schedule row.
    private void btnAddStaff_Click(object sender, EventArgs e)
    {
        // BUG FIX: if neither radio button was checked, the @Remarks parameter
        // was never added and the INSERT failed with a raw SqlException.
        if (!radioButton1.Checked && !radioButton2.Checked)
        {
            MessageBox.Show("Please select a remark (Event or Important Day).");
            return;
        }
        try
        {
            con.Open();
            SqlCommand myCommand = new SqlCommand("INSERT INTO Schedule (Date,Schedule,Remarks,Time,Venue)VALUES (@Date,@Schedule,@Remarks,@Time,@Venue)", con);
            myCommand.Parameters.AddWithValue("@Date", dateTimePicker1.Text.ToString());
            myCommand.Parameters.AddWithValue("@Schedule", textBox2.Text.ToString());
            if (radioButton1.Checked)
                myCommand.Parameters.AddWithValue("@Remarks", "Event");
            else
                myCommand.Parameters.AddWithValue("@Remarks", "Important Day");
            myCommand.Parameters.AddWithValue("@Time", textBox1.Text.ToString());
            myCommand.Parameters.AddWithValue("@Venue", textBox3.Text.ToString());
            myCommand.ExecuteNonQuery();
            MessageBox.Show("Successfully added!!!");
            Reset();
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message, "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
        }
        finally
        {
            // BUG FIX: close in finally — an exception used to leave the shared
            // connection open, so the next click failed with "already open".
            con.Close();
        }
    }

    private void AddSchedule_Activated(object sender, EventArgs e)
    {
        this.TopMost = false;
    }

    // On close, return focus to the Schedule list (opening it if needed).
    private void AddSchedule_FormClosing(object sender, FormClosingEventArgs e)
    {
        Schedule s = (Schedule)Application.OpenForms["Schedule"];
        if (s != null)
        {
            s.TopMost = true;
        }
        else
        {
            Schedule sc = new Schedule();
            sc.Show();
        }
    }
}
}
<file_sep>/Software/AddPhoto.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
using System.IO;
namespace Software
{
/// <summary>
/// Dialog for building a photo album: up to 12 picture slots whose contents
/// are inserted into the AddPhotos table, with the first photo doubling as
/// the album cover in the Albums table.
/// </summary>
public partial class AddPhoto : Form
{
    static string conStr = Properties.Settings.Default.GalleryConStr;
    static SqlConnection con = new SqlConnection(conStr);
    // Last album name saved; read by other forms.
    public static string albumName;

    public AddPhoto()
    {
        InitializeComponent();
    }

    // All 12 photo slots in display order.
    private PictureBox[] GetSlots()
    {
        return new PictureBox[]
        {
            pictureBox1, pictureBox2, pictureBox3, pictureBox4,
            pictureBox5, pictureBox6, pictureBox7, pictureBox8,
            pictureBox9, pictureBox10, pictureBox11, pictureBox12
        };
    }

    // Lets the user pick a .jpg for 'target'. If 'next' is given and the first
    // slot is filled, the next slot is revealed (same rule the old handlers used).
    private void BrowseForPhoto(PictureBox target, PictureBox next)
    {
        OpenFileDialog file = new OpenFileDialog();
        file.Filter = "Image files (*.jpg)|*.jpg|All files(*.*)|*.*";
        if (file.ShowDialog() == DialogResult.OK)
        {
            target.ImageLocation = file.FileName;
        }
        if (next != null && pictureBox1.Image != null)
        {
            next.Visible = true;
        }
    }

    // Inserts one image into AddPhotos under 'album'. Empty slots are skipped.
    // The connection must already be open. Returns true if a row was written.
    private bool InsertPhoto(string album, PictureBox source)
    {
        if (source.Image == null || source.ImageLocation == null)
        {
            return false;
        }
        byte[] img = File.ReadAllBytes(source.ImageLocation);
        SqlCommand command = new SqlCommand("INSERT INTO AddPhotos (AlbumName,Image) VALUES(@AlbumName,@Image)", con);
        command.Parameters.AddWithValue("@AlbumName", album);
        command.Parameters.AddWithValue("@Image", img);
        command.ExecuteNonQuery();
        return true;
    }

    private void textBox1_TextChanged(object sender, EventArgs e)
    {
        // Reveal the first slot once an album name has been typed.
        if (textBox1.Text != "")
        {
            pictureBox1.Visible = true;
        }
    }

    // Saves whatever photos are currently selected, then clears the slots so
    // another batch can be added to the same album.
    private void btnAddMore_Click(object sender, EventArgs e)
    {
        // BUG FIX: the old code cleared every picture box FIRST and then read
        // each box's ImageLocation unconditionally, crashing on any empty slot.
        // Save non-empty slots first, then clear them for the next batch.
        try
        {
            con.Open();
            foreach (PictureBox slot in GetSlots())
            {
                InsertPhoto(textBox1.Text.ToString(), slot);
            }
        }
        catch (System.Exception ex)
        {
            MessageBox.Show(ex.Message);
        }
        finally
        {
            con.Close();
        }
        foreach (PictureBox slot in GetSlots())
        {
            slot.Image = null;
        }
    }

    private void pictureBox1_Click(object sender, EventArgs e)
    {
        BrowseForPhoto(pictureBox1, null);
        // Slot 2 is revealed unconditionally (matches original behavior).
        pictureBox2.Visible = true;
    }

    private void pictureBox2_Click(object sender, EventArgs e)
    {
        BrowseForPhoto(pictureBox2, pictureBox3);
    }

    private void pictureBox3_Click(object sender, EventArgs e)
    {
        BrowseForPhoto(pictureBox3, pictureBox4);
    }

    private void pictureBox4_Click(object sender, EventArgs e)
    {
        BrowseForPhoto(pictureBox4, pictureBox5);
    }

    private void pictureBox5_Click(object sender, EventArgs e)
    {
        BrowseForPhoto(pictureBox5, pictureBox6);
    }

    private void pictureBox6_Click(object sender, EventArgs e)
    {
        BrowseForPhoto(pictureBox6, pictureBox7);
    }

    private void pictureBox7_Click(object sender, EventArgs e)
    {
        BrowseForPhoto(pictureBox7, pictureBox8);
    }

    private void pictureBox8_Click(object sender, EventArgs e)
    {
        BrowseForPhoto(pictureBox8, pictureBox9);
    }

    private void pictureBox9_Click(object sender, EventArgs e)
    {
        BrowseForPhoto(pictureBox9, pictureBox10);
    }

    private void pictureBox10_Click(object sender, EventArgs e)
    {
        BrowseForPhoto(pictureBox10, pictureBox11);
    }

    private void pictureBox11_Click(object sender, EventArgs e)
    {
        BrowseForPhoto(pictureBox11, pictureBox12);
    }

    private void pictureBox12_Click(object sender, EventArgs e)
    {
        // Last slot: nothing further to reveal.
        BrowseForPhoto(pictureBox12, null);
    }

    // Saves every selected photo under the typed album name and records the
    // first photo as the album cover, then returns to the gallery.
    private void btnSave_Click_1(object sender, EventArgs e)
    {
        byte[] img1;
        albumName = this.textBox1.Text.ToString();
        try
        {
            con.Open();
            foreach (PictureBox slot in GetSlots())
            {
                InsertPhoto(albumName, slot);
            }
            MessageBox.Show("Successfully uploaded!");
            // The first photo doubles as the album cover in the Albums table.
            // BUG FIX: guard the cover insert — the old code read
            // pictureBox1.ImageLocation unconditionally and crashed when empty.
            if (pictureBox1.Image != null && pictureBox1.ImageLocation != null)
            {
                SqlCommand comm = new SqlCommand("INSERT INTO Albums (AlbumName,Image) VALUES (@AlbumName,@Image)", con);
                img1 = File.ReadAllBytes(pictureBox1.ImageLocation);
                comm.Parameters.AddWithValue("@Image", img1);
                comm.Parameters.AddWithValue("@AlbumName", textBox1.Text.ToString());
                comm.ExecuteNonQuery();
            }
            // Hand control back to the gallery, focusing it if already open.
            Gallery glr = (Gallery)Application.OpenForms["Gallery"];
            if (glr != null)
            {
                glr.TopMost = true;
                this.Close();
            }
            else
            {
                Gallery s = new Gallery();
                s.Show();
                this.Close();
            }
        }
        catch (System.Exception a)
        {
            MessageBox.Show(a.Message);
        }
        finally
        {
            // BUG FIX: close in finally — an exception used to leave the shared
            // connection open, so every later Open() failed.
            con.Close();
        }
    }
}
}
<file_sep>/Software/LoginTemp.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Security.Cryptography;
using System.Data.SqlClient;
namespace Software
{
/// <summary>
/// Login window: checks the typed credentials against the Login table
/// (password stored as a username-salted SHA-256 hex digest) and, on
/// success, opens the cover page.
/// </summary>
public partial class LoginTemp : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);

    public LoginTemp()
    {
        InitializeComponent();
    }

    // Verifies the credentials and opens the cover page on a single match.
    private void button1_Click(object sender, EventArgs e)
    {
        string getuser = txtUser.Text.ToString().Trim();
        string getpass = txtPass.Text.ToString().Trim();
        SqlDataAdapter da = new SqlDataAdapter();
        DataTable dt = new DataTable();
        // BUG FIX: the SQL text's password placeholder did not match the
        // parameter that was added ("@encPass"), so the query could not run.
        da.SelectCommand = new SqlCommand("SELECT*FROM Login WHERE Username=@getuser AND Password=@encPass", con);
        da.SelectCommand.Parameters.AddWithValue("@getuser", getuser);
        da.SelectCommand.Parameters.AddWithValue("@encPass", hashPass(getpass, getuser));
        try
        {
            con.Open();
            da.Fill(dt);
        }
        finally
        {
            // BUG FIX: close in finally — an exception used to leave the shared
            // connection open, so the next login attempt failed.
            con.Close();
        }
        if (dt.Rows.Count == 1)
        {
            Properties.Settings.Default.User = getuser;
            Properties.Settings.Default.UserType = dt.Rows[0].ItemArray[3].ToString().Trim();
            CoverPage cs = new CoverPage();
            cs.Show();
            this.Hide();
        }
        else
        {
            MessageBox.Show("Invalid username or password!");
        }
    }

    // Clear the username watermark when the field gains focus.
    private void textBox1_Enter(object sender, EventArgs e)
    {
        if (txtUser.Text == "Username")
            txtUser.Text = string.Empty;
    }

    // Clear the password watermark when the field gains focus.
    private void textBox2_Enter(object sender, EventArgs e)
    {
        if (txtPass.Text == "<PASSWORD>")
            txtPass.Text = string.Empty;
    }

    private void LoginTemp_Load(object sender, EventArgs e)
    {
    }

    private void pictureBox3_Click(object sender, EventArgs e)
    {
        this.Close();
        this.Dispose();
    }

    // Restore the username watermark if the field is left empty.
    private void textBox1_Leave(object sender, EventArgs e)
    {
        if (txtUser.Text == string.Empty)
            txtUser.Text = "Username";
    }

    // Restore the password watermark if the field is left empty.
    private void textBox2_Leave(object sender, EventArgs e)
    {
        if (txtPass.Text == string.Empty)
            txtPass.Text = "<PASSWORD>";
    }

    private void button2_Click(object sender, EventArgs e)
    {
    }

    // Salts the password with the username, then returns the SHA-256 digest
    // as an upper-case hex string (matches what ResetPassword stores).
    private string hashPass(string pass, string user)
    {
        SHA256 sha = new SHA256CryptoServiceProvider();
        sha.ComputeHash(ASCIIEncoding.ASCII.GetBytes(pass + user));
        byte[] result = sha.Hash;
        StringBuilder stb = new StringBuilder();
        for (int i = 0; i < result.Length; i++)
        {
            stb.Append(result[i].ToString("X2"));
        }
        return stb.ToString();
    }
}
}
<file_sep>/Software/PhotoViewer.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SqlClient;
using System.IO;
namespace Software
{
/// <summary>
/// Shows every photo of the album selected in the Gallery as a grid of
/// clickable thumbnails, four per row.
/// </summary>
public partial class PhotoViewer : Form
{
    // Shared state read by Viewphoto to know which image was clicked.
    public static int btn = 0, ind;
    public static string albm = Gallery.name;
    PictureBox[] list;
    public static PictureBox pi;
    public static ImageList il;
    public static PictureBox[] pic;
    static string conStr = Properties.Settings.Default.GalleryConStr;
    static SqlConnection con = new SqlConnection(conStr);

    public PhotoViewer()
    {
        InitializeComponent();
    }

    // Queries the album's photos and lays the thumbnails out 4 per row.
    private void Photoviewr_Load(object sender, EventArgs e)
    {
        // BUG FIX: the album name used to be concatenated into the SQL text
        // (SQL-injectable and quote-fragile); it is now a parameter.
        SqlCommand myCommand = new SqlCommand("Select * from AddPhotos WHERE AlbumName=@album", con);
        myCommand.Parameters.AddWithValue("@album", albm.ToString().Trim());
        SqlDataAdapter myAdapter = new SqlDataAdapter(myCommand);
        DataTable myTable = new DataTable();
        myAdapter.Fill(myTable);
        addPhotosDataGridView.DataSource = myTable;
        int x = 13, y = 13, kk = 0;
        // NOTE(review): Rows.Count - 1 presumably skips the grid's "new row"
        // placeholder — confirm AllowUserToAddRows is enabled on this grid.
        pic = new PictureBox[addPhotosDataGridView.Rows.Count - 1];
        Button[] lbl = new Button[addPhotosDataGridView.Rows.Count - 1];
        for (int i = 0; i < addPhotosDataGridView.Rows.Count - 1; i++)
        {
            string RowType = addPhotosDataGridView.Rows[i].Cells[0].Value.ToString();
            // BUG FIX (dead code removed): the old loop opened/closed the
            // connection and built a "select Image from Albums" command on
            // every iteration but never executed it.
            byte[] image = (byte[])addPhotosDataGridView.Rows[i].Cells[1].Value;
            MemoryStream stream = new MemoryStream();
            stream.Write(image, 0, image.Length);
            Bitmap bitmap = new Bitmap(stream);
            pic[i] = new PictureBox();
            lbl[i] = new Button();
            pic[i].Image = bitmap;
            pic[i].Click += new EventHandler(pic_Click);
            this.Controls.Add(pic[i]);
            pic[i].BorderStyle = BorderStyle.Fixed3D;
            pic[i].Location = new Point(x, y);
            lbl[i].Location = new Point(x, y + 140);
            pic[i].Size = new Size(110, 130);
            kk++;
            pic[i].SizeMode = System.Windows.Forms.PictureBoxSizeMode.StretchImage;
            lbl[i].Text = RowType;
            lbl[i].Name = RowType;
            if (kk < 4)
            {
                x += 152;   // next column
            }
            else
            {
                kk = 0;     // wrap to the next row
                x = 13;
                y += 163;
            }
        }
    }

    private void button1_Click(object sender, EventArgs e)
    {
        btn = 1;
        AddPhoto s = new AddPhoto();
        s.FormClosed += new FormClosedEventHandler(form_FormClosed);
        s.Show();
        this.Hide();
    }

    private void form_FormClosed(object sender, FormClosedEventArgs e)
    {
        this.Close();
    }

    // Remembers which thumbnail was clicked and opens the single-photo viewer.
    private void pic_Click(object sender, EventArgs e)
    {
        PictureBox p = (PictureBox)sender;
        pi = p;
        for (int bk = 0; bk < pic.Length; bk++)
        {
            if (pic[bk].Image == p.Image)
            {
                ind = bk;
            }
        }
        Viewphoto s = new Viewphoto();
        s.Show();
    }

    private void button1_Click_1(object sender, EventArgs e)
    {
        Gallery s = new Gallery();
        s.FormClosed += new FormClosedEventHandler(form_FormClosed);
        s.Show();
        this.Hide();
    }

    private void button1_Click_2(object sender, EventArgs e)
    {
        Gallery s = new Gallery();
        s.FormClosed += new FormClosedEventHandler(form_FormClosed);
        s.Show();
        this.Hide();
    }
}
}
<file_sep>/Software/SumeryStat.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
/// <summary>
/// Shows the selected account's transactions for a date window, sorted by date.
/// </summary>
public partial class MnthlyAnalyze : Form
{
    static string conStr = Properties.Settings.Default.AccountDatabaseConnectionString;
    public static SqlConnection con = new SqlConnection(conStr);
    DataTable dt;
    BindingSource bsSum;

    public MnthlyAnalyze()
    {
        InitializeComponent();
    }

    // Default the window to the last month and load its transactions.
    private void SumeryStat_Load(object sender, EventArgs e)
    {
        dtpStSum.Value = DateTime.Today.AddMonths(-1);
        bsSum = new BindingSource();
        dgvSum.DataSource = bsSum;
        LoadTransactions(dtpStSum.Value, dtpEnSum.Value);
    }

    // Loads the selected account's transactions inside [dtSt, dtEn].
    private void LoadTransactions(DateTime dtSt, DateTime dtEn)
    {
        SqlDataAdapter da = new SqlDataAdapter();
        da.SelectCommand = new SqlCommand("SELECT Category,Date,TransType,TransDicsription,Amount FROM Transactions WHERE AccountNumber=@selectedAc AND Date>=@dtSt AND Date<=@dtEn", con);
        da.SelectCommand.Parameters.AddWithValue("@selectedAc", Properties.Settings.Default.SelectedAcNum);
        da.SelectCommand.Parameters.AddWithValue("@dtSt", dtSt);
        da.SelectCommand.Parameters.AddWithValue("@dtEn", dtEn);
        dt = new DataTable();
        try
        {
            con.Open();
            da.Fill(dt);
        }
        finally
        {
            // BUG FIX: an exception during Fill used to leave the shared
            // static connection open, breaking every later query.
            con.Close();
        }
        // Present rows in chronological order.
        dt.DefaultView.Sort = "Date";
        dt = dt.DefaultView.ToTable();
        bsSum.DataSource = dt;
    }
}
}
<file_sep>/Software/AddRecord.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
/// <summary>
/// Dialog that records one sports-event result and keeps the per-event best
/// record (Sports.BstRecord and the BestRecord table) in sync.
/// </summary>
public partial class AddRecord : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);
    string Event;   // currently selected event name
    int bstRec;     // best record computed for that event

    public AddRecord()
    {
        InitializeComponent();
    }

    // Clears every input field.
    public void Reset()
    {
        comboBox1.Text = "";
        cmbYear.Text = "";
        txtFirst.Text = "";
        textBox2.Text = "";
        textBox3.Text = "";
        txtRecd.Text = "";
    }

    // On close, refresh and refocus the achievements list if it is open.
    private void AddRecord_FormClosing(object sender, FormClosingEventArgs e)
    {
        Achivements rc = (Achivements)Application.OpenForms["Achivements"];
        if (rc != null)
        {
            rc.TopMost = true;
            rc.LoadRecords();
        }
    }

    // Inserts the result row, then pushes the recomputed best record into
    // both the Sports and BestRecord tables.
    private void btnAddStaff_Click(object sender, EventArgs e)
    {
        try
        {
            UpdtRecord();
            con.Open();
            SqlCommand myCommand = new SqlCommand("INSERT INTO Sports (Event,Year,First,Second,Third,Record,BstRecord) VALUES (@Event,@Year,@First,@Second,@Third,@Record,@bstRec)", con);
            myCommand.Parameters.AddWithValue("@Event", comboBox1.SelectedItem.ToString());
            myCommand.Parameters.AddWithValue("@Year", cmbYear.SelectedItem.ToString());
            myCommand.Parameters.AddWithValue("@First", txtFirst.Text.ToString());
            myCommand.Parameters.AddWithValue("@Second", textBox2.Text.ToString());
            myCommand.Parameters.AddWithValue("@Third", textBox3.Text.ToString());
            myCommand.Parameters.AddWithValue("@Record", txtRecd.Text.ToString());
            myCommand.Parameters.AddWithValue("@bstRec", bstRec);
            myCommand.ExecuteNonQuery();
            SqlCommand cupd = new SqlCommand("UPDATE Sports SET BstRecord=@bstRec WHERE Event=@event", con);
            cupd.Parameters.AddWithValue("@bstRec", bstRec);
            cupd.Parameters.AddWithValue("@event", Event);
            cupd.ExecuteNonQuery();
            SqlCommand cmd = new SqlCommand("UPDATE BestRecord SET Record=@Record,Owner=@Name,Year=@Year WHERE Event=@Event", con);
            cmd.Parameters.AddWithValue("@Event", Event);
            cmd.Parameters.AddWithValue("@Name", txtFirst.Text.ToString());
            cmd.Parameters.AddWithValue("@Year", cmbYear.Text.ToString());
            cmd.Parameters.AddWithValue("@Record", txtRecd.Text.ToString());
            cmd.ExecuteNonQuery();
            MessageBox.Show("Successfully added!!!");
            Reset();
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message);
        }
        finally
        {
            // BUG FIX: close in finally — an exception used to leave the shared
            // connection open, so the next attempt failed with "already open".
            con.Close();
        }
    }

    // Computes bstRec for the current event; seeds BestRecord if the event
    // has no row yet.
    private void UpdtRecord()
    {
        SqlDataAdapter da = new SqlDataAdapter();
        da.SelectCommand = new SqlCommand("SELECT*FROM BestRecord WHERE Event=@event", con);
        da.SelectCommand.Parameters.AddWithValue("@event", Event);
        DataTable dt = new DataTable();
        try
        {
            con.Open();
            da.Fill(dt);
            if (dt.Rows.Count > 0)
            {
                // Field events (jumps/throws): bigger is better; everything
                // else (track times): smaller is better.
                // NOTE(review): records are compared as whole integers —
                // fractional results (e.g. 1.85 m) would throw on conversion;
                // confirm the stored record format.
                bool higherIsBetter = Event == "High Jump" || Event == "Long Jump" || Event == "Tripple jump" || Event == "disc throw" || Event == "shot put" || Event == "javelin throw";
                int oldBest = Convert.ToInt32(dt.Rows[0].ItemArray[3].ToString());
                int newRec = Convert.ToInt32(txtRecd.Text.ToString());
                if (higherIsBetter ? oldBest < newRec : oldBest > newRec)
                {
                    bstRec = Convert.ToInt16(txtRecd.Text.ToString());
                }
                else
                {
                    bstRec = Convert.ToInt16(dt.Rows[0].ItemArray[3].ToString());
                }
            }
            else
            {
                // First result for this event: it is the best by definition.
                bstRec = Convert.ToInt16(txtRecd.Text.ToString());
                SqlCommand cmd = new SqlCommand("INSERT INTO BestRecord (Event,Owner,Record,Year) VALUES (@event,@owner,@recd,@year)", con);
                cmd.Parameters.AddWithValue("@event", Event);
                cmd.Parameters.AddWithValue("@owner", txtFirst.Text.ToString());
                cmd.Parameters.AddWithValue("@recd", bstRec);
                cmd.Parameters.AddWithValue("@year", cmbYear.Text.ToString());
                cmd.ExecuteNonQuery();
            }
        }
        finally
        {
            // BUG FIX: close in finally so a failed query does not leave the
            // shared connection open for the caller's con.Open().
            con.Close();
        }
        MessageBox.Show("Done");
    }

    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    private void comboBox1_SelectedIndexChanged(object sender, EventArgs e)
    {
        Event = comboBox1.Text.ToString().Trim();
    }
}
}
<file_sep>/Software/CoverPage.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
using Software;
namespace Software
{
public partial class CoverPage : Form
{
static string conStr = Properties.Settings.Default.MainConString;
public static SqlConnection con = new SqlConnection(conStr);
SqlDataAdapter da;
DataTable dtTE;
int iNum = 0;
public static string st,ap,d;
public System.Windows.Forms.Timer ti = new System.Windows.Forms.Timer();
// Builds the cover page; only designer components here — data is loaded
// in CoverPage_Load.
public CoverPage()
{
    InitializeComponent();
}
// Timer callback: on every tick, rotate the "today's events" banner to the
// next scheduled event.
private void ti_Tick(object sender, EventArgs e)
{
    checkTodayEvents();
}
// Cycles lblToday through today's scheduled events, one per timer tick,
// wrapping back to the first event after the last.
private void checkTodayEvents()
{
    // Nothing scheduled today, or the index ran past the table: leave the label alone.
    if (dtTE.Rows.Count <= 0 || dtTE.Rows.Count < iNum)
    {
        return;
    }
    string eventName = dtTE.Rows[iNum].ItemArray[1].ToString();
    lblToday.Text = eventName + " Is scheduled to today!!!";
    iNum++;
    if (iNum == dtTE.Rows.Count)
    {
        iNum = 0;   // wrap around and start over
    }
}
private void CoverPage_Load(object sender, EventArgs e)
{
    // Admins additionally get the "manage users" link.
    if (Properties.Settings.Default.UserType == "Admin")
        lnkManageusers.Visible = true;
    lblUserName.Text = Properties.Settings.Default.User.ToString();
    // Default window: yesterday through one month ahead.
    dtpEn.Value = DateTime.Today.AddMonths(+1);
    dtpSt.Value = DateTime.Today.AddDays(-1);
    LoadEvents(dtpSt.Value, dtpEn.Value);
    if (dtTE.Rows.Count == 0)
    {
        lblToday.Text = "No any event scheduled to today";
    }
    else
    {
        // Rotate the banner through today's events every five seconds.
        ti.Tick += ti_Tick;
        ti.Interval = 5000;
        ti.Enabled = true;
    }
}
#region Load Events
// Pulls every schedule row inside [dtSt, dtEn], then splits the result into
// the "upcoming" grid (all rows) and the "today only" grid / banner table.
private void LoadEvents(DateTime dtSt, DateTime dtEn)
{
    da = new SqlDataAdapter();
    da.SelectCommand = new SqlCommand("SELECT Date,Schedule,Remarks,Time,Venue FROM Schedule WHERE Date >= @stDt AND Date <=@enDt", con);
    da.SelectCommand.Parameters.AddWithValue("@stDt", dtSt);
    da.SelectCommand.Parameters.AddWithValue("@enDt", dtEn);
    DataTable dt = new DataTable();
    try
    {
        con.Open();
        da.Fill(dt);
    }
    finally
    {
        // BUG FIX: an exception during Fill used to leave the shared
        // connection open, breaking every later query.
        con.Close();
    }
    // (Removed two dead `new DataTable()` allocations that were immediately
    // overwritten by Clone().)
    DataTable dtUE = dt.Clone();
    dtTE = dt.Clone();
    foreach (DataRow row in dt.Rows)
    {
        if (Convert.ToDateTime(row.ItemArray[0]).ToShortDateString() == DateTime.Today.ToShortDateString())
        {
            dtTE.ImportRow(row);
        }
        dtUE.ImportRow(row);
    }
    dataGridView3.DataSource = dtUE;
    dataGridView2.DataSource = dtTE;
}
#endregion
// When this window gains focus, drop TopMost so other windows may cover it
// again (child windows push it TopMost while they are open).
private void CoverPage_Activated(object sender, EventArgs e)
{
    this.TopMost = false;
}
// Designer-wired handler for the end-date picker; intentionally empty —
// events reload only when the refresh button is clicked (button9_Click).
private void dtpEn_ValueChanged(object sender, EventArgs e)
{
}
private void linkLabel1_LinkClicked(object sender, LinkLabelLinkClickedEventArgs e)
{
    // Open the password-reset dialog.
    new ResetPassword().Show();
}
// Designer-wired handler for the start-date picker; intentionally empty —
// events reload only when the refresh button is clicked (button9_Click).
private void dtpSt_ValueChanged(object sender, EventArgs e)
{
}
// Closing the cover page also tears down the hidden login shell, then
// disposes this form.
private void CoverPage_FormClosing(object sender, FormClosingEventArgs e)
{
    LoginTemp loginShell = (LoginTemp)Application.OpenForms["LoginTemp"];
    if (loginShell == null)
    {
        this.Dispose();
        return;
    }
    loginShell.Close();
    loginShell.Dispose();
    this.Dispose();
}
// Refresh: reload the events grids for the currently selected date range.
private void button9_Click(object sender, EventArgs e)
{
LoadEvents(dtpSt.Value, dtpEn.Value);
}
// Bring the ManageUsers window to front if already open; otherwise create it.
private void lnkManageusers_LinkClicked(object sender, LinkLabelLinkClickedEventArgs e)
{
    ManageUsers existing = (ManageUsers)Application.OpenForms["ManageUsers"];
    if (existing == null)
    {
        new ManageUsers().Show();
    }
    else
    {
        existing.TopMost = true;
    }
}
// Opens a fresh Login window (non-modal).
private void linkLabel2_LinkClicked(object sender, LinkLabelLinkClickedEventArgs e)
{
    new Login().Show();
}
// Bring the Achivements window to front if already open; otherwise create it.
private void btnAchvmnt_Click(object sender, EventArgs e)
{
    // Fixed lookup key: Application.OpenForms indexes forms by Name (which
    // defaults to the class name), and viewBestSprtRecords_FormClosing in
    // this project looks the form up as "Achivements". The old key "Record"
    // never matched, so an already-open window was re-created instead of
    // being brought forward.
    Achivements rc = (Achivements)Application.OpenForms["Achivements"];
    if (rc != null)
    {
        rc.TopMost = true;
    }
    else
    {
        Achivements rcn = new Achivements();
        rcn.Show();
    }
}
// Bring the Students window to front if already open; otherwise create it.
private void btnStudents_Click(object sender, EventArgs e)
{
    Students existing = (Students)Application.OpenForms["Students"];
    if (existing == null)
    {
        new Students().Show();
    }
    else
    {
        existing.TopMost = true;
    }
}
// Bring the Staff window to front if already open; otherwise create it.
private void btnStaff_Click(object sender, EventArgs e)
{
    Staff existing = (Staff)Application.OpenForms["Staff"];
    if (existing == null)
    {
        new Staff().Show();
    }
    else
    {
        existing.TopMost = true;
    }
}
// Bring the Inventories window to front if already open; otherwise create it.
private void btnInventory_Click(object sender, EventArgs e)
{
    Inventories existing = (Inventories)Application.OpenForms["Inventories"];
    if (existing == null)
    {
        new Inventories().Show();
    }
    else
    {
        existing.TopMost = true;
    }
}
// Bring the Schedule window to front if already open; otherwise create it.
private void btnSchedule_Click(object sender, EventArgs e)
{
    Schedule existing = (Schedule)Application.OpenForms["Schedule"];
    if (existing == null)
    {
        new Schedule().Show();
    }
    else
    {
        existing.TopMost = true;
    }
}
// Bring the ManageUsers window to front if already open; otherwise create it.
private void btnManageUsers_Click(object sender, EventArgs e)
{
    ManageUsers existing = (ManageUsers)Application.OpenForms["ManageUsers"];
    if (existing == null)
    {
        new ManageUsers().Show();
    }
    else
    {
        existing.TopMost = true;
    }
}
// Bring the Gallery window to front if already open; otherwise create it.
private void btnGallery_Click(object sender, EventArgs e)
{
    Gallery existing = (Gallery)Application.OpenForms["Gallery"];
    if (existing == null)
    {
        new Gallery().Show();
    }
    else
    {
        existing.TopMost = true;
    }
}
// Bring the accounts window (Form1) to front if already open; otherwise create it.
private void btnAccount_Click(object sender, EventArgs e)
{
    Form1 existing = (Form1)Application.OpenForms["Form1"];
    if (existing == null)
    {
        new Form1().Show();
    }
    else
    {
        existing.TopMost = true;
    }
}
}
}
<file_sep>/Software/Viewphoto.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace Software
{
// Full-size viewer for one photo, with next/previous navigation over the
// PictureBox array exposed by the PhotoViewer that launched it.
public partial class Viewphoto : Form
{
    // Index of the currently shown image, seeded from the thumbnail that
    // was clicked in PhotoViewer.
    int num = PhotoViewer.ind, i;
    PictureBox[] p = PhotoViewer.pic;
    int len = PhotoViewer.pic.Length;

    public Viewphoto()
    {
        InitializeComponent();
        i = num;
    }

    // "Next": step forward through the album; at the end, back the index
    // off the last slot and disable forward navigation.
    private void button2_Click(object sender, EventArgs e)
    {
        if (num < len - 1)
        {
            num = num + 1;
            pictureBox1.Image = PhotoViewer.pic[num].Image;
        }
        else
        {
            if (num == len - 1)
            {
                num = len - 2;
            }
            button2.Enabled = false;
            button1.Enabled = true;
        }
    }

    // "Previous": show the current image, then step the index back; at the
    // start, clamp to the first slot and disable backward navigation.
    private void button1_Click(object sender, EventArgs e)
    {
        if (num > -1)
        {
            pictureBox1.Image = PhotoViewer.pic[num].Image;
            num = num - 1;
        }
        else
        {
            if (num == -1)
            {
                num = 0;
            }
            button1.Enabled = false;
            button2.Enabled = true;
        }
    }

    // Show the image the viewer was opened on.
    private void Viewphoto_Load(object sender, EventArgs e)
    {
        pictureBox1.Image = PhotoViewer.pic[num].Image;
    }
}
}
<file_sep>/Software/UpdateSchedule.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
// Editor for a single Schedule row, identified by its SId primary key.
public partial class UpdateSchedule : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);
    SqlDataAdapter da;
    DataTable dt;
    // Primary key (SId) of the row being edited.
    int id;

    public UpdateSchedule(int Id)
    {
        InitializeComponent();
        this.id = Id;
    }

    // Load the selected schedule row into the edit fields.
    private void UpdateRecord_Load(object sender, EventArgs e)
    {
        try
        {
            con.Open();
            da = new SqlDataAdapter();
            da.SelectCommand = new SqlCommand("SELECT*FROM Schedule WHERE SId=@id", con);
            da.SelectCommand.Parameters.AddWithValue("@id", id);
            dt = new DataTable();
            da.Fill(dt);
            // Guard: the row may have been deleted since the list was shown;
            // the original indexed Rows[0] unconditionally and crashed with
            // the connection still open.
            if (dt.Rows.Count > 0)
            {
                txtDate.Text = Convert.ToDateTime(dt.Rows[0].ItemArray[1].ToString()).ToShortDateString();
                txtSchedule.Text = dt.Rows[0].ItemArray[2].ToString();
                txtRemarks.Text = dt.Rows[0].ItemArray[3].ToString();
                txtTime.Text = dt.Rows[0].ItemArray[4].ToString();
                txtVenue.Text = dt.Rows[0].ItemArray[5].ToString();
            }
        }
        finally
        {
            // Always release the shared static connection so later opens succeed.
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
    }

    private void button3_Click(object sender, EventArgs e)
    {
    }
    private void button4_Click(object sender, EventArgs e)
    {
    }
    private void button2_Click(object sender, EventArgs e)
    {
    }
    private void panel7_Paint(object sender, PaintEventArgs e)
    {
    }

    private void button1_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // Save the edited values back to the Schedule row.
    private void btnAddStaff_Click(object sender, EventArgs e)
    {
        try
        {
            con.Open();
            SqlCommand myCommand = new SqlCommand("UPDATE Schedule SET Date=@Date,Schedule=@Schedule,Remarks=@Remarks,Time=@Time,Venue=@Venue WHERE SId=@Id", con);
            myCommand.Parameters.AddWithValue("@Date", txtDate.Text.ToString());
            myCommand.Parameters.AddWithValue("@Schedule", txtSchedule.Text.ToString());
            myCommand.Parameters.AddWithValue("@Remarks", txtRemarks.Text.ToString());
            myCommand.Parameters.AddWithValue("@Time", txtTime.Text.ToString());
            myCommand.Parameters.AddWithValue("@Venue", txtVenue.Text.ToString());
            myCommand.Parameters.AddWithValue("@Id", id);
            myCommand.ExecuteNonQuery();
            MessageBox.Show("Successfully Updated!!!");
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message);
        }
        finally
        {
            // The original only closed on success, leaving the connection open
            // after a failed update and breaking every subsequent con.Open().
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
    }

    // Delete the row after confirmation.
    private void button5_Click(object sender, EventArgs e)
    {
        if (MessageBox.Show("Are you sure???", "Delete", MessageBoxButtons.YesNo, MessageBoxIcon.Question) == DialogResult.Yes)
        {
            try
            {
                con.Open();
                SqlCommand myCommand = new SqlCommand("DELETE FROM Schedule WHERE SId=@Id", con);
                myCommand.Parameters.AddWithValue("@Id", id);
                myCommand.ExecuteNonQuery();
                MessageBox.Show("Record is deleted!!");
            }
            finally
            {
                if (con.State != ConnectionState.Closed)
                    con.Close();
            }
        }
    }

    // Refresh (or reopen) the parent Schedule window when this editor closes.
    private void UpdateRecord_FormClosing(object sender, FormClosingEventArgs e)
    {
        Schedule sc = (Schedule)Application.OpenForms["Schedule"];
        if (sc != null)
        {
            sc.TopMost = true;
            sc.LoadSchedule(sc.dtpSt.Value, sc.dtpEn.Value);
        }
        else
        {
            Schedule s = new Schedule();
            s.Show();
        }
    }

    private void UpdateRecord_Activated(object sender, EventArgs e)
    {
        this.TopMost = false;
    }
}
}
<file_sep>/Software/viewBestSprtRecords.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
// Read-only viewer for best sport records: pick an event, see holder/record/year.
public partial class viewBestSprtRecords : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);

    public viewBestSprtRecords()
    {
        InitializeComponent();
    }

    // Fill the event combo box with every event that has a best record.
    private void viewBestSprtRecords_Load(object sender, EventArgs e)
    {
        DataTable dt = new DataTable();
        try
        {
            con.Open();
            SqlDataAdapter da = new SqlDataAdapter();
            // Dropped the stray AddWithValue("@event", ...) the original
            // attached here: this query declares no parameters, so the value
            // was never used.
            da.SelectCommand = new SqlCommand("SELECT Event FROM BestRecord", con);
            da.Fill(dt);
        }
        finally
        {
            // Release the shared connection even if the query throws.
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
        if (dt.Rows.Count > 0)
        {
            foreach (DataRow row in dt.Rows)
            {
                comboBox1.Items.Add(row.ItemArray[0].ToString().Trim());
            }
        }
        else
        {
            MessageBox.Show("No records in database");
        }
    }

    // Show the record holder / value / year for the chosen event.
    private void comboBox1_SelectedIndexChanged(object sender, EventArgs e)
    {
        DataTable dtt = new DataTable();
        try
        {
            con.Open();
            SqlDataAdapter daa = new SqlDataAdapter();
            daa.SelectCommand = new SqlCommand("SELECT*FROM BestRecord WHERE Event=@event", con);
            daa.SelectCommand.Parameters.AddWithValue("@event", comboBox1.Text.ToString().Trim());
            daa.Fill(dtt);
            // Guard: the original indexed Rows[0] unconditionally and threw
            // when the selected event no longer had a row.
            if (dtt.Rows.Count > 0)
            {
                txtOwner.Text = dtt.Rows[0].ItemArray[2].ToString();
                txtRecord.Text = dtt.Rows[0].ItemArray[3].ToString();
                txtYear.Text = dtt.Rows[0].ItemArray[4].ToString();
            }
        }
        finally
        {
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
    }

    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // Bring the parent Achivements window back to front on close.
    private void viewBestSprtRecords_FormClosing(object sender, FormClosingEventArgs e)
    {
        Achivements ac = (Achivements)Application.OpenForms["Achivements"];
        if (ac != null)
        {
            ac.TopMost = true;
        }
    }
}
}
<file_sep>/Software/AddInventory.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
// Dialog for inserting a new inventory item.
public partial class AddInventory : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);
    SqlDataAdapter da;

    public AddInventory()
    {
        InitializeComponent();
    }

    private void btnClose_Click(object sender, EventArgs e)
    {
    }
    private void btnAdd_Click(object sender, EventArgs e)
    {
    }

    private void button1_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // Insert a new inventory row from the form fields.
    private void button2_Click(object sender, EventArgs e)
    {
        try
        {
            con.Open();
            da = new SqlDataAdapter();
            da.InsertCommand = new SqlCommand("INSERT INTO Inventory (Item,CurrentTotal,New,Damaged,Disposed) VALUES (@Itm,@crntTotal,@New,@Damaged,@Disposed)", con);
            da.InsertCommand.Parameters.AddWithValue("@Itm", txtItem.Text.ToString());
            // Convert.ToInt16 throws on non-numeric input; the original had no
            // handler, so that unhandled exception also left the shared
            // connection open. Report the problem and always close instead.
            da.InsertCommand.Parameters.AddWithValue("@crntTotal", Convert.ToInt16(txtCurrentTotal.Text));
            da.InsertCommand.Parameters.AddWithValue("@New", Convert.ToInt16(txtNew.Text));
            da.InsertCommand.Parameters.AddWithValue("@Damaged", Convert.ToInt16(txtDamaged.Text));
            da.InsertCommand.Parameters.AddWithValue("@Disposed", Convert.ToInt16(txtDispoced.Text));
            da.InsertCommand.ExecuteNonQuery();
            MessageBox.Show("Done");
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message, "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
        }
        finally
        {
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
    }
}
}
<file_sep>/Software/ReportStaff.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
// Record-by-record browser over the Staff table with first/prev/next/last
// navigation.
public partial class ReportStaff : Form
{
    static string constr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(constr);
    DataTable dt;
    SqlDataAdapter da;
    // Index of the staff row currently shown.
    int pos = 0;
    DataRow row;

    public ReportStaff()
    {
        InitializeComponent();
    }

    // Load every staff row once, then show the first record.
    private void ReportStaff_Load(object sender, EventArgs e)
    {
        da = new SqlDataAdapter();
        da.SelectCommand = new SqlCommand("SELECT*FROM Staff", con);
        dt = new DataTable();
        da.Fill(dt);
        con.Close();
        navStaffDetails();
    }

    // Bind the row at 'pos' to the detail fields.
    private void navStaffDetails()
    {
        if (dt.Rows.Count == 0)
        {
            MessageBox.Show("No records to Show");
            return;
        }
        row = dt.Rows[pos];
        lblNumber.Text = (pos + 1).ToString() + " Of " + dt.Rows.Count.ToString();
        string fullName = row[1].ToString().Trim()
            + " " + row[2].ToString().Trim()
            + " " + row[3].ToString().Trim()
            + " " + row[4].ToString().Trim();
        txtName.Text = fullName;
        txtStaffId.Text = row[5].ToString();
        txtPosition.Text = row[6].ToString();
        txtAddress.Text = row[7].ToString();
        txtContactNo.Text = row[8].ToString();
        txtSubject.Text = row[9].ToString();
    }

    private void button1_Click(object sender, EventArgs e)
    {
    }
    private void btnFirst_Click(object sender, EventArgs e)
    {
    }
    private void btnPrev_Click(object sender, EventArgs e)
    {
    }
    private void btnNext_Click(object sender, EventArgs e)
    {
    }
    private void btnLast_Click(object sender, EventArgs e)
    {
    }

    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // "First" navigation.
    private void btnAllDetails_Click(object sender, EventArgs e)
    {
        pos = 0;
        navStaffDetails();
    }

    // "Previous" navigation.
    private void btnAddStaff_Click(object sender, EventArgs e)
    {
        if (pos != 0)
        {
            pos--;
            navStaffDetails();
        }
    }

    // "Next" navigation.
    private void btnUpdate_Click(object sender, EventArgs e)
    {
        if (pos != dt.Rows.Count - 1)
        {
            pos++;
            navStaffDetails();
        }
    }

    // "Last" navigation.
    private void button1_Click_1(object sender, EventArgs e)
    {
        pos = dt.Rows.Count - 1;
        navStaffDetails();
    }
}
}
<file_sep>/Software/Gallery.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
using System.IO;
namespace Software
{
// Album overview: builds a thumbnail (PictureBox) plus caption button pair
// for every album row, laid out three per row.
public partial class Gallery : Form
{
    // Album name selected by the user; read by PhotoViewer.
    public static string name;
    Button[] lbl;
    public static Image ig;
    static string conStr = Properties.Settings.Default.GalleryConStr;
    static SqlConnection con = new SqlConnection(conStr);

    public Gallery()
    {
        InitializeComponent();
    }

    private void Gallery_Load(object sender, EventArgs e)
    {
        try
        {
            con.Open();
            SqlCommand myCommand = new SqlCommand("Select * from Albums", con);
            SqlDataAdapter myAdapter = new SqlDataAdapter(myCommand);
            DataTable myTable = new DataTable();
            myAdapter.Fill(myTable);
            albumsDataGridView.DataSource = myTable;
            int x = 165, y = 35, kk = 1;
            // Rows.Count - 1 skips the grid's trailing "new row" placeholder.
            PictureBox[] pic = new PictureBox[albumsDataGridView.Rows.Count - 1];
            lbl = new Button[albumsDataGridView.Rows.Count - 1];
            for (int i = 0; i < albumsDataGridView.Rows.Count - 1; i++)
            {
                string RowType = albumsDataGridView.Rows[i].Cells[0].Value.ToString();
                // Parameterized query: the original concatenated the album
                // name into the SQL text, which breaks on names containing a
                // quote and is an injection risk.
                SqlCommand command = new SqlCommand("select Image from Albums where AlbumName=@albumName", con);
                command.Parameters.AddWithValue("@albumName", RowType.Trim());
                byte[] image = (byte[])command.ExecuteScalar();
                // NOTE: the stream must stay open for the Bitmap's lifetime
                // (GDI+ requirement), so it is deliberately not disposed here.
                MemoryStream stream = new MemoryStream();
                stream.Write(image, 0, image.Length);
                Bitmap bitmap = new Bitmap(stream);
                pic[i] = new PictureBox();
                lbl[i] = new Button();
                pic[i].Image = bitmap;
                lbl[i].Click += new EventHandler(lbl_Click);
                this.Controls.Add(pic[i]);
                this.Controls.Add(lbl[i]);
                pic[i].BorderStyle = BorderStyle.Fixed3D;
                pic[i].Location = new Point(x, y);
                lbl[i].Location = new Point(x + 10, y + 140);
                pic[i].Size = new Size(110, 130);
                kk++;
                pic[i].SizeMode = System.Windows.Forms.PictureBoxSizeMode.StretchImage;
                lbl[i].Text = RowType;
                lbl[i].Name = RowType;
                if (kk < 4)
                {
                    x += 152;
                }
                else
                {
                    kk = 0;
                    x = 13;
                    y += 163;
                }
            }
        }
        finally
        {
            // The original left the connection open if any query threw mid-loop.
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
    }

    private void button1_Click(object sender, EventArgs e)
    {
        AddPhoto s = new AddPhoto();
        s.Show();
    }

    private void form_FormClosed(object sender, FormClosedEventArgs e)
    {
        this.Close();
    }

    // Remember the chosen album and open the photo viewer for it.
    private void lbl_Click(object sender, EventArgs e)
    {
        Button vb = (Button)sender;
        name = vb.Text;
        PhotoViewer s = new PhotoViewer();
        s.FormClosed += new FormClosedEventHandler(form_FormClosed);
        s.Show();
        this.Hide();
    }
}
}
<file_sep>/Software/AddStudent.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
// Multi-page form for registering a student plus their family members.
public partial class AddStudent : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);
    SqlDataAdapter da, daf;
    // Selected option texts; default "N/A" when neither radio button is picked.
    public static string hostelfacility = "N/A";
    public static string mt = "N/A";

    public AddStudent()
    {
        InitializeComponent();
        this.tabPage1.Text = "page 1";
        this.tabPage2.Text = "page 2";
        this.tabPage3.Text = "page 3";
        this.tabPage5.Text = "page 4";
        this.tabPage6.Text = "page 5";
    }

    #region Add Details
    // Insert the student record plus the first family-member row.
    private void button1_Click(object sender, EventArgs e)
    {
        if (radioButton1.Checked)
            hostelfacility = radioButton1.Text.ToString();
        else if (radioButton2.Checked)
            hostelfacility = radioButton2.Text.ToString();
        if (radioButton6.Checked)
            mt = radioButton6.Text.ToString();
        else if (radioButton5.Checked)
            mt = radioButton5.Text.ToString();
        try
        {
            con.Open();
            da = new SqlDataAdapter();
            daf = new SqlDataAdapter();
            da.InsertCommand = new SqlCommand("INSERT INTO StudentDetails (Title,FirstName,LastName,Religion,Language,RegNo,PermanantAddress,Gender,NIC_No,ContactNumber,DateofBirth,AcedemicCourse,AcedemicYear,Hostel,School,AL_Results,IndoorGames,Athletics,MajorGames,Cultural,District,EmailAddress,FacebookId,MedicalTreatment,Status)VALUES (@title,@first_name,@last_name,@religion,@mother_language,@reg_no,@permenent_address,@gender,@nic_num,@contact_num,@date_of_birth,@academic_course,@academic_year,@hostel_facilities,@school_attended,@al_results,@indoor_game,@athletics,@major_games,@cultural,@district,@email_address,@facebook_id,@medical_treatment,@status)", con);
            da.InsertCommand.Parameters.AddWithValue("@title", comboBox7.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@first_name", txtFirstName.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@last_name", txtLastName.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@religion", comboBox8.SelectedItem.ToString());
            da.InsertCommand.Parameters.AddWithValue("@mother_language", comboBox9.SelectedItem.ToString());
            da.InsertCommand.Parameters.AddWithValue("@reg_no", txtRegNo.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@permenent_address", txtAddress.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@gender", comboBox10.SelectedItem.ToString());
            da.InsertCommand.Parameters.AddWithValue("@nic_num", txtNIC.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@contact_num", txtContactNo.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@date_of_birth", dateTimePicker1.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@academic_course", comboBox11.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@academic_year", comboBox12.SelectedItem.ToString());
            da.InsertCommand.Parameters.AddWithValue("@hostel_facilities", hostelfacility);
            da.InsertCommand.Parameters.AddWithValue("@school_attended", textBox1.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@al_results", comboBox1.SelectedItem.ToString() + comboBox5.SelectedItem.ToString() + comboBox3.SelectedItem.ToString() + comboBox4.SelectedItem.ToString() + comboBox2.SelectedItem.ToString() + comboBox6.SelectedItem.ToString());
            da.InsertCommand.Parameters.AddWithValue("@indoor_game", richTextBox1.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@athletics", richTextBox2.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@major_games", richTextBox3.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@cultural", richTextBox4.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@district", textBox59.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@email_address", textBox57.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@facebook_id", textBox56.Text.ToString());
            da.InsertCommand.Parameters.AddWithValue("@medical_treatment", mt);
            da.InsertCommand.Parameters.AddWithValue("@status", "Following");
            da.InsertCommand.ExecuteNonQuery();
            daf.InsertCommand = new SqlCommand("INSERT INTO FamilyDetails (RegNo,Name,Age,Relationship,Job,Salery) VALUES(@reg_no,@name,@age,@relationship,@job,@salary)", con);
            daf.InsertCommand.Parameters.AddWithValue("@reg_no", txtRegNo.Text.ToString());
            daf.InsertCommand.Parameters.AddWithValue("@name", txtNameFam.Text.ToString());
            daf.InsertCommand.Parameters.AddWithValue("@age", txtAgeFam.Text.ToString());
            daf.InsertCommand.Parameters.AddWithValue("@relationship", cmbRelation.Text.ToString());
            daf.InsertCommand.Parameters.AddWithValue("@job", txtJobFam.Text.ToString());
            daf.InsertCommand.Parameters.AddWithValue("@salary", txtSalaryFam.Text.ToString());
            // BUG FIX: the original built this family-details command but
            // never executed it, so the first family member was silently
            // dropped (the "Add More" button, button2_Click, does execute).
            daf.InsertCommand.ExecuteNonQuery();
            MessageBox.Show("Successfully added!!!");
            Reset();
        }
        catch (Exception ex)
        { MessageBox.Show("Input Error", "Error message", MessageBoxButtons.OK, MessageBoxIcon.Error); }
        finally
        {
            // Release the shared connection even when the insert fails; the
            // original left it open on exception, breaking later opens.
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
    }
    #endregion

    #region Add More Relations
    // Insert an additional family-member row for the same RegNo.
    private void button2_Click(object sender, EventArgs e)
    {
        try
        {
            // daf was only created inside the main save handler; create it
            // here too so adding a relation before saving the student does
            // not throw a NullReferenceException.
            if (daf == null)
                daf = new SqlDataAdapter();
            daf.InsertCommand = new SqlCommand("INSERT INTO FamilyDetails (RegNo,Name,Age,Relationship,Job,Salery) VALUES(@reg_no,@name,@age,@relationship,@job,@salary)", con);
            daf.InsertCommand.Parameters.AddWithValue("@reg_no", txtRegNo.Text.ToString());
            daf.InsertCommand.Parameters.AddWithValue("@name", txtNameFam.Text.ToString());
            daf.InsertCommand.Parameters.AddWithValue("@age", txtAgeFam.Text.ToString());
            daf.InsertCommand.Parameters.AddWithValue("@relationship", cmbRelation.Text.ToString());
            daf.InsertCommand.Parameters.AddWithValue("@job", txtJobFam.Text.ToString());
            daf.InsertCommand.Parameters.AddWithValue("@salary", txtSalaryFam.Text.ToString());
            con.Open();
            daf.InsertCommand.ExecuteNonQuery();
            txtNameFam.Text = "";
            txtAgeFam.Text = "";
            cmbRelation.Text = "";
            txtJobFam.Text = "";
            txtSalaryFam.Text = "";
        }
        catch (Exception ex)
        { MessageBox.Show(ex.Message); }
        finally
        {
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
    }
    #endregion

    // Hostel "yes": reveal the room-number field.
    private void radioButton1_CheckedChanged(object sender, EventArgs e)
    {
        txtRoomNo.Visible = true;
        lblRoomNo.Visible = true;
    }

    // Medical treatment "yes": reveal the details field.
    private void radioButton6_CheckedChanged(object sender, EventArgs e)
    {
        textBox55.Visible = true;
    }

    // Clear every input field back to its initial state.
    public void Reset()
    {
        comboBox2.Text = "";
        comboBox7.Text = "";
        txtFirstName.Text = "";
        txtLastName.Text = "";
        comboBox8.Text = "";
        comboBox9.Text = "";
        comboBox10.Text = "";
        txtAddress.Text = "";
        txtNIC.Text = "";
        txtContactNo.Text = "";
        dateTimePicker1.Text = "";
        comboBox12.Text = "";
        comboBox11.Text = "";
        txtRegNo.Text = "";
        radioButton1.Checked = false;
        radioButton2.Checked = false;
        radioButton5.Checked = false;
        radioButton6.Checked = false;
        richTextBox1.Text = "";
        richTextBox2.Text = "";
        richTextBox3.Text = "";
        richTextBox4.Text = "";
        textBox57.Text = "";
        textBox55.Visible = false;
        textBox56.Text = "";
        dateTimePicker1.Text = "";
        txtRoomNo.Visible = false;
        lblRoomNo.Visible = false;
        comboBox1.Text = "";
        comboBox3.Text = "";
        comboBox4.Text = "";
        comboBox5.Text = "";
        comboBox6.Text = "";
        textBox1.Text = "";
        txtNameFam.Text = "";
        txtAgeFam.Text = "";
        cmbRelation.Text = "";
        txtJobFam.Text = "";
        txtSalaryFam.Text = "";
        textBox59.Text = "";
    }

    private void button3_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // Returns true when the index portion of a RegNo (chars after position 9)
    // contains letters (i.e. is malformed); the caller flags that as an error.
    private Boolean Check_Index(string s)
    {
        string gh = "";
        bool a = false;
        if (s.Length == 11)
            gh = s.Substring(9, 2);
        else
            gh = s.Substring(9, 3);
        foreach (char c in gh)
        {
            if (char.IsLetter(c))
            {
                a = true;
            }
            else
            {
                a = false;
            }
        }
        return a;
    }

    // True when the RegNo course code (chars 5..7) is one of the known courses.
    private Boolean Check_course(string s)
    {
        bool a = false;
        if (s.Substring(5, 3) == "ICT" || s.Substring(5, 3) == "ASB" || s.Substring(5, 3) == "ASP")
        {
            a = true;
        }
        else
        {
            a = false;
        }
        return a;
    }

    // True when the RegNo year prefix is a supported intake year (2007-2016).
    private Boolean Check_year(string s)
    {
        bool a = false;
        if (s.Substring(0, 4) == "2007" || s.Substring(0, 4) == "2008" || s.Substring(0, 4) == "2009" || s.Substring(0, 4) == "2010" || s.Substring(0, 4) == "2011" || s.Substring(0, 4) == "2012" || s.Substring(0, 4) == "2013" || s.Substring(0, 4) == "2014" || s.Substring(0, 4) == "2015" || s.Substring(0, 4) == "2016")
        {
            a = true;
        }
        else
        {
            a = false;
        }
        return a;
    }

    // NOTE(review): not referenced by the visible validators; with '||' it
    // accepts a string that has either separator, not both — presumably
    // '&&' was intended. Left unchanged because no caller is visible.
    private Boolean Check_format(string s)
    {
        bool a = false;
        if (s.Substring(4, 1) == "/" || s.Substring(8, 1) == "/")
        {
            a = true;
        }
        else
        {
            a = false;
        }
        return a;
    }

    // NOTE(review): not referenced by the visible validators.
    private Boolean Check_subject(string s)
    {
        bool c = false;
        for (int i = 3; i < s.Length; i++)
        {
            if (Char.IsNumber(s[i]))
            {
                c = true;
            }
            else
            {
                c = false;
                break;
            }
        }
        return c;
    }

    // Reject a registration number that already exists in the database.
    private void txtRegNo_Leave(object sender, EventArgs e)
    {
        SqlDataReader rdr = null;
        try
        {
            con.Open();
            SqlCommand cmd = new SqlCommand("SELECT RegNo FROM StudentDetails where RegNo=@find", con);
            cmd.Parameters.AddWithValue("@find", txtRegNo.Text);
            rdr = cmd.ExecuteReader();
            if (rdr.Read())
            {
                MessageBox.Show("Registration Number Already Exists", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                txtRegNo.Text = "";
                txtRegNo.Focus();
            }
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message, "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
        }
        finally
        {
            // The original returned early on a duplicate without closing the
            // connection, leaving the shared static connection open.
            if (rdr != null)
            {
                rdr.Close();
            }
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
    }

    // Validate the (optional) e-mail address format.
    private void textBox57_Validating(object sender, CancelEventArgs e)
    {
        System.Text.RegularExpressions.Regex rEMail = new System.Text.RegularExpressions.Regex(@"^[a-zA-Z][\w\.-]{2,28}[a-zA-Z0-9]@[a-zA-Z0-9][\w\.-]*[a-zA-Z0-9]\.[a-zA-Z][a-zA-Z\.]*[a-zA-Z]$");
        if (textBox57.Text.Length > 0)
        {
            if (!rEMail.IsMatch(textBox57.Text))
            {
                errorProvider1.SetError(textBox57, "invalid email address");
                textBox57.SelectAll();
                e.Cancel = true;
            }
            else
                errorProvider1.Clear();
        }
        else
        {
            errorProvider1.Clear();
        }
    }

    // Hostel "no": hide the room-number field.
    private void radioButton2_CheckedChanged(object sender, EventArgs e)
    {
        txtRoomNo.Visible = false;
        lblRoomNo.Visible = false;
    }

    // Medical treatment "no": hide the details field.
    private void radioButton5_CheckedChanged(object sender, EventArgs e)
    {
        textBox55.Visible = false;
    }

    private void AddStudent_FormClosing(object sender, FormClosingEventArgs e)
    {
        ReportStudents rs = (ReportStudents)Application.OpenForms["ReportStudents"];
        if (rs != null)
        {
            rs.TopMost = true;
        }
    }

    private void txtRegNo_TextChanged(object sender, EventArgs e)
    {
        btnAddAnother.Enabled = true;
    }

    private void AddStudent_Load(object sender, EventArgs e)
    {
        tabPage1.Text = "Page 1";
        tabPage2.Text = "Page 2";
        tabPage3.Text = "Page 3";
        tabPage5.Text = "Page 4";
        tabPage6.Text = "Page 5";
        btnAddAnother.Enabled = false;
        button2.Enabled = false;
    }

    private void tabPage6_Click(object sender, EventArgs e)
    {
    }

    private void txtNameFam_TextChanged(object sender, EventArgs e)
    {
        button2.Enabled = true;
    }

    private void tabPage5_Click(object sender, EventArgs e)
    {
    }

    // Contact number must be exactly 10 characters with no letters.
    private void txtContactNo_Validating(object sender, CancelEventArgs e)
    {
        string wrd = txtContactNo.Text.ToString();
        Boolean flag = true;
        foreach (char c in wrd)
        {
            if (char.IsLetter(c))
            {
                flag = false;
                break;
            }
        }
        if (txtContactNo.TextLength != 10)
        {
            errorProvider2.SetError(txtContactNo, "enter correct Number ");
            txtContactNo.SelectAll(); e.Cancel = true;
        }
        else if (flag == false)
        {
            errorProvider2.SetError(txtContactNo, "enter digits");
            txtContactNo.SelectAll(); e.Cancel = true;
        }
        else
        {
            errorProvider2.Clear();
        }
    }

    // NIC: 9 digits followed by X or V.
    private void txtNIC_Validating(object sender, CancelEventArgs e)
    {
        if (txtNIC.TextLength != 10)
        {
            errorProvider1.SetError(txtNIC, "Enter only 10 charatcers");
            txtNIC.SelectAll(); e.Cancel = true;
        }
        else if (txtNIC.Text != "")
        {
            Boolean flag = false;
            string s = txtNIC.Text.ToString().Substring(0, 9);
            string t = txtNIC.Text.ToString().Substring(9, 1);
            foreach (char c in s)
            {
                if (char.IsLetter(c))
                {
                    flag = true;
                    break;
                }
            }
            if (flag)
            {
                errorProvider1.SetError(txtNIC, "enter numbers only");
                txtNIC.SelectAll(); e.Cancel = true;
            }
            else
                errorProvider1.Clear();
            // BUG FIX: the original condition (t != "X" || t != "V") is a
            // tautology — it rejected every input including valid X/V — and
            // it set the error on txtRegNo instead of the NIC field.
            // NOTE(review): still case-sensitive; confirm whether lowercase
            // x/v should also be accepted.
            if (t != "X" && t != "V")
            {
                errorProvider1.SetError(txtNIC, "Last character should be X or V");
                txtNIC.SelectAll(); e.Cancel = true;
            }
            else
                errorProvider1.Clear();
        }
        else
            errorProvider1.Clear();
    }

    // RegNo: YYYY/CCC/NNN with a supported year, known course code, and a
    // numeric index portion.
    private void txtRegNo_Validating(object sender, CancelEventArgs e)
    {
        if (txtRegNo.TextLength < 11)
        {
            errorProvider1.SetError(txtRegNo, "enter corect format \ne.g2011/ICT/00");
            txtRegNo.SelectAll(); e.Cancel = true;
        }
        else if (txtRegNo.Text != "")
        {
            string x = txtRegNo.Text;
            if (!Check_year(x))
            {
                errorProvider1.SetError(txtRegNo, "enter year code corectly");
                txtRegNo.SelectAll(); e.Cancel = true;
            }
            if (!Check_course(x))
            {
                errorProvider1.SetError(txtRegNo, "enter course code corectly\n(ICT,ASP,ASB)");
                txtRegNo.SelectAll(); e.Cancel = true;
            }
            if (Check_Index(x))
            {
                errorProvider1.SetError(txtRegNo, "enter index number correctly");
                txtRegNo.SelectAll(); e.Cancel = true;
            }
            else
                errorProvider1.Clear();
        }
        else
            errorProvider1.Clear();
    }
}
}
<file_sep>/Software/Students.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
// Student list window: shows a summary grid with live name/course filtering
// and launches the add/update/delete/report child windows.
public partial class Students : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    public static SqlConnection con = new SqlConnection(conStr);
    SqlDataAdapter da;
    DataView dv;
    DataTable dt;

    public Students()
    {
        InitializeComponent();
    }

    #region Form Load
    // Load the student summary grid and keep a DataView for live filtering.
    private void Students_Load(object sender, EventArgs e)
    {
        if (Login.usr == 1)
        {
        }
        else if (Login.usr == 2)
        {
        }
        con.Open();
        da = new SqlDataAdapter("SELECT Title,FirstName,LastName,RegNo,AcedemicCourse,ContactNumber FROM StudentDetails", con);
        dt = new DataTable();
        da.Fill(dt);
        dv = dt.DefaultView;
        dataGridView1.DataSource = dv;
        dataGridView1.Columns[0].Width = 50;
        con.Close();
    }
    #endregion

    private void button2_Click(object sender, EventArgs e)
    {
    }
    private void btnAdd_Click(object sender, EventArgs e)
    {
    }
    private void btnUpdate_Click(object sender, EventArgs e)
    {
    }
    private void BtnRemove_Click(object sender, EventArgs e)
    {
    }

    private void button3_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    private void form_FormClosed(object sender, FormClosedEventArgs e)
    {
        this.Dispose();
    }

    #region Auto Search
    // Live filter: every space-separated word must match first or last name.
    private void textBox1_KeyUp(object sender, KeyEventArgs e)
    {
        List<string> clauses = new List<string>();
        foreach (string word in textBox1.Text.Split(' '))
        {
            clauses.Add("(FirstName LIKE '%" + word + "%' OR LastName LIKE '%" + word + "%')");
        }
        dv.RowFilter = string.Join(" AND ", clauses.ToArray());
    }
    #endregion

    // Bring the cover page back to front when this list closes.
    private void Students_FormClosing(object sender, FormClosingEventArgs e)
    {
        CoverPage cp = (CoverPage)Application.OpenForms["CoverPage"];
        if (cp != null)
        {
            cp.TopMost = true;
        }
    }

    private void button5_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // Open (or focus) the student report window.
    private void button6_Click(object sender, EventArgs e)
    {
        ReportStudents existing = (ReportStudents)Application.OpenForms["ReportStudents"];
        if (existing == null)
        {
            new ReportStudents().Show();
        }
        else
        {
            existing.TopMost = true;
        }
    }

    // Open (or focus) the add-student window.
    private void button7_Click(object sender, EventArgs e)
    {
        AddStudent existing = (AddStudent)Application.OpenForms["AddStudent"];
        if (existing == null)
        {
            new AddStudent().Show();
        }
        else
        {
            existing.TopMost = true;
        }
    }

    // Open (or focus) the update-student window.
    private void button8_Click(object sender, EventArgs e)
    {
        UpdateStudent existing = (UpdateStudent)Application.OpenForms["UpdateStudent"];
        if (existing == null)
        {
            new UpdateStudent().Show();
        }
        else
        {
            existing.TopMost = true;
        }
    }

    // Open (or focus) the delete-student window.
    private void button2_Click_1(object sender, EventArgs e)
    {
        Delete existing = (Delete)Application.OpenForms["Delete"];
        if (existing == null)
        {
            new Delete().Show();
        }
        else
        {
            existing.TopMost = true;
        }
    }

    private void button1_Click(object sender, EventArgs e)
    {
    }

    // Live filter: every space-separated word must match name or course.
    private void textBox1_KeyUp_1(object sender, KeyEventArgs e)
    {
        List<string> clauses = new List<string>();
        foreach (string word in textBox1.Text.Split(' '))
        {
            clauses.Add("(FirstName LIKE '%" + word + "%' OR LastName LIKE '%" + word + "%' OR AcedemicCourse LIKE '%" + word + "%')");
        }
        dv.RowFilter = string.Join(" AND ", clauses.ToArray());
    }
}
}
<file_sep>/Software/Delete.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
namespace Software
{
// Deletes a student (and their family rows) selected by registration number.
public partial class Delete : Form
{
    static string conStr = Properties.Settings.Default.MainConString;
    static SqlConnection con = new SqlConnection(conStr);

    public Delete()
    {
        InitializeComponent();
    }

    // Delete the selected student and their family rows after confirmation.
    private void button1_Click(object sender, EventArgs e)
    {
        try
        {
            if (MessageBox.Show("Are you sure???", "Delete", MessageBoxButtons.YesNo, MessageBoxIcon.Question) == DialogResult.Yes)
            {
                con.Open();
                SqlCommand cmd1 = new SqlCommand("DELETE FROM StudentDetails WHERE RegNo=@reg_no", con);
                SqlCommand cmd2 = new SqlCommand("DELETE FROM FamilyDetails WHERE RegNo=@reg_no", con);
                cmd1.Parameters.AddWithValue("@reg_no", comboBox2.Text.ToString());
                cmd2.Parameters.AddWithValue("@reg_no", comboBox2.Text.ToString());
                cmd1.ExecuteNonQuery();
                cmd2.ExecuteNonQuery();
                MessageBox.Show("Record is deleted!!", "confirmation", MessageBoxButtons.OK, MessageBoxIcon.Information);
                Reset();
            }
        }
        catch (Exception ex)
        {
            MessageBox.Show("Input Error", "Error message", MessageBoxButtons.OK, MessageBoxIcon.Error);
        }
        finally
        {
            // The original left the shared connection open when a command threw.
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
    }

    // Populate the registration-number combo box.
    private void Delete_Load(object sender, EventArgs e)
    {
        try
        {
            con.Open();
            SqlCommand myCommand = new SqlCommand("SELECT RegNo FROM StudentDetails", con);
            using (SqlDataReader myReader = myCommand.ExecuteReader())
            {
                while (myReader.Read())
                {
                    comboBox2.Items.Add(myReader.GetValue(0).ToString());
                }
            }
        }
        finally
        {
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
    }

    // Show the selected student's details so the user can confirm the target.
    private void comboBox2_SelectedIndexChanged(object sender, EventArgs e)
    {
        button1.Enabled = true;
        try
        {
            con.Open();
            SqlCommand myCommand = new SqlCommand("SELECT * FROM StudentDetails WHERE RegNo=@reg_no", con);
            myCommand.Parameters.AddWithValue("@reg_no", comboBox2.Text.ToString());
            using (SqlDataReader myReader = myCommand.ExecuteReader())
            {
                while (myReader.Read())
                {
                    // NOTE(review): ordinals assume the StudentDetails column
                    // order starts Title, FirstName, LastName, ... — verify
                    // against the table schema.
                    txtname.Text = myReader.GetValue(0).ToString() + "." + myReader.GetValue(1).ToString() + " " + myReader.GetValue(2).ToString();
                    txtyear.Text = myReader.GetValue(6).ToString();
                    txtadrs.Text = myReader.GetValue(4).ToString();
                    txtgendr.Text = myReader.GetValue(8).ToString();
                    txtnic.Text = myReader.GetValue(7).ToString();
                }
            }
        }
        finally
        {
            if (con.State != ConnectionState.Closed)
                con.Close();
        }
    }

    // Clear the form fields.
    public void Reset()
    {
        comboBox2.Text = "";
        txtname.Text = "";
        txtyear.Text = "";
        txtadrs.Text = "";
        txtgendr.Text = "";
        txtnic.Text = "";
    }

    private void button2_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // Bring the student report window back to front on close.
    private void Delete_FormClosing(object sender, FormClosingEventArgs e)
    {
        ReportStudents rs = (ReportStudents)Application.OpenForms["ReportStudents"];
        if (rs != null)
        {
            rs.TopMost = true;
        }
    }
}
}
| 53e04223b7148af6b8b7f927113ff4acfccde24f | [
"C#"
] | 35 | C# | dchathu30/SWProject | 8c04169962e45fcee6bc00c66487b2ac0ca664c3 | b07b6be4414034b1ed7d08d21d44f6462abd033a |
refs/heads/master | <file_sep>package com.company.algorithm.Find;
import java.util.Scanner;
/**
 * Console exercise: reads three integers and reports their maximum, then
 * optionally repeats the exercise for the minimum of three further integers.
 */
public class MaxFind {
    public static void main(String[] args) {
        Scanner scanner = new Scanner(System.in);

        System.out.println(" 세 정수의 최댓값을 구합니다. ");
        System.out.println(" a의 값 : ");
        int a = scanner.nextInt();
        System.out.println(" b의 값 : ");
        int b = scanner.nextInt();
        System.out.println(" c의 값 : ");
        int c = scanner.nextInt();

        // Same result as the original pairwise comparisons.
        int max = Math.max(a, Math.max(b, c));
        System.out.println("최댓값은 " + max + "입니다.");

        System.out.println("최솟값 구하기를 시작하겠습니다. 계속하시려면 1, 그만하시려면 2를 입력해주세요");
        int continueResult = scanner.nextInt();
        if (continueResult == 1) {
            System.out.println(" 세 정수의 최소값을 구합니다. ");
            System.out.println(" A의 값 : ");
            int A = scanner.nextInt();
            System.out.println(" B의 값 : ");
            int B = scanner.nextInt();
            System.out.println(" C의 값 : ");
            int C = scanner.nextInt();

            int min = Math.min(A, Math.min(B, C));
            System.out.println("최소값은 " + min + "입니다.");
        } else {
            System.out.println("프로그램이 종료 되었습니다.");
        }
    }
}
| 4518a1d9a770ec1682e493b0471a1974cf2ab6de | [
"Java"
] | 1 | Java | tjdans345/JavaAlgorithm | 61a991f130c0fbf454f712cb2b9f5466504c8cec | 2c6802b271929ffb343842947d0fe126deb99b5e |
refs/heads/master | <file_sep>Accumuwinner-Betting-Tips
=========================
Betting Tips Application
<file_sep>package accumuwinner;
import android.app.Activity;
import android.app.LoaderManager.LoaderCallbacks;
import android.content.Loader;
import android.database.Cursor;
import android.os.Bundle;
/**
 * An account creation screen that offers the user the ability to create an account to access
 * the app.
 *
 * Stub implementation: the LoaderCallbacks overrides are wired up but do
 * nothing yet.
 */
public class CreateAccountActivity extends Activity implements LoaderCallbacks<Cursor> {

    // No loader is created yet; returning null means no background load runs.
    @Override
    public Loader<Cursor> onCreateLoader(int id, Bundle args) {
        return null;
    }

    @Override
    public void onLoadFinished(Loader<Cursor> loader, Cursor data) {
    }

    @Override
    public void onLoaderReset(Loader<Cursor> loader) {
    }
}
<file_sep>package accumuwinner;
import android.net.Uri;
import android.os.Bundle;
import android.support.v4.app.ActionBarDrawerToggle;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentActivity;
import android.support.v4.widget.DrawerLayout;
import android.view.LayoutInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.view.WindowManager;
import android.widget.AdapterView;
import android.widget.ListView;
import com.example.accumuwinnerbettingtips.R;
import accumuwinner.fragments.NewsFeed;
import accumuwinner.fragments.NewsFeedSliderActivity;
import accumuwinner.listadapters.DrawerListAdapter;
import accumuwinner.notification.GcmProvider;
/**
 * Root activity: hosts the navigation drawer, swaps content fragments, and
 * kicks off GCM push-notification registration.
 */
public class MainActivity extends FragmentActivity implements NewsFeed.OnFragmentInteractionListener {

    private DrawerLayout mDrawerLayout;
    private ListView mDrawerList;
    private ActionBarDrawerToggle mDrawerToggle;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        this.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_ADJUST_NOTHING);
        this.setContentView(R.layout.startscreen);
        if (savedInstanceState == null) {
            getSupportFragmentManager().beginTransaction()
                    .add(R.id.main_content, new PlaceholderFragment())
                    .commit();
        }

        mDrawerLayout = (DrawerLayout) findViewById(R.id.drawer_layout);
        mDrawerList = (ListView) findViewById(R.id.drawer_list);
        mDrawerList.setAdapter(new DrawerListAdapter(this, R.id.drawer_list));
        mDrawerList.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
                switch (position) {
                    //TODO: Handle click events for each drawer item list
                    default:
                        if (getSupportFragmentManager().findFragmentByTag("NEWS_TAG") != null) {
                            // Bug fix: the show() transaction was built but never
                            // committed, so re-selecting this item did nothing.
                            getSupportFragmentManager().beginTransaction()
                                    .show(NewsFeedSliderActivity.getInstance())
                                    .commit();
                        } else {
                            getSupportFragmentManager().beginTransaction()
                                    .add(R.id.main_content, NewsFeedSliderActivity.getInstance(), "NEWS_TAG")
                                    .commit();
                        }
                }
            }
        });

        mDrawerToggle = new ActionBarDrawerToggle(this, mDrawerLayout,
                R.drawable.ic_drawer, R.string.project_title, R.string.project_title) {
            /** Called when a drawer has settled in a completely closed state. */
            public void onDrawerClosed(View view) {
                super.onDrawerClosed(view);
                invalidateOptionsMenu(); // creates call to onPrepareOptionsMenu()
            }

            /** Called when a drawer has settled in a completely open state. */
            public void onDrawerOpened(View drawerView) {
                super.onDrawerOpened(drawerView);
                invalidateOptionsMenu(); // creates call to onPrepareOptionsMenu()
            }
        };
        // Set the drawer toggle as the DrawerListener
        mDrawerLayout.setDrawerListener(mDrawerToggle);
        getActionBar().setIcon(R.drawable.ic_drawer);
        getActionBar().setHomeButtonEnabled(true);

        // Perform the initialisation check for push notifications
        GcmProvider gcmProvider = new GcmProvider(this);
        if (gcmProvider.checkForGoogleServices()) {
            gcmProvider.registerGcmToken();
        }
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        if (mDrawerToggle.onOptionsItemSelected(item)) {
            return true;
        }
        switch (item.getItemId()) {
            //TODO: Handle click events for each drawer item list
            default:
                // NOTE(review): this adds a fresh news fragment on every menu tap;
                // presumably it should reuse the "NEWS_TAG" instance — confirm.
                getSupportFragmentManager().beginTransaction()
                        .add(R.id.main_content, NewsFeedSliderActivity.getInstance())
                        .commit();
        }
        super.onOptionsItemSelected(item);
        return true;
    }

    @Override
    public void onFragmentInteraction(Uri uri) {
    }

    /**
     * A placeholder fragment containing a simple view.
     */
    public static class PlaceholderFragment extends Fragment {

        public PlaceholderFragment() {
        }

        @Override
        public View onCreateView(LayoutInflater inflater, ViewGroup container,
                                 Bundle savedInstanceState) {
            return inflater.inflate(R.layout.fragment_main, container, false);
        }
    }
}
<file_sep>package accumuwinner.network;
import retrofit.Callback;
import retrofit.RestAdapter;
/**
* Created by mmckillion on 02/12/14.
*/
/**
 * Thin static wrapper around the Retrofit login endpoint.
 */
public class AppRegistration {

    /** Fires an asynchronous login request; the result arrives on {@code cb}. */
    public static void loginUser(String username, String password, Callback<String> cb) {
        IAppRegistration loginService = new RestAdapter.Builder()
                .setEndpoint(NetworkGlobals.SERVER_URL)
                .build()
                .create(IAppRegistration.class);
        loginService.loginUser(username, password, cb);
    }
}
<file_sep>package com.example.accumuwinnerbettingtips;
import android.app.Activity;
import android.os.Bundle;
import android.widget.EditText;
import android.widget.TextView;
/**
 * Home screen: displays the email address passed in via the "email" intent extra.
 */
public class Home extends Activity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.home);
        // Guard against being launched without extras, which previously caused
        // a NullPointerException on getIntent().getExtras().getString(...).
        Bundle extras = getIntent().getExtras();
        String email = (extras != null) ? extras.getString("email") : null;
        TextView uname = (TextView) findViewById(R.id.storedUser);
        uname.setText(email);
    }
}
| 011f532abad962edbee96419516a368dcf685e13 | [
"Markdown",
"Java"
] | 5 | Markdown | LockeTBB/Accumuwinner-Betting-Tips | 992786b3e1972f3550f12b92a520ccc992741999 | c219acae26044dedb851a4f05ace2ecb97c67d59 |
refs/heads/master | <repo_name>jhleedoc/ExData_Plotting1<file_sep>/plot1.R
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
input_file <- "./data/household_power_consumption.txt"
raw <- read.table(input_file, header = TRUE, sep = ";", dec = ".",
                  stringsAsFactors = FALSE)
# Keep only the two target days (dates are stored as d/m/Y strings).
two_days <- raw[raw$Date %in% c("1/2/2007", "2/2/2007"), ]
gap <- as.numeric(two_days$Global_active_power)
png("plot1.png", width = 480, height = 480)
hist(gap, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
<file_sep>/plot2.R
# Plot 2: Global Active Power over time as a line chart for 2007-02-01..02.
input_file <- "./data/household_power_consumption.txt"
raw <- read.table(input_file, header = TRUE, sep = ";", dec = ".",
                  stringsAsFactors = FALSE)
two_days <- raw[raw$Date %in% c("1/2/2007", "2/2/2007"), ]
# DATE/GAP names kept: plot() derives its default x-axis label from them.
DATE <- strptime(paste(two_days$Date, two_days$Time), "%d/%m/%Y %H:%M:%S")
GAP <- as.numeric(two_days$Global_active_power)
png("plot2.png", width = 480, height = 480)
plot(DATE, GAP, type = "l", ylab = "Global Active Power (kilowatts)")
dev.off()
<file_sep>/plot4.R
# Plot 4: a 2x2 panel of power-consumption charts for 2007-02-01..02.
# NOTE: base-graphics calls are order-sensitive (each plot() opens a panel;
# lines()/legend() draw into the most recent one), so statement order matters.
file <- "./data/household_power_consumption.txt"
data1 <- read.table(file, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target days.
data2 <- data1[data1$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine date and time into POSIX timestamps for the x axis.
DATE <- strptime(paste(data2$Date, data2$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
GAP <- as.numeric(data2$Global_active_power)
GRP <- as.numeric(data2$Global_reactive_power)
Voltage <- as.numeric(data2$Voltage)
sub_Metering1 <- as.numeric(data2$Sub_metering_1)
sub_Metering2 <- as.numeric(data2$Sub_metering_2)
sub_Metering3 <- as.numeric(data2$Sub_metering_3)
png("plot4.png", width = 480, height = 480)
# 2 rows x 2 columns of panels, filled row by row in the order plotted below.
par(mfrow = c(2,2))
plot(DATE, GAP, type = "l", xlab = "", ylab = "Global Active Power", cex=0.2)
plot(DATE, Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# Third panel: the three sub-metering series overlaid on one chart.
plot(DATE, sub_Metering1, type = "l", ylab = "Energy Submetering", xlab="")
lines(DATE, sub_Metering2, type = "l", col = "red")
lines(DATE, sub_Metering3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1 , lwd = 2.5,
       col = c("black", "red", "blue"), bty = "n")
plot(DATE, GRP, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
| 2d6d65bc23fed3dd7f0444ad673e48e981a5d914 | [
"R"
] | 3 | R | jhleedoc/ExData_Plotting1 | f3607c5e8f808f181b018c11256dbc99538b6679 | 10f56e0531d66cf9ea9dea75cc50b35f4dd3a5cc |
refs/heads/master | <repo_name>nornex/react-server<file_sep>/packages/babel-plugin-react-server/src/index.js
import loggerSpec from 'react-server-module-tagger';
import path from 'path';
module.exports = function() {
return {
visitor: {
Identifier(p, state) {
const {node} = p;
const {name} = node;
console.log(`name: ${name}`);
const trim = state.opts.trim;
const prefix = state.opts.prefix;
const parent = path.resolve(path.join(process.cwd(), '..')) + path.sep;
const normalized = path.normalize(this.file.opts.filename);
const filePath = normalized.replace(parent, '');
//TODO: Support labels
const moduleTag = loggerSpec({ filePath, trim, prefix });
console.log(`JSON: ${JSON.stringify(moduleTag)}`);
let tokens;
if (state.opts.tokens) {
tokens = new Set(state.opts.tokens);
} else {
tokens = new Set(["__LOGGER__", "__CHANNEL__", "__CACHE__"]);
}
if (tokens.has(name)) {
// this strikes me as a dirty, nasty hack. I think it would be better
// to parse the object as json and coerce it to an array of
// ObjectProperties to construct an ObjectExpression
p.node.name = moduleTag;
}
},
},
};
}
| fa5858263436afb0514ab51234234f7e0bf112c4 | [
"JavaScript"
] | 1 | JavaScript | nornex/react-server | 60e8a3aa92aa9dcbcc9c45d7caf37f0e75caf803 | 1562071e980763f0df1da9fc4576d04a84a09505 |
refs/heads/main | <repo_name>seijuroseta/react-cubic-spline<file_sep>/open.js
// Open the production build in the default browser, building it first if
// no build output exists yet.
const indexPath = require('path').join(__dirname, 'build', 'index.html');
const hasBuild = require('fs').existsSync(indexPath);
if (!hasBuild) {
  const buildCmd = 'npm run build';
  process.stdout.write(`Build not found\n> ${buildCmd}\n`);
  require('child_process').execSync(buildCmd);
}
process.stdout.write(`Opening ${indexPath}`);
require('open')(indexPath);
<file_sep>/test.js
// Scratch file: ^ is bitwise XOR in JavaScript (0b11 ^ 0b01 = 0b10), so this prints 2.
console.log(3 ^ 1);
<file_sep>/src/core/CubicSpline.ts
import { ISpline } from '../core';
/**
 * Natural cubic-spline interpolation of `fn` over [xStart, xEnd], sampled
 * every `xTurn`, with interpolated output points every `interpolationTurn`.
 *
 * The second-derivative coefficients (C) are obtained by solving the
 * tridiagonal system with the Thomas algorithm (forward sweep computing
 * alpha/beta, then back-substitution), after which B and D follow.
 */
export default class CubicSpline {
    private readonly interpolationTurn: number;
    private readonly fn: any;
    private readonly xStart: number;
    private readonly xEnd: number;
    private readonly xTurn: number;

    private xGrid: number[] = [];
    private yGrid: number[] = [];
    private spline: ISpline[] = [];
    private alpha: number[] = []; // Thomas-algorithm sweep coefficients
    private beta: number[] = [];

    public minY: number; // extrema of the sampled grid values (not the spline)
    public maxY: number;
    public points: any[] = []; // [x, S(x)] pairs of the interpolated curve
    public maxDifference: number = -1; // worst |fn - S| seen at interval midpoints
    public table: any[] = []; // per-node spline coefficients plus alpha/beta

    constructor(
        fn: Function,
        xStart: number,
        xEnd: number,
        xTurn: number,
        interpolationTurn: number = 0.01
    ) {
        this.fn = fn;
        this.xStart = xStart;
        this.xEnd = xEnd;
        this.xTurn = xTurn;
        this.interpolationTurn = interpolationTurn;
    }

    /** Runs the full pipeline and returns `this` for chaining. */
    public solve(): CubicSpline {
        this.initGridAndSpline();
        this.calculateAlphaAndBeta();
        this.reverseSwapMethod();
        this.directSwapMethod();
        this.calculateInterpolation();
        this.calculateMaxDifference();
        this.initSplineAndCoeffTable();
        return this;
    }

    /** Samples fn on the grid and zero-initializes all coefficient arrays. */
    private initGridAndSpline(): void {
        for (let x = this.xStart; x <= this.xEnd; x += this.xTurn) {
            this.alpha.push(0);
            this.beta.push(0);
            this.xGrid.push(x);
            this.yGrid.push(this.fn(x));
            this.spline.push({
                X: this.xGrid[this.xGrid.length - 1],
                A: this.yGrid[this.yGrid.length - 1],
                B: 0,
                C: 0,
                D: 0,
            });
        }
        this.minY = Math.min.apply(Math, this.yGrid);
        this.maxY = Math.max.apply(Math, this.yGrid);
    }

    /** Forward sweep of the Thomas algorithm. */
    private calculateAlphaAndBeta(): void {
        for (let i = 1; i < this.spline.length - 1; i++) {
            const hi = this.xGrid[i] - this.xGrid[i - 1];
            const hi1 = this.xGrid[i + 1] - this.xGrid[i];
            const A = hi;
            const B = hi1;
            const C = 2.0 * (hi + hi1);
            const F =
                6.0 *
                ((this.yGrid[i + 1] - this.yGrid[i]) / hi1 -
                    (this.yGrid[i] - this.yGrid[i - 1]) / hi);
            const z = A * this.alpha[i - 1] + C;
            this.alpha[i] = -B / z;
            // Bug fix: the recurrence uses beta[i - 1]; the original read
            // beta[beta.length - 1], which is never written by this loop and
            // therefore always 0, silently dropping the A*beta term.
            this.beta[i] = (F - A * this.beta[i - 1]) / z;
        }
    }

    /** Back-substitution for the C (second-derivative) coefficients. */
    private reverseSwapMethod(): void {
        for (let i = this.spline.length - 2; i > 0; i--) {
            this.spline[i].C =
                this.alpha[i] * this.spline[i + 1].C + this.beta[i];
        }
    }

    /** Derives B and D for each interval from the solved C values. */
    private directSwapMethod(): void {
        // Bug fix: iterate down to i = 1 (guard `i > 0`); the original stopped
        // at `i > 1`, leaving spline[1].B and spline[1].D at 0 and corrupting
        // interpolation over the first interval.
        for (let i = this.spline.length - 1; i > 0; i--) {
            const hi = this.xGrid[i] - this.xGrid[i - 1];
            this.spline[i].D = (this.spline[i].C - this.spline[i - 1].C) / hi;
            this.spline[i].B =
                (hi * (2.0 * this.spline[i].C + this.spline[i - 1].C)) / 6.0 +
                (this.yGrid[i] - this.yGrid[i - 1]) / hi;
        }
    }

    /** Evaluates the piecewise cubic at x (clamped to the end segments). */
    private interpolatePoint(x: number) {
        let spline: ISpline;
        const size: number = this.spline.length - 1;
        if (x <= this.spline[0].X) {
            spline = this.spline[0];
        } else if (x >= this.spline[size].X) {
            spline = this.spline[size];
        } else {
            spline = this.binarySearch(x);
        }
        const dx = x - spline.X;
        return (
            spline.A +
            (spline.B + (spline.C / 2.0 + (spline.D * dx) / 6.0) * dx) * dx
        );
    }

    /** Finds the segment whose right endpoint bounds x. */
    private binarySearch(x: number): ISpline {
        let left = 0;
        let right = this.spline.length - 1;
        while (left + 1 < right) {
            const center = Math.ceil(left + (right - left) / 2);
            if (x <= this.spline[center].X) {
                right = center;
            } else {
                left = center;
            }
        }
        return this.spline[right];
    }

    /** Builds the display table of node coefficients plus sweep values. */
    private initSplineAndCoeffTable(): void {
        for (let i = 0; i < this.spline.length; i++) {
            this.table.push({
                ...this.spline[i],
                alpha: this.alpha[i],
                beta: this.beta[i],
            });
        }
    }

    /** Records the worst deviation |fn - S| sampled at interval midpoints. */
    private calculateMaxDifference(): void {
        for (let i = 0; i < this.spline.length - 1; i++) {
            const xCenter = (this.xGrid[i] + this.xGrid[i + 1]) / 2.0;
            const xDifference = Math.abs(
                this.fn(xCenter) - this.interpolatePoint(xCenter)
            );
            if (xDifference > this.maxDifference) {
                this.maxDifference = xDifference;
            }
        }
    }

    /** Densely samples the spline into `points` for plotting. */
    private calculateInterpolation(): void {
        for (let x = this.xStart; x < this.xEnd; x += this.interpolationTurn) {
            this.points.push([x, this.interpolatePoint(x)]);
        }
    }
}
<file_sep>/src/core/Utils.ts
import { Parser } from 'expr-eval';
// Build a numeric function of x from an expression string (via expr-eval).
export const Func = (expression: string): Function => (x: number): number =>
    Parser.evaluate(expression, { x: x });

// Random base-36 string of the given length.
export const Hashify = (size: number): string => {
    let out = '';
    for (let i = 0; i < size; i++) {
        out += (~~(Math.random() * 36)).toString(36);
    }
    return out;
};

// Random hash built from two halves (ceil + floor covers odd sizes).
export const Hash = (size: number): string =>
    Hashify(Math.ceil(size / 2)) + Hashify(Math.floor(size / 2));
<file_sep>/README.md
# spline-cubic-interpolation
Numerical Methods Spline Cubic Interpolation
## Running
```shell
npm run start
```
## Result

<file_sep>/src/core/index.ts
// Barrel file: re-exports the public API of the core module.
import CubicSpline from './CubicSpline';
import ISpline from './ISpline';
import { Func, Hash } from './Utils';

export { CubicSpline, ISpline, Func, Hash };
| 234b446c9aa688e4b81ecf70d83cc43491573a09 | [
"JavaScript",
"TypeScript",
"Markdown"
] | 6 | JavaScript | seijuroseta/react-cubic-spline | 08353e6a72e905dd837e21c5457f1b6f874852c0 | 2f3b5547e2de5c9ec7b90f3b9f12f52bda497992 |
refs/heads/master | <file_sep>class GameController < ApplicationController
  # Require a signed-in user; anonymous visitors are redirected to root
  # (helper defined in ApplicationController).
  before_action :notAuthorized

  # Game screen; rendering is handled entirely by the view.
  def index
  end
end
<file_sep>class LeaderboardController < ApplicationController
before_action :notAuthorized
before_action :get_users
before_action :get_leaderboard
def index
@i = 1
end
def get_users
@users = User.all.collect{|u| [u.name, u.id]}
end
end
<file_sep>class HomeController < ApplicationController
  # Landing page: already signed-in users are redirected to the game;
  # everyone else sees the top-5 leaderboard preview.
  before_action :Authorized
  before_action :get_leaderboard_landing

  # @i seeds the rank counter the view increments while rendering rows.
  def show
    @i = 1
  end
end
<file_sep>class ApplicationController < ActionController::Base
# Prevent CSRF attacks by raising an exception.
# For APIs, you may want to use :null_session instead.
protect_from_forgery with: :exception
helper_method :current_user
helper_method :admin_user
def notAuthorized
if !current_user
redirect_to root_path
end
end
def Authorized
if current_user
redirect_to game_url
end
end
def current_user
@current_user ||= User.find(session[:user_id]) if session[:user_id]
end
def admin_user
current_user.email == '<EMAIL>'
current_user.email == '<EMAIL>'
end
def get_leaderboard_landing
@scores2 = User.joins(:scores).where("users.id = scores.user_id").select('users.name as name', 'scores.score as score', 'users.image as image', 'users.email as email').order('score desc').limit(5)
end
def get_leaderboard
@scores2 = User.joins(:scores).where("users.id = scores.user_id").select('users.name as name', 'scores.score as score', 'users.image as image', 'users.email as email').order('score desc').limit(100)
end
end
| e0f56a3850b87108d944a61fecd9426f4d348f34 | [
"Ruby"
] | 4 | Ruby | dkurschner1/Group1Project | 628055a016121670fb2dc75376791d52117f4ca6 | 8381e73ba9f97c6f84497a23a91b307573882a40 |
refs/heads/master | <repo_name>JohnOutbottle/simple-pro-ready-angular2-webpack-seed<file_sep>/app/vendor.ts
// Angular
import '@angular/platform-browser';
import '@angular/platform-browser-dynamic';
import '@angular/core';
import '@angular/common';
import '@angular/http';
import '@angular/router';
// RxJS
import 'rxjs';
import '../node_modules/bootstrap/dist/css/bootstrap.min.css';
// Other vendors for example jQuery, Lodash or Bootstrap
// You can import js, ts, css, sass, ... (modules only for JS)
//To use js files normally, don't try to import them here, instead...
//1. Add the js file to the ext_resources (specifically that folder)
//2. Add the <script src="ext_resources/my_js.js?<%= htmlWebpackPlugin.options.cachebust %>"></script> to index.html.template
//all files in ext_resources will be served correctly from the devServer during "npm run live" and will be copied to the dis folder during "npm run build".
<file_sep>/hooks.plugin.js
// Webpack plugin: clears the console when a compile starts, and raises a
// desktop notification plus a console summary when the build finishes.
function HooksPlugin(options) {
}

HooksPlugin.prototype.apply = function (compiler) {
    compiler.plugin("compile", function (params) {
        console.log("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
        console.log("Build in progress.... " + hooksPluginHelper.getTimeString());
        console.log("\n\n\n");
    });

    compiler.plugin("done", function (stats) {
        var packageInfo = require("./package.json");
        var notifier = require("node-notifier");
        var duration = ((stats.endTime - stats.startTime) / 1000).toFixed(2);

        notifier.notify(
            {
                title: packageInfo.name,
                message: `Typescript build is done!\n${stats.compilation.errors.length} errors in ${duration}s`,
                sound: true
            },
            function (error, response) {
                var finishedAt = hooksPluginHelper.getTimeString();
                hooksPluginHelper.printComplete(duration, finishedAt, hooksPluginHelper.prevBuildTime);
                hooksPluginHelper.prevBuildTime = finishedAt;
            }
        );
    });
};

module.exports = HooksPlugin;
// Shared helpers: time formatting, completion banner, and the previous
// build's finish time (mutated by the plugin between builds).
var hooksPluginHelper = (function () {
    // Zero-pad a number to two digits.
    function pad(value) {
        return value > 9 ? "" + value : "0" + value;
    }

    // Current wall-clock time formatted as "HH:MM:SS".
    function getTimeString() {
        var now = new Date();
        return pad(now.getHours()) + ":" + pad(now.getMinutes()) + ":" + pad(now.getSeconds());
    }

    function printComplete(duration, time, prevBuildTime) {
        console.log("\n\n\nBuild complete ");
        console.log(" in " + duration + " seconds");
        console.log(" [" + time + "]");
        console.log(" (prev: [" + prevBuildTime + "]) ");
        console.log("\n");
    }

    return {
        printComplete: printComplete,
        getTimeString: getTimeString,
        prevBuildTime: "n/a"
    };
})();
})();<file_sep>/app/home/components/home.component.spec.ts
/// <reference path="../../../typings/globals/jasmine/index.d.ts" />
import { ComponentFixture, TestBed } from "@angular/core/testing";
import { NO_ERRORS_SCHEMA } from "@angular/core";
import { HomeComponent } from "./home.component";
let comp: HomeComponent;
let fixture: ComponentFixture<HomeComponent>;
describe("HomeComponent", () => {
beforeEach(() => {
TestBed.configureTestingModule({
declarations: [HomeComponent],
providers: [],
schemas: [NO_ERRORS_SCHEMA]
});
fixture = TestBed.createComponent(HomeComponent);
comp = fixture.componentInstance;
});
/*it("getTestString returns 'Test me please' but will fail", () => {
expect(comp.getTestString()).toBe("Test me please WRONG");
});*/
it("getTestString returns 'Test me please'", () => {
expect(comp.getTestString()).toBe("Test me please");
});
});
<file_sep>/readme.md
# simple-pro-ready-angular2-webpack-seed
A simple yet production ready angular 2 webpack seed.
This can be used as a standalone application or within another project such as ASP.NET MVC or Web API, Java Spring etc.
Complete with karma, jasmine and istanbul
Questions welcome at [Outbottle.com](http://outbottle.com/angular-2-production-ready-webpack-seed-starter/)
## Running
1. `npm run build` - builds the application into the dist directory the contents of which are stand-alone and fully functional.
You can run it by navigating to the dist directory in a command terminal and serving it up using npm module http-server or httpster etc.
2. `npm run build-w` - This does the same as 1 but continues to listen for code changes, building automatically when changes are detected.
3. `npm live` - This fires up the webpack-dev-server and serves up the application on http://localhost:8081. It rebuilds and reloads the browser automatically when code changes are detected.
4. `npm test` - Runs unit tests once, result displayed in terminal
5. `npm test-w` - Runs unit tests as code changes
6. `npm run coverage` - launches chrome showing unit test code coverage (note that this is only available after all tests pass)
## About
The resultant compiled code is minimised and complete with source maps for debugging purposes.
There is just one build which is suitable for dev, pro and any other environment.
If you want to speed things up during development however feel free to simply use this command from the console `webpack --watch` which is a quicker build but not quite pro ready.
The generated index.html file cache busts the generated css and js files by adding a hash query string parameter.
The code is fully debuggable in chrome or firebug via the generated maps
## Standalone application
When built, the `/dist` folder contains the compiled standalone application. Launch in any application server to see it.
*E.g. in a console `cd` into the `dist` folder and use `http-server` (npm module) to launch the application.*
Obviously, during development use `npm run build-w` or `npm run live` for convenience.
## Integrated into another application
Such as Spring, Ninjaframework, ASP.NET MVC or WEB API etc.
1. Add this entire thing to the root of the project. (OK, so the folder structure may not be exactly as you want, but keep in mind that there are essentially two projects (Angular2 and Spring/MVC or whatever) occupying the same root; this is somewhat unavoidable.)
2. Edit `webpack.config.js` as so
```javascript
new HtmlWebpackPlugin({
cachebust: new Date().getTime(),
template: './index.html.template', //this should end in anything other than .html; otherwise use [another template option](https://github.com/ampedandwired/html-webpack-plugin/blob/master/docs/template-option.md)
filename: './Views/Shared/_Layout.cshtml', //MVC //In other words, make this path anywhere relative to root directory
hash: true,
inject: false
})
```
3. Update `index.html.template` `script` and `link` tags to point to the `dist` directory.
## Standalone Vs. Integrated into Spring or MVC or whatever
If you're integrating into Spring, Web API or whatever, it's unlikely that the devServer live feature will be of much benefit. The Angular2 application will be running on localhost:8081 while the Spring or Web API application will be running from IIS or Tomcat or whatever; there will be CORS issues, and there may also be relative path issues. These can be solved of course, but just be aware of it. It would probably be best to simply run `npm run build-w` rather than `npm run live` and hit F5 on the browser each time.
If, however, you intend the Angular2 application to be a stand-alone front end with a separate REST back end or something to that effect, then `npm run live` will work quite nicely.
## Adding external resources
Adding static resources such as images, js, css etc. that are not in the `/app` folder can be achieved by doing the following.
1. Add the file(s) to the `ext_resources` directory.
2. Include them in `index.html.template` e.g.
```
<body>
<my-app>Loading...</my-app>
<!-- External Resources -->
<script src="ext_resources/jquery.js?<%= htmlWebpackPlugin.options.cachebust %>"></script>
<script src="ext_resources/js.js?<%= htmlWebpackPlugin.options.cachebust %>"></script>
<script src="vendor.js?<%= htmlWebpackPlugin.options.cachebust %>"></script>
<script src="app.js?<%= htmlWebpackPlugin.options.cachebust %>"></script>
</body>
```
Similarly, add css files to the `<head>`
## Including vendor modules
in `vendor.ts` add "modules" as so
`import "../node_modules/some_module/src";`
## Including your application modules
In `main.ts` add "modules" as so
`import "../my_modules/some_module_I_made_earlier/src";`
That is pretty much it.
Feel perfectly free to post any questions here: [Outbottle.com](http://outbottle.com/angular-2-production-ready-webpack-seed-starter/)
<file_sep>/ext_resources/js.js
// Demo of an external (non-webpack) script: once the DOM is ready, write an
// incrementing counter into #jqueryTarget every second.
$(document).ready(function () {
    var counter = 0;
    setInterval(function () {
        $('#jqueryTarget').text(counter);
        counter++;
    }, 1000);
});
});<file_sep>/app/main.ts
// Application entry point: pulls in app-level assets and bootstraps the root module.
import { platformBrowserDynamic } from '@angular/platform-browser-dynamic';

import { AppModule } from './app.module';
import '../site.css';
//or import any other assets that are part of your app rather than vendor.
//Note however that all js imports must be "modules"
//To use js files normally, don't try to import them here, instead...
//1. Add the js file to the ext_resources (specifically that folder)
//2. Add the <script src="ext_resources/my_js.js?<%= htmlWebpackPlugin.options.cachebust %>"></script> to index.html.template
//all files in ext_resources will be served correctly from the devServer during "npm run live" and will be copied to the dis folder during "npm run build".

platformBrowserDynamic().bootstrapModule(AppModule);
| 8024592d9099fead0c2928208e270783163d4047 | [
"JavaScript",
"TypeScript",
"Markdown"
] | 6 | TypeScript | JohnOutbottle/simple-pro-ready-angular2-webpack-seed | 048f5d2bf51157597e9533ba010c0c74d682201b | 217c2cdbfdb0b42e2bea0eee34a8e95f7f29fcf6 |
refs/heads/master | <file_sep>import re
import requests
from bs4 import BeautifulSoup
# Scrape the Sina domestic-news index page and print every linked article's
# title, URL, and body text. id_start/id_end are not referenced below.
url = 'http://news.sina.com.cn/china/'
id_start = 1100000
id_end = 1100100

respose = requests.get(url)
respose.encoding = 'utf-8'  # force UTF-8 so the Chinese text decodes correctly
text = respose.text
soup = BeautifulSoup(text, 'html.parser')

# Each .blk122 element groups article links; fetch and print each article.
for news in soup.select('.blk122'):
    for url in news.select('a'):  # NOTE: shadows the index `url` above
        print(url.text)
        print(url['href'])
        article_text = requests.get(url['href'])
        article_text.encoding = 'utf-8'
        article = BeautifulSoup(article_text.text, 'html.parser')
print(article.select('.article')[0].text)<file_sep>import re
import requests
import xlwt
from openpyxl import Workbook
from bs4 import BeautifulSoup
from time import sleep
import random
import linecache
# Article pages live at url_base + <numeric article id>.
url_base = 'https://cloud.tencent.com/developer/article/'

# Browser-like request headers (including a captured session cookie) so the
# Tencent Cloud community pages respond as they would to a real browser.
headers = {
    'cache-control':'no-cache',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'accept-encoding' : 'gzip, deflate, br',
    'accept-language' : 'zh-CN,zh;q=0.9',
    'cookie' : 'qcloud_uid=be57c3d2723f8dcf1fd48b1175bde70d; tencent_uid=f890f9e77266b46d9b8594b7ac14a8c0; _ga=GA1.2.2136348531.1531808120; pgv_pvi=9739491328; pt2gguin=o0481344077; language=zh; qcloud_from=qcloud.baidu.seo-1532924806740; lastLoginType=qq; intl=; qcloud_visitId=06c7c3088db367a47745bf33b98335e7; _gat=1; pgv_si=s6490221568',
    'pragma' : 'no-cache',
    'referer' : 'https://cloud.tencent.com/developer',
    'upgrade-insecure-requests' : '1',
    'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36',
}

# Half-open id range [id_start, id_end) to crawl, processed in batches of bc.
id_start = 1004664
id_end = 1004714
bc = 50  # batch size: one spreadsheet file is written per bc articles

data = []  # accumulator of scraped-article dicts for the current batch
def data_mysql(data):
    '''
    Save article to mysql.

    Unimplemented stub: currently does nothing and returns None.
    '''
def data_save2(data, l, r):
    '''
    Save one batch of articles to an .xls file via xlwt.

    The header row comes from the keys of the first record; l and r (the
    crawled id bounds) name the output file.
    '''
    workbook = xlwt.Workbook(encoding='utf-8', style_compression=0)
    sheet = workbook.add_sheet('云社区', cell_overwrite_ok=True)
    keys = list(data[0].keys())
    for col, key in enumerate(keys):
        sheet.write(0, col, key)
    for row, record in enumerate(data):
        for col, key in enumerate(keys):
            sheet.write(row + 1, col, record[key])
    workbook.save(r'E:\test' + str(l) + '-' + str(r) + '.xls')
def data_save(data, l, r):
    '''
    Save one batch of articles to an .xlsx file via openpyxl.

    Row 1 holds the column headers (the keys of the first record); each
    following row holds one scraped article. l and r name the output file.
    '''
    if not data:
        # Nothing scraped in this batch; the original crashed with
        # IndexError on data[0] when every fetch in the batch failed.
        return
    filename = r'E:\test' + str(l) + '-' + str(r) + '.xlsx'
    book = Workbook()
    sheet = book.active
    keys = list(data[0].keys())
    for col, key in enumerate(keys, start=1):
        sheet.cell(row=1, column=col, value=key)
    for row, record in enumerate(data, start=2):
        for col, key in enumerate(keys, start=1):
            sheet.cell(row=row, column=col, value=record[key])
    book.save(filename)
    print(filename, '已保存!\n')
def image2source(text):
    """Rewrite lazy-image placeholders as clickable <img> tags and replace
    non-breaking spaces (U+00A0) with ordinary spaces."""
    linked = re.sub(
        r'(<figure><div class="image-block"><span class="lazy-image-holder" dataurl=")(\S*)("></span></div></figure>)',
        r'<a href ="\2"><img src="\2"/></a>',
        text,
    )
    return linked.replace('\xa0', ' ')
def source2soup(url):
    """Fetch one article page through a random proxy and append the parsed
    record to the global `data` list.

    On timeout the url is skipped entirely; on a parse failure a 'Failed'
    placeholder row is appended, and on a non-200 status a '404 NOT FOUND'
    row is appended, so most attempted ids still produce one output row.
    """
    # pick a random proxy from a local ip list (assumes the file has ~898 lines)
    the_line = 'http://'+linecache.getline(r'D:\cc\软件\ip.txt', random.randint(1,898))
    proxies = {
        'http':the_line.strip()
    }
    try:
        respose = requests.get(url,headers = headers, proxies = proxies, timeout = 0.5)
    except Timeout:
        # NOTE(review): only the (presumably requests) Timeout is caught here;
        # other request errors such as a bad proxy will propagate to the caller.
        print('Time Out:', url)
        return
    global data
    if respose.status_code == requests.codes.ok:
        respose.encoding = 'utf-8'
        Soup = BeautifulSoup(respose.text, 'html.parser')
        # CSS selectors for the article metadata on the target site
        author_name = Soup.select('.author-name')
        column_name = Soup.select('.column-name')
        article_time = Soup.select('.col-article-time > span > time')
        article_title = Soup.select('.col-article-title')
        article = Soup.select('.J-articleContent')
        try:
            data.append({
                'article_url' : url,
                'author_name' : author_name[0].text,
                'author_url' : 'https://cloud.tencent.com/developer' + author_name[0]['href'],
                'column_name' : column_name[0].text,
                'column_url' : 'https://cloud.tencent.com/developer' + column_name[0]['href'],
                'article_title' : article_title[0].text,
                'article_time' : article_time[0]['datetime'],
                'article_content' : image2source(str(article[0]))
            })
        except IndexError:
            # a selector matched nothing (deleted page / layout change):
            # record a placeholder row after a short pause
            print('IndexError,sleep')
            sleep(0.5)
            data.append({
                'article_url' : url,
                'author_name' : 'Failed',
                'author_url' : 'Failed',
                'column_name' : 'Failed',
                'column_url' : 'Failed',
                'article_title' : 'Failed',
                'article_time' : 'Failed',
                'article_content' : 'Failed'
            })
            return
    else:
        # non-200 response: keep a row so the id range stays accounted for
        data.append({
            'article_url' : url,
            'author_name' : '404 NOT FOUND',
            'author_url' : '404 NOT FOUND',
            'column_name' : '404 NOT FOUND',
            'column_url' : '404 NOT FOUND',
            'article_title' : '404 NOT FOUND',
            'article_time' : '404 NOT FOUND',
            'article_content' : '404 NOT FOUND'
        })
    sleep(0.2)
def main():
    """Scrape article ids in [id_start, id_end) in batches of `bc`, saving
    each batch to its own spreadsheet and clearing the shared `data` buffer."""
    for start in range(id_start, id_end, bc):
        if id_end-start>=bc:
            # full batch of bc articles
            for article_id in range(start, start+bc):
                url = url_base + str(article_id)
                source2soup(url)
            data_save(data, start, start+bc)
            del data[:]
            sleep(0.1)
        else:
            # final, short batch covering the remainder
            for article_id in range(start, id_end):
                url = url_base + str(article_id)
                source2soup(url)
            data_save(data, start, id_end)
            del data[:]
if __name__ == '__main__':
main()<file_sep>import re
import requests
import os
import shutil
from time import sleep
path_base = '/image'
def save_image(url):
    """Move an already-downloaded i.loli.net image into a directory tree that
    mirrors its URL path.

    url -- image link of the form https://i.loli.net/<path>/<name>

    Lines that do not look like such a link (e.g. a blank trailing line in
    the input file) are now skipped instead of crashing with AttributeError
    on the None returned by re.match.
    """
    match = re.match(r'https://i.loli.net(\S+)/(\S+)', url)
    if match is None:
        return
    image_name = match.group(2)
    image_path = path_base + match.group(1)
    if not os.path.exists(image_path):
        os.makedirs(image_path)
        print('已创建'+str(image_path)+'目录')
    img1 = path_base + '/' + image_name
    img2 = image_path + '/' + image_name
    shutil.move(img1, img2)
    print(img1 + ' --> ' + img2 + '移动成功')
fail = []  # NOTE(review): declared but never appended to -- print(fail) below always shows []
with open(r'C:\Users\owen\Desktop\image_urls.txt') as urls:
    for url in urls:
        # each `url` still carries its trailing newline
        save_image(url)
        print(url+'移动成功!')
print(fail)<file_sep>import re
import requests
import threading
from time import sleep
url_base = 'https://www.qcgzxw.cn/'
id_start = 0
id_end = 300
res = []
def valid_url(url, yz=3):
    """Probe `url` and append it to the global `res` list when it answers 200.

    yz -- remaining attempts.  Connection errors retry after a 5 s pause,
    non-200 responses after 2 s; the function gives up silently (returns
    None) once the attempts are exhausted.
    """
    try:
        respose = requests.get(url)
    except requests.RequestException:
        # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        print('异常等待5S!')
        sleep(5)
        yz -= 1
        if yz > 0:
            return valid_url(url, yz)
        else:
            return
    if respose.status_code == requests.codes.ok:
        print(url)
        res.append(url)
    else:
        yz -= 1
        if yz > 0:
            sleep(2)
            return valid_url(url, yz)
        else:
            return
# Probe every candidate post id and collect the ones that answer 200 OK.
for i in range(id_start, id_end):
    url = url_base + str(i) + '.html'
    valid_url(url)
print('done')
with open('res.txt', 'w') as res_file:
for url in res:
res_file.write(url+'\n')<file_sep>import re
import requests
import os
from time import sleep
path_urls = r'C:\Users\owen\Desktop\neilian.txt'# 图片路径(一行一条)
guize = r'https://www.qcgzxw.cn/wp-content/uploads(\S+)/(\S+)\.(png|jpg|gif|jpeg|webp)'# 图片目录,图片名称正则
path_base = '/image'# 图片保存路径
headers = {
'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
'Cookie':'love_2795=2795; _ga=GA1.2.1779309949.1534838090; pgv_pvi=6978058240; Hm_lvt_0d49c9511331d0f9de734c0672f05e0c=1534847357,1534916778,1534932956,1534993674; __cfduid=d0cf8573770b8b40c49bccdbacffc9be51535003911; love_2799=2799; wordpress_test_cookie=WP+Cookie+check; _gid=GA1.2.1503872837.1536925973; UM_distinctid=165db3e127f1b0-0ed896ea11ca88-5701631-144000-165db3e12809b2; wp-settings-1=libraryContent%3Dbrowse%26editor%3Dtinymce%26mfold%3Do%26hidetb%3D1%26post_dfw%3Doff%26align%3Dcenter%26imgsize%3Dfull%26advImgDetails%3Dshow%26urlbutton%3Dfile%26widgets_access%3Doff%26editor_plain_text_paste_warning%3D2; wp-settings-time-1=1537267999; wordpress_logged_in_c2cef18eb92a696cdc19704ce902fda4=admin%7C1537619979%7CJQDSsVjjr0dd9p1CI8Jabuci3IFAnpd3t8EVgoxPBhQ%7Cea03dd4a869c3f55295ebdc7f889be6868ae588a35bc4e03e6cb95633ae7dab0'
}
def save_image(url):
    """Download one image and store it under path_base, mirroring the
    directory layout captured by the `guize` regex groups."""
    match = re.match(guize, url)
    # NOTE(review): a non-matching line makes `match` None and raises
    # AttributeError on the next line; the caller's try/except records it.
    image_name = match.group(2) + '.' + match.group(3)
    image_path = path_base + match.group(1)
    is_exists = os.path.exists(image_path)
    if not is_exists:
        os.makedirs(image_path)
        print('已创建'+str(image_path)+'目录')
    img = image_path + '/' + image_name
    # NOTE(review): the module-level `headers` (with auth cookie) is never
    # passed to this request -- possibly an oversight; confirm whether the
    # downloads require authentication.
    image = requests.get(url)
    with open(img, 'wb') as f:
        f.write(image.content)
fail = []# urls that failed to download
with open(path_urls, encoding='utf-8') as urls:
    for url in urls.readlines():
        try:
            save_image(str(url.splitlines()[0]))
            print(url+'下载成功!')  # `url` still ends with its newline
        except:
            # NOTE(review): bare except -- also hides KeyboardInterrupt
            fail.append(str(url.splitlines()[0]))
            print(url+'下载失败!')
print(fail)<file_sep>import requests
import re
from bs4 import BeautifulSoup
from time import sleep
# data-original="(https://i.loli.net\S+)"
urls_path = r'C:\Users\owen\Desktop\urls.txt'
image_path = r'C:\Users\owen\Desktop\res.txt'
pattern = r'data-original="(https://i.loli.net\S+)"'
def get_image_url(text):
    '''
    Extract the lazy-loaded image links from an article body.
    Returns a set of the matched URLs (duplicates removed) -- the original
    docstring incorrectly said "list".
    '''
    return {match.group(1) for match in re.finditer(pattern, text)}
def get_content(url, num=3):
    '''
    Fetch `url` and return the article body (the '#article-post' node) as HTML.

    num -- remaining attempts.  Non-200 responses are retried after a 1 s
    pause; once the attempts are exhausted a RuntimeError is raised (the
    caller's try/except treats it as a failed url).  The original never
    checked `num`, so persistent failures recursed without bound, and it
    ended with an unreachable `return [url]`.
    '''
    if num <= 0:
        raise RuntimeError('giving up on ' + str(url))
    response = requests.get(url, timeout=5)
    if response.status_code == requests.codes.ok:
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')
        post = soup.select('#article-post')
        return str(post[0])
    sleep(1)
    return get_content(url, num=num - 1)
def sava_to_text(urls):
    '''
    Append each harvested image link to the result file, one URL per line.
    '''
    with open(image_path, 'a+') as out:
        out.writelines(str(u) + '\n' for u in urls)
if __name__ == '__main__':
    # Walk the article url list and harvest every image link found in each page.
    with open(urls_path, 'r') as urls:
        for url in urls:
            try:
                content = get_content(url)
                sava_to_text(get_image_url(content))
                print(str(url) + 'saved.')  # url keeps its trailing newline
            except:
                # best-effort: log the failing url and keep going
                print(url)
sleep(1) | fd88ba1c649539d17191ca118b9704b6ed0031bf | [
"Python"
] | 6 | Python | ayuday/pachong | fe8768ac162a7ac00697cb8c20e521ee064bf81e | f542a8ee0f069c69a2628c035ff3e76155fd9e1a |
refs/heads/master | <file_sep>#include "MainMenu.h"
// Entry point: run the student-grade menu loop until the user quits.
// Fixed: `void main()` is not standard C++ -- main must return int.
int main()
{
	MainMenu p;
	p.select();
	return 0;
}
<file_sep>#pragma once
#include "Student.h"
// Top-level menu controller for the grade-management program.
// NOTE(review): MainMenu both inherits Student and owns a Student* --
// the inheritance looks unintentional; composition alone would suffice.
class MainMenu : public Student
{
private:
	Student* mode;   // delegate that performs the actual student operations
public:
	MainMenu();
	~MainMenu();
	int Menu();      // print the menu and return the user's numeric choice
	void select();   // dispatch loop until the user picks 5 (quit)
};
// Initialise an empty record; `name` is a heap-allocated 20-char buffer.
Student::Student()
{
	name = new char[20];   // NOTE(review): cin >> name can overflow this fixed buffer
	score1 = 0;
	score2 = 0;
	score3 = 0;
	total = 0;
	average = 0;
}
// Release the name buffer.
Student::~Student()
{
	delete[] name;
}
// TODO: unimplemented stub -- intended to print a student's record.
void Student::showStudent()
{
}
// Prompt for three students' names and scores (Korean prompts).
// NOTE(review): the input is read into a local `list` array that is
// discarded on return, and check() validates *this* object's scores
// rather than list[i]'s -- as written the routine stores nothing.
// Left byte-identical; documenting only.
void Student::setStudent()
{
	Student list[3];
	cout << "세명의 이름과 점수들을 입력하세요." << endl;
	for (int i = 0; i < 3; i++)
	{
		cout << "학생" << i + 1 << " 이름 : ";
		cin >> list[i].name;
		cout << "학생" << i + 1 << " 점수1" << " : ";
		cin >> list[i].score1;
		if (list[i].score1 < 0 || list[i].score1 >100) {
			cout << "입력범위초과" << endl;
		}
		else {
			cout << "학생" << i + 1 << " 점수2" << " : ";
			cin >> list[i].score2;
			check();
			cout << "학생" << i + 1 << " 점수3" << " : ";
			cin >> list[i].score3;
			check();
		}
	}
	cout << endl;
}
// TODO: unimplemented stub -- intended to look up a student by name.
void Student::searchStudent()
{
}
// TODO: unimplemented stub -- intended to print each student's grade.
void Student::gradeStudent()
{
}
// TODO: unimplemented stub -- intended to print score statistics.
void Student::statsStudent()
{
}
// Validate that all three scores lie within [0, 100].
// Prints a warning and returns false when any score is out of range.
bool Student::check()
{
	const bool valid = (0 <= score1 && score1 <= 100)
		&& (0 <= score2 && score2 <= 100)
		&& (0 <= score3 && score3 <= 100);
	if (!valid)
		cout << "점수 범위(0~100)을 벗어납니다." << endl;
	return valid;
}
<file_sep>#pragma once
#include <iostream>
using namespace std;
// A single student record: a name plus three scores with derived total/average.
class Student
{
private:
	char* name;                          // heap-allocated in the ctor (20 chars)
	int score1, score2, score3, total;
	double average;
public:
	Student();
	~Student();
	void showStudent(); // print the student's record (unimplemented)
	void setStudent(); // input a name and the three scores
	void searchStudent(); // look up by name and display the record (unimplemented)
	void gradeStudent(); // print the student's grade (unimplemented)
	void statsStudent(); // print score statistics (unimplemented)
	bool check(); // verify the scores are within 0~100 (the original comment wrongly said "name")
};
c++ first project
<file_sep>#include "MainMenu.h"
// Create the delegate Student object used by the menu actions.
MainMenu::MainMenu()
{
	mode = new Student;
}
// Free the delegate.
MainMenu::~MainMenu()
{
	delete mode;
}
// Print the main menu (Korean labels) and return the number the user typed:
// 1 enter scores, 2 search, 3 grades, 4 statistics, 5 quit.
// NOTE(review): non-numeric input leaves cin in a fail state and `num`
// uninitialised -- consider validating the stream.
int MainMenu::Menu()
{
	int num;
	cout << endl;
	cout << "<<성적 관리 프로그램>>" << endl;
	cout << "1. 학생 성적 입력" << endl;
	cout << "2. 학생 성적 검색" << endl;
	cout << "3. 학생 성적 등급" << endl;
	cout << "4. 학생 성적 통계" << endl;
	cout << "5. 프로그램 종료" << endl;
	cout << "---------------------" << endl;
	cout << "메뉴 선택 : ";
	cin >> num;
	return num;
}
// Main dispatch loop: keep showing the menu until the user picks 5 (quit),
// forwarding each valid choice to the corresponding Student operation.
void MainMenu::select()
{
	int key;
	while ((key = Menu()) != 5)
	{
		switch (key)
		{
		case 1:
			mode->setStudent();
			break;
		case 2:
			mode->searchStudent();
			break;
		case 3:
			mode->gradeStudent();
			break;
		case 4:
			mode->statsStudent();
			break;
		default:
			// anything outside 1..5: warn and re-prompt
			cout << endl;
			cout << "잘못된 메뉴입니다. 재입력바랍니다.";
			cout << endl;
			break;
		}
	}
	cout << "==프로그램을 종료합니다.==" << endl;
}
| ad1294e19de869d838523610b123b9d04d8beef4 | [
"Markdown",
"C++"
] | 6 | C++ | joon2038/studentmanager | b365606706faacb173d4d9220a7ff9b72f741993 | 76927d2ae524877dbab098dbf9c7f672e2764aa3 |
refs/heads/master | <repo_name>kunalatwork/AdvanceSFE<file_sep>/src/test/java/TestRunner/runnerClass.java
package TestRunner;
import org.junit.runner.RunWith;
import cucumber.api.CucumberOptions;
import cucumber.api.junit.Cucumber;
@RunWith(Cucumber.class)
@CucumberOptions(
features = ".//Features/Rep.feature",
glue = "RepStepDefination",
dryRun = false,
monochrome = true,
plugin = {
"html:Report/RepReport"
}
)
public class runnerClass {
}
<file_sep>/Report/AddProductReport/report.js
$(document).ready(function() {var formatter = new CucumberHTML.DOMFormatter($('.cucumber-report'));formatter.uri("./Features/Sales_Model_Management.feature");
formatter.feature({
"line": 1,
"name": "Sales Model Management",
"description": "",
"id": "sales-model-management",
"keyword": "Feature"
});
formatter.before({
"duration": 20506081600,
"status": "passed"
});
formatter.scenario({
"line": 2,
"name": "Add All fields of Sales Model Management",
"description": "",
"id": "sales-model-management;add-all-fields-of-sales-model-management",
"type": "scenario",
"keyword": "Scenario"
});
formatter.step({
"line": 3,
"name": "User launch browser",
"keyword": "Given "
});
formatter.step({
"line": 4,
"name": "Enter url \"https://testingadvance.advancesfe.com/Home/Login\"",
"keyword": "And "
});
formatter.step({
"line": 5,
"name": "User Enter Email \"<EMAIL>\" and password \"<PASSWORD>\"",
"keyword": "Then "
});
formatter.step({
"line": 6,
"name": "User click on login button",
"keyword": "And "
});
formatter.step({
"line": 7,
"name": "Select Company Logo and add Catagory",
"keyword": "Then "
});
formatter.step({
"line": 8,
"name": "Add compentency Element and Add Element",
"keyword": "Then "
});
formatter.step({
"line": 9,
"name": "Add Assignment option and Catagory Element Mapping",
"keyword": "Then "
});
formatter.step({
"line": 10,
"name": "Add model and Scale factory",
"keyword": "Then "
});
formatter.step({
"line": 11,
"name": "All model group and view mapping",
"keyword": "Then "
});
formatter.step({
"line": 12,
"name": "check Model Managament",
"keyword": "And "
});
formatter.match({
"location": "AdminSteps.user_launch_browser()"
});
formatter.result({
"duration": 912261200,
"status": "passed"
});
formatter.match({
"arguments": [
{
"val": "https://testingadvance.advancesfe.com/Home/Login",
"offset": 11
}
],
"location": "AdminSteps.enter_url(String)"
});
formatter.result({
"duration": 5555864700,
"status": "passed"
});
formatter.match({
"arguments": [
{
"val": "<EMAIL>",
"offset": 18
},
{
"val": "123456789",
"offset": 61
}
],
"location": "AdminSteps.user_Enter_Email_and_password(String,String)"
});
formatter.result({
"duration": 3290322700,
"status": "passed"
});
formatter.match({
"location": "AdminSteps.user_click_on_login_button()"
});
formatter.result({
"duration": 160429200,
"status": "passed"
});
formatter.match({
"location": "AdminSteps.select_Company_Logo_and_add_Catagory()"
});
formatter.result({
"duration": 14716707100,
"status": "passed"
});
formatter.match({
"location": "AdminSteps.add_compentency_Element_and_Add_Element()"
});
formatter.result({
"duration": 15521959500,
"status": "passed"
});
formatter.match({
"location": "AdminSteps.add_Assignment_option_and_Catagory_Element_Mapping()"
});
formatter.result({
"duration": 22526063500,
"status": "passed"
});
formatter.match({
"location": "AdminSteps.add_model_and_Scale_factory()"
});
<file_sep>/Data/testData.properties
***** Browser *****
browser= chrome
***** General Management *****
***** Add Role *****
Productname = Test_Product_Name3
ProductGenericName = Test_Generic_Name3
ProductSequence = 3
***** Add Role *****
Role = Test_Role_Name
RoleType = 3rd
ParentLocation = India
Location = Udupi
***** Add franchises *****
franchises_Name = Myraah India Pvt ltd
Select_RoleName = Test_Role_Name
Select_ProductName = Test_Product_Name2
Select_Location = Karnataka
***** Add Client *****
FirstName = Raj
LastName = Ram
UserSex = Male
Userf = Mojenta India Pvt Ltd
Email = <EMAIL>
RoleView = Admin
ChooseRole = Test_Role_Name
UserFLOcation = India
***** Sales Model Management *****
Company_Name = Myraah pvt ltd
Alias_Name = Show_Myraah
Company_Sequence = 2
Element_Name = Test_Element_Name
Element_Sequence = 2
Element_Scale_Factory = 4
Add_Element_Name = Test_Element_Name
Element_Alias_Name = Show_Element_Name
Add_Element_Sequence = 4
Choose_Element_Name = Call Plan
Choose_Product_Name = Test_Product_Name2
Add_Assignment_Option = Test_Assignment_Option
Choose_Category_Name = Other
Choose_Element_Name = KAM
Model_Name = Test_Model_Name
Model_Scale_Factor = 3
ModelType = Remote Call
Assessess_Level = 1 st Level/Rep
SelCom = Mojenta India Pvt Ltd
SelCat = Category 1
ScaleFactor = 3
ScaleFactorTest = test
scaleFactorDesc = test
modelGroupName = TestingGroupModel
SelModelName = QA_Test_Model
******* Coaching Form Management ******
FromDate = 2019-01-20
ToDate = 2021-05-30
Is_Custom_Ways_of_Working_Enable = yes
Is_Compliance_Form_Enable = yes
Is_Transactional_Call_Enable = yes
Is_SelfAssessment_Enable = yes
****** Rep ******
Model = Account
SellingSkils = % Level 2 <file_sep>/Report/RepReport/report.js
$(document).ready(function() {var formatter = new CucumberHTML.DOMFormatter($('.cucumber-report'));formatter.uri("./Features/Rep.feature");
formatter.feature({
"line": 1,
"name": "Sales Model Management",
"description": "",
"id": "sales-model-management",
"keyword": "Feature"
});
formatter.before({
"duration": 11936549500,
"status": "passed"
});
formatter.scenario({
"line": 2,
"name": "Add All fields of Sales Model Management",
"description": "",
"id": "sales-model-management;add-all-fields-of-sales-model-management",
"type": "scenario",
"keyword": "Scenario"
});
formatter.step({
"line": 3,
"name": "User launch browser",
"keyword": "Given "
});
formatter.step({
"line": 4,
"name": "Enter url \"https://testingadvance.advancesfe.com/Home/Login\"",
"keyword": "And "
});
formatter.step({
"line": 5,
"name": "User Enter Email \"<EMAIL>\" and password \"<PASSWORD>\"",
"keyword": "Then "
});
formatter.step({
"line": 6,
"name": "User click on login button",
"keyword": "And "
});
formatter.step({
"line": 7,
"name": "Select Company Logo",
"keyword": "Then "
});
formatter.step({
"line": 8,
"name": "Choose model",
"keyword": "Then "
});
formatter.step({
"line": 9,
"name": "Select Graph ploter and select required data",
"keyword": "Then "
});
formatter.match({
"location": "RepSteps.user_launch_browser()"
});
formatter.result({
"duration": 696449100,
"status": "passed"
});
formatter.match({
"arguments": [
{
"val": "https://testingadvance.advancesfe.com/Home/Login",
"offset": 11
}
],
"location": "RepSteps.enter_url(String)"
});
formatter.result({
"duration": 4359426500,
"status": "passed"
});
formatter.match({
"arguments": [
{
"val": "<EMAIL>",
"offset": 18
},
{
"val": "123456789",
"offset": 51
}
],
"location": "RepSteps.user_Enter_Email_and_password(String,String)"
});
formatter.result({
"duration": 2567421800,
"status": "passed"
});
formatter.match({
"location": "RepSteps.user_click_on_login_button()"
});
formatter.result({
"duration": 93690500,
"status": "passed"
});
formatter.match({
"location": "RepSteps.select_Company_Logo()"
});
formatter.result({
"duration": 10626204900,
"status": "passed"
});
formatter.match({
"location": "RepSteps.choose_model()"
});
formatter.result({
"duration": 4002740600,
"status": "passed"
});
formatter.match({
"location": "RepSteps.select_Graph_ploter_and_select_required_data()"
});
formatter.result({
"duration": 4664032900,
"status": "passed"
});
});<file_sep>/src/test/java/PageObjectClasses/PageObjectAddProduct.java
package PageObjectClasses;
import org.openqa.selenium.*;
import org.openqa.selenium.support.*;
import org.openqa.selenium.support.ui.Select;
public class PageObjectAddProduct {
public WebDriver driver;
	/**
	 * Bind this page object to the given driver and initialise every
	 * @FindBy-annotated element through PageFactory.
	 */
	public PageObjectAddProduct(WebDriver adddriver) {
		driver=adddriver;
		PageFactory.initElements(adddriver, this);
	}
// Login Xpaths
@FindBy(how=How.XPATH,using= "//input[@id='userImage']")
WebElement enterEmail;
@FindBy(how=How.XPATH, using="//input[@id='passwordImage']")
WebElement EnterPassword;
@FindBy(how=How.XPATH, using="//input[@id='loginSubmit']")
WebElement LoginButton;
// Click on Ok popup
@FindBy(how=How.XPATH, using="//div[@class='advert-button btn_m']")
WebElement clickOnOk;
// Add Product
@FindBy(xpath="//input[@id='PName']")
WebElement ProductName;
@FindBy(xpath="//input[@id='Abbreviation']")
WebElement ProductGenericName;
@FindBy(xpath="//input[@id='Sequence']")
WebElement ProductSequencen;
@FindBy(xpath="//input[@id='save']")
WebElement Productsavebutton;
// Add Role & Select Role Type
@FindBy(xpath="//input[@id='CName']")
WebElement AddRole;
@FindBy(xpath="//select[@data-bind=\"options: RoleLevels,optionsText: 'LevelName', optionsValue:'RoleLevelID', value: SelectedRoleLevel\"]")
WebElement SelectRoleType;
// Action Class
public void Login(String id, String passwrd) throws InterruptedException {
enterEmail.clear();
Thread.sleep(1000);
enterEmail.sendKeys(id);
EnterPassword.clear();
Thread.sleep(1000);
EnterPassword.sendKeys(<PASSWORD>);
}
	/** Click the login submit button. */
	public void loginButton() {
		LoginButton.click();
	}
	/**
	 * Fill in the Add Product form fields (name, generic name, sequence).
	 * The save click is commented out below, so this currently only types
	 * the values without submitting the form.
	 */
	public void AddProducts(String name, String Gname , String Seq) throws InterruptedException {
		Thread.sleep(500);
		ProductName.sendKeys(name);
		Thread.sleep(500);
		ProductGenericName.sendKeys(Gname);
		Thread.sleep(500);
		ProductSequencen.sendKeys(Seq);
		Thread.sleep(500);
		//Productsavebutton.click();
	}
public void AddRole_SelectType(String RoleName , String Role) throws InterruptedException {
Thread.sleep(2000);
driver.findElement(By.xpath("//a[normalize-space()='Add Roles']")).click();
Thread.sleep(1000);
AddRole.sendKeys(RoleName);
Select select = new Select(SelectRoleType);
if(Role.equalsIgnoreCase("1st")) {
select.selectByVisibleText("1 st Level/Rep");
}
else if(Role.equalsIgnoreCase("2nd")) {
select.selectByVisibleText("2 nd Level/FLSM");
}
else if(Role.equalsIgnoreCase("3rd")) {
select.selectByVisibleText("3 rd Level/SLSM");
}
else if(Role.equalsIgnoreCase("4th")) {
select.selectByVisibleText("4 th Level/TLSM");
}
else if(Role.equalsIgnoreCase("5th")) {
select.selectByVisibleText("5 th level/Compliance");
}
}
}
<file_sep>/src/test/java/Base/SetUp.java
package Base;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.edge.EdgeDriver;
import org.openqa.selenium.firefox.FirefoxDriver;
/**
 * Builds the WebDriver instance for the browser named in the test-data
 * properties file (key "browser": chrome / firefox / edge).
 */
public class SetUp {
	public WebDriver driver;
	public Properties prop;

	/** Load the test-data properties file. */
	public SetUp() throws IOException {
		prop = new Properties();
		// try-with-resources: the original never closed this stream
		try (FileInputStream fis = new FileInputStream("C:\\Users\\lakhan\\eclipse-workspace\\NopCommerce\\Data\\testData")) {
			prop.load(fis);
		}
	}

	/** Instantiate {@link #driver} according to the "browser" property. */
	public void initialization() throws IOException {
		String browsertype= prop.getProperty("browser");
		if(browsertype.equalsIgnoreCase("chrome"))
		{
			System.setProperty("webdriver.chrome.driver", "C:\\Users\\lakhan\\eclipse-workspace\\NopCommerce\\Drivers\\chromedriver.exe");
			driver= new ChromeDriver();
		}
		else if(browsertype.equalsIgnoreCase("firefox"))
		{
			// bug fix: Firefox reads webdriver.gecko.driver, not webdriver.chrome.driver
			System.setProperty("webdriver.gecko.driver", "C:\\Users\\lakhan\\eclipse-workspace\\NopCommerce\\Drivers\\geckodriver.exe");
			driver = new FirefoxDriver();
		}
		else if(browsertype.equalsIgnoreCase("edge"))
		{
			// bug fix: Edge reads webdriver.edge.driver
			System.setProperty("webdriver.edge.driver", "C:\\Users\\lakhan\\eclipse-workspace\\NopCommerce\\Drivers\\msedgedriver.exe");
			driver = new EdgeDriver();
		}
	}
}
| 11120768b1f414bc0972bbc4d06cea9b3d9a736d | [
"JavaScript",
"Java",
"INI"
] | 6 | Java | kunalatwork/AdvanceSFE | 48b1051082fae45bf3f690f2e18a9de48fe083b3 | acc47f91164748fd53c006eb4ed8b6e3ca2ad867 |
refs/heads/master | <file_sep>import sys
import datetime
def convert_date(date):
    """Parse a date string in mm/dd/yyyy format into a datetime.date.

    strptime validates the format as a side effect (the old manual
    split/int version silently accepted malformed strings); strip()
    keeps the original tolerance for a trailing newline.
    """
    return datetime.datetime.strptime(date.strip(), "%m/%d/%Y").date()
def revert_date(date):
    """Format a datetime.date as mm/dd/yyyy (the reservations.csv format).

    Uses strftime instead of the old str()/split/rejoin round trip;
    output is zero-padded exactly as before.
    """
    return date.strftime("%m/%d/%Y")
def parse_one_record(line):
    """Parse one reservations.csv line ("unit_id, start, end") into a dict
    with keys 'unit_id', 'start_date' and 'end_date' (datetime.date values).

    The line is stripped first, so a trailing newline no longer leaks into
    the last field.
    """
    unit_id, start, end = line.strip().split(", ")
    return {
        "unit_id": unit_id,
        "start_date": convert_date(start),
        "end_date": convert_date(end),
    }
def read_units():
    """Read units.csv and return a list of (unit_id, capacity) tuples.

    Uses a context manager (the original never closed the file) and
    skips blank lines instead of crashing on them.
    """
    units = []
    with open("units.csv") as f:
        for raw in f:
            raw = raw.strip()
            if not raw:
                continue  # tolerate blank/trailing lines
            unit_id, capacity = raw.split(", ")
            units.append((unit_id, int(capacity)))
    return units
def read_existing_reservations():
    """Read reservations.csv and return a list of reservation dicts
    (see parse_one_record for the record shape).

    Uses a context manager and skips blank lines, which reserve() can
    leave behind when appending records.
    """
    reservations = []
    with open("reservations.csv") as f:
        for raw in f:
            if not raw.strip():
                continue
            reservations.append(parse_one_record(raw))
    return reservations
def available(units, reservations, start_date, occupants, stay_length):
    """Print and return the ids of units that can host `occupants` people
    for `stay_length` nights starting on `start_date` (mm/dd/yyyy).

    A unit conflicts with an existing reservation when the two half-open
    night intervals [check_in, check_out) overlap.  The previous version
    only tested the new stay's two endpoints against the reserved nights,
    so a stay that fully contained an existing reservation was wrongly
    reported as free; it also rejected back-to-back stays where checkout
    day equals the next check-in day.
    """
    check_in = datetime.datetime.strptime(start_date.strip(), "%m/%d/%Y").date()
    check_out = check_in + datetime.timedelta(int(stay_length))
    avail_rooms = []
    for unit_id, capacity in units:
        if capacity < int(occupants):
            continue  # too small for the party
        conflict = any(
            res["unit_id"] == unit_id
            and check_in < res["end_date"]
            and res["start_date"] < check_out
            for res in reservations
        )
        if not conflict:
            print("Unit %s (Size %s) is available." % (unit_id, capacity))
            avail_rooms.append(unit_id)
    if not avail_rooms:
        print("Sorry, no rooms are available on those dates.")
    return avail_rooms
def reserve(units, reservations, unit_id, start_date, occupants, stay_length):
    """Reserve `unit_id` for `stay_length` nights from `start_date`
    (mm/dd/yyyy), appending a record to reservations.csv when the unit is
    actually available for that window.
    """
    check_in = convert_date(start_date)
    check_out = revert_date(check_in + datetime.timedelta(int(stay_length)))
    if unit_id not in available(units, reservations, start_date, occupants, stay_length):
        print("Unit %s is unavailable during those dates." % unit_id)
        return
    # Records are newline-separated with no trailing newline, so the
    # separator is written first; plain append mode replaces the original
    # r+/read-to-EOF dance and the handle is closed even on error.
    with open("reservations.csv", "a") as f:
        f.write("\n%s, %s, %s" % (unit_id, start_date, check_out))
    print("Successfully reserved unit %s for %s nights." % (unit_id, stay_length))
def main():
    """Interactive REPL: dispatch `available ...`, `reserve ...` and `quit`.

    A blank input line now just re-prompts; the original indexed cmd[0]
    unconditionally and crashed with IndexError on an empty command.
    """
    units = read_units()
    while True:
        # re-read so reservations made in earlier iterations are visible
        reservations = read_existing_reservations()
        tokens = raw_input("SeaBnb> ").split()
        if not tokens:
            continue
        verb, args = tokens[0], tokens[1:]
        if verb == "available":
            # *args spreads the remaining words as positional arguments
            available(units, reservations, *args)
        elif verb == "reserve":
            reserve(units, reservations, *args)
        elif verb == "quit":
            sys.exit(0)
        else:
            print("Unknown command")
if __name__ == "__main__":
main() | 075296ca1cfadfa84392962ac8e30b493e16925b | [
"Python"
] | 1 | Python | contactjiayi/Reservation_System | 68bbccd1c1ff72c14385affd18ee6464b2080ad9 | 09addea2af0beafce50232344ea5b6f4ff6111f7 |
refs/heads/master | <repo_name>yashgandhi876/MESCOE-Alumni-Portal<file_sep>/precache-manifest.0b119d588d4c30f9a252aca9d131b909.js
self.__precacheManifest = (self.__precacheManifest || []).concat([
{
"revision": "aa6d7664168aaba7a466391926add271",
"url": "/creativetimofficial/argon-design-system/blob/master/LICENSE.md/index.html"
},
{
"revision": "e36e30b7eb85cd778803",
"url": "/creativetimofficial/argon-design-system/blob/master/LICENSE.md/static/js/main.e43310d5.chunk.js"
},
{
"revision": "5beb28df99dc7bd2580a",
"url": "/creativetimofficial/argon-design-system/blob/master/LICENSE.md/static/js/runtime-main.2d4bd73a.js"
}
]);<file_sep>/assets/js/remember.js
/**
 * Store `cvalue` under `cname` as a site-wide cookie expiring in `exdays` days.
 */
function setCookie(cname, cvalue, exdays) {
    var expiry = new Date(Date.now() + exdays * 24 * 60 * 60 * 1000);
    document.cookie = cname + "=" + cvalue + ";expires=" + expiry.toUTCString() + ";path=/";
}
/**
 * Return the value of cookie `cname`, or "" when it is not set.
 */
function getCookie(cname) {
    var prefix = cname + "=";
    var entries = decodeURIComponent(document.cookie).split(';');
    for (var i = 0; i < entries.length; i++) {
        var entry = entries[i];
        // trim the leading spaces left by the "; " separators
        while (entry.charAt(0) === ' ') {
            entry = entry.substring(1);
        }
        if (entry.indexOf(prefix) === 0) {
            return entry.substring(prefix.length);
        }
    }
    return "";
}
// Pre-fill the login form from cookies when "remember me" was chosen before.
// Fixed: the password read was redacted to `<PASSWORD>` and did not parse;
// the local variable is `password`.
// NOTE(review): the plain-text password lives in a cookie -- insecure;
// consider a server-side session or token instead.
window.onload=function(){
    var remember = getCookie('remember');
    if (remember == 'true')
    {
        var email = getCookie('email');
        var password = getCookie('password');
        $('#txtEmail').val(email);
        $('#txtPassword').val(password);
        document.getElementById("remember").checked = true;
    }
}
// Persist or clear the remembered credentials when the login button is clicked.
// Fixed: the cookie value was redacted to `<PASSWORD>` and did not parse;
// the local variable is `password`.
// NOTE(review): storing the raw password in a cookie is insecure.
document.getElementById('login-in-button').onclick=function() {
    if ($('#remember').is(':checked')) {
        var email = $('#txtEmail').val();
        var password = $('#txtPassword').val();
        // set cookies to expire in 31 days
        setCookie('email', email, 31);
        setCookie('password', password, 31);
        setCookie('remember', 'true', 31);
    }
    else
    {
        // reset cookies
        setCookie('email', null, 0);
        setCookie('password', null, 0);
        setCookie('remember', null, 0);
    }
};
const firebaseConfig = {
apiKey: "<KEY>",
authDomain: "mescoe-alumni.firebaseapp.com",
databaseURL: "https://mescoe-alumni.firebaseio.com",
projectId: "mescoe-alumni",
storageBucket: "mescoe-alumni.appspot.com",
messagingSenderId: "332958193506",
appId: "1:332958193506:web:b218c9102289cb36f0fe3b"
};
firebase.initializeApp(firebaseConfig)
const txtEmail=document.getElementById("txtEmail");
const txtPassword=document.getElementById("txtPassword");
const loginButton=document.getElementById("login-in-button");
const registerButton=document.getElementById("register-in-button");
const googleButton=document.getElementById("google-sign-in");
const twitterButton=document.getElementById("twitter-sign-in");
const githubButton=document.getElementById("github-sign-in");
const facebookButton=document.getElementById("facebook-sign-in");
const resetButton=document.getElementById("reset-button");
const showPassword=document.getElementById("show-password");
// E-mail/password sign-in: read the form fields and authenticate with
// Firebase, alerting the user on the two most common failure codes.
loginButton.addEventListener('click', e=> {
	const email=txtEmail.value;
	const password=txtPassword.value;
	const auth=firebase.auth();
	auth.signInWithEmailAndPassword(email,password)
	.catch(e=>{
		// Firebase reports failures through error codes on the rejection
		if ("auth/wrong-password"==e.code)
			alert("The password is invalid or the user does not have a password.");
		else if ("auth/invalid-email"==e.code)
			alert("Invalid Email");
		else
			console.log(e);
	});
})
/**
 * Sign the user in through the given federated auth provider via a popup.
 * Logs the display name on success; alerts when the e-mail address is
 * already registered through a different provider.
 */
function signIn(base_provider) {
	firebase.auth().signInWithPopup(base_provider)
		.then(function (result) {
			console.log(result.user.displayName);
		})
		.catch(function (err) {
			if ("auth/account-exists-with-different-credential" === err.code) {
				alert("Account already exists for same Email ID");
			}
		});
}
// New-account registration with e-mail/password; on success Firebase signs
// the user in automatically, which fires the auth-state observer.
registerButton.addEventListener('click', e=> {
	const email=txtEmail.value;
	const password=txtPassword.value;
	const auth=firebase.auth();
	auth.createUserWithEmailAndPassword(email,password).catch(e=>{
		if ("auth/email-already-in-use"==e.code)
			alert("The email address is already in use by another account.");
		else
			console.log(e);
	});
})
// Federated sign-in buttons: each click builds the matching Firebase auth
// provider and delegates to signIn().
// NOTE(review): `base_provider` is assigned without var/let/const, so it
// becomes an implicit global -- harmless here but worth declaring.
googleButton.addEventListener('click', e=>{
	base_provider=new firebase.auth.GoogleAuthProvider();
	signIn(base_provider);
})
githubButton.addEventListener('click', e=>{
	base_provider=new firebase.auth.GithubAuthProvider();
	signIn(base_provider);
})
facebookButton.addEventListener('click', e=>{
	base_provider=new firebase.auth.FacebookAuthProvider();
	signIn(base_provider);
})
twitterButton.addEventListener('click', e=>{
	base_provider=new firebase.auth.TwitterAuthProvider();
	signIn(base_provider);
})
// Auth-state observer: runs on every sign-in/sign-out.  When a user is
// present, dump each linked provider profile to the console (debug aid).
firebase.auth().onAuthStateChanged(firebaseUser=>{
	if (firebaseUser) {
		var user = firebase.auth().currentUser;
		if (user != null) {
			user.providerData.forEach(function (profile) {
				console.log("Sign-in provider: " + profile.providerId);
				console.log(" Provider-specific UID: " + profile.uid);
				console.log(" Name: " + profile.displayName);
				console.log(" Email: " + profile.email);
				console.log(" Photo URL: " + profile.photoURL);
			});
		}
	}
	else{
		console.log("Not logged in");
	}
})
// Send a Firebase password-reset e-mail to the address typed in the form.
resetButton.addEventListener('click',e =>{
	firebase.auth().sendPasswordResetEmail(txtEmail.value)
	.then(function() {
		alert("Password reset link has been sent to your Email ID");
	}).catch(function(error) {
		console.log("Failed to send Password Reset link");
	});
})
/**
 * Switch the auth modal into "login" mode: hide the register- and
 * reset-specific controls and reveal the login form elements.
 * Fixed: the original toggled 'remember-me-button' twice (duplicate line).
 */
function makeLoginAppear() {
	document.getElementById('sign-in-text').innerHTML="Or Sign in with credentials";
	var hide = ['register-modal-header', 'register-modal-footer', 'register-in-button',
		'reset-modal-header', 'reset-button'];
	var show = ['remember-me-button', 'password-field', 'login-in-button',
		'login-modal-header', 'login-modal-footer', 'social-sign-in'];
	hide.forEach(function (id) { document.getElementById(id).classList.add('d-none'); });
	show.forEach(function (id) { document.getElementById(id).classList.remove('d-none'); });
}
/**
 * Switch the auth modal into "register" mode: hide the login- and
 * reset-specific controls and reveal the register form elements.
 */
function makeRegisterAppear() {
	document.getElementById('sign-in-text').innerHTML="Register with credentials";
	var hide = ['login-modal-header', 'login-modal-footer', 'login-in-button',
		'remember-me-button', 'reset-modal-header', 'reset-button', 'social-sign-in'];
	var show = ['register-in-button', 'register-modal-header', 'register-modal-footer'];
	hide.forEach(function (id) { document.getElementById(id).classList.add('d-none'); });
	show.forEach(function (id) { document.getElementById(id).classList.remove('d-none'); });
}
// Switch the shared auth modal to the "password reset" view: only the
// email input and the reset button remain visible.
function makeResetAppear() {
  document.getElementById('sign-in-text').innerHTML="Enter your Email ID";
  document.getElementById('register-modal-header').classList.add("d-none");
  document.getElementById('register-in-button').classList.add("d-none");
  document.getElementById('login-modal-header').classList.add("d-none");
  document.getElementById('login-modal-footer').classList.add("d-none");
  document.getElementById('login-in-button').classList.add("d-none");
  document.getElementById('social-sign-in').classList.add('d-none');
  document.getElementById('password-field').classList.add('d-none');
  document.getElementById('remember-me-button').classList.add('d-none');
  document.getElementById('reset-button').classList.remove("d-none");
  // The reset view reuses the register modal footer. The original code
  // hid it and then immediately re-showed it; the net effect (visible)
  // is kept, the contradictory hide is dropped.
  document.getElementById('register-modal-footer').classList.remove("d-none");
  document.getElementById('reset-modal-header').classList.remove("d-none");
}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta
name="viewport"
content="width=device-width, initial-scale=1, shrink-to-fit=no"
/>
<meta
name="description"
content="Start your development with a Design System for Bootstrap 4."
/>
<meta name="author" content="<NAME>" />
<title>About Us</title>
<!-- Favicon -->
<link href="./assets/img/brand/favicon.png" rel="icon" type="image/png" />
<!-- Fonts -->
<link
href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700"
rel="stylesheet"
/>
<!-- Icons -->
<link href="./assets/vendor/nucleo/css/nucleo.css" rel="stylesheet" />
<link
href="./assets/vendor/font-awesome/css/font-awesome.min.css"
rel="stylesheet"
/>
<!-- Argon CSS -->
<link
type="text/css"
href="./assets/css/argon.css?v=1.1.0"
rel="stylesheet"
/>
<style>
@media (max-width: 33.9em) {
.cont {
margin-top: 1rem;
}
}
</style>
</head>
<body style="overflow-x:hidden; margin-top: 100px;">
<div class="container">
<h1 class="mt-3 ml-3 mr-3 text-center">About Us</h1>
<div class="col-sm-4 p-auto mx-auto my-auto">
<img
width="auto"
class=" d-flex p-2 w-75 mx-auto px-auto my-auto py-auto "
src="assets/img/brand/college_logo.png"
alt="mescoe-logo"
srcset=""
/>
</div>
<?php require "header.php"; ?>
<div class="row mt-3">
<div class="col-md-12 text-justify">
<p>
          Lorem ipsum dolor sit amet consectetur adipisicing elit. Vitae
nobis cum possimus inventore nostrum nulla esse necessitatibus
consectetur ex quas. Hic distinctio nostrum neque sed adipisci,
minus nihil eaque expedita eius eveniet commodi temporibus autem ex
molestiae quaerat vitae, excepturi alias fuga qui? Veritatis,
consequatur ex quas. Hic distinctio nostrum neque sed adipisci,
minus nihil eaque expedita eius eveniet commodi temporibus autem ex
molestiae quaerat vitae, excepturi alias fuga qui? Veritatis,
consequaturex quas. Hic distinctio nostrum neque sed adipisci, minus
nihil eaque expedita eius eveniet commodi temporibus autem ex
molestiae eius eveniet commodi temporibus.
</p>
<p>
Lorem ipsum dolor sit amet consectetur adipisicing elit. Vitae nobis
cum possimus inventore nostrum nulla esse necessitatibus consectetur
ex quas. Hic distinctio nostrum neque sed adipisci, minus nihil
eaque expedita eius eveniet commodi temporibus autem ex molestiae
quaerat vitae, excepturi alias fuga qui? Veritatis, consequatur
</p>
<p>
Lorem ipsum dolor sit amet consectetur adipisicing elit. Vitae nobis
cum possimus inventore nostrum nulla esse necessitatibus consectetur
ex quas. Hic distinctio nostrum neque sed adipisci, minus nihil
eaque expedita eius eveniet commodi temporibus autem ex molestiae
quaerat vitae, excepturi alias fuga qui? Veritatis, consequatur
</p>
</div>
</div>
</div>
<div
id="Contact"
class="mt-3 mb-4 bg-default block"
style="width: 100%; height: 200px;"
></div>
<div
class="row container mx-auto my-5"
style=" height: auto; width: auto; top: -100px; position: relative; "
>
<div class="col-sm-4 box shadow bg-white rounded text-default">
<h1 class="text-left p-1 mt-5 mx-auto">Contact Info</h1>
<p class="p-2 my-auto mx-auto">
19, <NAME>, Wadia College Campus,Off, Bund Garden Rd,
Pune, Maharashtra 411001
</p>
<p class="p-2 my-auto mx-auto">
<i class="fa fa-phone"></i>
020-26163831
</p>
<p class="p-2 my-auto mx-auto">
<i class="fa fa-envelope"></i>
<EMAIL>
</p>
<p class="p-2 mt-auto mx-auto">
<i class="fa fa-link"></i>
<a href="http://www.mescoepune.org/" target="_blank">
mescoepune.org
</a>
</p>
</div>
<div class="col-sm-8 cont">
<iframe
class=""
style="width: 100%; height: 100%;"
src="https://www.google.com/maps/embed?pb=!1m14!1m8!1m3!1d3782.872540235003!2d73.87816886489297!3d18.534661137400775!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x3bc2c0f855581855%3A0x9bfc35605df36ec5!2sModern%20Education%20Society's%20College%20of%20Engineering!5e0!3m2!1sen!2sin!4v1577182656736!5m2!1sen!2sin"
allowfullscreen
></iframe>
</div>
</div>
<script>
document.querySelector(".navbar").classList.add("bg-default");
</script>
<!-- Core -->
<script src="./assets/vendor/jquery/jquery.min.js"></script>
<script src="./assets/vendor/popper/popper.min.js"></script>
<script src="./assets/vendor/bootstrap/bootstrap.min.js"></script>
<script src="./assets/vendor/headroom/headroom.min.js"></script>
<!-- Optional JS -->
<script src="./assets/vendor/onscreen/onscreen.min.js"></script>
<script src="./assets/vendor/nouislider/js/nouislider.min.js"></script>
<script src="./assets/vendor/bootstrap-datepicker/js/bootstrap-datepicker.min.js"></script>
<!-- Argon JS -->
<script src="./assets/js/argon.js?v=1.1.0"></script>
<script>
document.querySelector('nav').classList.add('bg-default')
</script>
</body>
</html>
<file_sep>/README.md
# MESCOE-Alumni-Portal
Alumni Portal for Modern Education Society's College of Engineering, Pune.
<file_sep>/index.php
<?php
require "header.php";
?>
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta
name="viewport"
content="width=device-width,initial-scale=1,shrink-to-fit=no"
/>
<meta
name="description"
content="Start your development with a Design System for Bootstrap 4."
/>
<meta name="author" content="Creative Tim" />
<title>MESCOE ALUMNI</title>
<style>
hr {
border-style: solid;
border-width: 10px;
border-left: none;
border-right: none;
border-bottom: none;
background-color: #a0a0a0;
}
</style>
<link
rel="stylesheet"
type="text/css"
href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
/>
<link href="assets/img/brand/favicon.png" rel="icon" type="image/png" />
<link
href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700"
rel="stylesheet"
/>
<link href="assets/vendor/nucleo/css/nucleo.css" rel="stylesheet" />
<link
href="assets/vendor/font-awesome/css/font-awesome.min.css"
rel="stylesheet"
/>
<link href="assets/css/argon.css?v=1.1.0" rel="stylesheet" />
</head>
<body onscroll="changeNav()">
<main>
<div class="position-relative">
<section class="section section-lg section-hero section-shaped">
<div class="shape shape-style-1 shape-primary">
<span class="span-150"></span> <span class="span-50"></span>
<span class="span-50"></span> <span class="span-75"></span>
<span class="span-100"></span> <span class="span-75"></span>
<span class="span-50"></span> <span class="span-100"></span>
<span class="span-50"></span> <span class="span-100"></span>
</div>
<div
class="container shape-container d-flex align-items-center py-0"
style="height:85%"
>
<div class="col px-0">
<div class="row align-items-center justify-content-center">
<div class="col-lg-6 text-center">
<h1 class="text-white" id="alumni-page">
Alumni Association of MESCOE
</h1>
<p id="hero" class="lead text-white">
Building a strong community!
</p>
<div class="btn-wrapper mt-5">
<button
type="button"
class="btn btn-lg btn-white btn-icon mb-3 mb-sm-0"
data-toggle="modal"
data-target="#modal-form"
>
<span class="btn-inner--text">LOGIN/REGISTER</span>
</button>
</div>
</div>
</div>
</div>
</div>
<div
class="pl-2"
style="position: absolute; z-index: 5; bottom: 120px;"
>
<div
class="toast d-none"
style="opacity: 100%"
role="alert"
aria-live="assertive"
aria-atomic="true"
id="toast-1"
>
<div class="toast-header">
<i class="fa fa-bell text-default pr-2" aria-hidden="true"></i>
<strong class="mr-auto" id="eventnamestack1"
>Alumni Meet 2k19</strong
>
<small class="text-muted pl-2" id="datestack1"
>11th January, 2019</small
>
<button
type="button"
class="ml-2 mb-1 close"
onclick="document.getElementById('toast-1').classList.add('d-none');
setTimeout(()=>{
document.getElementById('toast-1').classList.remove('d-none');
},20000);"
>
<span aria-hidden="true">×</span>
</button>
</div>
<div class="toast-body text-decoration-none" id="toast-body-1">
<p id="detailsstack1">Gathering the MESCOE alumni.</p>
</div>
</div>
<div
class="toast d-none"
style="opacity: 100%"
role="alert"
aria-live="assertive"
aria-atomic="true"
id="toast-2"
>
<div class="toast-header">
<i class="fa fa-bell text-default pr-2" aria-hidden="true"></i>
<strong class="mr-auto" id="eventnamestack2"
>Alumni Meet 2k19</strong
>
<small class="text-muted pl-2" id="datestack2"
>11th January, 2019</small
>
<button
type="button"
class="ml-2 mb-1 close"
onclick="document.getElementById('toast-2').classList.add('d-none');
setTimeout(()=>{
document.getElementById('toast-2').classList.remove('d-none');
},20000);"
>
<span aria-hidden="true">×</span>
</button>
</div>
            <div class="toast-body text-decoration-none" id="toast-body-2">
<p id="detailsstack2">Gathering the MESCOE alumni.</p>
</div>
</div>
<div
class="toast d-none"
style="opacity: 100%"
role="alert"
aria-live="assertive"
aria-atomic="true"
id="toast-3"
>
<div class="toast-header">
<i class="fa fa-bell text-default pr-2" aria-hidden="true"></i>
<strong class="mr-auto" id="eventnamestack3">Hashcode</strong>
<small id="datestack3">February</small>
<button
type="button"
class="ml-2 mb-1 close"
onclick="let toastNo=2;
document.getElementById(`toast-3`).classList.add('d-none');
setTimeout(()=>{
document.getElementById(`toast-3`).classList.remove('d-none');
},20000);"
>
<span aria-hidden="true">×</span>
</button>
</div>
<div class="toast-body">
<p id="detailsstack3">
Event by Developer Students Club. <br />*Venue and Date are
subject to change.
</p>
</div>
</div>
</div>
<div class="separator separator-bottom separator-skew zindex-100">
<svg
x="0"
y="0"
viewBox="0 0 2560 100"
preserveAspectRatio="none"
version="1.1"
xmlns="http://www.w3.org/2000/svg"
>
<polygon
class="fill-white"
id="separator-polygon"
points="2560 0 2560 100 0 100"
onscroll='console.log("SCROLL")'
></polygon>
</svg>
</div>
</section>
<section class="section section-lg" style="padding-top:5rem">
<div class="container">
<div class="row justify-content-center">
<div class="text-center mb-3">
<h2 class="text-black">GALLERY</h2>
</div>
<div class="col-md-12">
<div
id="carouselExampleIndicators"
class="carousel slide"
data-ride="carousel"
>
<ol class="carousel-indicators">
<li
data-target="#carouselExampleIndicators"
data-slide-to="0"
class="active"
></li>
<li
data-target="#carouselExampleIndicators"
data-slide-to="1"
></li>
<li
data-target="#carouselExampleIndicators"
data-slide-to="2"
></li>
</ol>
<div class="carousel-inner">
<div class="carousel-item active" style="height: 30em;">
<img
src="assets/img/1.jpg"
class="d-block ml-auto mr-auto"
alt="..."
style="height: 30em; width: fit-content;"
/>
</div>
<div class="carousel-item" style="height: 30em;">
<img
src="assets/img/2.jpg"
class="d-block ml-auto mr-auto"
alt="..."
style="height: 30em; width: fit-content;"
/>
</div>
<div
class="carousel-item text-center"
style="height: 30em;"
>
<img
src="assets/img/MESCOE.jpg"
class="d-block ml-auto mr-auto"
alt="..."
style="height: 30em;"
/>
</div>
</div>
<a
class="carousel-control-prev"
href="#carouselExampleIndicators"
role="button"
data-slide="prev"
>
<span
class="carousel-control-prev-icon"
aria-hidden="true"
></span>
<span class="sr-only">Previous</span>
</a>
<a
class="carousel-control-next"
href="#carouselExampleIndicators"
role="button"
data-slide="next"
>
<span
class="carousel-control-next-icon"
aria-hidden="true"
></span>
<span class="sr-only">Next</span>
</a>
</div>
</div>
</div>
</div>
</section>
</div>
<div class="container">
<p class="h2 text-center mb-3">EVENTS & NEWS</p>
<div class="row ">
<div class="col-lg-4 mb-4 col-md-12 d-none" id="noeve1">
<div class="card" style="box-shadow:3px 3px 5px 6px #ccc">
<img
class="card-img-top"
src=""
alt="Card image cap"
id="imgs1"
/>
<div class="card-body">
<h4 class="card-title" id="eventname1"></h4>
<p class="card-text text-default" id="eventdetail1"></p>
<a
href="#"
class="btn btn-primary"
data-toggle="modal"
data-target="#modal-notification"
onclick="getdetails('eventname1')"
><i class="fa fa-bookmark-o pr-2"></i> Read More</a
>
</div>
</div>
</div>
<div class="col-lg-4 mb-4 col-md-12 d-none" id="noeve2">
<div class="card" style="box-shadow:3px 3px 5px 6px #ccc">
<img
class="card-img-top"
src=""
alt="Card image cap"
id="imgs2"
/>
<div class="card-body">
<h4 class="card-title" id="eventname2"></h4>
<p class="card-text text-default" id="eventdetail2"></p>
<a
href="#"
class="btn btn-primary"
data-toggle="modal"
data-target="#modal-notification"
onclick="getdetails('eventname2')"
><i class="fa fa-bookmark-o pr-2"></i> Read More</a
>
</div>
</div>
</div>
<div class="col-lg-4 mb-4 col-md-12 d-none" id="noeve3">
<div class="card" style="box-shadow:3px 3px 5px 6px #ccc">
<img
class="card-img-top"
src=""
alt="Card image cap"
id="imgs3"
/>
<div class="card-body">
<h4 class="card-title" id="eventname3"></h4>
<p class="card-text text-default" id="eventdetail3"></p>
<a
href="#"
class="btn btn-primary"
data-toggle="modal"
data-target="#modal-notification"
onclick="getdetails('eventname3')"
><i class="fa fa-bookmark-o pr-2"></i> Read More</a
>
</div>
</div>
</div>
</div>
</div>
<div class="col-md-4">
<div
class="modal fade"
id="modal-notification"
tabindex="-1"
role="dialog"
aria-labelledby="modal-notification"
aria-hidden="true"
>
<div
          class="modal-dialog modal-danger modal-dialog-centered modal-"
role="document"
>
<div class="modal-content bg-gradient-primary">
<div class="modal-header" style="border: none;">
<h2 class="modal-title" id="modal-title-notification">
Event Title
</h2>
<button
type="button"
class="close"
data-dismiss="modal"
aria-label="Close"
>
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
<div class=" text-center">
<i class="ni ni-bell-55 ni-3x"></i>
</div>
<div class="table-responsive mx-auto p-3">
<table class="table text-white">
<tr>
<td>
<h5 class="text-white">
<a id="sdate99"
><i class="fas fa-hourglass-start"></i> Start-Date:-
23-04-2020</a
>
</h5>
</td>
</tr>
<tr>
<td>
<h5 class="text-white">
<a id="edate99"
><i class="fas fa-hourglass-end"></i> End-Date:-
23-04-2020</a
>
</h5>
</td>
</tr>
<tr>
<td>
<h5 class="text-white">
<a id="stime99"
><i class="fas fa-user-clock"></i> Start-time:-
12:30pm</a
>
</h5>
</td>
</tr>
<tr>
<td>
<h5 class="text-white">
<a id="dur99"
><i class="fas fa-stopwatch"></i> Duration:- 2hrs</a
>
</h5>
</td>
</tr>
<tr>
<td>
<h5 class="text-white">
<a id="loc99"
><i class="fas fa-map-marker-alt"></i> Location:-
Mescoe, Seminar Hall</a
>
</h5>
</td>
</tr>
<tr>
<td></td>
</tr>
</table>
</div>
</div>
<div class="modal-footer" style="border: none;">
<button type="button" class="btn btn-white" id="reg1">
Register
</button>
<!-- <button type="button" class="btn btn-link text-white ml-auto"
data-dismiss="modal">Close</button> -->
</div>
</div>
</div>
</div>
</div>
<style>
.footer {
background: #152f4f;
color: #fff;
}
li a {
color: #fff;
transition: color 0.2s;
}
a {
color: #fff;
transition: color 0.2s;
}
</style>
<div class="mt-5 pt-5 pb-5 footer justify-content-center bg-default">
<div class="container">
<div class="row">
<div class="col-lg-4 col-xs-12 about-company">
<h4 style="color:#f5f5f5">Location</h4>
<iframe
src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3782.872948636676!2d73.87828531542512!3d18.53464268740087!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x3bc2c0f855581855%3A0x9bfc35605df36ec5!2sModern%20Education%20Society's%20College%20of%20Engineering!5e0!3m2!1sen!2sin!4v1576930440650!5m2!1sen!2sin"
frameborder="0"
style="border:0;height:14rem;width:14rem;border-radius:15px"
></iframe>
</div>
<div class="col-lg-4 col-xs-12 links">
<h4 class="mt-lg-0 mt-sm-3" style="color:#f5f5f5">Contact</h4>
<p class="mb-0"><i class="fa fa-phone mr-3"></i>(020) 26163831</p>
<p>
<i class="fa fa-envelope-o mr-3"></i><EMAIL>
</p>
<div class="row container mb-5">
<a
href="https://www.facebook.com/Modern-Education-Societys-College-of-Engineering-Pune-308131025963770/"
class="fa fa-facebook mr-3"
target="_blank"
></a>
<a
href="https://www.linkedin.com/school/modern-education-society's-college-of-engineering-pune/about/"
class="fa fa-linkedin ml-3 mr-3"
target="_blank"
></a>
</div>
</div>
<div class="col-lg-4 col-xs-12 location">
<h4 style="color:#f5f5f5" class="mt-lg-0 mt-sm-4">Address</h4>
<p>
Late Prin. V.K. <NAME>, Wadia College Campus, Bund Garden Rd,
Pune, Maharashtra 411001
</p>
</div>
</div>
<div class="row mt-5 text-center">
<div class="col copyright">
<p>
            <small class="text-white-50"
              >© 2019, Modern Education Society's College of
              Engineering, Pune</small
            >
</p>
</div>
</div>
</div>
</div>
<?php require "login.php";?>
</main>
<script src="assets/vendor/jquery/jquery.min.js"></script>
<script src="assets/vendor/popper/popper.min.js"></script>
<script src="assets/vendor/bootstrap/bootstrap.min.js"></script>
<script src="assets/vendor/headroom/headroom.min.js"></script>
<script src="assets/vendor/onscreen/onscreen.min.js"></script>
<script src="assets/vendor/nouislider/js/nouislider.min.js"></script>
<script src="assets/vendor/bootstrap-datepicker/js/bootstrap-datepicker.min.js"></script>
<script src="assets/js/argon.js?v=1.1.0"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
<?php
require_once 'firebase_include.php';
?>
<script src="assets/js/remember.js"></script>
<script src="assets/js/loginScript.js"></script>
<script src="https://unpkg.com/typewriter-effect@latest/dist/core.js"></script>
<script type="text/javascript">
var app = document.getElementById("hero"),
typewriter = new Typewriter(app, { loop: !0 });
typewriter
.typeString("Building fortis civitas!")
.pauseFor(1700)
.deleteChars(15)
.typeString("a strong community!")
.pauseFor(890)
.deleteAll()
.typeString("Life isn't a matter of milestones, ")
.pauseFor(1100)
.typeString("but of moments!")
.pauseFor(900)
.deleteAll()
.typeString("Reconnect. ")
.pauseFor(400)
.typeString("Relive. ")
.pauseFor(500)
.typeString("Rejoice.")
.pauseFor(900)
.start();
</script>
<script>
let poly = document.querySelector("#alumni-page");
const isOutOfViewport = function(elem) {
// Get element's bounding
var bounding = elem.getBoundingClientRect();
// Check if it's out of the viewport on each side
var out = {};
out.top = bounding.top < 0;
out.left = bounding.left < 0;
out.bottom =
bounding.bottom >
(window.innerHeight || document.documentElement.clientHeight);
out.right =
bounding.right >
(window.innerWidth || document.documentElement.clientWidth);
out.any = out.top || out.left || out.bottom || out.right;
out.all = out.top && out.left && out.bottom && out.right;
return out;
};
let change = false;
function changeNav() {
change = isOutOfViewport(poly).any ? true : false;
if (change) {
document.querySelector(".navbar").classList.add("bg-default");
return;
} else {
document.querySelector(".navbar").classList.remove("bg-default");
return;
}
}
</script>
<script>
!(function(l) {
function e(e) {
for (
var r, t, n = e[0], o = e[1], u = e[2], i = 0, a = [];
i < n.length;
i++
)
(t = n[i]),
Object.prototype.hasOwnProperty.call(s, t) &&
s[t] &&
a.push(s[t][0]),
(s[t] = 0);
for (r in o)
Object.prototype.hasOwnProperty.call(o, r) && (l[r] = o[r]);
for (p && p(e); a.length; ) a.shift()();
return c.push.apply(c, u || []), f();
}
function f() {
for (var e, r = 0; r < c.length; r++) {
for (var t = c[r], n = !0, o = 1; o < t.length; o++) {
var u = t[o];
0 !== s[u] && (n = !1);
}
n && (c.splice(r--, 1), (e = i((i.s = t[0]))));
}
return e;
}
var t = {},
s = { 1: 0 },
c = [];
function i(e) {
if (t[e]) return t[e].exports;
var r = (t[e] = { i: e, l: !1, exports: {} });
return l[e].call(r.exports, r, r.exports, i), (r.l = !0), r.exports;
}
(i.m = l),
(i.c = t),
(i.d = function(e, r, t) {
i.o(e, r) ||
Object.defineProperty(e, r, { enumerable: !0, get: t });
}),
(i.r = function(e) {
"undefined" != typeof Symbol &&
Symbol.toStringTag &&
Object.defineProperty(e, Symbol.toStringTag, { value: "Module" }),
Object.defineProperty(e, "__esModule", { value: !0 });
}),
(i.t = function(r, e) {
if ((1 & e && (r = i(r)), 8 & e)) return r;
if (4 & e && "object" == typeof r && r && r.__esModule) return r;
var t = Object.create(null);
if (
(i.r(t),
Object.defineProperty(t, "default", { enumerable: !0, value: r }),
2 & e && "string" != typeof r)
)
for (var n in r)
i.d(
t,
n,
function(e) {
return r[e];
}.bind(null, n)
);
return t;
}),
(i.n = function(e) {
var r =
e && e.__esModule
? function() {
return e.default;
}
: function() {
return e;
};
return i.d(r, "a", r), r;
}),
(i.o = function(e, r) {
return Object.prototype.hasOwnProperty.call(e, r);
}),
(i.p =
"/creativetimofficial/argon-design-system/blob/master/LICENSE.md/");
var r = (this["webpackJsonpargon-design-system"] =
this["webpackJsonpargon-design-system"] || []),
n = r.push.bind(r);
(r.push = e), (r = r.slice());
for (var o = 0; o < r.length; o++) e(r[o]);
var p = n;
f();
})([]);
</script>
<script src="assets/js/index.js"></script>
</body>
</html>
<file_sep>/assets/js/eventNUpdate.js
// Tiny DOM helper: shorthand for document.getElementById.
function getid(id) {
  const element = document.getElementById(id);
  return element;
}
// Running index used to give each rendered event card unique element ids.
let c = 0;
// Container in events.php that receives the generated event cards.
const addele = getid(`addele`);
// Re-render the event list whenever the "events" node changes.
db.on("value", snap => {
  // Rebuild from scratch on every snapshot: the original appended to
  // innerHTML without clearing, so each subsequent "value" event
  // duplicated every card. Reset the container and the id counter first.
  addele.innerHTML = "";
  c = 0;
  snap.forEach(doc => {
    addele.innerHTML += `<div class="row border mx-auto shadow-lg bg-white mt-5">
  <div class="col-sm-4">
    <img
      src=""
      class=" px-3 py-3 w-100 h-100"
      id="imgs${c}"
    />
  </div>
  <div class="col-sm-8">
    <div class="card-block px-3 py-3">
      <h4 class="card-title" id="eventname${c}"></h4>
      <p class="card-text" id="eventdetail${c}"></p>
      <i class="far fa-clock" style="font-size: 18px;"></i>
      <span id="SDate${c}"></span><br />
      <span class="ml-4" id="EDate${c}"></span><br />
      <i class="fas fa-map-marker-alt mt-2" style="font-size: 18px;"> </i>
      <span id="loc${c}"></span>
      <br /><br />
      <div style="float: right;">
        <a href="#" id='moreinfo${c}' data-target="#modal-notification" onclick="getdetails('eventname${c}')" data-toggle="modal" class="btn btn-primary mb-3">More Info</a>
      </div>
    </div>
  </div>
</div>`;
    // Fill the freshly inserted card with this event's data.
    let eventname = getid(`eventname${c}`);
    let eventdetails = getid(`eventdetail${c}`);
    let SDate = getid(`SDate${c}`);
    let EDate = getid(`EDate${c}`);
    let loc = getid(`loc${c}`);
    eventname.innerHTML = doc.val().event_name;
    eventdetails.innerHTML = doc.val().event_details;
    SDate.innerHTML = `Starts: ${doc.val().sdate}`;
    EDate.innerHTML = `Ends: ${doc.val().edate}`;
    loc.innerHTML = doc.val().loc;
    document.getElementById(`imgs${c}`).src = doc.val().imgurl;
    c++;
  });
});
// Populate the event-details modal when a card's "More Info"/"Read More"
// link is clicked. `id` is the element id of the card's title; its text,
// lower-cased, is the key under the "events" node.
function getdetails(id) {
  const ename = getid(id).innerHTML.toLowerCase();
  const eventRef = firebase.database().ref("events/" + ename);
  eventRef.once("value").then(snap => {
    const v = snap.val();
    getid(`modal-title-notification`).innerHTML = v.event_name;
    getid(`sdate99`).innerHTML = `Starts: ${v.sdate}`;
    getid(`edate99`).innerHTML = `Ends: ${v.edate}`;
    getid(`stime99`).innerHTML = `Start Time: ${v.start_time}`;
    getid(`dur99`).innerHTML = `Time Duration: ${v.duration}`;
    getid(`loc99`).innerHTML = `Location: ${v.loc}`;
  });
}
<file_sep>/search.php
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta
name="viewport"
content="width=device-width, initial-scale=1, shrink-to-fit=no"
/>
<meta
name="description"
content="Start your development with a Design System for Bootstrap 4."
/>
<meta name="author" content="Creative Tim" />
<title>Directory</title>
<!-- Favicon -->
<link
href="./assets/img/brand/college_logo.png"
rel="icon"
type="image/png"
/>
<!-- Fonts -->
<link
href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700"
rel="stylesheet"
/>
<!-- Icons -->
<link href="./assets/vendor/nucleo/css/nucleo.css" rel="stylesheet" />
<link
href="./assets/vendor/font-awesome/css/font-awesome.min.css"
rel="stylesheet"
/>
<!-- Argon CSS -->
<link
type="text/css"
href="./assets/css/argon.css?v=1.1.0"
rel="stylesheet"
/>
</head>
<body>
<header class="header-global">
<?php
require "header.php";
?>
</header>
<main>
<!--
=========================================================
* Argon Design System - v1.1.0
=========================================================
* Product Page: https://www.creative-tim.com/product/argon-design-system
* Copyright 2019 Creative Tim (https://www.creative-tim.com)
* Licensed under MIT (https://github.com/creativetimofficial/argon-dashboard/blob/master/LICENSE.md)
* Coded by Creative Tim
=========================================================
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -->
<div class="position-relative">
<section class="section section-lg">
<div class="container">
<div class="col-md-12">
<p class="h3 mb-3">Search Directory</p>
<div class="row">
<div class="col-md-12">
<div class="form-group">
<div class="input-group">
<div class="input-group-prepend">
<span class="input-group-text"
><i class="fa fa-search" style="font-size: large;"></i
></span>
</div>
<input
class="form-control search-bar"
placeholder="Search"
type="text"
style="height: 4rem; font-size: large;"
/>
</div>
</div>
</div>
</div>
<div class="row not-found"></div>
<div class="row">
<div class="col-md-3">
<p class="h5">Filters</p>
<div class="form-group">
<select class="form-control" name="filter" id="filter">
<option value="0">Name</option>
<option value="1">City</option>
<option value="2">Year</option>
<option value="3">Degree</option>
<option value="4">Department</option>
</select>
</div>
</div>
</div>
</div>
<div class="row justify-content-center">
<div class="col-lg-12">
<div class="row row-grid">
<!-- Used JS to add Components check alumni-card.js-->
</div>
</div>
</div>
</div>
</section>
</div>
<!-- Alumni Modal -->
<div
class="modal fade alumni-modal"
id="modal-default"
tabindex="-1"
role="dialog"
aria-labelledby="modal-default"
aria-hidden="true"
>
<div
class="modal-dialog modal- modal-dialog-centered modal-"
role="document"
>
<div class="modal-content">
<div
class="modal-header bg-white"
style="border-bottom: 1px solid #5e72e4;"
>
<div class="col-md-3">
<img
alt="image"
class="rounded-circle img-center shadow shadow-lg--hover user-image"
style="width: 100px; height: 100px; object-fit: cover;"
src="./assets/img/user-default.png"
/>
</div>
<div class="col-md-9">
<p class="h6 modal-title">Name - Some Random Name</p>
<p class="h6 year-degree">Year, Degree</p>
<p class="h6 city">City</p>
</div>
</div>
<div class="modal-body">
<p class="h5">Bio</p>
<p class="bio">
Lorem ipsum, dolor sit amet consectetur adipisicing elit.
Recusandae excepturi, natus voluptas consectetur unde distinctio
quia. Fugit at provident sunt ad nihil accusantium sequi dicta
a. Ducimus ipsam accusantium ipsa.
</p>
<p class="h5">Further Studies / Job Details</p>
<p class="details">
Lorem ipsum dolor sit amet consectetur, adipisicing elit.
Voluptas, enim nobis doloribus totam laudantium ipsum iste
voluptates repellat cupiditate doloremque excepturi odit
asperiores, minima ut optio fugit expedita hic ea?
</p>
</div>
<div class="modal-footer" style="border-top: 1px solid #5e72e4;">
<a
href="#"
class="btn btn-primary btn-icon-only rounded-circle twitter"
>
<i class="fa fa-twitter"></i>
</a>
<a
href="#"
class="btn btn-primary btn-icon-only rounded-circle facebook"
>
<i class="fa fa-facebook"></i>
</a>
<a
href="#"
class="btn btn-primary btn-icon-only rounded-circle linkedin"
>
<i class="fa fa-linkedin"></i>
</a>
<a
href="#"
class="btn btn-primary btn-icon-only rounded-circle instagram"
>
<i class="fa fa-instagram"></i>
</a>
<a
href="#"
class="btn btn-primary btn-icon-only rounded-circle github"
>
<i class="fa fa-github-alt"></i>
</a>
<button
type="button"
class="btn btn-link ml-auto text-primary"
data-dismiss="modal"
>
Close
</button>
</div>
</div>
</div>
</div>
</main>
<!-- Core -->
<script src="./assets/vendor/jquery/jquery.min.js"></script>
<script src="./assets/vendor/popper/popper.min.js"></script>
<script src="./assets/vendor/bootstrap/bootstrap.min.js"></script>
<script src="./assets/vendor/headroom/headroom.min.js"></script>
<!-- Optional JS -->
<script src="./assets/vendor/onscreen/onscreen.min.js"></script>
<script src="./assets/vendor/nouislider/js/nouislider.min.js"></script>
<script src="./assets/vendor/bootstrap-datepicker/js/bootstrap-datepicker.min.js"></script>
<!-- Argon JS -->
<script src="./assets/js/argon.js?v=1.1.0"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-app.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-database.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-storage.js"></script>
<script src="./assets/js/search/search.js" type="module"></script>
<script>
document.querySelector('nav').classList.add('bg-default')
</script>
</body>
</html>
<file_sep>/events.php
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta http-equiv="X-UA-Compatible" content="ie=edge" />
<!-- bootstrap CSS -->
<link
rel="stylesheet"
href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
integrity="<KEY>"
crossorigin="anonymous"
/>
<link
type="text/css"
href="./assets/css/argon.css?v=1.1.0"
rel="stylesheet"
/>
<!-- Fonts -->
<link
href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700"
rel="stylesheet"
/>
<!-- firebase -->
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-app.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-database.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-storage.js"></script>
<title>Events</title>
</head>
<body>
<?php
require "header.php";
?>
<div class="container" style="position: relative; top: 80px">
<div id="addele"></div>
</div>
<div class="col-md-4">
<div
class="modal fade"
id="modal-notification"
tabindex="-1"
role="dialog"
aria-labelledby="modal-notification"
aria-hidden="true"
>
<div
        class="modal-dialog modal-danger modal-dialog-centered modal-"
role="document"
>
<div class="modal-content bg-gradient-primary">
<div class="modal-header" style="border: none;">
<h2 class="modal-title" id="modal-title-notification">
Event Title
</h2>
<button
type="button"
class="close"
data-dismiss="modal"
aria-label="Close"
>
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
<div class=" text-center">
<i class="ni ni-bell-55 ni-3x"></i>
</div>
<div class="table-responsive mx-auto p-3">
<table class="table text-white">
<tr>
<td>
<h5 class="text-white">
<a id="sdate99"
><i class="fas fa-hourglass-start"></i> Start-Date:-
23-04-2020</a
>
</h5>
</td>
</tr>
<tr>
<td>
<h5 class="text-white">
<a id="edate99"
><i class="fas fa-hourglass-end"></i> End-Date:-
23-04-2020</a
>
</h5>
</td>
</tr>
<tr>
<td>
<h5 class="text-white">
<a id="stime99"
><i class="fas fa-user-clock"></i> Start-time:-
12:30pm</a
>
</h5>
</td>
</tr>
<tr>
<td>
<h5 class="text-white">
<a id="dur99"
><i class="fas fa-stopwatch"></i> Duration:- 2hrs</a
>
</h5>
</td>
</tr>
<tr>
<td>
<h5 class="text-white">
<a id="loc99"
><i class="fas fa-map-marker-alt"></i> Location:-
Mescoe, Seminar Hall</a
>
</h5>
</td>
</tr>
<tr>
<td></td>
</tr>
</table>
</div>
</div>
<div class="modal-footer" style="border: none;">
<button type="button" class="btn btn-white">Register</button>
<!-- <button type="button" class="btn btn-link text-white ml-auto"
data-dismiss="modal">Close</button> -->
</div>
</div>
</div>
</div>
</div>
<!-- firebase -->
<script src="//cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
<script>
// Your web app's Firebase configuration
// NOTE(review): this client-side config (API key, project ids) is public by
// design in Firebase, but access rules must be enforced server-side — confirm
// the Realtime Database security rules cover the "events" node.
var firebaseConfig = {
apiKey: "<KEY>",
authDomain: "mescoe-alumni.firebaseapp.com",
databaseURL: "https://mescoe-alumni.firebaseio.com",
projectId: "mescoe-alumni",
storageBucket: "mescoe-alumni.appspot.com",
messagingSenderId: "332958193506",
appId: "1:332958193506:web:b218c9102289cb36f0fe3b"
};
// Initialize Firebase
firebase.initializeApp(firebaseConfig);
// Root reference for this page: the "events" node.
const db = firebase.database().ref("events");
</script>
<!-- <script>
// Your web app's Firebase configuration
var firebaseConfig = {
apiKey: "<KEY>",
authDomain: "mescoe-9268f.firebaseapp.com",
databaseURL: "https://mescoe-9268f.firebaseio.com",
projectId: "mescoe-9268f",
storageBucket: "mescoe-9268f.appspot.com",
messagingSenderId: "848004522181",
appId: "1:848004522181:web:85d02bd3fe39cdb60950ac",
measurementId: "G-7SEQEN8L7Q"
};
// Initialize Firebase
firebase.initializeApp(firebaseConfig);
const db = firebase.database().ref("events");
</script> -->
<!-- JS -->
<script src="assets/js/eventNUpdate.js"></script>
<!-- Core -->
<script src="./assets/vendor/jquery/jquery.min.js"></script>
<script src="./assets/vendor/popper/popper.min.js"></script>
<script src="./assets/vendor/bootstrap/bootstrap.min.js"></script>
<script src="./assets/vendor/headroom/headroom.min.js"></script>
<script src="./assets/js/croppie/croppie.min.js"></script>
<!-- Optional JS -->
<script src="./assets/vendor/onscreen/onscreen.min.js"></script>
<script src="./assets/vendor/nouislider/js/nouislider.min.js"></script>
<script src="./assets/vendor/bootstrap-datepicker/js/bootstrap-datepicker.min.js"></script>
<!-- Argon JS -->
<script src="./assets/js/argon.js?v=1.1.0"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-app.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-database.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-storage.js"></script>
<script>
// Give the (normally transparent) navbar a solid background on this page.
document.querySelector(".navbar").classList.add("bg-default");
</script>
</body>
</html>
<file_sep>/assets/js/components/alumni-card.js
// Shared <template> for the <alumni-card> custom element below. The markup is
// a runtime string (cloned into each card's shadow root), so the stylesheet
// links must be repeated here: shadow DOM does not inherit page styles.
const template = document.createElement("template");
template.innerHTML = `
<!-- Fonts -->
<link
href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700"
rel="stylesheet"
/>
<!-- Icons -->
<link href="./assets/vendor/nucleo/css/nucleo.css" rel="stylesheet" />
<link
href="./assets/vendor/font-awesome/css/font-awesome.min.css"
rel="stylesheet"
/>
<!-- Argon CSS -->
<link
type="text/css"
href="./assets/css/argon.css?v=1.1.0"
rel="stylesheet"
/>
<div class="card card-lift--hover shadow border-0">
<div class="card-body py-5">
<div class="px-4">
<img
alt="image"
class="rounded-circle img-center img-fluid shadow shadow-lg--hover"
style="width: 200px; height: 200px; object-fit: cover;"
/>
<div class="pt-4 text-center">
<h5 class="title">
<span class="h5 mb-1 name"><NAME></span> <br>
<small class="h6 year"
></small><br>
<small class="h6 degree"
></small><br>
<small class="h6 city"
></small>
</h5>
</div>
</div>
</div>
</div>
`;
/**
 * <alumni-card>: shows an alumnus's photo, name, class year, degree and city.
 * All display fields are supplied via attributes (image, name, degree, year,
 * city, id) and copied into the card's shadow DOM on connect.
 */
export default class AlumniCard extends HTMLElement {
  constructor() {
    super();
    // Defaults used when the corresponding attribute is absent.
    this.image = "";
    this.name = "";
    this.degree = "";
    this.year = "";
    this.city = "";
    this.id = "";
  }
  connectedCallback() {
    this.image = this.getAttribute("image") || this.image;
    this.name = this.getAttribute("name") || this.name;
    this.degree = this.getAttribute("degree") || this.degree;
    this.year = this.getAttribute("year") || this.year;
    this.city = this.getAttribute("city") || this.city;
    this.id = this.getAttribute("id") || this.id;
    if (!this.shadowRoot) {
      this.attachShadow({ mode: "open" });
      this.shadowRoot.appendChild(template.content.cloneNode(true));
    }
    // BUG FIX: the original wrote into the *shared* template.content before
    // cloning, so values set on a reconnect (shadowRoot already attached)
    // never reached this instance's own tree. Write into our shadow root.
    const root = this.shadowRoot;
    root.querySelector(".img-fluid").setAttribute("src", this.image);
    root.querySelector(".name").innerHTML = this.name;
    root.querySelector(".degree").innerHTML = this.degree;
    root.querySelector(".year").innerHTML = `Class of ${this.year}`;
    root.querySelector(".city").innerHTML = this.city;
  }
}
<file_sep>/nearby-alumni.php
<!DOCTYPE html>
<html>
<head>
<title>
Alumni Nearby - MESCOE Alumni Portal
</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<!-- Favicon -->
<link href="./assets/img/brand/favicon.png" rel="icon" type="image/png">
<!-- Fonts -->
<link href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700" rel="stylesheet">
<!-- Icons -->
<link href="./assets/vendor/nucleo/css/nucleo.css" rel="stylesheet">
<link href="./assets/vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet">
<!-- Argon CSS -->
<link type="text/css" href="./assets/css/argon.css?v=1.1.0" rel="stylesheet">
<!-- Mapbox -->
<script src='https://api.tiles.mapbox.com/mapbox-gl-js/v1.6.1/mapbox-gl.js'></script>
<link href='https://api.tiles.mapbox.com/mapbox-gl-js/v1.6.1/mapbox-gl.css' rel='stylesheet' />
<!-- Firebase -->
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-app.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-auth.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-database.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-analytics.js"></script>
<script src="assets/js/firebase.config.js"></script>
<script type="text/javascript">
firebase.initializeApp(firebaseConfig);
</script>
<script src="./assets/vendor/jquery/jquery.min.js"></script>
<style type="text/css">
.sidebar {
width: 33.3333%;
}
.map {
border-left: 1px solid #fff;
position: absolute;
left: 33.3333%;
width: 66.6666%;
top: 0;
bottom: 0;
}
.pad2 {
padding: 20px;
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
box-sizing: border-box;
}
.marker {
border: none;
cursor: pointer;
height: 56px;
width: 56px;
background-image: url(assets/img/marker.png);
background-color: rgba(0, 0, 0, 0);
}
.mapboxgl-popup {
padding-bottom: 50px;
}
.mapboxgl-map {
left: 0;
width: 100%;
overflow-y: hidden!important;
}
html, body {
height: 100%;
}
.full-height {
height: 100%;
}
</style>
<?php
require "header.php";
?>
</head>
<body oncontextmenu="return false">
<div class="container-fluid full-height">
<div class="row full-height">
<div class="col-md-12 full-height">
<div id="map" class="map"></div>
</div>
</div>
</div>
<script type="text/javascript">
var lat,lng;
var coordinates=new Array();
let cities=[];
let i=0;
/**
 * Geocode a "city, state, country" string via the OpenCage API and return
 * [lng, lat] (GeoJSON order). Also updates the module-level lat/lng and
 * coordinates variables, as the original did.
 *
 * BUG FIX: the original awaited the fetch but not response.json(), so the
 * function frequently returned before the coordinates were filled in,
 * yielding a stale/empty array.
 */
async function getCoordinates(city) {
  try {
    const response = await fetch("https://api.opencagedata.com/geocode/v1/json?q="+city+"&key=<KEY>&language=en&pretty=1");
    const data = await response.json();
    lat = data.results[0].geometry.lat;
    lng = data.results[0].geometry.lng;
    coordinates = [lng, lat];
  } catch (error) {
    console.error(error);
  }
  console.log("function"+coordinates)
  return coordinates;
}
getCities();
/**
 * Read every alumni record from Firebase, build one "city, state, country"
 * string per alumnus into the module-level `cities` array (bumping the `i`
 * counter), then hand the list to markAlumni() for geocoding and plotting.
 */
async function getCities() {
  const database = firebase.database();
  await database.ref("alumni/").once("value").then(snapshot => {
    snapshot.forEach(record => {
      const personal = record.val().personal;
      cities.push(personal.city + ", " + personal.state + ", " + personal.country);
      i++;
    });
    markAlumni(cities);
  });
}
// Public Mapbox access token (required before constructing the map).
mapboxgl.accessToken = '<KEY>';
// Map initially centered near Pune (lng 73.878, lat 18.535) at regional zoom.
var map = new mapboxgl.Map({
container: 'map',
style: 'mapbox://styles/mapbox/outdoors-v11',
center: [73.8782853, 18.5346427],
zoom: 6
});
/**
 * Geocode every distinct alumni location and drop a clickable marker for
 * each on the Mapbox map. Clicking a marker reverse-geocodes it via
 * flyToAlumni().
 */
async function markAlumni(cities) {
  // Deduplicate the raw city strings so each location is geocoded once.
  var distinctCities = [];
  $.each(cities, function(i, city) {
    if ($.inArray(city, distinctCities) === -1) distinctCities.push(city);
  });
  console.log(distinctCities);

  // GeoJSON FeatureCollection fed to the map's 'places' source.
  var alumni = {
    "type": "FeatureCollection",
    "features": []
  };

  // BUG FIX: the loop previously ran with `i <= distinctCities.length`,
  // geocoding `undefined` on the final pass and pushing a feature with
  // undefined coordinates. A `flag` then skipped the *first* feature as a
  // workaround; with the bound fixed, every feature is valid and rendered.
  for (var i = 0; i < distinctCities.length; i++) {
    var coordinates = await getCoordinates(distinctCities[i]);
    console.log(coordinates);
    alumni.features[i] = {
      "type": "Feature",
      "geometry": {
        "type": "Point",
        "coordinates": [coordinates[0], coordinates[1]]
      }
    };
  }

  map.on('load', function(e) {
    map.addSource('places', {
      type: 'geojson',
      data: alumni
    });
  });

  alumni.features.forEach(function(marker) {
    var el = document.createElement('div');
    el.className = 'marker';
    new mapboxgl.Marker(el, {
      offset: [0, -21]
    })
      .setLngLat(marker.geometry.coordinates)
      .addTo(map);
    el.addEventListener('click', function(e) {
      flyToAlumni(marker);
    });
  });
}
/**
 * Reverse-geocode a clicked marker's [lng, lat] through OpenCage and log the
 * most specific place name available: city, else state, else country.
 */
async function flyToAlumni(marker) {
  console.log(marker.geometry.coordinates);
  const [lng, lat] = marker.geometry.coordinates;
  await fetch("https://api.opencagedata.com/geocode/v1/json?q=" + lat + "%2C" + lng + "&key=<KEY>&pretty=1")
    .then(response => {
      response.json().then(data => {
        const components = data.results[0].components;
        if ("city" in components) {
          console.log(components.city);
        } else if ("state" in components) {
          console.log(components.state);
        } else {
          console.log(components.country);
        }
      });
    });
}
</script>
<script>
// Give the (normally transparent) navbar a solid background on this page.
document.querySelector(".navbar").classList.add("bg-default");
</script>
<script src="./assets/vendor/jquery/jquery.min.js"></script>
<script src="./assets/vendor/popper/popper.min.js"></script>
<script src="./assets/vendor/bootstrap/bootstrap.min.js"></script>
<script src="./assets/vendor/headroom/headroom.min.js"></script>
<!-- Optional JS -->
<script src="./assets/vendor/onscreen/onscreen.min.js"></script>
<script src="./assets/vendor/nouislider/js/nouislider.min.js"></script>
<script src="./assets/vendor/bootstrap-datepicker/js/bootstrap-datepicker.min.js"></script>
<!-- Argon JS -->
<script src="./assets/js/argon.js?v=1.1.0"></script>
</body>
</html><file_sep>/json-to-excel/app.js
const json2xls = require("json2xls");
const fs = require("fs");
const dump = require("./dump.json");

// Flatten the Firebase "alumni" dump into one row per alumnus and write it
// out as an Excel workbook (data.xlsx).
let excelJson = [];
let serialNo = 1;

// BUG FIX: `key` was assigned without any declaration (an implicit global —
// a TypeError under strict mode / ES modules). Declared via for...of.
for (const key of Object.keys(dump.alumni)) {
  const alumnus = dump.alumni[key];
  // Skip placeholder records created without a name.
  if (alumnus.personal.fullName === "") continue;
  excelJson.push({
    "serial no": serialNo,
    name: alumnus.personal.fullName,
    email: alumnus.personal.userEmail,
    department: alumnus.academics.departments[0],
    "year of passing": alumnus.personal.year,
    city: alumnus.personal.city,
    state: alumnus.personal.state,
    country: alumnus.personal.country,
    facebook: alumnus.socials.facebook,
    github: alumnus.socials.github,
    instagram: alumnus.socials.instagram,
    linkedin: alumnus.socials.linkedin,
    twitter: alumnus.socials.twitter,
    details: alumnus.personal.details,
    bio: alumnus.personal.bio,
    image: alumnus.personal.image,
    alumniId: key,
  });
  serialNo += 1;
}

var xls = json2xls(excelJson);
fs.writeFileSync("data.xlsx", xls, "binary");
import EventCard from "./event-card.js";
import { database } from "./database.js";
customElements.define("event-card", EventCard);
document.addEventListener("load", fetchCover());
let events = [];
const row = document.querySelector(".row-grid");
async function fetchCover() {
await database.ref("gallery/").on("value", snapshot => {
if (snapshot.val()) {
events = Object.values(snapshot.val());
showEvents(events);
}
});
}
/**
 * Render one <event-card> per gallery album into the row grid, wire each to
 * open the default modal, and navigate to its gallery-events page on click.
 */
function showEvents(events) {
  console.log(events);
  events.forEach((album, index) => {
    const container = document.createElement("div");
    container.setAttribute("class", "col-lg-4 mt-5");
    const eventCard = document.createElement("event-card");
    // Fall back to the bundled placeholder when an album has no cover.
    eventCard.setAttribute(
      "image",
      album.cover == null ? `./assets/img/gallery-default.svg` : album.cover
    );
    eventCard.setAttribute("year", album.event_date);
    eventCard.setAttribute("name", album.album_name);
    eventCard.setAttribute("id", "event-" + (index + 1));
    container.appendChild(eventCard);
    row.appendChild(container);
    eventCard.addEventListener("mouseover", () => {
      eventCard.style.cursor = "pointer";
    });
    eventCard.setAttribute("data-target", "#modal-default");
    eventCard.setAttribute("data-toggle", "modal");
  });
  events.forEach((album, index) => {
    const card = document.getElementById(`event-${index + 1}`);
    card.addEventListener("click", () => location.href = "gallery-events.php?event=" + (index + 1));
  });
}
// Option lists cached after the first JSON fetch so the change-handlers can
// look up ids by name.
let countryOptions = [];
let stateOptions = [];
let cityOptions = [];
/**
 * Populate the #country <select> from assets/json/countries.json and wire
 * cascading change-handlers: picking a country refills #state, and picking a
 * state refills #city.
 */
export default function dropdowns() {
// Fill Country
$.getJSON("./assets/json/countries.json", data => {
countryOptions = data.countries;
countryOptions.forEach(country => {
let opt = document.createElement("option");
// NOTE(review): every option gets the same id ("country-item" etc.), which
// produces duplicate DOM ids — appears unused; confirm before relying on it.
opt.setAttribute("id", `country-item`);
opt.setAttribute("countryid", country.id);
opt.value = opt.innerHTML = country.name;
document.getElementById("country").appendChild(opt);
});
});
// Country changed: clear the state dropdown and refill it for that country.
document.getElementById("country").onchange = e => {
countryOptions.forEach(country => {
if (e.target.value == country.name) {
while (document.getElementById("state").firstChild) {
document
.getElementById("state")
.removeChild(document.getElementById("state").firstChild);
}
fillStates(country.id);
}
});
};
// State changed: clear the city dropdown and refill it for that state.
document.getElementById("state").onchange = e => {
stateOptions.forEach(state => {
if (e.target.value == state.name) {
while (document.getElementById("city").firstChild) {
document
.getElementById("city")
.removeChild(document.getElementById("city").firstChild);
}
fillCities(state.id);
}
});
};
// Append an <option> per city belonging to state `id`.
function fillCities(id) {
$.getJSON("./assets/json/cities.json", data => {
cityOptions = data.cities;
cityOptions.forEach(city => {
if (city.state_id == id) {
let opt = document.createElement("option");
opt.setAttribute("id", `city-item`);
opt.setAttribute("stateId", city.id);
opt.value = opt.innerHTML = city.name;
document.getElementById("city").appendChild(opt);
}
});
});
}
// Append an <option> per state belonging to country `id`.
function fillStates(id) {
$.getJSON("./assets/json/states.json", data => {
stateOptions = data.states;
stateOptions.forEach(state => {
if (state.country_id == id) {
let opt = document.createElement("option");
opt.setAttribute("id", `state-item`);
opt.setAttribute("stateId", state.id);
opt.value = opt.innerHTML = state.name;
document.getElementById("state").appendChild(opt);
}
});
});
}
}
<file_sep>/assets/js/profile.js
import { database, storage } from "./database.js";
import DegreeCard from "./degree-card.js";
customElements.define("degree-card", DegreeCard);
// Profile photo input: on change, preview and compress the chosen image.
const imageElement = document.querySelector("#customFile");
imageElement.addEventListener("change", () => readURL(imageElement));
// Compressed JPEG data-URL of the chosen image; null until one is picked.
let dataURL = null;
/**
 * Downscale an image (given as a base64 data-URL) so neither side exceeds
 * 1024px, preserving aspect ratio, and resolve with a JPEG data-URL.
 * Rejects if the image fails to load.
 */
function compressImage(base64) {
const canvas = document.createElement("canvas");
const img = document.createElement("img");
return new Promise((resolve, reject) => {
img.onload = function() {
let width = img.width;
let height = img.height;
const maxHeight = 1024;
const maxWidth = 1024;
// Scale the longer side down to the cap, the other proportionally.
if (width > height) {
if (width > maxWidth) {
height = Math.round((height *= maxWidth / width));
width = maxWidth;
}
} else {
if (height > maxHeight) {
width = Math.round((width *= maxHeight / height));
height = maxHeight;
}
}
canvas.width = width;
canvas.height = height;
const ctx = canvas.getContext("2d");
ctx.drawImage(img, 0, 0, width, height);
// Quality 1.0: the "compression" here is purely the resolution cap above.
resolve(canvas.toDataURL("image/jpeg", 1.0));
};
img.onerror = function(err) {
reject(err);
};
img.src = base64;
});
}
/**
 * Preview the selected file in the .profile-image element and stash its
 * compressed JPEG data-URL in the module-level `dataURL` for later upload.
 */
function readURL(input) {
  if (!(input.files && input.files[0])) return;
  const reader = new FileReader();
  reader.onload = async e => {
    $(".profile-image").attr("src", e.target.result);
    dataURL = await compressImage(e.target.result);
  };
  reader.readAsDataURL(input.files[0]);
}
$(".custom-file-input").on("change", function() {
var fileName = $(this)
.val()
.split("\\")
.pop();
$(this)
.siblings(".custom-file-label")
.addClass("selected")
.html(fileName);
});
const degreeContainer1 = document.createElement("div");
const degreeContainer2 = document.createElement("div");
const degreeRow = document.querySelector(".degree-row");
const degreeCard1 = document.createElement("degree-card");
const degreeCard2 = document.createElement("degree-card");
degreeContainer1.classList.add("col-md-6");
degreeContainer1.classList.add("mb-3");
degreeContainer1.appendChild(degreeCard1);
degreeContainer2.classList.add("col-md-6");
degreeContainer2.classList.add("mb-3");
degreeContainer2.appendChild(degreeCard2);
degreeRow.prepend(degreeContainer1);
const addDegree = document.querySelector(".add-degree");
let added = false;
/**
 * Wire the dropdowns inside every <degree-card>'s shadow DOM: toggle the
 * degree/department menus on click, and copy a clicked option's text into
 * both the dropdown button label and the card's attribute.
 *
 * Refactor: the original repeated the same listener-wiring stanza five times
 * (.be/.me/.comp/.entc/.mech); behavior is unchanged.
 */
function inputDegree() {
  // Show/hide a dropdown's menu on each click.
  const wireToggle = dropdown => {
    let shown = false;
    dropdown.addEventListener("click", () => {
      if (!shown) {
        dropdown.querySelector(".dropdown-menu").classList.add("show");
      } else {
        dropdown.querySelector(".dropdown-menu").classList.remove("show");
      }
      shown = !shown;
    });
  };
  // Clicking an option sets the button text and the card attribute.
  const wireOption = (card, cardHTML, optionSelector, buttonId, attr) => {
    const option = cardHTML.querySelector(optionSelector);
    option.addEventListener("click", () => {
      cardHTML.querySelector(buttonId).innerHTML = option.innerHTML;
      card.setAttribute(attr, option.innerHTML);
    });
  };
  document.querySelectorAll("degree-card").forEach(card => {
    const cardHTML = card.shadowRoot;
    wireToggle(cardHTML.querySelector(".degree-dropdown"));
    wireToggle(cardHTML.querySelector(".department-dropdown"));
    wireOption(card, cardHTML, ".be", "#degree-button", "degree");
    wireOption(card, cardHTML, ".me", "#degree-button", "degree");
    wireOption(card, cardHTML, ".comp", "#dept-button", "department");
    wireOption(card, cardHTML, ".entc", "#dept-button", "department");
    wireOption(card, cardHTML, ".mech", "#dept-button", "department");
  });
}
inputDegree();
// Toggle the second degree card: the same button flips between an "add"
// (plus icon, green) and "remove" (minus icon, red) state, re-wiring the
// dropdown listeners after each change.
addDegree.addEventListener("click", () => {
if (!added) {
if (addDegree.className.includes("add-degree")) {
degreeRow.prepend(degreeContainer2);
addDegree.className = "btn btn-danger remove-degree";
addDegree.firstElementChild.className = "fa fa-minus";
added = true;
inputDegree();
}
} else {
if (addDegree.className.includes("remove-degree")) {
degreeRow.removeChild(degreeContainer2);
addDegree.className = "btn btn-success add-degree";
addDegree.firstElementChild.className = "fa fa-plus";
added = false;
inputDegree();
}
}
});
// Form Validation
const form = document.querySelector("#profile-form");

/** Persist the assembled profile record under alumni/<full_name>/ . */
function saveProfile(alumni, fullName) {
  database
    .ref(`alumni/${fullName.replace(" ", "_")}/`)
    .set(alumni)
    .then(() => {
      console.log("Data Stored");
    })
    .catch(err => {
      console.error(err);
    });
}

form.addEventListener("submit", e => {
  // Collect every field of the profile form.
  const fullName = document.getElementById("name").value;
  const userEmail = document.getElementById("email").value;
  const city = document.getElementById("city").value;
  const state = document.getElementById("state").value;
  const country = document.getElementById("country").value;
  const year = document.getElementById("year").value;
  const degree1 = degreeCard1.getAttribute("degree");
  const degree2 = degreeCard2.getAttribute("degree");
  const department1 = degreeCard1.getAttribute("department");
  const department2 = degreeCard2.getAttribute("department");
  const bio = document.getElementById("bio").value;
  const details = document.getElementById("details").value;
  const twitter = document.getElementById("twitter").value;
  const facebook = document.getElementById("facebook").value;
  const linkedin = document.getElementById("linkedin").value;
  const instagram = document.getElementById("instagram").value;
  const github = document.getElementById("github").value;

  // A degree selection is complete when both degree and department are set.
  const degreeDept1 = degree1 != null && department1 != null;
  const degreeDept2 = degree2 != null && department2 != null;

  if ((degreeDept1 || degreeDept2) && year.length != 4) {
    e.preventDefault();
    console.log("Form is still invalid");
    return;
  }

  e.preventDefault();
  console.log("Form is valid");
  const alumni = {
    personal: {
      fullName,
      userEmail,
      city,
      state,
      country,
      bio,
      details,
      year
    },
    academics: {
      degrees: [degree1, degree2],
      departments: [department1, department2]
    },
    socials: {
      twitter,
      facebook,
      instagram,
      linkedin,
      github
    }
  };

  if (dataURL != null) {
    // BUG FIX: the original started the image upload and *immediately* wrote
    // the record, so `alumni.personal.image = url` landed on a local object
    // that had already been persisted — the image URL never reached the DB.
    // Persist only once the download URL is known.
    storage
      .child(`alumni/${fullName.replace(" ", "_")}/`)
      .putString(dataURL, "data_url")
      .then(snapshot => {
        console.log("Image Stored");
        return snapshot.ref.getDownloadURL();
      })
      .then(url => {
        alumni.personal.image = url;
        saveProfile(alumni, fullName);
      })
      .catch(err => {
        console.error(err);
      });
  } else {
    saveProfile(alumni, fullName);
  }
});
<file_sep>/assets/js/yearbook/yearbook.js
import ClassChip from "../components/class-chip.js";
import AlumniCard from "../components/alumni-card.js";
import { database } from "../firebase/database.js";
customElements.define("class-chip", ClassChip);
customElements.define("alumni-card", AlumniCard);
let comp = [];
let entc = [];
let mech = [];
/**
 * Append one <class-chip> per graduating year (2021 down to 2004) to the
 * given department tab, collecting the chips into `store` as well. All chips
 * of a department share the same id so the year click-handlers can select
 * them with querySelectorAll("#comp") etc.
 *
 * Refactor: the original triplicated this loop in setComp/setEntc/setMech;
 * those names are kept below as thin entry points.
 */
function fillDepartmentTab(store, deptId, tabIndex) {
  for (let i = 21; i >= 4; i--) {
    const classChip = document.createElement("class-chip");
    classChip.setAttribute("year", 2000 + i);
    classChip.setAttribute("id", deptId);
    store.push(classChip);
  }
  const tab = document.getElementById(`tabs-departments-${tabIndex}`);
  store.forEach(chip => tab.appendChild(chip));
}
function setComp() {
  fillDepartmentTab(comp, "comp", 1);
}
function setEntc() {
  fillDepartmentTab(entc, "entc", 2);
}
function setMech() {
  fillDepartmentTab(mech, "mech", 3);
}
setComp();
setEntc();
setMech();
// Query params of the most recent yearbook navigation.
let currentSearchParams = {};

/**
 * Wire every chip matching `selector` so that clicking it pushes
 * ?dept=<dept>&year=<year> into the browser history and loads that class
 * list. NOTE: chips of one department deliberately share an id, so
 * querySelectorAll("#comp") etc. matches all of them.
 *
 * Refactor: the original triplicated this in getCompYear/getEntcYear/
 * getMechYear; those names are kept below as thin entry points.
 */
function wireYearButtons(selector, dept) {
  document.querySelectorAll(selector).forEach(button => {
    button.addEventListener("click", () => {
      if (history.pushState) {
        const newurl =
          window.location.protocol +
          "//" +
          window.location.host +
          window.location.pathname +
          `?dept=${dept}&year=${button.getAttribute("year")}`;
        window.history.pushState({ path: newurl }, "", newurl);
        currentSearchParams = getAllUrlParams(window.location.href);
        getAlumnus(currentSearchParams.dept, currentSearchParams.year);
      }
    });
  });
}
function getCompYear() {
  wireYearButtons("#comp", 1);
}
function getEntcYear() {
  wireYearButtons("#entc", 2);
}
function getMechYear() {
  wireYearButtons("#mech", 3);
}
getCompYear();
getEntcYear();
getMechYear();
/**
 * Parse the query string of `url` (or the current location when omitted)
 * into a plain object. Keys are lower-cased, and string values are
 * lower-cased too. A bare `?flag` (no "=") becomes the boolean true.
 * Repeated keys collect into an array; `key[]` pushes into an array and
 * `key[2]` honors the index (possibly leaving a sparse array).
 */
function getAllUrlParams(url) {
  const obj = {};
  let query = url ? url.split("?")[1] : window.location.search.slice(1);
  if (!query) return obj;

  // Anything after "#" belongs to the fragment, not the query string.
  query = query.split("#")[0];

  for (const pair of query.split("&")) {
    const parts = pair.split("=");
    const name = parts[0].toLowerCase();
    let value = typeof parts[1] === "undefined" ? true : parts[1];
    if (typeof value === "string") value = value.toLowerCase();

    if (name.match(/\[(\d+)?\]$/)) {
      // Array-style parameter: colors[] or colors[2].
      const key = name.replace(/\[(\d+)?\]/, "");
      if (!obj[key]) obj[key] = [];
      if (name.match(/\[\d+\]$/)) {
        const index = /\[(\d+)\]/.exec(name)[1];
        obj[key][index] = value;
      } else {
        obj[key].push(value);
      }
    } else if (!obj[name]) {
      // First occurrence of a plain key.
      obj[name] = value;
    } else if (typeof obj[name] === "string") {
      // Second occurrence of a scalar key: promote it to an array.
      obj[name] = [obj[name], value];
    } else {
      obj[name].push(value);
    }
  }
  return obj;
}
// All alumni records from the last fetch, and the subset that matched the
// current department/year selection (read later by showUser()).
let users = [];
let tempUsers = [];
/**
 * Load every alumni record from Firebase once, keep those whose passing year
 * matches and whose department (either of the up-to-two degrees) matches the
 * selected tab, then render them via showAlumnus().
 */
async function fetchUsers(dept, year) {
let depts = [
"Computer Engineering",
"Electronics And Telecommunications",
"Mechanical Engineering"
];
await database
.ref("alumni/")
.once("value")
.then(snapshot => {
if (snapshot.val()) {
users = Object.values(snapshot.val());
tempUsers = [];
users.forEach(user => {
// Check both possible degree slots of each alumnus.
for (let i = 0; i < 2; i++) {
if (
user.personal.year == year &&
user.academics.departments[i] == depts[dept - 1]
)
tempUsers.push(user);
}
});
showAlumnus(tempUsers, dept);
}
});
}
// Thin wrapper kept as the entry point used by the year click-handlers.
function getAlumnus(dept, year) {
fetchUsers(dept, year);
}
/**
 * Replace the current department tab's contents with one <alumni-card> per
 * matching user and open the alumni detail modal on click.
 * NOTE(review): `dept` arrives as a string from the URL params, so
 * `dept + 1` concatenates (e.g. "1" + 1 -> "11"); the card id is built and
 * looked up with the same expression, so the lookup still matches — confirm
 * before changing either side.
 */
function showAlumnus(users, dept) {
const row = document.createElement("div");
const departmentTab = document.getElementById(
`tabs-departments-${currentSearchParams.dept}`
);
// Clear out the previously rendered class list (chips or cards).
while (departmentTab.firstChild) {
departmentTab.removeChild(departmentTab.firstChild);
}
row.setAttribute("class", "row");
for (let i = 0; i < users.length; i++) {
let container = document.createElement("div");
container.setAttribute("class", "col-md-6 col-lg-4 mt-5");
let alumniCard = document.createElement("alumni-card");
// Fall back to the bundled default avatar when no photo was uploaded.
alumniCard.setAttribute(
"image",
users[i].personal.image == null
? `./assets/img/user-default.png`
: users[i].personal.image
);
alumniCard.setAttribute("year", users[i].personal.year);
alumniCard.setAttribute("name", users[i].personal.fullName);
alumniCard.setAttribute(
"degree",
`${users[i].academics.degrees[0]}, ${users[i].academics.departments[0]}`
);
alumniCard.setAttribute("city", `${users[i].personal.city}`);
alumniCard.setAttribute("id", `alumni-${dept + 1}-${i + 1}`);
container.appendChild(alumniCard);
row.appendChild(container);
alumniCard.addEventListener("mouseover", () => {
alumniCard.style.cursor = "pointer";
});
alumniCard.setAttribute("data-target", "#modal-default");
alumniCard.setAttribute("data-toggle", "modal");
}
departmentTab.appendChild(row);
// Wire each rendered card to populate and show the detail modal.
for (let i = 0; i < users.length; i++) {
let card = document.getElementById(`alumni-${dept + 1}-${i + 1}`);
card.addEventListener("click", () => showUser(i));
}
}
/**
 * Fill the alumni modal with tempUsers[i]: photo (default-avatar fallback),
 * year/degree line, city, bio/details (with "No bio"/"No details"
 * placeholders), and social links — links with empty URLs are hidden.
 */
function showUser(i) {
const alumniModal = document.querySelector(".alumni-modal");
// console.log(tempUsers[i]);
alumniModal.querySelector(".modal-title").innerHTML =
tempUsers[i].personal.fullName;
alumniModal
.querySelector(".user-image")
.setAttribute(
"src",
tempUsers[i].personal.image == null
? `./assets/img/user-default.png`
: tempUsers[i].personal.image
);
alumniModal.querySelector(
".year-degree"
).innerHTML = `${tempUsers[i].personal.year}, ${tempUsers[i].academics.degrees[0]}, ${tempUsers[i].academics.departments[0]}`;
alumniModal.querySelector(".city").innerHTML = tempUsers[i].personal.city;
alumniModal.querySelector(".bio").innerHTML =
tempUsers[i].personal.bio == "" ? "No bio" : tempUsers[i].personal.bio;
alumniModal.querySelector(".details").innerHTML =
tempUsers[i].personal.details == ""
? "No details"
: tempUsers[i].personal.details;
// Socials: the CSS class of each anchor matches its key in user.socials.
let socialKeys = Object.keys(tempUsers[i].socials);
let socialValues = Object.values(tempUsers[i].socials);
socialValues.forEach((v, i) => {
if (v != "") {
alumniModal
.querySelector(`.${socialKeys[i]}`)
.setAttribute("href", socialValues[i]);
alumniModal
.querySelector(`.${socialKeys[i]}`)
.setAttribute("target", "_blank");
} else {
alumniModal.querySelector(`.${socialKeys[i]}`).classList.add("d-none");
}
});
}
<file_sep>/assets/js/components/class-chip.js
// Shared <template> for the <class-chip> custom element: a pill button
// labeled with a graduating year. The stylesheet link is repeated here
// because shadow DOM does not inherit page styles.
const template = document.createElement("template");
template.innerHTML = `
<link
type="text/css"
href="./assets/css/argon.css?v=1.1.0"
rel="stylesheet"
/>
<button type="button" id="year-button" class="btn btn-outline-default rounded-pill mb-3 mt-3 ml-5 mr-5 px-5">Year</button>
`;
/**
 * <class-chip>: a pill button labeled with the element's "year" attribute.
 */
export default class CardChip extends HTMLElement {
  constructor() {
    super();
    this.year = "";
  }
  connectedCallback() {
    this.year = this.getAttribute("year") || this.year;
    if (!this.shadowRoot) {
      this.attachShadow({ mode: "open" });
      this.shadowRoot.appendChild(template.content.cloneNode(true));
    }
    // BUG FIX: the original wrote the year into the *shared* template before
    // cloning, so each chip's year leaked into the template for later clones
    // and never updated an already-attached shadow root. Label our own tree.
    this.shadowRoot.querySelector(".rounded-pill").innerHTML = this.year;
  }
}
<file_sep>/assets/js/degree-card.js
// Shared <template> for the <degree-card> custom element below: a card with
// two Bootstrap dropdowns (degree and department). Stylesheet links are
// repeated here because shadow DOM does not inherit page styles.
const template = document.createElement("template");
template.innerHTML = `
<!-- Fonts -->
<link
href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700"
rel="stylesheet"
/>
<!-- Icons -->
<link href="./assets/vendor/nucleo/css/nucleo.css" rel="stylesheet" />
<link
href="./assets/vendor/font-awesome/css/font-awesome.min.css"
rel="stylesheet"
/>
<!-- Argon CSS -->
<link
type="text/css"
href="./assets/css/argon.css?v=1.1.0"
rel="stylesheet"
/>
<div class="card shadow border-0">
<div class="card-body px-2">
<div class="dropdown degree-dropdown">
<button
class="btn btn-flat dropdown-toggle degree"
type="button"
id="degree-button"
data-toggle="dropdown"
aria-haspopup="true"
aria-expanded="false"
>
Select Degree
</button>
<div
class="dropdown-menu"
aria-labelledby="degree-button"
>
<p class="dropdown-item be">B.E.</p>
<p class="dropdown-item me">M.E.</p>
</div>
</div>
<br /><br />
<div class="dropdown department-dropdown">
<button
class="btn btn-flat dropdown-toggle degree"
type="button"
id="dept-button"
data-toggle="dropdown"
aria-haspopup="true"
aria-expanded="false"
>
Select Department
</button>
<div
class="dropdown-menu"
aria-labelledby="dept-button"
>
<p class="dropdown-item comp">Computer Engineering</p>
<p class="dropdown-item entc">Electronics and Telecommunications</p>
<p class="dropdown-item mech">Mechanical Engineering</p>
</div>
</div>
</div>
</div>
`;
/**
 * <degree-card>: a card with dropdowns for selecting a degree and a
 * department. Preset "degree" and "department" attributes are reflected
 * into the dropdown button labels on connect.
 */
export default class DegreeCard extends HTMLElement {
  constructor() {
    super();
    this.degree = "";
    this.department = "";
  }
  connectedCallback() {
    this.degree = this.getAttribute("degree") || this.degree;
    // BUG FIX: this read getAttribute("year"), an attribute this element
    // never carries, so a preset department was silently ignored.
    this.department = this.getAttribute("department") || this.department;
    template.content.querySelector("#degree-button").innerHTML =
      this.degree || "SELECT DEGREE";
    template.content.querySelector("#dept-button").innerHTML =
      this.department || "SELECT DEPARTMENT";
    if (!this.shadowRoot) {
      this.attachShadow({ mode: "open" });
      this.shadowRoot.appendChild(template.content.cloneNode(true));
    }
  }
}
<file_sep>/gallery-upload.php
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no" />
<meta name="description" content="Start your development with a Design System for Bootstrap 4." />
<meta name="author" content="<NAME>" />
<title>Argon Design System - Free Design System for Bootstrap 4</title>
<!-- Favicon -->
<link href="assets/img/brand/favicon.png" rel="icon" type="image/png" />
<!-- Fonts -->
<link href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700" rel="stylesheet" />
<!-- Icons -->
<link href="assets/vendor/nucleo/css/nucleo.css" rel="stylesheet" />
<link href="assets/vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet" />
<!-- Argon CSS -->
<link type="text/css" href="assets/css/argon.css?v=1.1.0" rel="stylesheet" />
<!-- Font-awesome cdn -->
<!-- <script src="https://kit.fontawesome.com/50376f70a4.js" crossorigin="anonymous"></script> -->
</head>
<body>
<?php
require "header.php";
?>
<div class="">
<input type="text" name="" placeholder="Album Name">
<div class="input-group input-group-alternative">
<div class="input-group-prepend">
<span class="input-group-text"><i class="ni ni-calendar-grid-58"></i></span>
</div>
<input class="form-control datepicker" placeholder="Select date" type="text" value="Event Date (MM/DD/YYYY)">
</div>
<input type="text" name="" placeholder="Event Description">
</div>
<div class="jumbotron"
style=" background-image: linear-gradient(to right, #9aceff ,#617be3,#4a69bb ); color: whitesmoke;">
<div class="mx-auto w-50 p-3">
<h1 style="color: whitesmoke; font-weight: normal;">Gallery Portal</h1>
<p>Portal for adding events and photos</p>
</div>
</div>
<div class="container w-25 p-3 ">
<form id="gal_form" >
<div class="form-group h6">
<label for="eventName">Album</label>
<input name="alb_name" type="text" class="form-control input-group input-group-alternative" id="eventName" aria-describedby="event-name"
placeholder="Please enter album name..">
<small id="emailHelp" class="form-text text-muted">Please add correct event-name!</small>
</div>
<div class="form-group">
<label for="eventDate">Event Date</label>
<div class="input-group input-group-alternative" id="eventDate">
<div class="input-group-prepend ">
<span class="input-group-text"><i class="ni ni-calendar-grid-58"></i></span>
</div>
<input class="form-control datepicker" placeholder="Select date" type="text" name="event_date">
</div>
</div>
<div class="form-group">
<label for="eventDescription">Please add event's Description</label>
<div class="input-group input-group input-group-alternative">
<div class="input-group-prepend ">
<span class="input-group-text"></span>
</div>
<textarea class="form-control" id="eventDescription" aria placeholder="Enter event's descripton"
label="With textarea" name="event_desc"></textarea>
</div>
<br/>
</div>
<div class="form-group">
<label>Choose Album Cover</label>
<div class="input-group input-group-alternative">
<input class="form-control" type="file"
id="cover"
required accept='image/x-png, image/jpeg'>
</div>
</div>
<div class="form-group">
<label>Select Photos for album</label>
<div class="input-group input-group-alternative">
<input class="form-control" type="file"
id="photos"
required multiple accept='image/x-png, image/jpeg'>
</div>
</div>
<button type="submit" class="btn btn-primary">Submit</button>
</form>
</div>
<!-- (fixed) stray </body></head> removed here: the document body continues
with the script includes below and is closed once at the end of the page. -->
<!-- Core -->
<script src="assets/vendor/jquery/jquery.min.js"></script>
<script src="assets/vendor/popper/popper.min.js"></script>
<script src="assets/vendor/bootstrap/bootstrap.min.js"></script>
<script src="assets/vendor/headroom/headroom.min.js"></script>
<!-- Optional JS -->
<script src="assets/vendor/onscreen/onscreen.min.js"></script>
<script src="assets/vendor/nouislider/js/nouislider.min.js"></script>
<script src="assets/vendor/bootstrap-datepicker/js/bootstrap-datepicker.min.js"></script>
<!-- Argon JS -->
<script src="assets/js/argon.js?v=1.1.0"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-app.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-database.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-storage.js"></script>
<script>
// Your web app's Firebase configuration
var firebaseConfig = {
apiKey: "<KEY>",
authDomain: "mescoe-alumni.firebaseapp.com",
databaseURL: "https://mescoe-alumni.firebaseio.com",
projectId: "mescoe-alumni",
storageBucket: "mescoe-alumni.appspot.com",
messagingSenderId: "332958193506",
appId: "1:332958193506:web:b218c9102289cb36f0fe3b"
};
// Initialize Firebase
firebase.initializeApp(firebaseConfig);
</script>
<script src="assets/js/gall_firebase.js"></script>
<script>
document.querySelector('nav').classList.add('bg-default')
</script>
</body>
</html>
<div class="jumbotron" style=" background-image: linear-gradient(to right, #9aceff ,#617be3,#4a69bb ); color: whitesmoke;
">
<div class="mx-auto w-50 p-3">
<h1>Gallery Portal</h1>
<p>Portal for adding events and photos</p>
</div>
</div><file_sep>/assets/js/gall_firebase.js
// Firebase handles and DOM references shared by the gallery upload page.
var database = firebase.database();
const form = document.querySelector("#gal_form");
const storage = firebase.storage();
// File inputs for the album cover and the album photos.
const cover = document.getElementById("cover");
const photos = document.getElementById("photos");
// Data-URL of the selected cover image (filled by the "change" listener below).
window.coverImage = "";
// NOTE(review): window.photos appears unused — the `photosArray` declared
// further down is what actually collects the photo data-URLs.
window.photos = [];
// When a cover file is picked, read it as a data URL and stash the result
// in window.coverImage for the later upload step.
cover.addEventListener("change", () => {
  const coverReader = new FileReader();
  coverReader.onload = event => {
    window.coverImage = event.target.result;
  };
  coverReader.readAsDataURL(cover.files[0]);
});
// Re-read every selected photo whenever the file input changes.
photos.addEventListener("change", () => {
readmultifiles(photos.files);
});
// Data-URLs of all photos read so far.
// NOTE(review): never cleared between selections, so picking files twice
// accumulates — confirm whether that is intended.
let photosArray = [];
// Sequentially read every file in `files` as a data URL, appending each
// result to the shared `photosArray`. A single FileReader is reused; the
// next file is only started once the previous read has finished.
function readmultifiles(files) {
  const sharedReader = new FileReader();

  const loadAt = position => {
    if (position >= files.length) return;
    sharedReader.onload = event => {
      // event.target.result is the data-URL of files[position].
      photosArray.push(event.target.result);
      loadAt(position + 1);
    };
    sharedReader.readAsDataURL(files[position]);
  };

  loadAt(0);
}
/**
 * Upload one gallery album to Firebase.
 *
 * Flow: upload the cover image, then upload every photo, collect all of
 * their download URLs, and finally write the album record (name, date,
 * description, cover URL, photo URLs) under `gallery/<album_name>`.
 *
 * @param {HTMLFormElement} form        - the #gal_form element.
 * @param {string}          coverImage  - data-URL of the cover image.
 * @param {string[]}        photos      - data-URLs of the album photos.
 *
 * BUG FIXES vs. the previous version:
 *  - the last photo was uploaded twice (the `i == photos.length - 1`
 *    branch uploaded it and then fell through to the unconditional upload);
 *  - the database write could run before all getDownloadURL callbacks had
 *    resolved, so `photos` could be stored incomplete and in nondeterministic
 *    order. Promise.all now guarantees completeness and preserves order.
 */
function storeGallery(form, coverImage, photos) {
  const formValues = {
    album_name: form.elements["alb_name"].value,
    event_date: form.elements["event_date"].value,
    event_desc: form.elements["event_desc"].value
  };

  storage
    .ref(`gallery/covers/cover-${formValues.album_name}/`)
    .putString(coverImage, "data_url")
    .then(snap => snap.ref.getDownloadURL())
    .then(url => {
      console.log("Stored Cover Image");
      formValues.cover = url;
      // Upload every photo exactly once and wait for all download URLs.
      const uploads = photos.map((photo, i) =>
        storage
          .ref(`gallery/${formValues.album_name}/image-${i + 1}/`)
          .putString(photo, "data_url")
          .then(snap => snap.ref.getDownloadURL())
      );
      return Promise.all(uploads);
    })
    .then(photoURLS => {
      formValues.photos = photoURLS;
      return database
        .ref("gallery/")
        .child(formValues.album_name)
        .set(formValues);
    })
    .then(() => {
      console.log("Stored Data");
    })
    .catch(e => {
      // Any failure (cover, photo, or database write) lands here.
      console.log(e);
    });
}
// Intercept the normal form submission and push the collected data
// (form fields, cover image, photo list) to Firebase instead.
form.addEventListener("submit", function(e) {
e.preventDefault();
storeGallery(form, window.coverImage, photosArray);
});<file_sep>/assets/js/event-card.js
// Template for the <event-card> custom element: an image with a name and a
// year caption. EventCard.connectedCallback fills in the placeholders and
// clones this content into its shadow root.
const template = document.createElement("template");
template.innerHTML = `
<link
href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700"
rel="stylesheet"
/>
<!-- Icons -->
<link href="./assets/vendor/nucleo/css/nucleo.css" rel="stylesheet" />
<link
href="./assets/vendor/font-awesome/css/font-awesome.min.css"
rel="stylesheet"
/>
<!-- Argon CSS -->
<link
type="text/css"
href="./assets/css/argon.css?v=1.1.0"
rel="stylesheet"
/>
<div class="card card-lift--hover shadow border-0">
<div class="px-3 py-3">
<img
alt="image"
class="img-center img-fluid"
style="width: 100%; height: 100%; object-fit: cover;"
/>
</div>
<div class="px-3 pb-3">
<h4 class="name"></h4>
<p class="year"></p>
</div>
</div>
`;
/**
 * <event-card> custom element: shows an event image together with its
 * name and year, all taken from the element's attributes (`image`,
 * `name`, `year`, `id`).
 */
export default class EventCard extends HTMLElement {
  constructor() {
    super();
    this.image = "";
    this.name = "";
    this.year;
    this.id;
  }

  connectedCallback() {
    // An attribute value wins over whatever the property currently holds.
    const read = (attr, fallback) => this.getAttribute(attr) || fallback;
    this.image = read("image", this.image);
    this.name = read("name", this.name);
    this.year = read("year", this.year);
    this.id = read("id", this.id);

    // Fill the shared template, then clone it into the shadow root once.
    const content = template.content;
    content.querySelector(".img-fluid").setAttribute("src", this.image);
    content.querySelector(".name").innerHTML = this.name;
    content.querySelector(".year").innerHTML = this.year;

    if (!this.shadowRoot) {
      this.attachShadow({ mode: "open" });
      this.shadowRoot.appendChild(content.cloneNode(true));
    }
  }
}
<file_sep>/gallery-events.php
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta
name="viewport"
content="width=device-width, initial-scale=1, shrink-to-fit=no"
/>
<meta
name="description"
content="Start your development with a Design System for Bootstrap 4."
/>
<meta name="author" content="Creative Tim" />
<title>Directory</title>
<!-- Favicon -->
<link
href="./assets/img/brand/college_logo.png"
rel="icon"
type="image/png"
/>
<!-- Fonts -->
<link
href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700"
rel="stylesheet"
/>
<!-- Icons -->
<link href="./assets/vendor/nucleo/css/nucleo.css" rel="stylesheet" />
<link
href="./assets/vendor/font-awesome/css/font-awesome.min.css"
rel="stylesheet"
/>
<!-- Argon CSS -->
<link
type="text/css"
href="./assets/css/argon.css?v=1.1.0"
rel="stylesheet"
/>
</head>
<body>
<header class="header-global">
<?php
require "header.php";
?>
</header>
<main>
<!--
=========================================================
* Argon Design System - v1.1.0
=========================================================
* Product Page: https://www.creative-tim.com/product/argon-design-system
* Copyright 2019 Creative Tim (https://www.creative-tim.com)
* Licensed under MIT (https://github.com/creativetimofficial/argon-dashboard/blob/master/LICENSE.md)
* Coded by Creative Tim
=========================================================
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -->
<div class="position-relative">
<section class="section section-lg">
<div class="container">
<div class="col-md-12">
<p class="h3 mb-3" id="Event-title"></p>
</div>
<div class="row justify-content-center">
<div class="col-lg-12">
<div class="row row-grid">
<!-- Used JS to add Components check alumni-card.js-->
</div>
</div>
</div>
</div>
</section>
</div>
<!-- Alumni Modal -->
<style type="text/css">
/*.modal-dialog{
max-height: 50%;
}*/
.img-responsive {
max-height: calc(100vh - 120px);
object-fit: contain;
}
.modal {
text-align:center;
}
.modal-dialog {
width: auto;
}
</style>
<div
class="modal fade photos-modal"
id="modal-default"
tabindex="-1"
role="dialog"
aria-labelledby="modal-default"
aria-hidden="true"
>
<div
class="modal-dialog modal-lg modal-dialog-centered modal-"
role="document"
>
<div class="modal-content">
<div class="modal-body px-1 py-1" id="modal-pic" >
</div>
<div class="modal-footer py-2" style="">
<button
type="button"
class="btn btn-link ml-auto text-primary"
data-dismiss="modal">
Close
</button>
</div>
</div>
</div>
</div>
</main>
<!-- Core -->
<script src="./assets/vendor/jquery/jquery.min.js"></script>
<script src="./assets/vendor/popper/popper.min.js"></script>
<script src="./assets/vendor/bootstrap/bootstrap.min.js"></script>
<script src="./assets/vendor/headroom/headroom.min.js"></script>
<!-- Optional JS -->
<script src="./assets/vendor/onscreen/onscreen.min.js"></script>
<script src="./assets/vendor/nouislider/js/nouislider.min.js"></script>
<script src="./assets/vendor/bootstrap-datepicker/js/bootstrap-datepicker.min.js"></script>
<!-- Argon JS -->
<script src="./assets/js/argon.js?v=1.1.0"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-app.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-database.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-storage.js"></script>
<script src="assets/js/gallery-events.js" type="module"></script>
<script>
document.querySelector('nav').classList.add('bg-default')
</script>
</body>
</html>
<file_sep>/assets/js/profile/degrees.js
// Wires up the two degree cards on the profile page. The first card is
// always shown; the second can be toggled with the add/remove button.
// Selected values are mirrored into the window.degreeN / window.departmentN
// globals that the rest of the profile code reads.
export default function degrees() {
  const firstHolder = window.degreeContainer1;
  const secondHolder = window.degreeContainer2;

  firstHolder.classList.add("col-md-6", "mb-3");
  firstHolder.appendChild(window.degreeCard1);
  secondHolder.classList.add("col-md-6", "mb-3");
  secondHolder.appendChild(window.degreeCard2);
  window.degreeRow.prepend(firstHolder);

  const toggleButton = document.querySelector(".add-degree");
  let secondVisible = false;

  toggleButton.addEventListener("click", () => {
    if (!secondVisible && toggleButton.className.includes("add-degree")) {
      // Show the second degree card and flip the button to "remove".
      window.degreeRow.prepend(secondHolder);
      toggleButton.className = "btn btn-danger remove-degree";
      toggleButton.firstElementChild.className = "fa fa-minus";
      secondVisible = true;
    } else if (secondVisible && toggleButton.className.includes("remove-degree")) {
      // Hide it again and flip the button back to "add".
      window.degreeRow.removeChild(secondHolder);
      toggleButton.className = "btn btn-success add-degree";
      toggleButton.firstElementChild.className = "fa fa-plus";
      secondVisible = false;
    }
  });

  // Mirror a card's dropdown selection into a global via a "change" listener.
  // NOTE(review): this listens for "change" on .degree/.department inside the
  // card's shadow root — confirm those elements actually emit "change"
  // (dropdown-item <p> tags would not).
  const track = (card, selector, assign) => {
    card.shadowRoot.querySelector(selector).addEventListener("change", e => {
      assign(e.target.value);
    });
  };

  track(window.degreeCard1, ".degree", v => { window.degree1 = v; });
  track(window.degreeCard1, ".department", v => { window.department1 = v; });
  if (window.degreeCard2.shadowRoot) {
    track(window.degreeCard2, ".degree", v => { window.degree2 = v; });
    track(window.degreeCard2, ".department", v => { window.department2 = v; });
  }
}
<file_sep>/profile.php
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta
name="viewport"
content="width=device-width, initial-scale=1, shrink-to-fit=no"
/>
<meta
name="description"
content="Start your development with a Design System for Bootstrap 4."
/>
<meta name="author" content="<NAME>" />
<title>Profile</title>
<!-- Favicon -->
<link
href="./assets/img/brand/college_logo.png"
rel="icon"
type="image/png"
/>
<!-- Fonts -->
<link
href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700"
rel="stylesheet"
/>
<!-- Icons -->
<link href="./assets/vendor/nucleo/css/nucleo.css" rel="stylesheet" />
<link
href="./assets/vendor/font-awesome/css/font-awesome.min.css"
rel="stylesheet"
/>
<!-- Argon CSS -->
<link
type="text/css"
href="./assets/css/argon.css?v=1.1.0"
rel="stylesheet"
/>
<link rel="stylesheet" href="./assets/css/croppie.css" />
</head>
<body>
<header class="header-global">
<?php
require "header.php";
?>
</header>
<main>
<!--
=========================================================
* Argon Design System - v1.1.0
=========================================================
* Product Page: https://www.creative-tim.com/product/argon-design-system
* Copyright 2019 Creative Tim (https://www.creative-tim.com)
* Licensed under MIT (https://github.com/creativetimofficial/argon-dashboard/blob/master/LICENSE.md)
* Coded by Creative Tim
=========================================================
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -->
<div class="position-relative">
<section class="section section-lg">
<div class="container">
<form id="profile-form">
<div class="row">
<div class="col-md-12">
<p class="h3">Profile Details</p>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="form-group">
<label for="name" class="col-form-label"
>Profile Picture</label
>
<div class="custom-file">
<input
type="file"
class="custom-file-input"
id="customFile"
/>
<label class="custom-file-label" for="customFile"
>Choose file</label
>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="upload-image"></div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="form-group">
<label for="name" class="col-form-label">Full Name</label>
<input
type="text"
class="form-control"
id="name"
placeholder="<NAME>"
required
/>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="form-group">
<label for="email" class="col-form-label">E-Mail</label>
<input
type="email"
class="form-control"
id="email"
placeholder="<EMAIL>"
required
/>
</div>
</div>
</div>
<div class="row">
<div class="col-md-3">
<div class="form-group">
<label for="country" class="col-form-label">Country</label>
<select
name="country"
class="countries form-control"
id="country"
required
>
<option value="">SELECT COUNTRY</option>
</select>
</div>
</div>
<div class="col-md-3">
<div class="form-group">
<label for="state" class="col-form-label">State</label>
<select
name="state"
class="states form-control"
id="state"
required
>
<option value="">SELECT STATE</option>
</select>
</div>
</div>
<div class="col-md-3">
<div class="form-group">
<label for="city" class="col-form-label">City</label>
<select
name="city"
class="cities form-control"
id="city"
required
>
<option value="">SELECT CITY</option>
</select>
</div>
</div>
</div>
<div class="row">
<div class="col-md-3">
<div class="form-group">
<label for="year" class="col-form-label"
>Year of graduation</label
>
<input
type="text"
class="form-control"
id="year"
placeholder="The year of passing"
required
/>
</div>
</div>
</div>
<div class="row">
<div class="col-md-12">
<div class="form-group">
<label for="degree" class="col-form-label">
Degree from M.E.S.C.O.E.</label
>
<br />
<div class="row degree-row">
<div class="col-md-3 my-auto">
<button
type="button"
class="btn btn-success add-degree"
>
<i class="fa fa-plus" aria-hidden="true"></i>
</button>
</div>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="form-group">
<label for="bio" class="col-form-label">About You</label>
<textarea
class="form-control"
id="bio"
rows="3"
placeholder="Write something about yourself"
maxlength="100"
></textarea>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="form-group">
<label for="details" class="col-form-label"
>Further Studies / Job Details</label
>
<textarea
class="form-control"
id="details"
rows="3"
placeholder="Your current situation as to study or job"
maxlength="100"
></textarea>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="form-group">
<label for="twitter" class="col-form-label"
>Twitter Link</label
>
<input
pattern="^.*twitter.*$"
type="url"
class="form-control"
id="twitter"
placeholder="Your Twitter Page Link (Optional)"
/>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="form-group">
<label for="facebook" class="col-form-label"
>Facebook Link</label
>
<input
pattern="^.*facebook.*$"
type="url"
class="form-control"
id="facebook"
placeholder="Your Facebook Page Link (Optional)"
/>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="form-group">
<label for="linkedin" class="col-form-label"
>LinkedIn Link</label
>
<input
pattern="^.*linkedin.*$"
type="url"
class="form-control"
id="linkedin"
placeholder="Your LinkedIn Page Link (Optional)"
/>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="form-group">
<label for="instagram" class="col-form-label"
>Instagram Link</label
>
<input
pattern="^.*instagram.*$"
type="url"
class="form-control"
id="instagram"
placeholder="Your Instagram Profile Link (Optional)"
/>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="form-group">
<label for="github" class="col-form-label"
>Github Link</label
>
<input
pattern="^.*github.*$"
type="url"
class="form-control"
id="github"
placeholder="Your Github Profile Link (Optional)"
/>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6 text-left">
<input type="submit" class="btn btn-success" value="Save" />
</div>
</div>
</form>
</div>
</section>
</div>
</main>
<!-- Core -->
<script src="./assets/vendor/jquery/jquery.min.js"></script>
<script src="./assets/vendor/popper/popper.min.js"></script>
<script src="./assets/vendor/bootstrap/bootstrap.min.js"></script>
<script src="./assets/vendor/headroom/headroom.min.js"></script>
<script src="./assets/js/croppie/croppie.min.js"></script>
<!-- Optional JS -->
<script src="./assets/vendor/onscreen/onscreen.min.js"></script>
<script src="./assets/vendor/nouislider/js/nouislider.min.js"></script>
<script src="./assets/vendor/bootstrap-datepicker/js/bootstrap-datepicker.min.js"></script>
<!-- Argon JS -->
<script src="./assets/js/argon.js?v=1.1.0"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-app.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-database.js"></script>
<script src="https://www.gstatic.com/firebasejs/7.6.1/firebase-storage.js"></script>
<script src="./assets/js/profile/profile.js" type="module"></script>
<script>
document.querySelector('nav').classList.add('bg-default')
</script>
</body>
</html>
| ba86591b10c2dcdae99e764f3b2b1bdc4c860f09 | [
"JavaScript",
"Markdown",
"PHP"
] | 24 | JavaScript | yashgandhi876/MESCOE-Alumni-Portal | a361dae483b7c6664293d2a9588e29b8435a8f64 | 5f2543a84220fbde41f5f3736980f1ff6a0222c0 |
refs/heads/main | <repo_name>HelderOvalle11/Telecom<file_sep>/proyecto_1.py
'''
Universidad del Valle de Guatemala
Sistemas de telecomunicaciones 1
Proyecto 1
<NAME> 18291
<NAME> 18349
'''
import requests
import networkx
import networkx as nx
import matplotlib.pyplot as plt
import re
import sys
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from tkinter import *
from tkscrolledframe import ScrolledFrame
import pydot
import pylab as plt
ventana = tk.Tk()
ventana.title('Proyecto 1 Sistemas de telecomunicaciones 1') #titulo
ventana.geometry('700x600') #ancho y alto de la ventana
#Creando 2 tabs, 1 para ingresar datos y otro para los resultados
tabControl = ttk.Notebook(ventana)
s = ttk.Style()
s.configure('new.TFrame', background='#0277BD')
tab1 = ttk.Frame(tabControl, style = 'new.TFrame')
tab2 = ttk.Frame(tabControl, style = 'new.TFrame')
tab3 = ttk.Frame(tabControl, style = 'new.TFrame')
tab4 = ttk.Frame(tabControl, style = 'new.TFrame')
tabControl.add(tab1, text ='Ingreso de datos')
tabControl.add(tab2, text ='Diagramas y resultados')
tabControl.add(tab3, text ='Mensajes Eventos')
tabControl.add(tab4, text ='Listado de ASN')
tabControl.pack(expand = 1, fill ="both")
sf = ScrolledFrame(tab3)
sf2 = ScrolledFrame(tab4)
sf.pack(side="top", expand=1, fill="both")
sf2.pack(side="top", expand=1, fill="both")
sf.bind_arrow_keys(tab3)
sf.bind_scroll_wheel(tab3)
sf2.bind_arrow_keys(tab4)
sf2.bind_scroll_wheel(tab4)
inner_frame = sf.display_widget(Frame)
inner_frame2 = sf2.display_widget(Frame)
G1 = nx.Graph()
G2 = nx.Graph()
G3 = nx.Graph()
#--------------------------------- MENSAJES DE BIENVENIDA ------------------------------------------------------------------------
bienvenida = tk.Label(tab1,text="Proyecto 1 Sistemas de telecomunicaciones 1",
bg="black",fg="white",font=("Verdana",16))
bienvenida2 = tk.Label(tab1,text="Integrantes: <NAME> 18291; <NAME> 18349",
bg="black",fg="white",font=("Verdana",10))
bienvenida.pack(fill=tk.X) #Ubicación del título
bienvenida2.pack(fill=tk.X)
#--------------------------------- IP ------------------------------------------------------------------------
etiqueta_ip = tk.Label(tab1,text="IP: ",
bg="black",fg="white") #Etiqueta para ingresar ip
etiqueta_ip.place(x=10, y=70) #Ubicación de la etiqueta_ip
entry_ip = ttk.Entry(tab1) #Caja de texto para ingresar ip
entry_ip.place(x=10, y=95) #Ubicación de la caja de texto
etiqueta_ip2 = tk.Label(tab1,text="La ip ingresada es: ",
bg="black",fg="white") #Etiqueta para confirmar la ip ingresada
etiqueta_ip2.place(x=150, y=95) #Ubicación de la etiqueta_ip2
def ingresar_ip():
ip1 = entry_ip.get()
etiqueta_ip2.config(text="La ip ingresada es: "+ip1)
return
B_ip = tk.Button(tab1, text ="Submit IP",
bg="#BDBDBD",fg="black",height = 2, width = 10,
command = ingresar_ip) #Boton para ingresar ip
B_ip.place(x=10, y=120) #Ubicación del botón
#------------------------ STARTIME Y ENDTIME ---------------------------------------------------------------------------------------
etiqueta_start = tk.Label(tab1,text="Tiempo de inicio (Año-Mes-DíaTHora:Min): ",
bg="black",fg="white") #Etiqueta para ingresar startime
etiqueta_end = tk.Label(tab1,text="Tiempo de finalización (Año-Mes-DíaTHora:Min): ",
bg="black",fg="white") #Etiqueta para ingresar startime
etiqueta_start.place(x=10, y=180) #Ubicación de startime
etiqueta_end.place(x=10, y=220) #Ubicación de endtime
entry_start = ttk.Entry(tab1) #Caja de texto para ingresar startime
entry_end = ttk.Entry(tab1) #Caja de texto para ingresar endtime
entry_start.place(x=250, y=180) #Ubicación de la caja de texto startime
entry_end.place(x=280, y=220) #Ubicación de la caja de texto endtime
etiqueta_start2 = tk.Label(tab1,text="Startime ingresado: ",
bg="black",fg="white") #Etiqueta para confirmar startime
etiqueta_end2 = tk.Label(tab1,text="Endtime ingresado: ",
bg="black",fg="white") #Etiqueta para confirmar endtime
etiqueta_start2.place(x=380, y=180) #Ubicación de la etiqueta_start2
etiqueta_end2.place(x=410, y=220) #Ubicación de la etiqueta_end2
def ingresar_tiempo():
startime = entry_start.get()
endtime = entry_end.get()
etiqueta_start2.config(text="Startime ingresado: "+startime)
etiqueta_end2.config(text="Endtime ingresado: "+endtime)
return
B_tiempo = tk.Button(tab1, text ="Submit Periodo de tiempo",
bg="#BDBDBD",fg="black",height = 2, width = 20, command = ingresar_tiempo) #Boton para ingresar tiempo
B_tiempo.place(x=10, y=250) #Ubicación del botón
etiqueta_tipow = tk.Label(inner_frame,text="No existe path en el timestamp:",bg="black",fg="white",font=("Verdana",16)) #Etiqueta para tipo w
etiqueta_tipow.pack(fill=tk.X)
#--------------------------------- BUSQUEDA ASN EN EVENTOS ------------------------------------------------------------------------
etiqueta_asn = tk.Label(tab1,text="ASN: ",
bg="black",fg="white") #Etiqueta para ingresar asn
etiqueta_asn.place(x=10, y=300) #Ubicación de asn
entry_asn = ttk.Entry(tab1) #Caja de texto para ingresar el asn
entry_asn.place(x=10, y=325) #Ubicación de la caja de texto asn
etiqueta_b_asn = tk.Label(tab1,text="ASN ingresado: ",
bg="black",fg="white") #Etiqueta para confirmar ASN
etiqueta_b_asn.place(x=140, y=325) #Ubicación de la etiqueta_b_asn
def encontrar_ASN():
b_ASN = entry_asn.get()
etiqueta_b_asn.config(text="ASN ingresado: "+b_ASN)
return
B_tiempo = tk.Button(tab1, text ="Submit ASN",
bg="#BDBDBD",fg="black",height = 2, width = 20, command = encontrar_ASN) #Boton para encontrar asn
B_tiempo.place(x=10, y=355) #Ubicación del botón
etiqueta_listado = tk.Label(inner_frame2,text="Listado de ASN:",bg="black",fg="white",font=("Verdana",16)) #Listado ASN
etiqueta_listado.pack(fill=tk.X)
def salir_programa():
ventana.destroy()
return
B_salir = tk.Button(tab1, text ="Salir del programa",
bg="#BDBDBD",fg="black",height = 2, width = 20, command = salir_programa) #Boton para salir del programa
B_salir.place(x=500, y=400) #Ubicación del botón
#------------------------ DEF RIPE Y BGP ---------------------------------------------------------------------------------------
def RIPE():
ip = entry_ip.get()
if not ip:
messagebox.showwarning( "WARNING", "No ha ingresado todos los datos")
elif ip:
url = 'https://stat.ripe.net/data/ris-peerings/data.json?resource={}'.format(ip)
resp = requests.get(url)
try:
cantidad = len(resp.json()['data']['peerings'])
#print(url)
for i in range(cantidad):
cont = len(resp.json()['data']['peerings'][i]['peers'])
# print(cont)
for x in range(cont):
as_path = resp.json()['data']['peerings'][i]['peers'][x]['routes']
A = ("%s\n" %as_path)
if A != "[]\n":
dato = [int(s) for s in re.findall(r'\b\d+\b', A)]
# print(dato)
nx.add_path(G1, dato)
plt.figure(1, figsize =(10, 7)) #Defino mi figura
ax1 = plt.gca()
ax1.set_title("Diagrama de anuncios para la ip: \n"+ip, fontsize = 25, weight = 25) #Coloco un titulo
nx.draw(G1,pos=nx.nx_pydot.pydot_layout(G1, prog="dot"),node_color='#FC947E', with_labels=True, font_size=7, node_size = 50)
#nx.draw(G1,pos=nx.spring_layout(G1),node_color='#FC947E', with_labels=True, font_size=10, node_size = 30)
#plt.savefig("RIPE")
plt.show()
plt.close()
except KeyError:
msg = messagebox.showerror( "ERROR", "Reviste los datos en la pestaña de Ingreso de Datos")
return
def BGP():
ip = entry_ip.get()
startime = entry_start.get()
endtime = entry_end.get()
if not ip and startime and endtime:
messagebox.showwarning( "WARNING", "No ha ingresado todos los datos")
elif ip and startime and endtime:
url = 'https://stat.ripe.net/data/bgplay/data.json?resource={}&starttime={}&endtime={}'.format(ip,startime,endtime)
resp = requests.get(url)
try:
cantidad = len(resp.json()['data']['events'])
for j in range(cantidad):
tipo = (resp.json()['data']['events'][j]['type'])
tipo_l = len(resp.json()['data']['events'][j]['type'])
time_stamp = (resp.json()['data']['events'][j]['timestamp'])
#print(tipo_l)
if tipo == "A":
path = (resp.json()['data']['events'][j]['attrs']['path'])
B = ("%s\n" %path)
if B != "[]\n":
dato = [int(s) for s in re.findall(r'\b\d+\b', B)]
mens = Message(inner_frame2,text=(str(dato[0])+"\n"+"-----------------------"+"\n"),bg="black",fg="white",width="400",font=("Verdana",12)) #mensaje para ASN
mens.pack(fill=tk.X)
nx.add_path(G2, dato)
elif tipo == "W":
# msg = messagebox.showinfo( "AVISO","No existe path en el timestamp: "+time_stamp+"\nes de tipo: "+tipo)
msg = Message(inner_frame,text=time_stamp+"\n"+"-----------------------"+"\n",bg="black",fg="white",width="400",font=("Verdana",16)) #mensaje para tipo w
msg.pack(fill=tk.X)
plt.figure(1, figsize =(10, 8)) #Defino mi figura
ax2 = plt.gca()
ax2.set_title("Diagrama de eventos para la ip: \n"+ip+"\nEn el período: \n"+startime+" - "+endtime, fontsize = 10, weight = 25) #titulo
nx.draw(G2,pos=nx.nx_pydot.pydot_layout(G2, prog="dot"),node_color='green', with_labels=True, font_size=10, node_size = 30)
#plt.savefig(grafo)
plt.show()
plt.close()
except (UnboundLocalError,KeyError):
msg2 = messagebox.showwarning( "WARNING", "No ha ingresado algún dato o Ingrese otro periodo de tiempo, ya que no existen path en eventos")
return
def BUSCAR_ASN():
    """Draw the announcement graph restricted to one specific ASN.

    Reads the IP/prefix, the time window and the target ASN from the GUI
    entry widgets, queries RIPEstat's BGPlay endpoint, collects every
    announced AS path whose origin matches the requested ASN and renders
    those paths with networkx/matplotlib.  Withdrawal events ("W") are
    listed in the side panel instead of being drawn.
    """
    ip = entry_ip.get()
    startime = entry_start.get()
    endtime = entry_end.get()
    b_ASN = entry_asn.get()
    I_ASN = []  # origin ASN of each collected announcement
    T_ASN = []  # full AS path of each collected announcement
    # Fix: warn whenever ANY field is missing. The old check
    # ("if not ip and startime and ...") only fired when the IP alone was
    # empty while every other field was filled in, and silently did nothing
    # for the remaining incomplete combinations.
    if not (ip and startime and endtime and b_ASN):
        messagebox.showwarning( "WARNING", "No ha ingresado todos los datos")
    else:
        url = 'https://stat.ripe.net/data/bgplay/data.json?resource={}&starttime={}&endtime={}'.format(ip,startime,endtime)
        resp = requests.get(url)
        try:
            # Parse the JSON response once instead of re-parsing it for
            # every field of every event.
            events = resp.json()['data']['events']
            for event in events:
                tipo = event['type']
                time_stamp = event['timestamp']
                if tipo == "A":
                    path = event['attrs']['path']
                    B = ("%s\n" % path)
                    if B != "[]\n":
                        # Extract the numeric ASNs from the path string.
                        dato = [int(s) for s in re.findall(r'\b\d+\b', B)]
                        T_ASN.append(dato)     # todos los datos
                        I_ASN.append(dato[0])  # casilla del ASN
                elif tipo == "W":
                    # Withdrawals carry no path; report only the timestamp.
                    msg = Message(inner_frame,text=time_stamp+"\n"+"-----------------------"+"\n",bg="black",fg="white",width="400",font=("Verdana",16))
                    msg.pack(fill=tk.X)
            if b_ASN in str(I_ASN):
                # Add every path originated by the requested ASN to G3.
                for ASN in T_ASN:
                    if b_ASN in str(ASN[0]):
                        nx.add_path(G3, ASN)
                plt.figure(1, figsize =(10, 5))  # drawing canvas
                ax3 = plt.gca()
                ax3.set_title("Diagrama de eventos para el ASN: \n"+b_ASN+"\nEn el período: \n"+startime+" - "+endtime
                ,fontsize = 7, weight = 25)
                nx.draw(G3,pos=nx.nx_pydot.pydot_layout(G3, prog="dot"), node_color='blue', with_labels=True, font_size=12, node_size = 28)
                plt.show()
                plt.close()
            else:
                messagebox.showerror( "ERROR","Verificar los datos ingresados")
        except (UnboundLocalError,KeyError):
            # Missing keys in the JSON (no events for the period) land here.
            messagebox.showwarning( "WARNING", "No ha ingresado algún dato o Ingrese otro periodo de tiempo, ya que no existen path en eventos")
    return
def EVENTOS():
    """Draw the full event graph with one ASN's routes highlighted.

    Queries RIPEstat's BGPlay endpoint for the given IP/prefix and time
    window, draws every announced path (graph G2, green) and overlays the
    paths originated by the requested ASN (graph G3, blue) on the same
    layout.  Withdrawal events ("W") are listed in the side panel.
    """
    ip = entry_ip.get()
    startime = entry_start.get()
    endtime = entry_end.get()
    b_ASN = entry_asn.get()
    I_ASN = []  # origin ASN of each collected announcement
    T_ASN = []  # full AS path of each collected announcement
    # Fix: warn whenever ANY field is missing. The original condition
    # ("if not ip and startime and ...") only warned when the IP alone was
    # empty while all other fields were filled, and did nothing otherwise.
    if not (ip and startime and endtime and b_ASN):
        messagebox.showwarning( "WARNING", "No ha ingresado todos los datos")
    else:
        url = 'https://stat.ripe.net/data/bgplay/data.json?resource={}&starttime={}&endtime={}'.format(ip,startime,endtime)
        resp = requests.get(url)
        try:
            # Parse the JSON response once instead of re-parsing per field.
            events = resp.json()['data']['events']
            for event in events:
                tipo = event['type']
                time_stamp = event['timestamp']
                if tipo == "A":
                    path = event['attrs']['path']
                    B = ("%s\n" % path)
                    if B != "[]\n":
                        dato = [int(s) for s in re.findall(r'\b\d+\b', B)]
                        # Every announced path goes into the base graph G2.
                        nx.add_path(G2,dato)
                        T_ASN.append(dato)     # todos los datos
                        I_ASN.append(dato[0])  # casilla del ASN
                elif tipo == "W":
                    # Withdrawals carry no path; report only the timestamp.
                    msg = Message(inner_frame,text=time_stamp+"\n"+"-----------------------"+"\n",bg="black",fg="white",width="400",font=("Verdana",16))
                    msg.pack(fill=tk.X)
            if b_ASN in str(I_ASN):
                # Paths originated by the requested ASN form the overlay G3.
                for ASN in T_ASN:
                    if b_ASN in str(ASN[0]):
                        nx.add_path(G3, ASN)
                plt.figure(1, figsize =(10, 8))  # drawing canvas
                ax3 = plt.gca()
                ax3.set_title("Diagrama de eventos para la ip: \n"+ip+"\nEn el período: \n"+startime+" - "+endtime+"\nMarcando la ruta del ASN: "+b_ASN
                ,fontsize = 7, weight = 25)
                # Both graphs are drawn with G2's layout so the blue overlay
                # lands on top of the corresponding green nodes.
                nx.draw(G2,pos=nx.nx_pydot.pydot_layout(G2, prog="dot"),node_color='green', with_labels=True, font_size=10, node_size = 30)
                nx.draw(G3,pos=nx.nx_pydot.pydot_layout(G2, prog="dot"), node_color='blue', with_labels=True, font_size=10, node_size = 30)
                plt.show()
                plt.close()
            else:
                messagebox.showerror( "ERROR","Verificar los datos ingresados")
        except (UnboundLocalError,KeyError):
            # Missing keys in the JSON (no events for the period) land here.
            messagebox.showwarning( "WARNING", "No ha ingresado algún dato o Ingrese otro periodo de tiempo, ya que no existen path en eventos")
    return
#------------------------ DIAGRAM BUTTONS (tab2) ----------------------------------------------------------------------------------------------------------------------------------------------------
# Wire the four diagram commands to buttons on the second tab.
B_anuncios = tk.Button(tab2, text ="Diagrama de anuncios",bg="#BDBDBD",fg="black",height = 2, width = 20, command = RIPE) #button for the announcements diagram
B_anuncios.place(x=10, y=10) #button position
B_evento = tk.Button(tab2, text ="Diagrama de eventos", bg="#BDBDBD",fg="black",height = 2, width = 20, command = BGP) #button for the events diagram
B_evento.place(x=200, y=10) #button position
B_asn = tk.Button(tab2, text ="Diagrama para ASN en específico",bg="#BDBDBD",fg="black",height = 2, width = 30, command = BUSCAR_ASN) #button for a specific ASN's diagram
B_asn.place(x=10, y=110) #button position
B_asn2 = tk.Button(tab2, text ="Diagrama eventos y ASN",bg="#BDBDBD",fg="black",height = 2, width = 30, command = EVENTOS) #button for the events + ASN overlay diagram
B_asn2.place(x=10, y=210) #button position
#--------------------------------- mainloop -------------------------------------------------------------------------------------------------------------------------------
# Hand control to Tk; blocks until the main window is closed.
ventana.mainloop()
<file_sep>/README.md
# Telecom
proyecto_1
| aff346745703841e47b3932bb526cee408b37b22 | [
"Markdown",
"Python"
] | 2 | Python | HelderOvalle11/Telecom | 551894418625ec742f3ebfdce67d7e385916af91 | 63d0237f7878e5a47955a4f81603acfda1936492 |
refs/heads/master | <file_sep>package bancodados.model;
import bancodados.connection.ConectionFactory;
import usuarios.Atendente;
import javax.swing.*;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
/**
 * Data-access object for the {@code atendentes} table.
 *
 * Each method obtains its own JDBC connection from {@link ConectionFactory}
 * and releases it in a {@code finally} block. User feedback is shown through
 * Swing dialogs (messages kept in Portuguese, matching the rest of the app).
 */
public class AtendenteDAO {

    /**
     * Inserts a new attendant row.
     *
     * @param atendente the attendant to persist; its generated id is not read back
     */
    public void create(Atendente atendente){
        Connection con = ConectionFactory.getConnection();
        PreparedStatement stmt = null;
        try{
            stmt = con.prepareStatement("INSERT INTO atendentes (nome, idade, areasaude, endereco, datavacinacao) " +
                    "VALUES (?, ?, ?, ?, ?)");
            stmt.setString(1, atendente.getNome());
            stmt.setInt(2, atendente.getIdade());
            stmt.setBoolean(3, atendente.isAreasaude());
            stmt.setString(4, atendente.getEndereco());
            stmt.setString(5, atendente.getDatavacinacao());
            stmt.executeUpdate();
            JOptionPane.showMessageDialog(null, "Salvo com Sucesso!");
        }catch (SQLException ex){
            // NOTE(review): failures are only logged to stdout here, while
            // delete() shows a dialog — consider unifying the error handling.
            System.out.println(ex);
        }finally {
            ConectionFactory.closeConnection(con, stmt);
        }
    }

    /**
     * Loads every attendant from the table.
     *
     * @return all rows mapped to {@link Atendente}; empty list on SQL failure
     */
    public List<Atendente> read(){
        Connection con = ConectionFactory.getConnection();
        PreparedStatement stmt = null;
        ResultSet rs = null;
        // Use the diamond operator instead of the raw ArrayList type.
        List<Atendente> atendenteList = new ArrayList<>();
        try{
            stmt = con.prepareStatement("SELECT * FROM atendentes");
            rs = stmt.executeQuery();
            while (rs.next()){
                Atendente novo = new Atendente();
                novo.set_id(rs.getInt("_id"));
                novo.setNome(rs.getString("nome"));
                novo.setIdade(rs.getInt("idade"));
                novo.setEndereco(rs.getString("endereco"));
                novo.setAreasaude(rs.getBoolean("areasaude"));
                novo.setDatavacinacao(rs.getString("datavacinacao"));
                atendenteList.add(novo);
            }
        }catch (SQLException ex){
            System.out.println(ex);
        }finally {
            ConectionFactory.closeConnection(con, stmt, rs);
        }
        return atendenteList;
    }

    /**
     * Deletes the attendant row whose {@code _id} matches the given object.
     *
     * @param atendente the attendant whose id identifies the row to remove
     */
    public void delete(Atendente atendente){
        Connection con = ConectionFactory.getConnection();
        PreparedStatement stmt = null;
        try{
            stmt = con.prepareStatement("DELETE FROM atendentes WHERE _id = ?");
            stmt.setInt(1, atendente.get_id());
            stmt.executeUpdate();
            JOptionPane.showMessageDialog(null, "Excluído com sucesso!");
        }catch (SQLException ex){
            JOptionPane.showMessageDialog(null, "Erro ao excluir: " + ex);
        }finally {
            ConectionFactory.closeConnection(con, stmt);
        }
    }
}
| 8cbe295171a044e4fd647ec1ad9d3cc00281e7ba | [
"Java"
] | 1 | Java | Dracorike/Projeto-Vacina-o | 51d58f0e4a32c55027dd5fb971b6ba9fdd0f5adf | 33a04c47c57b217250863ae9576661cb6c04a1a2 |
refs/heads/master | <repo_name>camilaleniss/TorneoAdmin<file_sep>/TorneoApp/TorneoApp/Gui/CambiarCategoria.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.ControlUsers;
using TorneoApp.Model;
namespace TorneoApp.Gui
{
/// <summary>
/// Modal dialog that lets the user pick a destination category and move
/// the currently selected competitor into it (via the owning
/// <see cref="CategoriasLists"/> control).
/// </summary>
public partial class CambiarCategoria : Form
{
    // Owning list control; its MoverA(index) performs the actual move.
    public CategoriasLists categorias { get; set; }
    // Index of the category currently highlighted in listCategorias.
    public int index;

    public CambiarCategoria()
    {
        InitializeComponent();
    }

    /// <summary>Populates the list box with the given category names.</summary>
    public void InitializeCategorias(List<string> categorias)
    {
        foreach(string cat in categorias)
        {
            listCategorias.Items.Add(cat);
        }
    }

    // NOTE(review): despite the "save changes" name, this button only closes
    // the dialog without calling MoverA — confirm whether that is intended.
    private void ButSaveChanges_Click(object sender, EventArgs e)
    {
        this.Dispose();
    }

    // Tracks the user's current selection so ButIniciar can use it.
    private void ListCategorias_SelectedIndexChanged(object sender, EventArgs e)
    {
        if (listCategorias.SelectedIndices.Count > 0)
        {
            this.index = listCategorias.SelectedIndices[0];
        }
    }

    // Confirms the move: delegates to the owning control, then closes.
    private void ButIniciar_Click(object sender, EventArgs e)
    {
        categorias.MoverA(index);
        this.Dispose();
    }
}
}
<file_sep>/TorneoApp/TorneoApp/Model/CatSanda.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
[Serializable]
public class CatSanda : Categoria
{
    /// <summary>
    /// Sanda (full-contact) category: holds the fight bracket state and the
    /// weight/age/level segmentation logic used while building categories.
    /// </summary>
    public bool Genere { get; set; }
    public List<Competidor> ParticipantesVencidos { get; set; }
    public List<Combate> CombatesActivos { get; set; }

    /// <summary>
    /// Builds one random round of fights from the current participants and
    /// stores it in <see cref="CombatesActivos"/>.
    /// </summary>
    public void RondaDeCombates()
    {
        List<Combate> combates = new List<Combate>();
        Random rand = new Random();
        // Pool of participant indices still unpaired this round.
        List<int> possible = Enumerable.Range(0, Participantes.Count).ToList();
        for (int i = 0; i < Participantes.Count / 2; i++)
        {
            int index = rand.Next(0, possible.Count);
            Competidor p1 = Participantes[possible[index]];
            possible.RemoveAt(index);
            int index2 = rand.Next(0, possible.Count);
            Competidor p2 = Participantes[possible[index2]];
            possible.RemoveAt(index2);
            Combate c = new Combate(p1, p2);
            combates.Add(c);
        }
        if (possible.Count > 0)
        {
            // TODO: odd number of fighters — the leftover competitor is
            // paired against himself and auto-wins (an improvised bye).
            // Replace with a proper bye/dummy opponent.
            Combate c = new Combate(Participantes[possible[0]], Participantes[possible[0]]);
            c.Ganador = Participantes[possible[0]];
            combates.Add(c);
        }
        this.CombatesActivos = combates;
    }

    public bool IsMan { get; set; }
    // Participant whose match value deviates most from the category mean.
    public Competidor Atipico { get; set; }

    public CatSanda()
    {
        Opened = true;
    }

    /// <summary>Average sanda match value of the participants (stored in Mean).</summary>
    public override void CalcularMean(){
        var Competidores = Participantes.ToArray();
        double mean = 0;
        for (int i = 0; i < Competidores.Length; i++)
            mean += (Competidores[i].GetMatchValueSanda());
        Mean = mean / Competidores.Length;
    }

    /// <summary>Absolute deviation of one participant from the category mean.</summary>
    public override double CalcularDesviacion(Competidor participante){
        return Math.Abs(participante.GetMatchValueSanda() - Mean);
    }

    /// <summary>
    /// Builds the display name from the category's age/weight ranges and the
    /// majority level. The <paramref name="Nombre"/> parameter is unused but
    /// kept to honor the base-class signature.
    /// </summary>
    public override void GenerarNombre(string Nombre){
        int MinEdad = GetMinEdad();
        int MaxEdad = GetMaxEdad();
        int MinPeso = GetMinPeso();
        int MaxPeso = GetMaxPeso();
        int Nivel = GetCategoriaMayoria();
        string NivelCat = "";
        switch (Nivel){
            case Competidor.PRINCIPIANTE:
                NivelCat = "Principiante";
                break;
            case Competidor.INTERMEDIO:
                NivelCat = "Intermedio";
                break;
            case Competidor.AVANZADO:
                NivelCat = "Avanzado";
                break;
            case Competidor.CINTANEGRA:
                NivelCat = "Cinta Negra";
                break;
        }
        this.Nombre = NivelCat+" de "+MinEdad+"-"+ MaxEdad+" años de "+MinPeso+"-"+MaxPeso+"kg";
    }

    /// <summary>
    /// Finds the participant with the largest deviation from the mean.
    /// </summary>
    public void CalcularAtipico(){
        var Competidores = Participantes.ToArray();
        Atipico = Competidores[0];
        double DesvAtyp = CalcularDesviacion(Atipico);
        // Fix: the previous version never updated DesvAtyp after reassigning
        // Atipico, so every later competitor was compared against the FIRST
        // competitor's deviation and the true maximum could be missed.
        for (int i = 1; i < Competidores.Length; i++){
            double Desv = CalcularDesviacion(Competidores[i]);
            if (Desv >= DesvAtyp){
                Atipico = Competidores[i];
                DesvAtyp = Desv;
            }
        }
    }

    /// <summary>
    /// Splits by weight; returns the subcategories only if each one has at
    /// least two participants, otherwise null.
    /// </summary>
    public List<CatSanda> GetSubCatPeso()
    {
        List<CatSanda> subcategorias = SepararPeso();
        if (subcategorias.All(cat => cat.Participantes.Count >= 2))
            return subcategorias;
        return null;
    }

    /// <summary>Buckets the participants into weight segments.</summary>
    public List<CatSanda> SepararPeso()
    {
        var CompetidoresCategoria = Participantes.ToArray();
        Dictionary<int, List<Competidor>> Segmentacion = new Dictionary<int, List<Competidor>>();
        // NOTE(review): the bucket count reuses MatchSanda.NUMSEGEDAD (an
        // age-segment constant); confirm it covers every value returned by
        // GetMatchPesoMan (up to 10) to avoid a missing-key lookup.
        for (int i = 0; i < MatchSanda.NUMSEGEDAD; i++)
            Segmentacion.Add(i, new List<Competidor>());
        for (int i = 0; i < CompetidoresCategoria.Length; i++)
        {
            int MatchValue = GetMatchPeso(CompetidoresCategoria[i], IsManCategory());
            Segmentacion[MatchValue].Add(CompetidoresCategoria[i]);
        }
        List<CatSanda> CategoriasSegmentadas = ConvertDictionary(Segmentacion);
        return CategoriasSegmentadas;
    }

    /// <summary>Turns the non-empty buckets into CatSanda instances.</summary>
    public List<CatSanda> ConvertDictionary(Dictionary<int, List<Competidor>> Abiertas)
    {
        List<CatSanda> CategoriasAbiertas = new List<CatSanda>();
        for (int i = 0; i < MatchSanda.NUMSEGEDAD; i++)
        {
            CatSanda TempCat = new CatSanda();
            TempCat.Participantes = Abiertas[i];
            if (TempCat.Participantes.Count != 0)
                CategoriasAbiertas.Add(TempCat);
        }
        return CategoriasAbiertas;
    }

    /// <summary>Weight-segment index for a competitor, by gender table.</summary>
    public int GetMatchPeso(Competidor c, bool isMan)
    {
        int Peso = c.Peso;
        return isMan ? GetMatchPesoMan(Peso) : GetMatchPesoFem(Peso);
    }

    // Male weight classes (kg) -> segment index 0..10.
    public int GetMatchPesoMan(int Peso)
    {
        if (Peso >= 30 && Peso <= 35) return 0;
        if (Peso >= 36 && Peso <= 40) return 1;
        if (Peso >= 41 && Peso <= 45) return 2;
        if (Peso >= 46 && Peso <= 50) return 3;
        if (Peso >= 51 && Peso <= 55) return 4;
        if (Peso >= 56 && Peso <= 60) return 5;
        if (Peso >= 61 && Peso <= 65) return 6;
        if (Peso >= 66 && Peso <= 70) return 7;
        if (Peso >= 71 && Peso <= 75) return 8;
        if (Peso >= 76 && Peso <= 78) return 9;
        return 10;
    }

    // Female weight classes (kg) -> segment index 0..8.
    public int GetMatchPesoFem(int Peso)
    {
        if (Peso >= 30 && Peso <= 35) return 0;
        if (Peso >= 36 && Peso <= 40) return 1;
        if (Peso >= 41 && Peso <= 45) return 2;
        if (Peso >= 46 && Peso <= 50) return 3;
        if (Peso >= 51 && Peso <= 55) return 4;
        if (Peso >= 56 && Peso <= 60) return 5;
        if (Peso >= 61 && Peso <= 65) return 6;
        if (Peso >= 66 && Peso <= 70) return 7;
        return 8;
    }

    /// <summary>A single male participant marks the category as male.</summary>
    public bool IsManCategory()
    {
        IsMan = Participantes.Exists(p => p.IsMan);
        return IsMan;
    }
}
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/CompetidoresList.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
/// <summary>
/// List view of every registered competitor; selecting an entry asks the
/// main window to display that competitor's details.
/// </summary>
public partial class CompetidoresList : UserControl
{
    // Index of the competitor currently selected in the list.
    public int Index { get; set; }
    // Hosting main window; queried for competitor details.
    public MainWindow Main { get; set; }

    public CompetidoresList()
    {
        InitializeComponent();
        Index = 0;
    }

    /// <summary>Fills the list with the given competitor descriptions.</summary>
    public void InitializeListCompetidores(List<String> competidores)
    {
        for (int pos = 0; pos < competidores.Count; pos++)
        {
            listCompetidores.Items.Add(competidores[pos]);
        }
    }

    /// <summary>
    /// Writes one competitor's data into the detail widgets and lists the
    /// forms he/she is registered for.
    /// </summary>
    public void InitializeInfoCompetidor(string nombre, string genero, string edad, string peso, string nivel,
        string sanda, string formas, string cintanegra, string telefono, string escuela, string eps,
        string acudiente, string telacudiente, List<Forma> listformas)
    {
        txtNombre.Text = nombre;
        comboGenero.Text = genero;
        txtEdad.Text = edad;
        txtPeso.Text = peso;
        comboNivel.Text = nivel;
        comboSanda.Text = sanda;
        comboFormas.Text = formas;
        comboCinta.Text = cintanegra;
        txtTelefono.Text = telefono;
        txtEscuela.Text = escuela;
        txtEPS.Text = eps;
        txtAcudiente.Text = acudiente;
        txtTelAcudiente.Text = telacudiente;
        listFormas.Clear();
        if (listformas == null)
        {
            return;
        }
        foreach (var forma in listformas)
        {
            this.listFormas.Items.Add(forma.Nombre);
        }
    }

    // Designer-wired; intentionally empty.
    private void Panel2_Paint(object sender, PaintEventArgs e)
    {
    }

    // Designer-wired; intentionally empty.
    private void CompetidoresView_Load(object sender, EventArgs e)
    {
    }

    // Remembers the selection and asks the main window for its details.
    private void ListCompetidores_SelectedIndexChanged(object sender, EventArgs e)
    {
        if (listCompetidores.SelectedIndices.Count == 0)
        {
            return;
        }
        Index = listCompetidores.SelectedIndices[0];
        this.Main.GetInfoCompetidor(Index);
    }
}
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/EscuelasView.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
/// <summary>
/// School browser: a list of schools on one side and, once a school is
/// selected, its competitors and accumulated points on the other.
/// </summary>
public partial class EscuelasView : UserControl
{
    // Hosting main window; queried for a school's competitors.
    public MainWindow Main { get; set; }
    // Index of the school currently selected in listEscuelas.
    public int IndexEscuela { get; set; }

    public EscuelasView()
    {
        InitializeComponent();
    }

    /// <summary>Fills the school list with the given names.</summary>
    public void InitializeEscuelas(List<String> escuelas)
    {
        for (int pos = 0; pos < escuelas.Count; pos++)
        {
            listEscuelas.Items.Add(escuelas[pos]);
        }
    }

    // Designer-wired; intentionally empty.
    private void ListCompetidores_SelectedIndexChanged(object sender, EventArgs e)
    {
    }

    /// <summary>
    /// Shows the selected school's competitors, name and points.
    /// </summary>
    public void InitializeCompetidores(List<Competidor> competidores, string nombre, int puntossanda, int puntosformas)
    {
        listCompetidores.Clear();
        foreach (var competidor in competidores)
        {
            listCompetidores.Items.Add(competidor.ToString());
        }
        labNomEscuela.Text = nombre;
        labPuntosSanda.Text = puntossanda.ToString();
        labPuntosFormas.Text = puntosformas.ToString();
    }

    // Designer-wired; intentionally empty.
    private void EscuelasView_Load(object sender, EventArgs e)
    {
    }

    // Remembers the selection and loads that school's competitors.
    private void ListEscuelas_SelectedIndexChanged(object sender, EventArgs e)
    {
        if (listEscuelas.SelectedIndices.Count == 0)
        {
            return;
        }
        int seleccionada = listEscuelas.SelectedIndices[0];
        IndexEscuela = seleccionada;
        this.Main.GetCompetidoresEscuela(seleccionada);
    }

    // Designer-wired; intentionally empty.
    private void Label4_Click(object sender, EventArgs e)
    {
    }
}
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/CompetidoresController.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
/// <summary>
/// Landing panel of the "competitors" section: two buttons that navigate
/// to the full competitor list and to the attendance-verification view.
/// </summary>
public partial class CompetidoresController : UserControl
{
    // Hosting main window; used to switch views.
    public MainWindow Main { get; set; }

    public CompetidoresController()
    {
        InitializeComponent();
        // Show the tournament name in the header label.
        labNombreTorneo.Text = Torneo.NOMBRE_TORNEO;
    }

    // Navigate to the full competitor list.
    private void ButListado_Click(object sender, EventArgs e)
    {
        Main.ShowView(MainWindow.LISTCOMPETIDORES);
    }

    // Navigate to the attendance-verification view.
    private void ButVerificar_Click(object sender, EventArgs e)
    {
        Main.ShowView(MainWindow.VERIFICAR);
    }
}
}
<file_sep>/TorneoApp/TorneoApp/Gui/MainWindow.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace TorneoApp.Model
{
/// <summary>
/// Application shell and controller: swaps user controls in and out of
/// <c>panelView</c>, mediates between the GUI controls and the
/// <see cref="Torneo"/> model, and handles persistence of the tournament.
/// NOTE(review): the class lives in namespace TorneoApp.Model although the
/// file sits under Gui — confirm this is intentional.
/// </summary>
public partial class MainWindow : Form
{
    // View identifiers used by ShowView and the navigation widgets.
    public const string INIT = "Init";
    public const string CATEGORIAS = "Categorias";
    public const string CATSANDA = "Categoria Sanda";
    public const string CATFORMAS = "Categoria Formas";
    public const string COMPETIDORES = "Competidores";
    public const string LISTCOMPETIDORES = "Listado Competidores";
    public const string VERIFICAR = "Verificar Asistencia";
    public const string ESCUELAS = "Escuelas";
    public const string LISTESCUELAS = "Listado Escuelas";
    public const string RANKING = "Ranking Escuelas";
    public const string COMPETENCIA = "Competencia";
    public const string COMPSANDA = "Sanda";
    public const string COMPFORMAS = "Formas";
    // Current navigation location (section / sub-section) shown in the UI.
    private string categoria { get; set; }
    private string subcategoria { get; set; }
    // The tournament model every view operates on.
    private Torneo Torneo;

    public MainWindow()
    {
        InitializeComponent();
        menuLateral.Controlador = this;
        categoria = "";
        subcategoria = "";
    }

    // Designer-wired; intentionally empty.
    private void MainWindow_Load(object sender, EventArgs e)
    {
    }

    // Designer-wired; intentionally empty.
    private void Banner1_Load(object sender, EventArgs e)
    {
    }

    // Designer-wired; intentionally empty.
    private void MainView1_Load(object sender, EventArgs e)
    {
    }

    /// <summary>
    /// Replaces the content of <c>panelView</c> with the requested view,
    /// initializes it from the model and updates the navigation state.
    /// </summary>
    /// <param name="view">one of the view-name constants above</param>
    public void ShowView(string view)
    {
        this.panelView.Controls.Clear();
        switch (view){
            case INIT:
                this.mainView = new ControlUsers.MainView();
                this.mainView.controlButtons.Main = this;
                this.panelView.Controls.Add(mainView);
                this.categoria = "";
                InitializeMainView();
                break;
            case CATEGORIAS:
                this.categoriasview = new ControlUsers.CategoriasView();
                this.categoriasview.Main = this;
                this.categoriasview.controlButtons1.Main = this;
                InitializeCategorias();
                this.categoria = CATEGORIAS;
                this.subcategoria = "";
                this.panelView.Controls.Add(categoriasview);
                break;
            case COMPETIDORES:
                this.competidorescontroller = new ControlUsers.CompetidoresController();
                this.competidorescontroller.Main = this;
                this.competidorescontroller.controlButtons1.Main = this;
                this.categoria = COMPETIDORES;
                this.subcategoria = "";
                this.panelView.Controls.Add(competidorescontroller);
                break;
            case LISTCOMPETIDORES:
                this.competidoresview = new ControlUsers.CompetidoresList();
                this.competidoresview.Main = this;
                InitializeCompetidoresList();
                this.categoria = COMPETIDORES;
                this.subcategoria = LISTCOMPETIDORES;
                this.panelView.Controls.Add(competidoresview);
                break;
            case VERIFICAR:
                this.importarview = new ControlUsers.ConfirmarView();
                this.importarview.Main = this;
                this.importarview.controlButtons1.Main = this;
                InitializeVerificarView();
                this.categoria = COMPETIDORES;
                this.subcategoria = VERIFICAR;
                this.panelView.Controls.Add(importarview);
                break;
            case ESCUELAS:
                this.escuelascontroller = new ControlUsers.EscuelasController();
                this.escuelascontroller.Main = this;
                this.escuelascontroller.controlButtons1.Main = this;
                this.categoria = ESCUELAS;
                this.subcategoria = "";
                this.panelView.Controls.Add(escuelascontroller);
                break;
            case LISTESCUELAS:
                this.escuelasview = new ControlUsers.EscuelasView();
                this.escuelasview.Main = this;
                this.escuelasview.controlButtons1.Main = this;
                InitializeEscuelasView();
                this.categoria = ESCUELAS;
                this.subcategoria = LISTESCUELAS;
                this.panelView.Controls.Add(escuelasview);
                break;
            case RANKING:
                this.rankingview = new ControlUsers.Ranking();
                rankingview.Main = this;
                InitializeRankingView();
                this.categoria = ESCUELAS;
                this.subcategoria = RANKING;
                this.panelView.Controls.Add(rankingview);
                break;
            case COMPETENCIA:
                this.competenciaview = new ControlUsers.CompetenciaController();
                this.competenciaview.Main = this;
                this.competenciaview.controlButtons1.Main = this;
                this.categoria = COMPETENCIA;
                this.subcategoria = "";
                this.panelView.Controls.Add(competenciaview);
                break;
            case COMPSANDA:
                this.compsanda = new ControlUsers.CompSanda();
                this.compsanda.Main = this;
                InitializeCompetenciaSanda();
                this.categoria = COMPETENCIA;
                this.subcategoria = COMPSANDA;
                this.panelView.Controls.Add(compsanda);
                break;
            case COMPFORMAS:
                this.compformas = new ControlUsers.CompFormas();
                this.compformas.Main = this;
                InitializeCompetenciaFormas();
                this.categoria = COMPETENCIA;
                this.subcategoria = COMPFORMAS;
                this.panelView.Controls.Add(compformas);
                break;
        }
        // The two category-list views share one control and are handled
        // outside the switch.
        if(view.Equals(CATSANDA) || view.Equals(CATFORMAS))
        {
            this.categoriaslist = new ControlUsers.CategoriasLists();
            categoriaslist.IsFormas =view.Equals(CATFORMAS) ? true : false;
            categoriaslist.SetNameCategorias();
            categoriaslist.Window = this;
            categoriaslist.controlButtons1.Main = this;
            InitializeListCategorias();
            this.panelView.Controls.Add(categoriaslist);
            this.categoria = CATEGORIAS;
            this.subcategoria = view.Equals(CATSANDA) ? CATSANDA : CATFORMAS;
        }
        UpdateLocation();
    }

    /// <summary>Propagates the navigation state to menu and banner.</summary>
    public void UpdateLocation()
    {
        menuLateral.UpdateLocation(categoria, subcategoria);
        Banner.UpdateLocation(categoria, subcategoria);
    }

    // Fills the category list for whichever mode the control is in.
    public void InitializeListCategorias()
    {
        bool IsFormas = categoriaslist.IsFormas;
        if (IsFormas)
        {
            InitializeListCatFormas();
        }
        else
        {
            InitializeListCatSanda();
        }
    }

    public void InitializeListCatFormas()
    {
        List<CatFormas> categorias = Torneo.CategoriasFormas;
        foreach(CatFormas c in categorias)
        {
            categoriaslist.getListCategorias().Items.Add(c.Nombre);
        }
    }

    public void InitializeListCatSanda()
    {
        List<CatSanda> categorias = Torneo.CategoriasSanda;
        foreach (CatSanda c in categorias)
        {
            categoriaslist.getListCategorias().Items.Add(c.Nombre);
        }
    }

    // Designer-wired; intentionally empty.
    private void Banner_Load(object sender, EventArgs e)
    {
    }

    // Shows the participants of the category picked in the list control.
    public void SelectCategory(int index, bool IsFormas)
    {
        Categoria categoria = Torneo.SelectCategoria(index, IsFormas);
        categoriaslist.LoadCompetidores(categoria.Participantes);
    }

    // Shows the data of one participant of the selected category.
    public void SelectCompetidor(int index, bool IsFormas, int indexcomp)
    {
        Competidor comp = Torneo.SelectCompetidor(index, indexcomp, IsFormas);
        categoriaslist.LoadData(comp);
    }

    /// <summary>Moves a competitor between categories and refreshes the view.</summary>
    public void MoverA(int categoriaactual, int categorianueva, int indexcomp, bool IsFormas)
    {
        Torneo.MoverCompetidor(categoriaactual, categorianueva, indexcomp, IsFormas);
        string view = IsFormas ? CATFORMAS : CATSANDA;
        ShowView(view);
    }

    public List<String> ToStringCategorias(bool isformas)
    {
        return Torneo.ToStringCategorias(isformas);
    }

    // Populates the dashboard counters and the school/form lists.
    public void InitializeMainView()
    {
        int catsanda = Torneo.CategoriasSanda.Count;
        int catformas = Torneo.CategoriasFormas.Count;
        int categorias = catsanda + catformas;
        int ncompetidores = Torneo.Competidores.Count;
        List<Escuela> escuelas = Torneo.Escuelas;
        int nescuelas = escuelas.Count;
        List<Forma> formas = Torneo.Formas;
        int nformas = formas.Count;
        mainView.SetNumberLabs(categorias, catsanda, catformas, ncompetidores, nescuelas, nformas);
        mainView.InitializeListEscuelas(escuelas);
        mainView.InitializeListFormas(formas);
    }

    public void InitializeCategorias()
    {
        InitializeLabsCategoriasView();
        InitializeaListAllCategories();
    }

    // Counters for open/finished categories, first sanda then formas.
    public void InitializeLabsCategoriasView()
    {
        //Initialize Sanda
        int numcat = Torneo.CategoriasSanda.Count;
        int numopened = Torneo.GetNumOpenedCategoria(false);
        int numfinished = numcat - numopened;
        categoriasview.InitializeLabs(numcat, numopened, numfinished, false);
        numcat = Torneo.CategoriasFormas.Count;
        numopened = Torneo.GetNumOpenedCategoria(true);
        numfinished = numcat - numopened;
        categoriasview.InitializeLabs(numcat, numopened, numfinished, true);
    }

    public void InitializeaListAllCategories()
    {
        List<String> AllCategories = Torneo.GetAllCategoriesNames();
        categoriasview.InitializeListCategorias(AllCategories);
    }

    public void InitializeEscuelasView()
    {
        List<String> Escuelas = Torneo.ToStringEscuelas();
        this.escuelasview.InitializeEscuelas(Escuelas);
    }

    // Loads one school's competitors and point totals into the school view.
    public void GetCompetidoresEscuela(int index)
    {
        List<Competidor> competidores = Torneo.GetCompetidoresEscuela(index);
        string nombreescuela = Torneo.ToStringEscuelas()[index];
        int puntossanda = Torneo.Escuelas.ToArray()[index].PSanda;
        int puntosformas = Torneo.Escuelas.ToArray()[index].PFormas;
        escuelasview.InitializeCompetidores(competidores, nombreescuela, puntossanda, puntosformas);
    }

    public void InitializeCompetidoresList()
    {
        var Competidores = Torneo.ToStringCompetidor();
        competidoresview.InitializeListCompetidores(Competidores);
        GetInfoCompetidor(0);
    }

    public void InitializeVerificarView()
    {
        var Competidores = Torneo.CompetidoresAusentes();
        importarview.InitializeCompetidores(Competidores);
    }

    // Pushes one competitor's fields to the detail view.
    public void GetInfoCompetidor(int index)
    {
        Competidor c = Torneo.Competidores.ToArray()[index];
        string nombre = c.Name;
        string genero = c.GetStringGenere();
        string edad = c.Edad + "";
        string peso = c.Peso + "";
        string nivel = c.getNivel();
        string sanda = c.Sanda ? "Sí" : "No";
        string formas = c.Formas ? "Sí" : "No";
        string cinta = c.IsBlackBelt ? "Sí" : "No";
        string telefono = c.TelefonoPersonal;
        string eps = c.Eps;
        string escuela = c.Escuela.Name;
        string acudiente = c.ContactName;
        string telacudiente = c.TelefonoAcudiente;
        List<Forma> listformas = c.ListaFormas;
        competidoresview.InitializeInfoCompetidor(nombre, genero, edad, peso, nivel, sanda, formas,
            cinta, telefono, escuela, eps, acudiente, telacudiente, listformas);
    }

    // Marks attendance and refreshes the remaining-absentees list.
    public void ConfirmarCompetidores(List<int> indices)
    {
        Torneo.ConfirmarCompetidores(indices);
        InitializeVerificarView();
    }

    //COMPETENCIA FORMAS
    public void InitializeCompetenciaFormas()
    {
        var Categorias = Torneo.CategoriasFormas;
        List<String> catstring = new List<String>();
        foreach (var cat in Categorias)
            catstring.Add(cat.Nombre);
        compformas.InitCategories(catstring);
    }

    public void InitializeCompetenciaSanda()
    {
        var Categorias = Torneo.CategoriasSanda;
        List<String> catString = new List<string>();
        // NOTE(review): InitCategories is invoked INSIDE the loop (once per
        // category, with the partially filled list) — confirm it should not
        // be called once after the loop like InitializeCompetenciaFormas does.
        foreach(var category in Categorias)
        {
            catString.Add(category.Nombre);
            compsanda.InitCategories(catString);
        }
        InitCombates(Categorias);
    }

    // Builds a random round of fights in every sanda category.
    public void InitCombates(List<CatSanda> categoriasSanda)
    {
        foreach(var category in categoriasSanda)
        {
            category.RondaDeCombates();
        }
    }

    // Shows the still-undecided fights of the selected sanda category.
    public void MostrarRondas(int index)
    {
        CatSanda categoria = Torneo.CategoriasSanda.ToArray()[index];
        List<Combate> tempCombates = categoria.CombatesActivos;
        List<String> combates = new List<string>();
        foreach(Combate c in tempCombates)
        {
            if(c.Ganador == null)
            {
                combates.Add(c.ToString());
            }
        }
        compsanda.MostrarCombates(combates);
    }

    // Splits a forms category's presentations into scored / pending lists.
    public void InitializePresentaciones(int index)
    {
        CatFormas categoria = Torneo.CategoriasFormas.ToArray()[index];
        List<Presentacion> temppresentacion = categoria.PresentacionesCalificadas();
        List<String> done = new List<String>();
        foreach (Presentacion p in temppresentacion)
            done.Add(p.Competidor.Name);
        compformas.InitializeCalificadas(done);
        temppresentacion = categoria.PresentacionesRestantes();
        done.Clear();
        foreach (Presentacion p in temppresentacion)
            done.Add(p.Competidor.Name);
        compformas.InitializeRestantes(done);
        // NOTE(review): disabled leftover, originally
        // "if (done.Count == 0) categoria.UpdatePuntos(true);" — confirm
        // whether points should update here once all presentations are scored.
        Torneo.SetPuntosEscuelas();
    }

    // Loads one competitor's presentation (scores so far) into the view.
    public void ShowPresentacion(String nombreCompetidor)
    {
        Competidor competidor = Torneo.BuscarCompetidor(nombreCompetidor);
        CatFormas categoria = Torneo.CategoriasFormas.ToArray()[compformas.IndexCategoria];
        Presentacion presentacion = categoria.BuscarPresentacion(competidor);
        compformas.InitializePresentacion(presentacion.Jueces, presentacion.Calificacion);
        if (presentacion.IsDone())
            compformas.SetTextResult(presentacion.Calificacion);
    }

    /// <summary>
    /// Stores the judges' scores for the current competitor, refreshes the
    /// podium display and returns the averaged score.
    /// </summary>
    public double SendCalificacionFormas(double[] jueces)
    {
        double Calificacion = Torneo.GetPromedio(jueces);
        Competidor competidor = Torneo.BuscarCompetidor(compformas.NameCompetidor);
        CatFormas categoria = Torneo.CategoriasFormas.ToArray()[compformas.IndexCategoria];
        string [] podium = Torneo.GuardarPresentacion(categoria, competidor, jueces);
        compformas.InitializePodium(podium);
        return Calificacion;
    }

    /// <summary>Serializes the tournament to disk and notifies the user.</summary>
    public void SaveTorneo()
    {
        Torneo.Serializar(Torneo);
        MessageBox.Show("El torneo ha sido guardado exitosamente");
    }

    /// <summary>
    /// Restores the last saved tournament; on failure the user is informed
    /// and, either way, the state and the initial view are refreshed.
    /// </summary>
    public void RecuperarTorneo()
    {
        try
        {
            this.Torneo = Torneo.LeerTorneo();
            MessageBox.Show("Torneo recuperado exitosamente");
        }catch(Exception e)
        {
            MessageBox.Show("No se pudo recuperar el torneo anterior");
        }
        finally
        {
            InitializeTorneoState();
            ShowView(INIT);
        }
    }

    /// <summary>Creates a fresh tournament and shows the initial view.</summary>
    public void NuevoTorneo()
    {
        Torneo = new Torneo();
        Torneo.InicializarTorneo();
        InitializeTorneoState();
        ShowView(INIT);
    }

    // Syncs the side menu's enabled state with whether the tournament started.
    public void InitializeTorneoState()
    {
        this.menuLateral.ReviewState(Torneo.Iniciado);
    }

    public void IniciarTorneo()
    {
        Torneo.Iniciado = true;
        InitializeTorneoState();
    }

    // Computes a school's medal points (count * medal value) for one discipline.
    public void InicializarEscuela(String nombreescuela, bool IsFormas)
    {
        Escuela escuela = Torneo.BuscarEscuela(nombreescuela);
        int oro=0, plata=0, bronce=0;
        if (IsFormas)
        {
            oro = escuela.DictionaryFormas[Torneo.ORO].Count*Torneo.ORO;
            plata = escuela.DictionaryFormas[Torneo.PLATA].Count * Torneo.PLATA;
            bronce = escuela.DictionaryFormas[Torneo.BRONCE].Count * Torneo.BRONCE;
        }
        else
        {
            oro = escuela.DictionarySanda[Torneo.ORO].Count * Torneo.ORO;
            plata = escuela.DictionarySanda[Torneo.PLATA].Count * Torneo.PLATA;
            bronce = escuela.DictionarySanda[Torneo.BRONCE].Count * Torneo.BRONCE;
        }
        int puntostotales = IsFormas ? escuela.PFormas : escuela.PSanda;
        rankingview.SetMedallas(oro, plata, bronce, puntostotales);
    }

    // Lists a school's medalists for one discipline and medal tier.
    public void GetCompetidoresRanking(string nombreesucela, bool IsFormas, int medalla)
    {
        Escuela escuela = Torneo.BuscarEscuela(nombreesucela);
        List<Competidor> competidores = IsFormas ? escuela.DictionaryFormas[medalla] :
            escuela.DictionarySanda[medalla];
        List<String> infocompetidores = new List<String>();
        foreach (Competidor c in competidores)
            infocompetidores.Add(c.ToString());
        rankingview.InitializeCompetidores(infocompetidores);
    }

    public void InitializeRankingView()
    {
        List<Escuela> Formas = Torneo.RankingEscuelas(true);
        List<Escuela> Sanda = Torneo.RankingEscuelas(false);
        rankingview.InitializeRanking(Formas, true);
        rankingview.InitializeRanking(Sanda, false);
    }
}
}
<file_sep>/TorneoApp/TorneoApp/Model/MatchFormas.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
public class MatchFormas
{
public Dictionary<Forma, List<CatFormas>> CategoriasFormas;
public List<Competidor> Competidores;
/// <summary>
/// Prepares the matcher: one empty category bucket per open form, plus the
/// full competitor pool to draw from.
/// </summary>
public MatchFormas(List<Forma> FormasAbiertas, List<Competidor> Competidores)
{
    CategoriasFormas = new Dictionary<Forma, List<CatFormas>>();
    foreach(Forma formaabierta in FormasAbiertas)
    {
        CategoriasFormas.Add(formaabierta, new List<CatFormas>());
    }
    this.Competidores = Competidores;
    /*
     * Matching strategy:
     * - Each dictionary KEY is one Forma; first collect under it every
     *   competitor registered for that form.
     * - Under each KEY split the competitors into adults and minors.
     * - Each of those groups is then segmented further (by level for
     *   adults, by age for minors) and appended to the KEY's list.
     * - Finally the resulting categories are size-checked to decide
     *   whether they stand as-is.
     * - The lists are kept ordered by increasing level.
     * - A final method returns the flat list of categories produced by
     *   the whole match.
     */
}
/// <summary>
/// Runs the full matching pipeline and returns the resulting categories.
/// Order matters: SepararFormas() must run first so each form's bucket
/// holds exactly one seed category that the loop below consumes.
/// </summary>
public List<CatFormas> DoMatch()
{
    SepararFormas();
    var Keys = CategoriasFormas.Keys.ToArray();
    for (int i = 0; i<Keys.Length; i++)
    {
        // Split the seed category into [minors, adults] ...
        var EdadesSeparadas = SepararEdades(CategoriasFormas[Keys[i]].First()).ToArray();
        // ... then rebuild the bucket from the finer segmentations.
        CategoriasFormas[Keys[i]].Clear();
        for (int c=0; c<EdadesSeparadas.Length; c++)
        {
            List<CatFormas> Segmentacion;
            if (EdadesSeparadas[c].IsMayorEdadCategory())
            {
                // Adults: segment by competition level.
                Segmentacion = SegmentarNivel(EdadesSeparadas[c]);
            }
            else
            {
                // Minors: segment by age bands.
                Segmentacion = SegmentarEdades(EdadesSeparadas[c]);
            }
            CategoriasFormas[Keys[i]].AddRange(Segmentacion);
        }
        // Merge/accept categories depending on their sizes.
        var CategoriasDefinitivas = VerificarSizeCategorias(CategoriasFormas[Keys[i]]);
        CategoriasFormas[Keys[i]] = CategoriasDefinitivas;
    }
    return RetornarCategorias();
}
/**
*Separa en el diccionario los competidores en cada una de las formas existentes para
* proceder a los demas filtros
*/
public void SepararFormas()
{
var Formas = CategoriasFormas.Keys;
foreach(Forma forma in Formas)
{
CatFormas NuevaCategoria = new CatFormas();
NuevaCategoria.Participantes = SepararForma(forma);
CategoriasFormas[forma].Add(NuevaCategoria);
}
}
/**
* Realiza una consulta para saber cuales son los competidores en una lista de competidores que
* en su lista de formas inscritas tienen la forma pasada como parámetro
*/
public List<Competidor> SepararForma(Forma Forma)
{
List<Competidor> CompetidoresForma = Competidores.FindAll(
competidores => competidores.ListaFormas.Exists(forma => forma.Equals(Forma)));
return CompetidoresForma;
}
public List<CatFormas> SepararEdades(CatFormas Categoria)
{
List<Competidor> Participantes = Categoria.Participantes;
var MenoresEdad = Participantes.FindAll( comp => comp.Edad < 18);
CatFormas CatMenores = new CatFormas();
CatMenores.Participantes = MenoresEdad;
var MayoresEdad = Participantes.FindAll(comp => comp.Edad >= 18);
CatFormas CatMayores = new CatFormas();
CatMayores.Participantes = MayoresEdad;
List<CatFormas> CategoriasEdades = new List<CatFormas>();
CategoriasEdades.Add(CatMenores);
CategoriasEdades.Add(CatMayores);
return CategoriasEdades;
}
public List<CatFormas> SegmentarEdades(CatFormas Categoria)
{
List<CatFormas> CategoriasSegmentadas = new List<CatFormas>();
var Participantes = Categoria.Participantes.ToArray();
List<Competidor>[] Grupos = new List<Competidor>[4];
for(int i =0; i<Grupos.Length; i++)
Grupos[i] = new List<Competidor>();
foreach(Competidor comp in Participantes)
{
//Verificar a qué grupo pertenece
int Edad = comp.Edad;
if (Edad>=6 && Edad <= 8)
{
Grupos[0].Add(comp);
}else if(Edad>8 && Edad<=11)
{
Grupos[1].Add(comp);
}
else if (Edad>11 && Edad <= 14)
{
Grupos[2].Add(comp);
}
else
{
Grupos[3].Add(comp);
}
}
for (int i = 0; i < Grupos.Length; i++)
{
if (Grupos[i].Count !=0)
{
CatFormas NuevaCategoria = new CatFormas();
NuevaCategoria.Participantes = Grupos[i];
CategoriasSegmentadas.Add(NuevaCategoria);
}
}
return CategoriasSegmentadas;
}
public List<CatFormas> SegmentarNivel(CatFormas Categoria)
{
List<CatFormas> CategoriasSegmentadas = new List<CatFormas>();
List<Competidor> Participantes = Categoria.Participantes;
for (int i=0; i<Competidor.CINTANEGRA; i++)
{
if (Participantes.Exists(comp => comp.TiempoEntrenando == i+1))
{
var Temp = Participantes.FindAll(participante => participante.TiempoEntrenando == i+1);
CatFormas TempCat = new CatFormas();
TempCat.Participantes = Temp;
CategoriasSegmentadas.Add(TempCat);
}
}
return CategoriasSegmentadas;
}
/**
*Toma como parametro las categorias que se tengan de una forma en específico
* Ejemplo, toma todas las categorias de Forma sin arma para verificar si el tamaño es correcto y
* hacer los debidos arreglos
*
* Para reubicar los participantes que quedaron en categorias incompletas, hace una lista con ellos
* y luego verifica en cual categoria puede caber
*/
public List<CatFormas> VerificarSizeCategorias(List<CatFormas> Categorias)
{
var CategoriasExistentes = Categorias.ToArray();
//Toma todas las categorias que estan incompletos
var PorArreglar = Categorias.FindAll(categorias => categorias.Participantes.Count < 3).ToArray();
for (int i = 0; i < CategoriasExistentes.Length; i++)
CategoriasExistentes[i].CalcularMean();
List<Competidor> Restantes = new List<Competidor>();
for (int i =0; i<PorArreglar.Length; i++)
Restantes.AddRange(PorArreglar[i].Participantes);
//Toma las categorias que quedaron habilitadas para meter participantes
List<CatFormas> CategoriasHabilitadas = Categorias.FindAll(categorias => categorias.Participantes.Count >= 3);
List<CatFormas> CategoriasDefinitivas = AnadirParticipantes(Restantes, CategoriasHabilitadas);
return CategoriasDefinitivas;
}
/*
public List<CatFormas> GetCategoriasHabilitadas(CatFormas[] CategoriasPorVer)
{
List<CatFormas> CategoriasHabilitadas = new List<CatFormas>();
for (int i = 0; i<CategoriasPorVer.Length; i++)
{
if (CategoriasPorVer[i].Participantes.Count >= 3)
CategoriasHabilitadas.Add(CategoriasPorVer[i]);
}
return CategoriasHabilitadas;
}
*/
public List<CatFormas> AnadirParticipantes (List<Competidor> Participantes, List<CatFormas> Categorias)
{
var CategoriasArray = Categorias.ToArray();
foreach(Competidor p in Participantes)
{
int index=0;
double MinDesv=1000000;
for (int i=0; i<CategoriasArray.Length; i++)
{
double TempDesv=CategoriasArray[i].CalcularDesviacion(p);
bool MismoRangoEdad = CategoriasArray[i].IsMayorEdadCategory() == (p.Edad >= 18);
if (TempDesv <= MinDesv && MismoRangoEdad)
{
MinDesv = TempDesv;
index = i;
}
}
CatFormas InsertHere = CategoriasArray[index];
InsertHere.AddCompetidor(p);
//Categorias.Insert(index, InsertHere);
}
return Categorias;
}
public List<CatFormas> RetornarCategorias()
{
List<CatFormas> CategoriasTorneo = new List<CatFormas>();
//Este método debe preparar cada categoria abierta por su nombre forma y caracteristica
//añadirlas en una lista y retornarlas
var Keys = CategoriasFormas.Keys.ToArray();
for (int i = 0; i < Keys.Length; i++)
{
var Categorias = CategoriasFormas[Keys[i]].ToArray();
for (int w = 0; w < Categorias.Length; w++)
{
Categorias[w].GenerarNombre(Keys[i].Nombre);
Categorias[w].InitializePresentaciones();
Categorias[w].Forma = Keys[i];
CategoriasTorneo.Add(Categorias[w]);
}
}
return CategoriasTorneo ;
}
}
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/Banner.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace TorneoApp.ControlUsers
{
public partial class Banner : UserControl
{
public Banner()
{
InitializeComponent();
}
public void UpdateLocation(string categoria, string subcategoria)
{
if (!categoria.Equals(""))
{
labArrow1.Visible = true;
labCategoria.Text = categoria;
labCategoria.Visible = true;
if (!subcategoria.Equals(""))
{
labArrow2.Visible = true;
labSubCat.Text = subcategoria;
labSubCat.Visible = true;
}
else
{
labArrow2.Visible = false;
labSubCat.Visible = false;
}
}
else
{
labArrow1.Visible = false;
labCategoria.Visible = false;
labArrow2.Visible = false;
labSubCat.Visible = false;
}
}
public void ModifyLocation(string[] location)
{
labInicio.Text = location[0];
if (location[1] != null)
{
labArrow1.Visible = true;
labCategoria.Visible = true;
labCategoria.Text = location[1];
}
else
{
labArrow1.Visible = false;
labCategoria.Visible = false;
}
if (location[2] != null)
{
labArrow2.Visible = true;
labSubCat.Visible = true;
labSubCat.Text = location[1];
}
else
{
labArrow2.Visible = false;
labSubCat.Visible = false;
}
}
private void Label1_Click(object sender, EventArgs e)
{
}
private void Label2_Click(object sender, EventArgs e)
{
}
private void Label3_Click(object sender, EventArgs e)
{
}
private void Label5_Click(object sender, EventArgs e)
{
}
private void Banner_Load(object sender, EventArgs e)
{
}
private void ButLogOut_Click(object sender, EventArgs e)
{
if (MessageBox.Show("¿Desea Salir?", "Salir", MessageBoxButtons.YesNo, MessageBoxIcon.Question, MessageBoxDefaultButton.Button1) == System.Windows.Forms.DialogResult.Yes)
{
Application.Exit();
}
}
}
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/ConfirmarView.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
public partial class ConfirmarView : UserControl
{
public MainWindow Main { get; set; }
public ConfirmarView()
{
InitializeComponent();
labNombreTorneo.Text = Torneo.NOMBRE_TORNEO;
}
public void InitializeCompetidores(List<Competidor> competidores)
{
listCompetidores.Clear();
foreach (Competidor c in competidores)
listCompetidores.Items.Add(c.Name);
}
private void Label1_Click(object sender, EventArgs e)
{
}
private void ImportarView_Load(object sender, EventArgs e)
{
}
private void ListCompetidores_SelectedIndexChanged(object sender, EventArgs e)
{
}
private void ButConfirmar_Click(object sender, EventArgs e)
{
var indices = listCompetidores.SelectedIndices;
List<int> indexpresentes = new List<int>();
foreach (var i in indices)
{
indexpresentes.Add(int.Parse(i.ToString()));
}
Main.ConfirmarCompetidores(indexpresentes);
}
}
}
<file_sep>/TorneoApp/TorneoApp/Model/Presentacion.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
[Serializable]
public class Presentacion : IComparable<double>
{
public Competidor Competidor{get; set;}
public double Calificacion { get; set; }
public double[] Jueces { get; set; }
public Presentacion(Competidor Competidor)
{
this.Competidor = Competidor;
this.Calificacion= 0;
Jueces = new double[Torneo.NUM_JUECES];
}
public int CompareTo(double other)
{
if (Calificacion > other) return 1;
if (Calificacion < other) return -1;
return 0;
}
public bool IsDone()
{
return Calificacion == 0 ? false : true;
}
}
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/CategoriasView.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
public partial class CategoriasView : UserControl
{
public MainWindow Main { get; set; }
public CategoriasView()
{
InitializeComponent();
labNombreTorneo.Text = Torneo.NOMBRE_TORNEO;
}
public void InitializeLabs (int numcat, int numopened, int numfinished, bool IsFormas)
{
if (IsFormas)
{
labNumExistsFormas.Text = ""+numcat;
labNumOpenedFormas.Text = ""+numopened;
labNumFinishedFormas.Text = ""+numfinished;
}
else
{
labNumExistsSanda.Text = "" + numcat;
labNumOpenedSanda.Text = "" + numopened;
labNumFinishedSanda.Text = "" + numfinished;
}
}
public void InitializeListCategorias (List<String> categorias)
{
foreach (String cat in categorias)
listCategorias.Items.Add(cat);
}
private void Label5_Click(object sender, EventArgs e)
{
}
private void Label6_Click(object sender, EventArgs e)
{
}
private void ControlButtons1_Load(object sender, EventArgs e)
{
}
private void ButVerSanda_Click(object sender, EventArgs e)
{
Main.ShowView("CatSanda");
}
private void Button1_Click(object sender, EventArgs e)
{
Main.ShowView("CatFormas");
}
private void ListCategorias_SelectedIndexChanged(object sender, EventArgs e)
{
}
}
}
<file_sep>/TorneoApp/TestTorneoApp/TestMatchFormas.cs
using Microsoft.VisualStudio.TestTools.UnitTesting;
using TorneoApp.Model;
using System;
using System.Collections.Generic;
namespace TestTorneoApp
{
    /// <summary>
    /// Integration tests for MatchFormas.DoMatch, driven by CSV fixtures
    /// (RegistroTest3/4.csv) loaded through Torneo.
    /// </summary>
    [TestClass]
    public class TestMatchFormas
    {
        private MatchFormas Matchmaking;
        private Torneo Torneo;
        public TestMatchFormas()
        {
            Torneo = new Torneo();
        }

        // Loads fixture 3: every competitor registered in the same single form.
        public void SetUpStage1()
        {
            Torneo = new Torneo();
            string URL = "..\\..\\..\\Data\\RegistroTest3.csv";
            List<string[]> lectura = Torneo.leerCSV(URL);
            Torneo.LeerCompetidores(lectura);
        }

        // Loads fixture 4: competitors spread across two forms with undersized groups.
        public void SetUpStage2()
        {
            Torneo = new Torneo();
            string URL = "..\\..\\..\\Data\\RegistroTest4.csv";
            List<string[]> lectura = Torneo.leerCSV(URL);
            Torneo.LeerCompetidores(lectura);
        }

        [TestMethod]
        public void TestMatchCategorias()
        {
            SetUpStage1();
            List<Forma> FormasAbiertas = Torneo.Formas;
            List<Competidor> Competidores = Torneo.Competidores;
            Matchmaking = new MatchFormas(FormasAbiertas, Competidores);
            List<CatFormas> Categorias = Matchmaking.DoMatch();
            List<CatFormas> MenoresEdad = Categorias.FindAll(cat => cat.IsMayorEdadCategory() == false);
            List<CatFormas> MayoresEdad = Categorias.FindAll(cat => cat.IsMayorEdadCategory());
            // Checks for the first case:
            // every competitor is registered in the same form;
            // there are 4 competitors in each minors category and 3 in the adults one;
            // there are 4 minors categories and one adults category.
            // Only the minors side is verified in detail.
            Assert.IsTrue(Categorias.Count == 8);
            Assert.IsTrue(MayoresEdad.Count == 4);
            Assert.IsTrue(MenoresEdad.Count == 4);
            // Verify the minors age brackets.
            CatFormas tempcat = MenoresEdad[0];
            Assert.IsTrue(tempcat.GetMinEdad() == 6);
            Assert.IsTrue(tempcat.GetMaxEdad() == 8);
            tempcat = MenoresEdad[1];
            Assert.IsTrue(tempcat.GetMinEdad() == 9);
            Assert.IsTrue(tempcat.GetMaxEdad() == 11);
            tempcat = MenoresEdad[2];
            Assert.IsTrue(tempcat.GetMinEdad() == 12);
            Assert.IsTrue(tempcat.GetMaxEdad() == 14);
            tempcat = MenoresEdad[3];
            Assert.IsTrue(tempcat.GetMinEdad() == 15);
            Assert.IsTrue(tempcat.GetMaxEdad() == 17);
            // Verify the adults levels (one category per training level, ascending).
            tempcat = MayoresEdad[0];
            Assert.IsTrue(tempcat.Participantes.ToArray()[0].TiempoEntrenando == Competidor.PRINCIPIANTE);
            tempcat = MayoresEdad[1];
            Assert.IsTrue(tempcat.Participantes.ToArray()[0].TiempoEntrenando == Competidor.INTERMEDIO);
            tempcat = MayoresEdad[2];
            Assert.IsTrue(tempcat.Participantes.ToArray()[0].TiempoEntrenando == Competidor.AVANZADO);
            tempcat = MayoresEdad[3];
            Assert.IsTrue(tempcat.Participantes.ToArray()[0].TiempoEntrenando == Competidor.CINTANEGRA);
        }

        [TestMethod]
        public void TestMatchVerificar()
        {
            SetUpStage2();
            List<Forma> FormasAbiertas = Torneo.Formas;
            List<Competidor> Competidores = Torneo.Competidores;
            Matchmaking = new MatchFormas(FormasAbiertas, Competidores);
            List<CatFormas> Categorias = Matchmaking.DoMatch();
            List<CatFormas> MenoresEdad = Categorias.FindAll(cat => cat.IsMayorEdadCategory() == false);
            List<CatFormas> MayoresEdad = Categorias.FindAll(cat => cat.IsMayorEdadCategory());
            /*
             * The following categories should be formed:
             * "Forma sin arma" (unarmed form):
             *   4 categories —
             *   two minors categories:
             *     one of 6-9 year olds,
             *     another of 10-12 year olds;
             *   two adult categories:
             *     one with 4 PRINCIPIANTES and one INTERMEDIO,
             *     one with 4 AVANZADOS and one CINTANEGRA.
             * "Forma con arma" (armed form):
             *   3 categories —
             *   one minors category of 7-12 year olds;
             *   two adult categories:
             *     one with 3 INTERMEDIOS and one PRINCIPIANTE,
             *     one with 4 CINTANEGRA and one AVANZADO.
             */
            Assert.IsTrue(Categorias.Count == 7);
            Assert.IsTrue(MenoresEdad.Count == 3);
            Assert.IsTrue(MayoresEdad.Count == 4);
            List<CatFormas> TempCat = Categorias.FindAll(cat => cat.Forma.Nombre.Equals("Forma sin arma"));
            Assert.IsTrue(TempCat.Count == 4);
            var TempSubCat = TempCat.FindAll(cat => cat.IsMayorEdadCategory() == false).ToArray();
            Assert.IsTrue(TempSubCat.Length == 2);
            Assert.IsTrue(TempSubCat[0].Participantes.Count == 5);
            Assert.IsTrue(TempSubCat[0].GetMinEdad() == 6);
            Assert.IsTrue(TempSubCat[0].GetMaxEdad() == 10);
            Assert.IsTrue(TempSubCat[1].Participantes.Count == 3);
            Assert.IsTrue(TempSubCat[1].GetMinEdad() == 12);
            Assert.IsTrue(TempSubCat[1].GetMaxEdad() == 14);
            TempSubCat = TempCat.FindAll(cat => cat.IsMayorEdadCategory()).ToArray();
            Assert.IsTrue(TempSubCat.Length == 2);
            Assert.IsTrue(TempSubCat[0].Participantes.Count == 5);
            Assert.IsTrue(TempSubCat[0].GetCategoriaMayoria() == 1);
            Assert.IsTrue(TempSubCat[1].Participantes.Count == 5);
            Assert.IsTrue(TempSubCat[1].GetCategoriaMayoria() == 3);
            TempCat = Categorias.FindAll(cat => cat.Forma.Nombre.Equals("Forma con arma"));
            Assert.IsTrue(TempCat.Count == 3);
            TempSubCat = TempCat.FindAll(cat => cat.IsMayorEdadCategory() == false).ToArray();
            Assert.IsTrue(TempSubCat.Length == 1);
            Assert.IsTrue(TempSubCat[0].Participantes.Count == 5);
            Assert.IsTrue(TempSubCat[0].GetMinEdad() == 7);
            Assert.IsTrue(TempSubCat[0].GetMaxEdad() == 12);
            TempSubCat = TempCat.FindAll(cat => cat.IsMayorEdadCategory()).ToArray();
            Assert.IsTrue(TempSubCat.Length == 2);
            Assert.IsTrue(TempSubCat[0].Participantes.Count == 4);
            Assert.IsTrue(TempSubCat[0].GetCategoriaMayoria() == 2);
            Assert.IsTrue(TempSubCat[1].Participantes.Count == 5);
            Assert.IsTrue(TempSubCat[1].GetCategoriaMayoria() == 4);
        }
    }
}
<file_sep>/TorneoApp/TestTorneoApp/TestTorneo.cs
using Microsoft.VisualStudio.TestTools.UnitTesting;
using TorneoApp.Model;
using System;
using System.Collections.Generic;
namespace TestTorneoApp
{
    /// <summary>
    /// Tests for Torneo's CSV reading and competitor registration, driven by the
    /// RegistroTest1/2/3.csv fixtures.
    /// </summary>
    [TestClass]
    public class TestTorneo
    {
        private Torneo Torneo;
        public TestTorneo()
        {
            Torneo = new Torneo();
        }

        // Loads fixture 1 into a fresh tournament.
        public void SetUpStage1()
        {
            Torneo = new Torneo();
            string URL = "..\\..\\..\\Data\\RegistroTest1.csv";
            List<string[]> lectura = Torneo.leerCSV(URL);
            Torneo.LeerCompetidores(lectura);
        }

        // Loads fixture 2 into a fresh tournament.
        public void SetUpStage2()
        {
            Torneo = new Torneo();
            string URL = "..\\..\\..\\Data\\RegistroTest2.csv";
            List<string[]> lectura = Torneo.leerCSV(URL);
            Torneo.LeerCompetidores(lectura);
        }

        // Loads fixture 3 into a fresh tournament.
        public void SetUpStage3()
        {
            Torneo = new Torneo();
            string URL = "..\\..\\..\\Data\\RegistroTest3.csv";
            List<string[]> lectura = Torneo.leerCSV(URL);
            Torneo.LeerCompetidores(lectura);
        }

        // Verifies the raw CSV parse (header skipped; column 2 is the name field).
        [TestMethod]
        public void TestLeerCSV()
        {
            string URL = "..\\..\\..\\Data\\RegistroTest1.csv";
            List<string[]> lectura = Torneo.leerCSV(URL);
            string[][] datos = lectura.ToArray();
            Assert.IsTrue(datos[0][2].Equals("<NAME>"));
        }

        // Verifies that every competitor field and registration list is populated
        // correctly from the CSV rows.
        [TestMethod]
        public void TestLeerCompetidores()
        {
            Competidor[] competidores;
            //Test 1
            SetUpStage1();
            competidores = Torneo.Competidores.ToArray();
            Assert.IsTrue(competidores[0].Name.Equals("<NAME>"));
            Assert.IsFalse(competidores[0].IsMan);
            Assert.IsTrue(competidores[0].Escuela.Name.Equals("<NAME>"));
            Assert.IsTrue(competidores[0].Edad == 18);
            Assert.IsTrue(competidores[0].Peso == 50);
            Assert.IsTrue(competidores[0].TiempoEntrenando == Competidor.INTERMEDIO);
            Assert.IsTrue(competidores[0].Eps.Equals("Comfenalco"));
            Assert.IsTrue(competidores[0].Sanda);
            Assert.IsTrue(competidores[0].Formas);
            Assert.IsFalse(competidores[0].IsBlackBelt);
            Assert.IsTrue(competidores[0].TelefonoPersonal.Equals("3167527488"));
            Assert.IsTrue(competidores[0].ContactName.Equals("<NAME>"));
            Assert.IsTrue(competidores[0].TelefonoAcudiente.Equals("3173694663"));
            //Test 2
            SetUpStage2();
            competidores = Torneo.Competidores.ToArray();
            Assert.AreEqual(competidores.Length, 4);
            Assert.AreEqual(Torneo.Escuelas.Count, 3);
            Assert.AreEqual(Torneo.Formas.Count, 4);
            Assert.IsTrue(competidores[0].Sanda);
            Assert.IsTrue(competidores[0].Formas);
            Assert.AreEqual(competidores[0].ListaFormas.Count, 2);
            Assert.IsTrue(competidores[1].Sanda);
            Assert.AreEqual(competidores[1].ListaFormas.Count, 0);
            Assert.IsTrue(competidores[2].Formas);
            Assert.AreEqual(competidores[2].ListaFormas.Count, 4);
            Assert.IsTrue(competidores[3].Formas);
            Assert.AreEqual(competidores[3].ListaFormas.Count, 1);
        }
    }
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/EscuelasController.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
public partial class EscuelasController : UserControl
{
public MainWindow Main { get; set; }
public EscuelasController()
{
InitializeComponent();
labNombreTorneo.Text = Torneo.NOMBRE_TORNEO;
}
private void ButListado_Click(object sender, EventArgs e)
{
Main.ShowView(MainWindow.LISTESCUELAS);
}
private void ButRanking_Click(object sender, EventArgs e)
{
Main.ShowView(MainWindow.RANKING);
}
}
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/CompetenciaController.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
public partial class CompetenciaController : UserControl
{
public MainWindow Main { get; set; }
public CompetenciaController()
{
InitializeComponent();
labNombreTorneo.Text = Torneo.NOMBRE_TORNEO;
}
private void ButSanda_Click(object sender, EventArgs e)
{
Main.ShowView(MainWindow.COMPSANDA);
}
private void ButFormas_Click(object sender, EventArgs e)
{
Main.ShowView(MainWindow.COMPFORMAS);
}
}
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/MainView.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
public partial class MainView : UserControl
{
public MainView()
{
InitializeComponent();
labBienvenida.Text = "Bienvenido al dashboard del \n" + Torneo.NOMBRE_TORNEO;
}
private void Label1_Click(object sender, EventArgs e)
{
}
private void MainView_Load(object sender, EventArgs e)
{
}
private void LabCategorias_Click(object sender, EventArgs e)
{
}
private void LabNumCategorias_Click(object sender, EventArgs e)
{
}
private void ControlButtons1_Load(object sender, EventArgs e)
{
}
public void SetNumberLabs(int categorias, int catsanda, int catformas, int competidores, int escuelas, int formas)
{
this.labNumCat.Text = ""+categorias;
this.labSanda.Text = ""+catsanda;
this.labFormas.Text = "" + catformas;
this.labNumComp.Text = "" + competidores;
this.labNumEscuelas.Text = "" + escuelas;
this.labNumFormas.Text = "" + formas;
}
public void InitializeListEscuelas(List<Escuela> escuelas)
{
foreach (Escuela e in escuelas)
{
listEscuelas.Items.Add(e.Name);
}
}
public void InitializeListFormas(List<Forma> formas)
{
foreach(Forma f in formas)
{
listFormas.Items.Add(f.Nombre);
}
}
}
}
<file_sep>/TorneoApp/TorneoApp/Model/Escuela.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
[Serializable]
public class Escuela
{
//El nombre de la escuela
public string Name { get; set; }
//Puntuación acumulada en formas
public int PFormas { get; set; }
//Puntuación acumulada en sanda
public int PSanda { get; set; }
public Dictionary<int, List<Competidor>> DictionarySanda { get; set; }
public Dictionary<int, List<Competidor>> DictionaryFormas { get; set; }
public Escuela (string Name)
{
this.Name = Name;
PFormas = 0;
PSanda = 0;
DictionarySanda = new Dictionary<int, List<Competidor>>();
DictionaryFormas = new Dictionary<int, List<Competidor>>();
DictionarySanda.Add(Torneo.ORO, new List<Competidor>());
DictionaryFormas.Add(Torneo.ORO, new List<Competidor>());
DictionarySanda.Add(Torneo.PLATA, new List<Competidor>());
DictionaryFormas.Add(Torneo.PLATA, new List<Competidor>());
DictionarySanda.Add(Torneo.BRONCE, new List<Competidor>());
DictionaryFormas.Add(Torneo.BRONCE, new List<Competidor>());
}
public void AddWinner (Competidor c, int medalla, bool IsFormas)
{
if (IsFormas)
{
DictionaryFormas[medalla].Add(c);
}
else
{
DictionarySanda[medalla].Add(c);
}
}
public void SetPuntos()
{
PFormas = 0;
PSanda = 0;
var Medallas = DictionaryFormas.Keys.ToArray();
for(int i =0; i<Medallas.Length; i++)
PFormas += Medallas[i] * DictionaryFormas[Medallas[i]].Count;
Medallas = DictionarySanda.Keys.ToArray();
for (int i = 0; i < Medallas.Length; i++)
PSanda += Medallas[i] * DictionarySanda[Medallas[i]].Count;
}
public void ClearDictionaries()
{
var Medallas = DictionaryFormas.Keys.ToArray();
for (int i = 0; i < Medallas.Length; i++)
DictionaryFormas[Medallas[i]].Clear();
Medallas = DictionarySanda.Keys.ToArray();
for (int i = 0; i < Medallas.Length; i++)
DictionarySanda[Medallas[i]].Clear();
}
}
}
<file_sep>/TorneoApp/TorneoApp/Model/Torneo.cs
using ExcelDataReader;
using System;
using System.Collections.Generic;
using System.Data;
using System.IO;
using System.Runtime.Serialization.Formatters.Binary;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Xml;
namespace TorneoApp.Model
{
[Serializable]
public class Torneo
{
//Ruta del archivo registro del torneo
public const string CSV_ROUTE = "..\\..\\Data\\Registro.csv";
public const string TORNEO_ROUTE = "..\\..\\Data\\Torneo.dat";
public const string NOMBRE_TORNEO = "V Torneo de Wushu y Sanda";
public const int NUM_JUECES = 3;
public const int ORO = 5;
public const int PLATA = 3;
public const int BRONCE = 1;
//Lista de las categorias abiertas para formas
public List<CatFormas> CategoriasFormas { get; set; }
//Lista de las categorias abiertas para sanda
public List<CatSanda> CategoriasSanda { get; set; }
//Lista de las escuelas
public List<Escuela> Escuelas { get; set; }
//Lista de competidores
public List<Competidor> Competidores { get; set; }
//Lista de formas permitidas en el torneo
public List<Forma> Formas { get; set; }
public bool Iniciado { get; set; }
public Torneo()
{
CategoriasFormas = new List<CatFormas>();
CategoriasSanda = new List<CatSanda>();
Escuelas = new List<Escuela>();
Competidores = new List<Competidor>();
Formas = new List<Forma>();
Iniciado = false;
}
public void InicializarTorneo()
{
var Lectura = leerCSV(CSV_ROUTE);
LeerCompetidores(Lectura);
DoMatches();
}
public void DoMatches()
{
var tempComp = Competidores.FindAll(comp => comp.Formas);
MatchFormas matchformas = new MatchFormas(Formas, tempComp);
tempComp = Competidores.FindAll(comp => comp.Sanda);
MatchSanda matchsanda = new MatchSanda(tempComp);
CategoriasFormas = matchformas.DoMatch();
CategoriasSanda = matchsanda.DoMatch();
}
/*
* Este método Lee el archivos CSV de los competidores y retorna una lista de el split hecho a la linea leida
*/
public List<string[]> leerCSV(string path)
{
List<string[]> parsedData = new List<string[]>();
using (StreamReader readFile = new StreamReader(path))
{
string line=readFile.ReadLine();
string[] row;
while ((line = readFile.ReadLine()) != null)
{
row = line.Split(';');
parsedData.Add(row);
}
}
return parsedData;
}
public void LeerCompetidores (List<string[]> Competidores)
{
var CompetidoresTorneo = Competidores.ToArray();
for (int i = 0; i<CompetidoresTorneo.Length; i++)
{
string Email = CompetidoresTorneo[i][1].Trim();
string Name = CompetidoresTorneo[i][2].Trim(); ;
bool IsMan = CompetidoresTorneo[i][3].Equals("Masculino");
string NombreEscuela = CompetidoresTorneo[i][4].Trim(); ;
int Edad = Int32.Parse(CompetidoresTorneo[i][5]);
int Peso = Int32.Parse(CompetidoresTorneo[i][6]);
string Tiempo = CompetidoresTorneo[i][7];
string Eps = CompetidoresTorneo[i][8].Trim();
string[] Inscripcion = CompetidoresTorneo[i][9].Split(',');
string[] Formas = CompetidoresTorneo[i][10].Split(',');
bool IsBlackBelt = CompetidoresTorneo[i][11].Equals("Sí");
string TelefonoPersonal = CompetidoresTorneo[i][12].Trim();
string NombreAcudiente = CompetidoresTorneo[i][13].Trim();
string TelefonoAcudiente = CompetidoresTorneo[i][14].Trim();
//Realiza las verificaciones para crear el Competidor
Escuela EscuelaCompetidor = AddEscuela(NombreEscuela);
int TiempoEntrenando = GetTiempoEntrenando(Tiempo, IsBlackBelt);
Competidor NuevoCompetidor = new Competidor(Name, IsMan, Edad, Peso, IsBlackBelt,
TiempoEntrenando, Eps, TelefonoPersonal, NombreAcudiente, TelefonoAcudiente,
Email, EscuelaCompetidor);
Inscripcion = QuitarEspacios(Inscripcion);
Formas = QuitarEspacios(Formas);
InscribirCompetidor(NuevoCompetidor, Inscripcion, Formas);
}
}
public Escuela AddEscuela(string NombreEscuela)
{
string ActualEscuela = NombreEscuela.Trim();
Escuela EscuelaCompetidor = BuscarEscuela(ActualEscuela);
if(EscuelaCompetidor == null)
{
EscuelaCompetidor = new Escuela(NombreEscuela);
Escuelas.Add(EscuelaCompetidor);
}
return EscuelaCompetidor;
}
public bool ExisteEscuela (string EscuelaAgregar)
{
bool Existe = Escuelas.Any(escuelaactual => escuelaactual.Name.Equals(EscuelaAgregar));
return Existe;
}
public Escuela BuscarEscuela (string NombreEscuela)
{
if (ExisteEscuela(NombreEscuela))
{
Escuela Encontrada = Escuelas.Find(escuela => escuela.Name.Equals(NombreEscuela));
return Encontrada;
}
return null;
}
public int GetTiempoEntrenando(string Descripcion, bool IsBlackBelt)
{
if (IsBlackBelt) return Competidor.CINTANEGRA;
if (Descripcion.Equals("0-1 año")) return Competidor.PRINCIPIANTE;
if (Descripcion.Equals("1-3 años")) return Competidor.INTERMEDIO;
return Competidor.AVANZADO;
}
public string[] QuitarEspacios(string [] Arreglo)
{
for (int i = 0; i < Arreglo.Length; i++)
Arreglo[i] = Arreglo[i].Trim();
return Arreglo;
}
public void InscribirCompetidor (Competidor CompetidorActual, string[] Inscripcion, string[] Formas)
{
Competidores.Add(CompetidorActual);
for (int i=0; i<Inscripcion.Length; i++)
{
if (Inscripcion[i].Equals("Sanda"))
{
CompetidorActual.Sanda = true;
}
else
{
CompetidorActual.Formas = true;
InscribirFormas(CompetidorActual, Formas);
}
}
}
public void InscribirFormas(Competidor Competidor, string [] Formas)
{
for(int i = 0; i<Formas.Length; i++)
{
Forma FormaAInscribir = AddForma(Formas[i]);
Competidor.AddForma(FormaAInscribir);
}
}
public Forma AddForma(string NombreForma)
{
string FormaActual = NombreForma.Trim();
Forma FormaCompetidor = BuscarForma(NombreForma);
if (FormaCompetidor == null)
{
FormaCompetidor = new Forma(FormaActual);
Formas.Add(FormaCompetidor);
}
return FormaCompetidor;
}
public bool ExisteForma(string FormaAgregar)
{
bool Existe = Formas.Any(formaactual => formaactual.Nombre.Equals(FormaAgregar));
return Existe;
}
public Forma BuscarForma(string NombreForma)
{
if (ExisteForma(NombreForma))
{
Forma Encontrada = Formas.Find(forma => forma.Nombre.Equals(NombreForma));
return Encontrada;
}
return null;
}
public Competidor BuscarCompetidor (string NombreCompetidor)
{
Competidor Competidor = Competidores.Find(c => c.Name.Equals(NombreCompetidor));
return Competidor;
}
public double GetPromedio (double [] juez)
{
double promedio = 0;
for (int i = 0; i < juez.Length; i++)
promedio += juez[i];
promedio /= juez.Length;
return promedio;
}
public Categoria SelectCategoria(int index, bool IsFormas)
{
if (IsFormas)
{
return CategoriasFormas.ToArray()[index];
}
return CategoriasSanda.ToArray()[index];
}
public Competidor SelectCompetidor(int indexcat, int indexcomp, bool IsFormas)
{
Categoria cat = SelectCategoria(indexcat, IsFormas);
return cat.Participantes.ToArray()[indexcomp];
}
public void MoverCompetidor(int categoriaactual, int categorianueva, int indexcomp, bool IsFormas)
{
Categoria catantigua = SelectCategoria(categoriaactual, IsFormas);
Competidor comp = SelectCompetidor(categoriaactual, indexcomp, IsFormas);
catantigua.EliminarCompetidor(comp);
Categoria catnuevo = SelectCategoria(categorianueva, IsFormas);
catnuevo.AddCompetidor(comp);
}
public List<String> ToStringCategorias(bool isformas)
{
List<String> descrip = new List<String>();
if (isformas)
{
foreach (CatFormas cat in CategoriasFormas)
descrip.Add(cat.Nombre);
}
else
{
foreach (CatSanda cat in CategoriasSanda)
descrip.Add(cat.Nombre);
}
return descrip;
}
public int GetNumOpenedCategoria(bool IsFormas)
{
if (IsFormas)
return CategoriasFormas.FindAll(c => c.Opened).Count;
return CategoriasSanda.FindAll(c => c.Opened).Count;
}
public List<String> GetAllCategoriesNames()
{
List<String> Categorias = new List<String>();
foreach (CatSanda cat in CategoriasSanda)
Categorias.Add(cat.Nombre);
foreach (CatFormas cat in CategoriasFormas)
Categorias.Add(cat.Nombre);
return Categorias;
}
public List<String> ToStringEscuelas()
{
List<String> escuelas = new List<String>();
foreach (Escuela e in Escuelas)
escuelas.Add(e.Name);
return escuelas;
}
public List<String> ToStringCompetidor()
{
List<String> competidores = new List<String>();
foreach (Competidor e in Competidores)
competidores.Add(e.Name);
return competidores;
}
public List<Competidor> GetCompetidoresEscuela(int index)
{
String nombre = Escuelas.ToArray()[index].Name;
return Competidores.FindAll(comp => comp.Escuela.Name.Equals(nombre));
}
public List<Competidor> CompetidoresAusentes()
{
return Competidores.FindAll(competidor => competidor.IsHere == false);
}
public void ConfirmarCompetidores(List<int> indexes)
{
var restantes = CompetidoresAusentes().ToArray();
foreach(int i in indexes)
{
Competidor comp = restantes[i];
ConfirmarCompetidor(comp.Name);
}
}
public void ConfirmarCompetidor(String nombre)
{
Competidor competidor = BuscarCompetidor(nombre);
competidor.IsHere = true;
}
public string[] GuardarPresentacion(CatFormas categoria, Competidor competidor, double [] jueces)
{
Presentacion presentacion = categoria.BuscarPresentacion(competidor);
presentacion.Jueces = jueces;
double puntaje = GetPromedio(jueces);
presentacion.Calificacion = puntaje;
return categoria.UpdatePodium();
}
public static void Serializar(Torneo torneo)
{
FileStream file = new FileStream(TORNEO_ROUTE, FileMode.Create);
BinaryFormatter formatter = new BinaryFormatter();
formatter.Serialize(file, torneo);
file.Close();
}
public static Torneo LeerTorneo()
{
FileStream file = new FileStream(TORNEO_ROUTE, FileMode.Open);
BinaryFormatter formatter = new BinaryFormatter();
Torneo torneo = formatter.Deserialize(file) as Torneo;
file.Close();
return torneo;
}
public List<Escuela> RankingEscuelas(bool isFormas)
{
if (isFormas)
return Escuelas.OrderByDescending(e => e.PFormas).ToList();
return Escuelas.OrderByDescending(e => e.PSanda).ToList();
}
public void SetPuntosEscuelas()
{
//Elimina los competidores guardados anteriormente en los dictionarios
foreach (Escuela e in Escuelas)
e.ClearDictionaries();
//Agrega los nuevo competidores a los diccionarios
foreach (CatSanda cat in CategoriasSanda)
cat.UpdatePuntos(false);
foreach (CatFormas cat in CategoriasFormas)
cat.UpdatePuntos(true);
//Dar los puntos a cada escuela dependiendo de los competidores de los diccionarios
foreach (Escuela e in Escuelas)
e.SetPuntos();
}
}
}
<file_sep>/TorneoApp/TestTorneoApp/TestMatchSanda.cs
using Microsoft.VisualStudio.TestTools.UnitTesting;
using TorneoApp.Model;
using System;
using System.Collections.Generic;
using System.Diagnostics;
namespace TestTorneoApp
{
    [TestClass]
    public class TestMatchSanda
    {
        // System under test: the Sanda matchmaking algorithm.
        private MatchSanda Matchmaking;
        // Tournament used to load competitors from the CSV fixtures.
        private Torneo Torneo;
        public TestMatchSanda()
        {
            Torneo = new Torneo();
        }
        // Loads the large fixture; TestMatchCategorias expects it to yield 20 categories.
        public void SetUpStage1()
        {
            Torneo = new Torneo();
            string URL = "..\\..\\..\\Data\\RegistroTest5.csv";
            List<string[]> lectura = Torneo.leerCSV(URL);
            Torneo.LeerCompetidores(lectura);
        }
        // Fixture expected to yield two categories of 3 members each.
        public void SetUpStage2()
        {
            Torneo = new Torneo();
            string URL = "..\\..\\..\\Data\\RegistroTest6.csv";
            List<string[]> lectura = Torneo.leerCSV(URL);
            Torneo.LeerCompetidores(lectura);
        }
        // Fixture expected to yield two categories of 2 members each.
        public void SetUpStage3()
        {
            Torneo = new Torneo();
            string URL = "..\\..\\..\\Data\\RegistroTest7.csv";
            List<string[]> lectura = Torneo.leerCSV(URL);
            Torneo.LeerCompetidores(lectura);
        }
        // Another fixture expected to yield two categories of 2 members each.
        public void SetUpStage4()
        {
            Torneo = new Torneo();
            string URL = "..\\..\\..\\Data\\RegistroTest8.csv";
            List<string[]> lectura = Torneo.leerCSV(URL);
            Torneo.LeerCompetidores(lectura);
        }
        // Checks the gender/age split: 20 categories, 10 per gender,
        // 5 adult (18+) categories within each gender.
        [TestMethod]
        public void TestMatchCategorias()
        {
            SetUpStage1();
            List<Competidor> Competidores = Torneo.Competidores;
            Matchmaking = new MatchSanda(Competidores);
            List<CatSanda> Categorias = Matchmaking.DoMatch();
            Assert.IsTrue( Categorias.Count==20);
            List<CatSanda> TempCat = Categorias.FindAll(categoria => categoria.IsMan == false);
            Assert.IsTrue(TempCat.Count == 10);
            Assert.IsTrue(TempCat.FindAll(categoria => categoria.IsMayorEdadCategory()).Count == 5);
            TempCat = Categorias.FindAll(categoria => categoria.IsMan);
            Assert.IsTrue(TempCat.Count == 10);
            Assert.IsTrue(TempCat.FindAll(categoria => categoria.IsMayorEdadCategory()).Count == 5);
        }
        // Checks category sizes after the re-balancing step, across three fixtures.
        [TestMethod]
        public void TestVerificarCategorias()
        {
            SetUpStage2();
            List<Competidor> Competidores = Torneo.Competidores;
            Matchmaking = new MatchSanda(Competidores);
            List<CatSanda> Categorias = Matchmaking.DoMatch();
            Assert.IsTrue(Categorias.Count == 2);
            Assert.IsTrue(Categorias.FindAll(cat => cat.Participantes.Count == 3).Count == 2);
            SetUpStage3();
            Competidores = Torneo.Competidores;
            Matchmaking = new MatchSanda(Competidores);
            Categorias = Matchmaking.DoMatch();
            Assert.IsTrue(Categorias.Count == 2);
            Assert.IsTrue(Categorias.FindAll(cat => cat.Participantes.Count == 2).Count == 2);
            SetUpStage4();
            Competidores = Torneo.Competidores;
            Matchmaking = new MatchSanda(Competidores);
            Categorias = Matchmaking.DoMatch();
            Assert.IsTrue(Categorias.Count == 2);
            Assert.IsTrue(Categorias.FindAll(cat => cat.Participantes.Count == 2).Count == 2);
        }
    }
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/CompFormas.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
public partial class CompFormas : UserControl
{
public MainWindow Main { get; set; }
public int IndexCategoria { get; set; }
public string NameCompetidor { get; set; }
public CompFormas()
{
InitializeComponent();
}
public void InitializePodium(string[] podium)
{
this.txtFirst.Text = podium[0];
this.txtSecond.Text = podium[1];
this.txtThird.Text = podium[2];
}
public void InitCategories(List<String> categorias)
{
foreach (String cat in categorias)
comboCategory.Items.Add(cat);
}
public void InitializeRestantes(List<String> restantes)
{
listRestantes.Clear();
foreach (String r in restantes)
listRestantes.Items.Add(r);
}
public void InitializeCalificadas(List<String> calificadas)
{
listHechas.Clear();
foreach (String c in calificadas)
listHechas.Items.Add(c);
}
public void InitializePresentacion (double[] jueces, double calificacion)
{
txtJuez1.Text = Math.Round(jueces[0], 1) +"";
txtJuez2.Text = Math.Round(jueces[1], 1) + "";
txtJuez3.Text = Math.Round(jueces[2], 1) + "";
txtResult.Text = Math.Round(calificacion, 1) + "";
}
private void ComboBox1_SelectedIndexChanged(object sender, EventArgs e)
{
IndexCategoria = comboCategory.SelectedIndex;
Main.InitializePresentaciones(IndexCategoria);
}
private void CompFormas_Load(object sender, EventArgs e)
{
}
private void Button2_Click(object sender, EventArgs e)
{
}
private void ListRestantes_SelectedIndexChanged(object sender, EventArgs e)
{
if (listRestantes.SelectedIndices.Count > 0)
{
NameCompetidor = listRestantes.SelectedItems[0].Text;
ShowInfo();
}
}
public void SetTextResult(double result)
{
txtResult.Text = Math.Round(result, 1) + "";
}
private void ListHechas_SelectedIndexChanged(object sender, EventArgs e)
{
if (listHechas.SelectedIndices.Count > 0)
{
NameCompetidor = listHechas.SelectedItems[0].Text;
ShowInfo();
}
}
public void ShowInfo()
{
labNomCompetidor.Text = NameCompetidor;
Main.ShowPresentacion(NameCompetidor);
}
private void ButSave_Click(object sender, EventArgs e)
{
try
{
double[] jueces = new double[Torneo.NUM_JUECES];
jueces[0] = Double.Parse(txtJuez1.Text);
jueces[1] = Double.Parse(txtJuez2.Text);
jueces[2] = Double.Parse(txtJuez3.Text);
double Calificacion = Main.SendCalificacionFormas(jueces);
txtResult.Text = Math.Round(Calificacion, 1) + "";
Main.InitializePresentaciones(IndexCategoria);
}catch(Exception exp)
{
txtJuez1.Text = "";
txtJuez2.Text = "";
txtJuez3.Text = "";
MessageBox.Show("Datos no validos");
}
}
}
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/CompSanda.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
public partial class CompSanda : UserControl
{
public MainWindow Main { get; set; }
public int IndexCategoria { get; set; }
public CompSanda()
{
InitializeComponent();
}
private void CompSanda_Load(object sender, EventArgs e)
{
}
public void InitCategories(List<string> categories)
{
foreach (string category in categories)
comboCategory.Items.Add(category);
}
public void MostrarCombates(List<string> combates)
{
listCombates.Clear();
foreach(string combate in combates)
{
listCombates.Items.Add(combate);
}
}
private void ComboCategory_SelectedIndexChanged(object sender, EventArgs e)
{
IndexCategoria = comboCategory.SelectedIndex;
Main.MostrarRondas(IndexCategoria);
}
}
}
<file_sep>/TorneoApp/TorneoApp/Model/Competidor.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
[Serializable]
public class Competidor
{
//Categorias de los participantes según tiempo de entrenamiento
public const int PRINCIPIANTE = 1;
public const int INTERMEDIO = 2;
public const int AVANZADO = 3;
public const int CINTANEGRA = 4;
//Nombre completo competidor
public string Name { get; }
//Género true=hombre false=mujer
public bool IsMan { get; set; }
//Escuela a la que pertenece
public Escuela Escuela { get; set; }
//Edad
public int Edad { get; set; }
//Peso en kg
public int Peso { get; set; }
//Tiempo entrenando 1,2, 3 o 4
public int TiempoEntrenando { get; set; }
//Si está inscrito a Sanda
public bool Sanda { get; set; }
//Si está inscrito a Formas
public bool Formas { get; set; }
//A qué categorias de formas está inscrito
public List<Forma> ListaFormas { get; set; }
//Si es cinturon negro
public bool IsBlackBelt { get; set; }
//Nombre de la eps
public string Eps { get; set; }
public string TelefonoPersonal { get; set; }
//Nombre completo del acudiente
public string ContactName { get; set; }
//Telefono del acudiente
public string TelefonoAcudiente { get; set; }
//Email del competidor
public string Email { get; set; }
//Si está presente el competidor
public bool IsHere { get; set; }
public Competidor(string Name, bool IsMan, int Edad, int Peso, bool IsBlackBelt,
int TiempoEntrenando, string Eps, string TelefonoPersonal,
string ContactName, string TelefonoAcudiente, string Email, Escuela Escuela)
{
this.Name = Name;
this.IsMan = IsMan;
this.Edad = Edad;
this.Peso = Peso;
this.IsBlackBelt = IsBlackBelt;
this.TiempoEntrenando = TiempoEntrenando;
this.Eps = Eps;
this.TelefonoPersonal = TelefonoPersonal;
this.ContactName = ContactName;
this.TelefonoAcudiente = TelefonoAcudiente;
this.Email = Email;
this.Escuela = Escuela;
Sanda = false;
Formas = false;
IsHere = false;
ListaFormas = new List<Forma>();
}
public void AddForma (Forma Forma)
{
ListaFormas.Add(Forma);
}
public bool ExistForma(Forma Forma)
{
if (ListaFormas.Contains(Forma))
return true;
return false;
}
public int GetMatchValue()
{
if (Edad >= 18) return (TiempoEntrenando>2) ? TiempoEntrenando*2 : TiempoEntrenando;
return TiempoEntrenando + Edad;
}
//Para Sanda se hace una ponderación de la edad y el peso
//entre el nivel
public double GetMatchValueSanda(){
return (Edad+Peso)/TiempoEntrenando;
}
public string getNivel()
{
switch(TiempoEntrenando){
case Competidor.PRINCIPIANTE:
return "Principiante";
case Competidor.INTERMEDIO:
return "Intermedio";
case Competidor.AVANZADO:
return "Avanzado";
}
return "Cinta Negra";
}
public string GetStringGenere()
{
return IsMan ? "Masculino" : "Femenino";
}
public String ToString()
{
return Name + " / " + getNivel() + " / " + Peso + "kg / " + Edad + " años ";
}
}
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/ControlButtons.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
public partial class ControlButtons : UserControl
{
public MainWindow Main { get; set; }
public ControlButtons()
{
InitializeComponent();
}
private void ControlButtons_Load(object sender, EventArgs e)
{
}
private void ButSaveChanges_Click(object sender, EventArgs e)
{
Main.SaveTorneo();
}
private void ButIniciar_Click(object sender, EventArgs e)
{
Main.IniciarTorneo();
this.butIniciar.Enabled = false;
}
}
}
<file_sep>/TorneoApp/TorneoApp/Model/Combate.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
[Serializable]
public class Combate
{
//Participantes del combate (2 participantes)
public Competidor[] Participantes { get; set; }
//Ganador del combate
public Competidor Ganador { get; set; }
//Rounds del combate
public List<Round> Rounds { get; set; }
public Combate(Competidor c1, Competidor c2) {
Participantes = new Competidor[2];
Participantes[0] = c1;
Participantes[1] = c2;
Rounds = new List<Round>();
}
public override String ToString()
{
return Participantes[0].Name + " vs " + Participantes[1].Name;
}
}
}
<file_sep>/TorneoApp/TorneoApp/Inicio.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp
{
    // Start-up form: create a new tournament, resume the last one, or exit.
    // Handler names are designer-wired — do not rename.
    public partial class Inicio : Form
    {
        public Inicio()
        {
            InitializeComponent();
        }
        private void ButNew_Click(object sender, EventArgs e)
        {
            // Open the main window with a brand-new tournament.
            MainWindow main = new MainWindow();
            this.Visible = false;
            main.Show();
            main.NuevoTorneo();
        }
        private void ButLogOut_Click(object sender, EventArgs e)
        {
            // Exit the application after a yes/no confirmation.
            if (MessageBox.Show("¿Desea Salir?", "Salir", MessageBoxButtons.YesNo, MessageBoxIcon.Question, MessageBoxDefaultButton.Button1) == System.Windows.Forms.DialogResult.Yes)
            {
                Application.Exit();
            }
        }
        private void Inicio_Load(object sender, EventArgs e)
        {
        }
        private void ButLast_Click(object sender, EventArgs e)
        {
            // Open the main window with the last saved tournament.
            MainWindow main = new MainWindow();
            this.Visible = false;
            main.Show();
            main.RecuperarTorneo();
        }
    }
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/CategoriasLists.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
using TorneoApp.Gui;
namespace TorneoApp.ControlUsers
{
    // UserControl that shows the categories of one discipline (Formas or Sanda),
    // the competitors of the selected category, and lets the user move a
    // competitor to another category. Handler names are designer-wired.
    public partial class CategoriasLists : UserControl
    {
        // Hosting window used to route user actions back to the controller.
        public MainWindow Window { private get; set; }
        // Which discipline this instance shows: true = Formas, false = Sanda.
        public bool IsFormas { get; set; }
        // Index of the currently selected category.
        public int IndexCat { get; set; }
        // Index of the currently selected competitor within the category.
        public int IndexComp { get; set; }
        public CategoriasLists()
        {
            InitializeComponent();
        }
        // Sets the header label according to the discipline shown.
        public void SetNameCategorias()
        {
            labCategoria.Text = IsFormas ? "Formas" : "Sanda";
        }
        private void Label4_Click(object sender, EventArgs e)
        {
        }
        private void CategoriasLists_Load(object sender, EventArgs e)
        {
        }
        private void ControlButtons1_Load(object sender, EventArgs e)
        {
        }
        private void Label6_Click(object sender, EventArgs e)
        {
        }
        // Exposes the category ListView so the controller can populate it.
        public ListView getListCategorias()
        {
            return listCategorias;
        }
        private void ListCategorias_SelectedIndexChanged(object sender, EventArgs e)
        {
            // Remember the selection and ask the controller for its competitors.
            if (listCategorias.SelectedIndices.Count > 0)
            {
                int index = listCategorias.SelectedIndices[0];
                IndexCat = index;
                this.Window.SelectCategory(index, IsFormas);
            }
        }
        // Replaces the competitor list with the given competitors.
        public void LoadCompetidores(List<Competidor> competidores)
        {
            listCompetidores.Clear();
            foreach (Competidor c in competidores)
            {
                listCompetidores.Items.Add(c.ToString());
            }
        }
        private void ListCompetidores_SelectedIndexChanged(object sender, EventArgs e)
        {
            // Remember the selection and ask the controller for its details.
            if (listCompetidores.SelectedIndices.Count > 0)
            {
                int index = listCompetidores.SelectedIndices[0];
                IndexComp = index;
                this.Window.SelectCompetidor(IndexCat, IsFormas, index);
            }
        }
        // Shows the selected competitor's personal data on the side panel.
        public void LoadData(Competidor comp)
        {
            labNombre.Text = comp.Name;
            labEdad.Text = comp.Edad + " años";
            labPeso.Text = comp.Peso + "kg";
            labNivel.Text = comp.getNivel();
            labEscuela.Text = comp.Escuela.Name;
        }
        private void ButCambiar_Click(object sender, EventArgs e)
        {
            // Open the move-competitor dialog preloaded with category names.
            CambiarCategoria cambiar = new CambiarCategoria();
            cambiar.categorias = this;
            cambiar.InitializeCategorias(Window.ToStringCategorias(IsFormas));
            cambiar.Visible = true;
        }
        // Callback from the dialog: move the selected competitor to 'index'.
        public void MoverA(int index)
        {
            this.Window.MoverA(IndexCat, index, IndexComp, IsFormas);
        }
    }
}
<file_sep>/TorneoApp/TorneoApp/Model/Podium.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
    // The three medal places of a category; each is null until a winner is set.
    [Serializable]
    public class Podium
    {
        // Gold-medal competitor.
        public Competidor FirstPlace { get; set;}
        // Silver-medal competitor.
        public Competidor SecondPlace { get; set; }
        // Bronze-medal competitor.
        public Competidor ThirdPlace { get; set; }
        // Starts with every place empty.
        public Podium()
        {
            FirstPlace = null;
            SecondPlace = null;
            ThirdPlace = null;
        }
    }
}
<file_sep>/README.md
# TorneoAdmin
<b>Aplicación de escritorio desarrollada para administrar torneos de Kung Fu.</b>
Incluye:<br>
1. Importación de competidores<br>
2. Match de categorías<br>
3. Organización de combates<br>
4. Manejo de puntuaciones<br>
5. Tablas de clasificación<br>
<file_sep>/TorneoApp/TorneoApp/ControlUsers/MenuLateral.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
    // Side navigation menu: highlights the current section and asks the main
    // window to switch views. Handler names are designer-wired — do not rename.
    public partial class MenuLateral : UserControl
    {
        // Main window that owns the views this menu navigates between.
        public MainWindow Controlador { get; set; }
        public MenuLateral()
        {
            InitializeComponent();
        }
        // Resets all entries to white, then highlights the current category
        // (and subcategory, if any) in yellow.
        public void UpdateLocation(string categoria, string subcategoria)
        {
            InitBlancos();
            // Highlight color (RGB 255,186,73) — designer-generated expression.
            var yellow = System.Drawing.Color.FromArgb(((int)(((byte)(255)))), ((int)(((byte)(186)))), ((int)(((byte)(73)))));
            if (!categoria.Equals(""))
            {
                Pintar(categoria, yellow);
                if (!subcategoria.Equals(""))
                    Pintar(subcategoria, yellow);
            }
        }
        // Paints every menu entry white (the unselected state).
        public void InitBlancos()
        {
            buttonTitle.ForeColor = Color.White;
            butCategorias.ForeColor = Color.White;
            butCatSanda.ForeColor = Color.White;
            butCatFormas.ForeColor = Color.White;
            butCompetidores.ForeColor = Color.White;
            butListaCompetidores.ForeColor = Color.White;
            butImportar.ForeColor = Color.White;
            butEscuelas.ForeColor = Color.White;
            butListaEscuelas.ForeColor = Color.White;
            butRankingEscuelas.ForeColor = Color.White;
            butCompetencia.ForeColor = Color.White;
            butSanda.ForeColor = Color.White;
            butFormas.ForeColor = Color.White;
        }
        // Paints the button that corresponds to the given view constant.
        public void Pintar(string Lugar, System.Drawing.Color Pintura)
        {
            switch (Lugar)
            {
                case (MainWindow.CATEGORIAS):
                    butCategorias.ForeColor = Pintura;
                    break;
                case (MainWindow.CATFORMAS):
                    butCatFormas.ForeColor = Pintura;
                    break;
                case (MainWindow.CATSANDA):
                    butCatSanda.ForeColor = Pintura;
                    break;
                case (MainWindow.COMPETIDORES):
                    butCompetidores.ForeColor = Pintura;
                    break;
                case (MainWindow.LISTCOMPETIDORES):
                    butListaCompetidores.ForeColor = Pintura;
                    break;
                case (MainWindow.VERIFICAR):
                    butImportar.ForeColor = Pintura;
                    break;
                case (MainWindow.ESCUELAS):
                    butEscuelas.ForeColor = Pintura;
                    break;
                case (MainWindow.LISTESCUELAS):
                    butListaEscuelas.ForeColor = Pintura;
                    break;
                case (MainWindow.RANKING):
                    butRankingEscuelas.ForeColor = Pintura;
                    break;
                case (MainWindow.COMPETENCIA):
                    butCompetencia.ForeColor = Pintura;
                    break;
                case (MainWindow.COMPFORMAS):
                    butFormas.ForeColor = Pintura;
                    break;
                case (MainWindow.COMPSANDA):
                    butSanda.ForeColor = Pintura;
                    break;
            }
        }
        // The handlers below just forward navigation to the controller.
        private void ButtonTitle_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.INIT);
        }
        private void ButCatSanda_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.CATSANDA);
        }
        private void ButImportar_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.VERIFICAR);
        }
        private void ButCategorias_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.CATEGORIAS);
        }
        private void ButCatFormas_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.CATFORMAS);
        }
        private void ButCompetidores_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.COMPETIDORES);
        }
        private void ButListaCompetidores_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.LISTCOMPETIDORES);
        }
        private void ButEscuelas_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.ESCUELAS);
        }
        private void ButListaEscuelas_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.LISTESCUELAS);
        }
        private void ButRankingEscuelas_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.RANKING);
        }
        private void ButCompetencia_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.COMPETENCIA);
        }
        private void ButSanda_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.COMPSANDA);
        }
        private void ButFormas_Click(object sender, EventArgs e)
        {
            Controlador.ShowView(MainWindow.COMPFORMAS);
        }
        // Enables the competition buttons only once the tournament has started.
        public void ReviewState(bool Iniciado)
        {
            if (!Iniciado)
            {
                butCompetencia.Enabled = false;
                butSanda.Enabled = false;
                butFormas.Enabled = false;
            }
            else
            {
                butCompetencia.Enabled = true;
                butSanda.Enabled = true;
                butFormas.Enabled = true;
            }
        }
    }
}
<file_sep>/TorneoApp/TorneoApp/Model/MatchSanda.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
public class MatchSanda
{
public const int NUMSEGEDAD = 11;
public List<Competidor> Competidores;
public List<CatSanda> CategoriasSanda;
public MatchSanda(List<Competidor> Competidores)
{
this.Competidores = Competidores;
CategoriasSanda = new List<CatSanda>();
/*
*Estrategia a utilizar
*Primero se separa por género
*Luego se separa cada género por TiempoEntreno
*Luego se separa por Edad, de acuerdo a los RQ
*Se verifican las categorias habilitadas
*Se saca la media y el dato más atipico de cada
*categoria habilitada
*Insertan todos los datos más atyp en la lista
*de competidores restantes
*Se hace la estrategia para insertar competidores
*restantes
*Se retornan las categorias.
*/
}
public List<CatSanda> DoMatch(){
//Primero se hace la separación de hombres y mujeres
SepararGeneros();
var Generos = CategoriasSanda.ToArray();
//Se formaron dos grupos entonces cada uno a su vez se va a separar por nivel
for(int i=0; i<Generos.Length; i++){
List<CatSanda> CatDefinitivas = SepararNiveles(Generos[i]);
var ArrDefinitivas = CatDefinitivas.ToArray();
for(int w=0; w<ArrDefinitivas.Length; w++){
List<CatSanda> Segmentacion = SepararEdad(ArrDefinitivas[w]);
CatDefinitivas.Remove(ArrDefinitivas[w]);
CatDefinitivas.AddRange(Segmentacion);
}
CategoriasSanda.Remove(Generos[i]);
CategoriasSanda.AddRange(CatDefinitivas);
}
List<CatSanda> CategoriasFinales = VerificarSizeCategorias();
var ArrFinalVer = CategoriasFinales.ToArray();
for (int c = 0; c < ArrFinalVer.Length; c++)
{
List<CatSanda> SegPeso = ArrFinalVer[c].GetSubCatPeso();
if (SegPeso != null)
{
CategoriasFinales.Remove(ArrFinalVer[c]);
CategoriasFinales.AddRange(SegPeso);
}
}
CategoriasSanda.Clear();
CategoriasSanda.AddRange(CategoriasFinales);
return RetornarCategorias();
}
public void SepararGeneros(){
bool IsMan = false;
for (int i=0; i<2; i++){
CatSanda Categoria = new CatSanda();
Categoria.IsMan= IsMan;
List<Competidor> TempComp = Competidores.FindAll(competidor => competidor.IsMan == IsMan);
Categoria.Participantes = TempComp;
CategoriasSanda.Add(Categoria);
IsMan= true;
}
}
public List<CatSanda> SepararNiveles(CatSanda Categoria){
List<CatSanda> CategoriasSegmentadas = new List<CatSanda>();
List<Competidor> Participantes = Categoria.Participantes;
for (int i=0; i<Competidor.CINTANEGRA; i++){
if (Participantes.Exists(comp => comp.TiempoEntrenando== i+1)){
var Temp = Participantes.FindAll(participante => participante.TiempoEntrenando == i+1);
CatSanda TempCat = new CatSanda();
TempCat.Participantes = Temp;
CategoriasSegmentadas.Add(TempCat);
}
}
return CategoriasSegmentadas;
}
public List<CatSanda> SepararEdad(CatSanda Categoria){
var CompetidoresCategoria = Categoria.Participantes.ToArray();
Dictionary<int, List<Competidor>> Segmentacion = new Dictionary<int, List<Competidor>>();
for(int i=0; i<NUMSEGEDAD; i++)
Segmentacion.Add(i, new List<Competidor>());
for (int i =0; i<CompetidoresCategoria.Length; i++){
int MatchValue = GetMatchEdad(CompetidoresCategoria[i]);
//Revisar sí esto no borra un dato anterior
Segmentacion[MatchValue].Add(CompetidoresCategoria[i]);
}
List<CatSanda> CategoriasSegmentadas = ConvertDictionary(Segmentacion);
return CategoriasSegmentadas;
}
/*
public List<CatSanda> SepararPeso(CatSanda Categoria, bool isMan)
{
var CompetidoresCategoria = Categoria.Participantes.ToArray();
Dictionary<int, List<Competidor>> Segmentacion = new Dictionary<int, List<Competidor>>();
for (int i = 0; i < NUMSEGEDAD; i++)
Segmentacion.Add(i, new List<Competidor>());
for (int i = 0; i < CompetidoresCategoria.Length; i++)
{
int MatchValue = GetMatchPeso(CompetidoresCategoria[i], isMan);
Segmentacion[MatchValue].Add(CompetidoresCategoria[i]);
}
List<CatSanda> CategoriasSegmentadas = ConvertDictionary(Segmentacion);
return CategoriasSegmentadas;
}
*/
public int GetMatchEdad(Competidor c){
int Edad = c.Edad;
if (Edad>= 6 && Edad<=8) return 0;
if (Edad>=9 && Edad<=11) return 1;
if (Edad>=12 && Edad<=14) return 2;
if(Edad>=15 && Edad<=17) return 3;
if(Edad>=18 && Edad<=35) return 4;
if (Edad>=36 && Edad>=40) return 5;
if (Edad>=41 && Edad>=45) return 6;
if (Edad>=46 && Edad>=50) return 7;
if (Edad>=51 && Edad>=55) return 8;
if (Edad>=56 && Edad>=60) return 9;
return 10;
}
public List<CatSanda> ConvertDictionary(Dictionary<int, List<Competidor>> Abiertas){
List<CatSanda> CategoriasAbiertas = new List<CatSanda>();
for (int i =0; i<NUMSEGEDAD; i++){
CatSanda TempCat = new CatSanda();
TempCat.Participantes =Abiertas[i];
if (TempCat.Participantes.Count!=0)
CategoriasAbiertas.Add(TempCat);
}
return CategoriasAbiertas;
}
public List<CatSanda> VerificarSizeCategorias(){
List<CatSanda> Habilitadas = GetCategoriasHabilitadas();
if (Habilitadas.Count != CategoriasSanda.Count)
{
PrepararVerifacion(Habilitadas);
List<Competidor> CompetidoresRestantes = GetRestantes(Habilitadas);
Habilitadas = AnadirParticipantes(CompetidoresRestantes, Habilitadas);
}
return Habilitadas;
}
public void PrepararVerifacion(List<CatSanda> habilitadas){
foreach(CatSanda categoria in habilitadas){
categoria.CalcularMean();
categoria.CalcularAtipico();
}
}
public List<CatSanda> GetCategoriasHabilitadas(){
return CategoriasSanda.FindAll(categoria => categoria.Participantes.Count>=2);
}
public List<Competidor> GetRestantes(List<CatSanda> habilitadas){
List<CatSanda> CatRestantes = CategoriasSanda.FindAll(Categoria => Categoria.Participantes.Count <2);
List<Competidor> Restantes = new List<Competidor>();
foreach (CatSanda cat in CatRestantes)
Restantes.AddRange(cat.Participantes);
foreach(CatSanda cat in habilitadas){
Competidor Atyp = cat.Atipico;
cat.Participantes.Remove(Atyp);
Restantes.Add(Atyp);
}
return Restantes;
}
public List<CatSanda> AnadirParticipantes(List<Competidor> Participantes, List<CatSanda> Habilitadas){
var CategoriasArray = Habilitadas.ToArray();
foreach(Competidor p in Participantes)
{
int index=0;
double MinDesv=1000000;
for (int i=0; i<CategoriasArray.Length; i++)
{
double TempDesv=CategoriasArray[i].CalcularDesviacion(p);
bool MismoRangoEdad = CategoriasArray[i].IsMayorEdadCategory() == (p.Edad >= 18);
if (TempDesv <= MinDesv && MismoRangoEdad)
{
MinDesv = TempDesv;
index = i;
}
}
CatSanda InsertHere = CategoriasArray[index];
InsertHere.AddCompetidor(p);
}
return Habilitadas;
}
public List<CatSanda> RetornarCategorias()
{
foreach(CatSanda categoria in CategoriasSanda){
categoria.IsManCategory();
categoria.GenerarNombre("Combate");
}
return CategoriasSanda;
}
}
}
<file_sep>/TorneoApp/TorneoApp/ControlUsers/Ranking.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using TorneoApp.Model;
namespace TorneoApp.ControlUsers
{
    // UserControl showing the school rankings (Formas and Sanda) plus the
    // medal counts and winners of a selected school. Handlers are designer-wired.
    public partial class Ranking : UserControl
    {
        // Hosting window used to route user actions back to the controller.
        public MainWindow Main { get; set; }
        // Discipline of the last selection: true = Formas, false = Sanda.
        public bool IsFormas { get; set; }
        // Name of the school currently selected in either ranking list.
        public String escuela { get; set; }
        public Ranking()
        {
            InitializeComponent();
        }
        private void Ranking_Load(object sender, EventArgs e)
        {
        }
        // Fills the Formas or Sanda ranking list with school names (already ordered).
        public void InitializeRanking(List<Escuela> lista, bool IsFormas)
        {
            if (IsFormas)
            {
                listFormas.Clear();
                foreach (Escuela s in lista)
                    listFormas.Items.Add(s.Name);
            }
            else
            {
                listSanda.Clear();
                foreach (Escuela s in lista)
                    listSanda.Items.Add(s.Name);
            }
        }
        // Shows the given competitor descriptions (medal winners of one school).
        public void InitializeCompetidores(List<String> lista)
        {
            listCompetidores.Clear();
            foreach (string s in lista)
                listCompetidores.Items.Add(s);
        }
        private void ListFormas_SelectedIndexChanged(object sender, EventArgs e)
        {
            // Select a school from the Formas ranking and load its details.
            if (listFormas.SelectedIndices.Count > 0)
            {
                this.escuela = listFormas.SelectedItems[0].Text;
                this.IsFormas = true;
                labNomEscuela.Text = escuela;
                Main.InicializarEscuela(escuela, IsFormas);
            }
        }
        private void ListSanda_SelectedIndexChanged(object sender, EventArgs e)
        {
            // Select a school from the Sanda ranking and load its details.
            if (listSanda.SelectedIndices.Count > 0)
            {
                this.escuela = listSanda.SelectedItems[0].Text;
                this.IsFormas = false;
                labNomEscuela.Text = escuela;
                Main.InicializarEscuela(escuela, IsFormas);
            }
        }
        private void LabNumOro_Click(object sender, EventArgs e)
        {
        }
        // Shows the selected school's medal counts.
        public void SetMedallas(int oro, int plata, int bronce, int totales)
        {
            labNumOro.Text = "" + oro;
            labNumPlata.Text = "" + plata;
            labNumBronce.Text = "" + bronce;
            labTotal.Text = "" + totales;
        }
        // The three buttons below list the school's winners per medal type.
        private void ButOro_Click(object sender, EventArgs e)
        {
            Main.GetCompetidoresRanking(escuela, IsFormas, Torneo.ORO);
        }
        private void ButPlata_Click(object sender, EventArgs e)
        {
            Main.GetCompetidoresRanking(escuela, IsFormas, Torneo.PLATA);
        }
        private void ButBronce_Click(object sender, EventArgs e)
        {
            Main.GetCompetidoresRanking(escuela, IsFormas, Torneo.BRONCE);
        }
    }
}
<file_sep>/TorneoApp/TorneoApp/Model/Round.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
[Serializable]
public class Round
{
public int[] Puntajes { get; set; }
public int Ganador { get; set;}
public Round()
{
Puntajes = new int[2];
Ganador = -1;
}
public void CalcularGanador()
{
if (Puntajes[0] > Puntajes[1])
Ganador = 1;
else if (Puntajes[0] < Puntajes[1])
Ganador = 2;
else
Ganador = 0;
}
}
}
<file_sep>/TorneoApp/TorneoApp/Model/Categoria.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
[Serializable]
public class Categoria
{
//Nombre de la categoria
public string Nombre { get; set; }
//Participantes de la categoria
public List<Competidor> Participantes { get; set; }
//Podium de la categoria
public Podium Podium { get; set; }
public bool Opened { get; set; }
public double Mean { get; set; }
public Categoria()
{
Participantes = new List<Competidor>();
Podium = new Podium();
Opened = true;
Mean = 0;
}
public virtual void CalcularMean()
{
//Este método tiene su propia implementación en CatFormas y CatSanda
}
public virtual double CalcularDesviacion(Competidor participante)
{
return -1;
//Este método tiene su propia implementación en CatFormas y CatSanda
}
public void AddCompetidor (Competidor competidor)
{
Participantes.Add(competidor);
CalcularMean();
}
public virtual void GenerarNombre(string Nombre)
{
this.Nombre = Nombre;
}
public bool IsMayorEdadCategory()
{
return Participantes.Exists(p => p.Edad >= 18);
}
public int GetMinEdad()
{
return Participantes.Select(par => par.Edad).Min();
}
public int GetMaxEdad()
{
return Participantes.Select(par => par.Edad).Max();
}
public int GetMinPeso()
{
return Participantes.Select(par => par.Peso).Min();
}
public int GetMaxPeso()
{
return Participantes.Select(par => par.Peso).Max();
}
public int GetCategoriaMayoria()
{
int Nivel=0, cant=0;
int[] Niveles = new int[4];
for (int i = 0; i < Niveles.Length; i++)
Niveles[0] = Participantes.FindAll(p => p.TiempoEntrenando == (i + 1)).Count;
for (int i =0; i<Niveles.Length; i++)
{
if (Niveles[i] >= cant)
{
Nivel = Participantes.First().TiempoEntrenando;
cant = Niveles[i];
}
}
return Nivel;
}
public void EliminarCompetidor(Competidor comp)
{
Participantes.Remove(comp);
}
public void FinishedCategory()
{
Opened = false;
}
public void UpdatePuntos(bool IsFormas)
{
if(Podium.FirstPlace!=null)
Podium.FirstPlace.Escuela.AddWinner(Podium.FirstPlace, Torneo.ORO, IsFormas);
if(Podium.SecondPlace!=null)
Podium.SecondPlace.Escuela.AddWinner(Podium.SecondPlace, Torneo.PLATA, IsFormas);
if (Podium.ThirdPlace != null)
Podium.ThirdPlace.Escuela.AddWinner(Podium.ThirdPlace, Torneo.BRONCE, IsFormas);
FinishedCategory();
}
}
}
<file_sep>/TorneoApp/TorneoApp/Model/Forma.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
[Serializable]
public class Forma
{
    // Kind of form: with weapon, without weapon, long weapon.
    // Immutable once constructed (get-only property).
    public string Nombre { get; }
    // Creates a form of the given kind.
    public Forma(string CategoriaForma)
    {
        this.Nombre = CategoriaForma;
    }
}
}
<file_sep>/TorneoApp/TorneoApp/Model/CatFormas.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TorneoApp.Model
{
[Serializable]
public class CatFormas : Categoria
{
    // One scored presentation per competitor; filled by InitializePresentaciones().
    public List<Presentacion> Presentaciones { get; set; }
    // The form (weapon type) this category is judged on.
    public Forma Forma { get; set; }

    public CatFormas()
    {
        Presentaciones = new List<Presentacion>();
    }

    // Creates one (not yet scored) Presentacion for every registered participant.
    public void InitializePresentaciones()
    {
        foreach (Competidor c in Participantes)
        {
            Presentacion p = new Presentacion(c);
            Presentaciones.Add(p);
        }
    }

    // Recomputes Mean as the average match value of all participants.
    // Guards the empty category, which previously produced NaN (0.0 / 0).
    public override void CalcularMean()
    {
        var Competidores = Participantes.ToArray();
        if (Competidores.Length == 0)
        {
            Mean = 0;
            return;
        }
        double mean = 0;
        for (int i = 0; i < Competidores.Length; i++)
            mean += (Competidores[i].GetMatchValue());
        Mean = mean / Competidores.Length;
    }

    // Absolute deviation of the participant's match value from the category mean.
    public override double CalcularDesviacion(Competidor participante)
    {
        return Math.Abs(participante.GetMatchValue() - Mean);
    }

    // Builds a descriptive category name from the form name, the predominant
    // training level, and the participants' age range.
    public override void GenerarNombre(string NombreForma)
    {
        int MinEdad = GetMinEdad();
        int MaxEdad = GetMaxEdad();
        int Nivel = GetCategoriaMayoria();
        string NivelCat = "";
        switch (Nivel)
        {
            case Competidor.PRINCIPIANTE:
                NivelCat = "Principiante";
                break;
            case Competidor.INTERMEDIO:
                NivelCat = "Intermedio";
                break;
            case Competidor.AVANZADO:
                NivelCat = "Avanzado";
                break;
            case Competidor.CINTANEGRA:
                NivelCat = "Cinta Negra";
                break;
        }
        Nombre = NombreForma + " " + NivelCat + " de " + MinEdad + "-" + MaxEdad + " años";
    }

    // True if any of the given presentations has exactly the given score.
    // Exact double comparison is intentional here: a tie means identical stored scores.
    public bool HayEmpate(double Puntos, List<Presentacion> presentaciones)
    {
        return presentaciones.Exists(p => p.Calificacion == Puntos);
    }

    // All presentations whose score equals Puntos exactly.
    public List<Presentacion> BuscarPresentaciones(double Puntos, List<Presentacion> presentaciones)
    {
        return presentaciones.FindAll(p => p.Calificacion == Puntos);
    }

    // Fills the Podium from the scored presentations (highest score first) and
    // returns one display string per podium slot; tied competitors share a slot,
    // joined by " / ". Unfilled slots come back as "".
    // NOTE(review): the slot count is Torneo.NUM_JUECES; it presumably equals the
    // three podium places handled by SetVarPodium — confirm the constant's value.
    public string[] UpdatePodium()
    {
        string[] CompetidoresPodium = new string[Torneo.NUM_JUECES];
        List<Presentacion> Calificadas = PresentacionesCalificadas().OrderByDescending(c => c.Calificacion).ToList();
        for (int i = 0; i < CompetidoresPodium.Length; i++)
        {
            if (Calificadas.Count != 0)
            {
                Presentacion p = Calificadas.First();
                CompetidoresPodium[i] = p.Competidor.Name;
                Calificadas.Remove(p);
                if (HayEmpate(p.Calificacion, Calificadas))
                {
                    var Iguales = BuscarPresentaciones(p.Calificacion, Calificadas);
                    foreach (var comp in Iguales)
                    {
                        CompetidoresPodium[i] += " / " + comp.Competidor.Name;
                        Calificadas.Remove(comp);
                    }
                }
                SetVarPodium(i + 1, p.Competidor);
            }
            else
            {
                CompetidoresPodium[i] = "";
            }
        }
        return CompetidoresPodium;
    }

    // Assigns a competitor to the podium slot given by a 1-based index
    // (1 = gold, 2 = silver, 3 = bronze); other indices are ignored.
    public void SetVarPodium(int index, Competidor p)
    {
        switch (index)
        {
            case 1:
                Podium.FirstPlace = p;
                break;
            case 2:
                Podium.SecondPlace = p;
                break;
            case 3:
                Podium.ThirdPlace = p;
                break;
        }
    }

    // Presentations that have already been scored.
    public List<Presentacion> PresentacionesCalificadas()
    {
        return Presentaciones.FindAll(p => p.IsDone());
    }

    // Presentations still waiting to be scored.
    public List<Presentacion> PresentacionesRestantes()
    {
        return Presentaciones.FindAll(p => !p.IsDone());
    }

    // The presentation belonging to the given competitor, or null if none exists.
    public Presentacion BuscarPresentacion(Competidor c)
    {
        return Presentaciones.Find(p => p.Competidor == c);
    }
}
}
| 6e0c32eea88cd528893f7003a81626358f07c036 | [
"Markdown",
"C#"
] | 35 | C# | camilaleniss/TorneoAdmin | 81098775943ccb4dbea77d86254a0644b57f9233 | 09b1527994f3a57f684dca1fa9a0069a5dc406c3 |
refs/heads/master | <repo_name>chnagendra2/chanduselenim<file_sep>/FirstClass2/src/com/kotak/loans/carloans/SecondClass3.java
package com.kotak.loans.carloans;
public class SecondClass3
{
public static void main(String[] args)
{
System.out.println("Hello");
}
}
<file_sep>/FirstClass2/src/com/kotak/loans/carloans/SecondClass.java
package com.kotak.loans.carloans;
public class SecondClass {
public static void main(string[] args)
{
system.out.println("hi");
System.out.println("hiii");S
}
}
| ea8e3a7e83497a79f2db624e628649e1d93bf07a | [
"Java"
] | 2 | Java | chnagendra2/chanduselenim | 9cb2b8cf2a47172a2839a7ad99d806ac7dba1ce0 | 562e8edc4ea6f38d68ecc8b8d107698e1801c2e2 |
refs/heads/main | <repo_name>vladienn/ESP32-RESTAPI<file_sep>/espRequests.cpp
#include <WiFi.h>
#include <HTTPClient.h>
#include <AceButton.h>
#include <Adafruit_NeoPixel.h>
using namespace ace_button;
const int BUTTON_PIN = 35;
const int LED_PIN = 33;
const int STRIP_SIZE = 16;
Adafruit_NeoPixel strip = Adafruit_NeoPixel(STRIP_SIZE, LED_PIN, NEO_GRB + NEO_KHZ800);
AceButton button(BUTTON_PIN);
void handleEvent(AceButton*, uint8_t, uint8_t);
void restPOST(bool);
void restGET(void* par);
void btn_check(void* par);
void colorWipe(uint32_t c, uint8_t wait);
//WiFi credentials
const char* wifi_name = "*****";
const char* wifi_password = "*****";
bool tmp = true;
int led_color = 1;
void setup() {
strip.begin();
strip.setBrightness(25); // Lower brightness and save eyeballs!
strip.show();
pinMode(BUTTON_PIN, INPUT_PULLUP);
ButtonConfig* buttonConfig = button.getButtonConfig();
buttonConfig->setEventHandler(handleEvent);
buttonConfig->setFeature(ButtonConfig::kFeatureClick);
buttonConfig->setFeature(ButtonConfig::kFeatureDoubleClick);
//WiFi
Serial.begin(115200);
WiFi.begin(wifi_name, wifi_password);
// while(WiFi.status() != WL_CONNECTED){
// delay(500);
// Serial.println("Connnecting...");
// }
// Serial.println("Connected to ");
// Serial.print(wifi_name);
xTaskCreate(
restGET,
"restGET",
10000,
NULL,
1,
NULL);
xTaskCreate(
btn_check,
"btn_check",
10000,
NULL,
1,
NULL);
}
void loop() {
}
void colorWipe(uint32_t c, uint8_t wait) {
for (uint16_t i = 0; i < strip.numPixels(); i++) {
strip.setPixelColor(i, c);
strip.show();
delay(wait);
}
}
void btn_check(void* par) {
while (true) {
button.check();
}
}
void restGET(void* par) {
while (true) {
if ((WiFi.status() == WL_CONNECTED)) { //Check the current connection status
HTTPClient http;
http.begin("url"); //Specify the URL
int httpCode = http.GET(); //Make the request
if (httpCode > 0) { //Check for the returning code
String payload = http.getString();
led_color = int(payload[16]) - '0';
switch (led_color) {
case 0:
colorWipe(strip.Color(0, 0, 0), 20); //off
tmp = true;
break;
case 1:
colorWipe(strip.Color(255, 0, 0), 20); //red
tmp = false;
break;
case 2:
colorWipe(strip.Color(255, 0, 128), 20); //pink
break;
case 3:
colorWipe(strip.Color(128, 0, 255), 20); //purple
break;
case 4:
colorWipe(strip.Color(250, 255, 0), 20); //yellow
break;
case 5:
colorWipe(strip.Color(255, 128, 0), 20); //orange
break;
case 6:
colorWipe(strip.Color(0, 255, 0), 20); //green
break;
case 7:
colorWipe(strip.Color(255, 255, 255), 20); //white
break;
case 8:
colorWipe(strip.Color(20, 255, 255), 20); //cyan
break;
case 9:
colorWipe(strip.Color(0, 0, 255), 20); //blue
break;
}
led_color++;
if (led_color == 10) {
led_color = 1;
}
}
http.end(); //Free the resources
}
vTaskDelay(1000);
}
}
void restPOST(bool light_state) {
if ((WiFi.status() == WL_CONNECTED)) { //Check the current connection status
HTTPClient http;
http.begin("url"); //Specify the URL
http.addHeader("Content-Type", "text/plain");
String text_state = "{\"light_state\":\"";
if (light_state == false) {
if (tmp == true) {
text_state += "1\"}";
tmp = false;
} else {
text_state += "0\"}";
tmp = true;
}
} else if (light_state == true && tmp == false) {
text_state += led_color;
text_state += "\"}";
} else text_state = "{\"light_state\":\"0\"}";
int httpCode = http.POST(text_state);
http.end(); //Free the resources
}
}
void handleEvent(AceButton* /* button */, uint8_t eventType, uint8_t buttonState) {
switch (eventType) {
case AceButton::kEventPressed:
restPOST(true);
break;
case AceButton::kEventDoubleClicked:
restPOST(false);
break;
}
}
<file_sep>/README.md
# ESP32-REST API
ESP32's communicating over the internet via dedicated webserver based on flaskPython.
<file_sep>/app.py
from flask import Flask, jsonify, request
import json
app = Flask(__name__)
data = {'light_state':'off'}
@app.route('/', methods=['GET', 'POST'])
def home():
global data
if request.method == 'GET':
print(data)
return jsonify(data)
if request.method == 'POST':
data_req = request.data
json_data = json.loads(data_req)
print(json_data)
data = json_data.copy()
return jsonify(data)
app.run('0.0.0.0', port = 5000)
| 24701c041286a82e7f4fb24e399483b8093fcda3 | [
"Markdown",
"Python",
"C++"
] | 3 | C++ | vladienn/ESP32-RESTAPI | 36a214168ba4aa5077fd647f997b16bb6f918b85 | 22ea5a4b8dcf1da1442f9c72552f723b93b2e862 |
refs/heads/master | <file_sep>import cv2
import numpy as np
from random import randrange as rr
import operator
def split_questions(img_bin, img_gray, lines=None, line_width=0.95):
rows, cols = img_bin.shape
if lines is None:
# Engrossar linhas do separador de questao para tolerancia a rotacoes leves
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 5))
lines = cv2.morphologyEx(img_bin, cv2.MORPH_DILATE, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (int(line_width * cols), 1)) # * 255
lines = cv2.morphologyEx(lines, cv2.MORPH_OPEN, kernel)
questions = []
mid = cols // 2
state = 0
midcol = lines[:, mid]
i = 0
while i < rows:
if state == 0: # espaco entre questoes
if midcol[i] > 0:
state = 1
elif state == 1: # linha de inicio da questao
if midcol[i] == 0:
i += 10
qstart = i
state = 2
elif state == 2: # conteudo da questao
if midcol[i] > 0:
questions.append(QuestionImg(img_bin[qstart:i - 10, :], img_gray[qstart:i - 10, :]))
# cv2.imshow('questao', img_bin[qstart:i - 10, :])
# cv2.waitKey(0)
state = 3
elif state == 3: # linha de final de questao
if midcol[i] == 0:
state = 0
i += 1
return questions
class Marker:
def __init__(self, centroid, stats):
self.centroid = centroid
self.stats = stats
def draw(self, image, color):
int_centroid = int(self.centroid[0]), int(self.centroid[1])
cv2.circle(image, int_centroid, int(self.stats[cv2.CC_STAT_AREA]/(2*np.pi)), color, thickness=3)
class AnswerArea:
checked_threshold = 0.11
margin_h = 6
margin_v = 3
color_unchecked = (139, 227, 24) # verde
color_checked = (66, 106, 255) # coral
def __init__(self, xstart, xend, ystart, yend, image):
self.xstart = xstart + self.margin_h
self.xend = xend - self.margin_h
self.ystart = ystart + self.margin_v
self.yend = yend - self.margin_v
self.img_flat = image[self.ystart:self.yend, self.xstart:self.xend].flatten()
self.checked = self.measure() >= self.checked_threshold
def measure(self):
return 1.0 - (np.sum(self.img_flat) / (len(self.img_flat) * 255.0))
def draw(self, image):
c = self.color_checked if self.checked else self.color_unchecked
cv2.rectangle(image, (self.xstart, self.ystart), (self.xend, self.yend), c, thickness=2)
# print("BoxValue: %f" % self.measure())
class QuestionImg:
def __init__(self, img_bin, img_gray):
self.img_bin = img_bin
self.img_gray = img_gray
self.markers = []
self.find_markers()
self.answer_blocks = []
self.find_answer_blocks()
def find_markers(self):
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
eroded = cv2.dilate(self.img_bin, kernel) # fecha possiveis buracos nos marcadores
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
eroded = cv2.morphologyEx(eroded, cv2.MORPH_ERODE, kernel)
n_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(eroded, connectivity=8)
all_markers = [Marker(c, stats[i]) for i, c in enumerate(centroids)]
# remove marcador do plano de fundo
max_area, inx = -1, -1
for i, m in enumerate(all_markers):
current_area = m.stats[cv2.CC_STAT_AREA]
if current_area > max_area:
max_area = current_area
inx = i
all_markers.pop(inx)
m_xmin = min(all_markers, key=lambda m: m.centroid[0])
m_xmax = max(all_markers, key=lambda m: m.centroid[0])
ymin = m_xmin.centroid[1] + 13
self.markers.append(m_xmin)
for m in all_markers:
if m.centroid[1] >= ymin:
self.markers.append(m)
self.markers.append(m_xmax)
def find_answer_blocks(self):
self.markers.sort(key=lambda m: m.centroid[0])
half_height = (self.markers[1].centroid[1] - self.markers[0].centroid[1]) / 2.0
y_center = (self.markers[0].centroid[1] + self.markers[-1].centroid[1]) / 2.0
ybegin = int(y_center - half_height)
yend = int(y_center + half_height)
# interpolacao linear para tolerancia a pequenas rotacoes
xbase, ybase = self.markers[0].centroid
m = (self.markers[-1].centroid[1] - ybase) / (self.markers[-1].centroid[0] - xbase)
for i in range(1, len(self.markers) - 1):
xcenter = self.markers[i].centroid[0]
xbegin = int((self.markers[i-1].centroid[0] + xcenter) // 2)
xend = int((self.markers[i+1].centroid[0] + xcenter) // 2)
# interpolacao linear
deltax = xcenter - xbase
deltay = int(np.round(m * deltax))
self.answer_blocks.append(AnswerArea(xbegin, xend, ybegin + deltay, yend + deltay, self.img_gray))
def show(self):
cv2.imshow('areas', self.marked_image())
cv2.waitKey(0)
def marked_image(self):
img_bgr = cv2.cvtColor(self.img_gray, cv2.COLOR_GRAY2BGR)
for ab in self.answer_blocks:
ab.draw(img_bgr)
for m in self.markers:
m.draw(img_bgr, (222, 204, 32))
return img_bgr
def evaluate(self):
return [i for i, a in enumerate(self.answer_blocks) if a.checked]
<file_sep># This Python file uses the following encoding: utf-8
class Questao:
def __init__(self, lst_marcacoes, gabarito):
self.marcacoes = lst_marcacoes
self.nota = 0
self.gabarito = gabarito
def avalia(self):
self.nota = 0
if self.gabarito.tipo == 1: # marcação única - Tipo C
if len(self.marcacoes) != 1:
return
elif self.marcacoes[0] == self.gabarito.resp[0]:
self.nota = self.gabarito.valor
return
else:
return
elif self.gabarito.tipo == 2: # múltiplas marcações - Tipo C
self.nota = self.gabarito.valor
item = self.gabarito.valor/len(self.gabarito.resp)
for i in xrange(0, self.gabarito.num_item):
if bool(i in self.marcacoes) ^ bool(i in self.gabarito.resp):
self.nota -= item
elif self.gabarito.tipo == 3: # V ou F - Tipo A
self.nota = 0
item = self.gabarito.valor*2/self.gabarito.num_item
for r in self.gabarito.resp:
if r in self.marcacoes:
if (r & 1) == 0: # número par - V
if r+1 not in self.marcacoes:
self.nota += item
else:
if r-1 not in self.marcacoes:
self.nota += item
class Gabarito:
def __init__(self, valor_questao, tipo_questao, respostas, num_item):
self.valor = valor_questao
self.num_item = num_item
self.resp = respostas
self.tipo = tipo_questao
class Prova:
def __init__(self, gabaritos, lst_marcacoes):
self.nota = 0
self.respostas = []
# self.respostas = [Questao(lst_marcacoes[i], r) for i, r in enumerate(gabaritos)]
for i, r in enumerate(gabaritos):
self.respostas.append(Questao(lst_marcacoes[i], r))
def avalia(self):
self.nota = 0
for g in self.respostas:
g.avalia()
self.nota += g.nota
<file_sep># This Python file uses the following encoding: utf-8
from classes_correcao import *
def cria_gabarito():
print("Bem-vindo ao Gerenciador de Gabarito!")
print("Trabalhamos com 3 tipos de questões:")
print("1 - Questões tipo C com uma única alternativa")
print("2 - Questões tipo C com múltiplas alternativas")
print("3 - Questões tipo A\n")
i = 1
a = int(raw_input("Forneça o tipo da questão %d: (ou 0 (zero) caso a questão não exista) " %i))
gabarito = []
while a != 0:
resp = []
while (a!=1) and (a!=2) and (a!=3):
print("Trabalhamos com 3 tipos de questões:")
print("1 - Questões tipo C com uma única alternativa")
print("2 - Questões tipo C com múltiplas alternativas")
print("3 - Questões tipo A\n")
a = int(raw_input("Forneça o tipo da questão %d: (ou 0 (zero) caso a questão não exista) " % i))
if a == 1:
letra = raw_input("Forneça a letra referente a resposta correta: ")
letra = letra.lower()
resp.append(ord(letra)-ord('a'))
num_item = 1
elif a == 2:
letras = raw_input("Forneça todas as letras que devem ser marcadas separadas apenas por espaços: ")
letras = letras.lower()
op = letras.split()
for r in op:
resp.append(ord(r)-ord('a'))
num_item = int(raw_input("Forneça o número total de opções da questão:"))
elif a == 3:
letras = raw_input("Forneça as respostas do item (V ou F) na ordem em que eles aparecem, separados"
" apenas por espaço: ")
letras = letras.lower()
op = letras.split()
num_item = len(op) * 2
for j, r in enumerate(op):
if r == 'v':
resp.append(2*j)
elif r == 'f':
resp.append(2*j + 1)
val = float(raw_input("Forneça o valor total da questão: "))
gabarito.append(Gabarito(val, a, resp, num_item))
i += 1
a = int(raw_input("Forneça o tipo da questão %d: (ou 0 (zero) caso a questão não exista) " % i))
return gabarito
def corrige_prova(gabaritos, lst_marcacoes):
prova = Prova(gabaritos, lst_marcacoes)
prova.avalia()
# print("A nota é: %f" % prova.nota)
return prova.nota
def write_from_zero(fileName, text):
file = open(fileName, 'w')
sizeText = file.write(text)
fechou = file.close()
return fechou and (sizeText == text.len()) # retorna true se houve sucesso na escrita
def read_all(fileName): # eh um problema se o arquivo for maior que a memoria da maquina
file = open(fileName, 'r') # modo pode ser ocultado, pois read eh default
text = file.read() # quando o parametro eh omitido tudo que tem no arquivo eh lido
file.close()
return text
def salva_gabarito_txt(full_path, gabarito):
text = ""
for r in gabarito:
text_questao = "%d %f %d" %(r.tipo, r.valor, r.num_item)
for y in r.resp:
text_questao = text_questao + " %d" %(y)
text += text_questao + "\n"
write_from_zero(full_path, text)
def le_gabarito_txt(full_path):
text = read_all(full_path)
gabarito = []
text_line = text.splitlines()
for r in text_line:
elementos = r.split()
tipo = int(elementos[0])
val = float(elementos[1])
num_item = int(elementos[2])
resp = []
for i in xrange(3, len(elementos)):
resp.append(int(elementos[i]))
gabarito.append(Gabarito(val, tipo, resp, num_item))
return gabarito
# if __name__ == '__main__':
# main()
<file_sep># This Python file uses the following encoding: utf-8
from func_correcao import *
import cv2
from matplotlib import pyplot as plt
import numpy as np
from question_recognition import *
if __name__ == '__main__':
# img_prova = cv2.imread('../img/scan/SemGrampoEMarcacao2.png', cv2.IMREAD_GRAYSCALE)
img_prova = cv2.imread('../img/scan/Folhas gabarito-4.png', cv2.IMREAD_GRAYSCALE)
# img_prova = cv2.imread('../img/scan/mult5.jpg', cv2.IMREAD_GRAYSCALE)
img_prova_bin = cv2.threshold(img_prova, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cv2.imwrite('../res/prova_bin.png', img_prova_bin)
questions = split_questions(img_prova_bin, img_prova, line_width=0.95)
for i, q in enumerate(questions):
cv2.imwrite('../res/res%d.png' % (i+1), q.marked_image())
q.show()
gabarito = le_gabarito_txt('../gabarito1.txt')
answers = [q.evaluate() for q in questions]
nota = corrige_prova(gabarito, answers)
print('Nota: %f de 50' % nota)
| 0efe966f1d5927bb7e806c8c46c7dccd34de8680 | [
"Python"
] | 4 | Python | ramkury/CorretorDeProvas | a8d184d6af213345c960564af50f9c0653e8fc4c | 176e51cd5f03bc6f625f9cad0372521c4b97e0e6 |
refs/heads/master | <repo_name>evolvedmicrobe/FreqSeq<file_sep>/FREQSeq/XML_Parser.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.IO;
using System.Xml;
namespace FREQSeq
{
/// <summary>
/// Creates an Allele Finder class and parses the XML file for the
/// </summary>
public class XML_Parser
{
//Names of the various nodes
const string BASE_NODE_NAME = "AFSeq";
const string OPTIONS_NODE_NAME = "Options";
const string BARCODE_NODE_NAME = "Barcode";
const string BARCODES_NODE_NAME = "Barcodes";
const string VARIANTS_NODE_NAME = "Variants";
const string VARIANT_NODE_NAME = "Variant";
const string TYPE_NODE_NAME = "Type";
/// <summary>
/// Parses an XML file and returns an Allele Finder class
/// with the settings made appropriately
/// </summary>
/// <returns>An allele finder class which can be used to parse a series of FASTQ files </returns>
public static AlleleFinder CreateAlleleFinderFromXML(string Filename)
{
if (!File.Exists(Filename))
throw new IOException("File: " + Filename + " does not appear to exist and cannot be found.");
XmlDocument XmlDoc = new XmlDocument();
XmlTextReader XReader = new XmlTextReader(Filename);
XmlDoc.Load(XReader);
//first node is xml, second is the protocol, this is assumed and should be the case
XmlNode baseNode = XmlDoc;
ValidateXMLHasEssentialElements(baseNode);
//Get the barcode groups
XmlNode barcodeXML = baseNode.SelectSingleNode("//" + BARCODES_NODE_NAME);
BarCodeCollection BCC=CreateBarCodeCollectionFromBarcodeXMLNode(barcodeXML);
XmlNode variantXML=baseNode.SelectSingleNode("//"+ VARIANTS_NODE_NAME);
LocusCollection AC=CreateAlleleCollectionFromVariantsXMLNode(variantXML);
AlleleFinder AF = new AlleleFinder(BCC,AC);
AF.SetDefaultOptions();
XmlNode options = baseNode.SelectSingleNode("//" + OPTIONS_NODE_NAME);
if (options != null)
{
SetAlleleFinderValuesFromXML(options, AF);
}
return AF;
}
private static void ValidateXMLHasEssentialElements(XmlNode xmlNode)
{
XmlNode baseNode = xmlNode.SelectSingleNode("//" + BASE_NODE_NAME);
if (baseNode.Name != BASE_NODE_NAME)
{
throw new IOException("Base node in the XML file is not named "+BASE_NODE_NAME+". Remember it is case-sensitive.");
}
XmlNodeList variants=baseNode.SelectNodes("//"+VARIANTS_NODE_NAME);
XmlNodeList barcodes=baseNode.SelectNodes("//"+BARCODES_NODE_NAME);
if(variants.Count!=1 || barcodes.Count!=1)
{
throw new IOException("Not enough or two many "+BARCODES_NODE_NAME+" or "+VARIANTS_NODE_NAME+" nodes. Should be only one of each. Remember their names are case-sensitive.");
}
}
private static BarCodeCollection CreateBarCodeCollectionFromBarcodeXMLNode(XmlNode barCodeNode)
{
BarCodeCollection BCC = new BarCodeCollection();
foreach (XmlNode childNode in barCodeNode.ChildNodes)
{
if (childNode.NodeType == XmlNodeType.Element && childNode.Name == BARCODE_NODE_NAME)
{
string bcStr=childNode.InnerText.Trim();
BarCodeGroup bcg = new BarCodeGroup(bcStr);
BCC.AddBarCodeGroup(bcg);
}
}
if (BCC.AllBarCodes.Count == 0)
throw new IOException("No Barcodes were created from this XML file.");
return BCC;
}
private static LocusCollection CreateAlleleCollectionFromVariantsXMLNode(XmlNode variantsXML)
{
LocusCollection AC = new LocusCollection();
foreach (XmlNode childNode in variantsXML.ChildNodes)
{
if (childNode.NodeType == XmlNodeType.Element && childNode.Name == VARIANT_NODE_NAME)
{
XmlNodeList types = childNode.SelectNodes(TYPE_NODE_NAME);
if (types.Count < 2)
{
throw new IOException("Need more than two types defined in XML for all variants");
}
LocusCollection.Locus curAllele = new LocusCollection.Locus();
List<string> alleleSequences = new List<string>();
foreach (XmlNode node in types)
{
string curType = node.InnerText.Trim();
alleleSequences.Add(curType);
//curAllele.AddType(curType);
}
curAllele.AddTypes(alleleSequences);
AC.AddLocus(curAllele);
}
}
if (AC.AllSequences.Count == 0)
throw new IOException("No Alleles were created from this XML file. Check the formatting.");
return AC;
}
private static void SetAlleleFinderValuesFromXML(XmlNode optionsXML,AlleleFinder AF)
{
try
{
Type thisType = AF.GetType();
foreach (XmlNode childNode in optionsXML.ChildNodes)
{
if (childNode.NodeType == XmlNodeType.Element)
{
string propertyName = childNode.Name;
//get the variable type info
XmlNode typeNode = childNode.Attributes.RemoveNamedItem("Type");
if (typeNode == null)
{
throw new Exception("Option Type not set in xml, please declare the variable type for all "
+ " options including " + propertyName.ToString());
}
Type VariableType = System.Type.GetType(typeNode.Value);
var Value = Convert.ChangeType(childNode.InnerText, VariableType);
//now get the property and change it
var prop = thisType.GetProperty(propertyName);
if (prop == null)
{
throw new Exception("No option called " + propertyName
+ "\n so the xml file needs to be fixed");
}
prop.SetValue(AF, Value, null);
}
}
}
catch (Exception thrown)
{
IOException newExcept = new IOException("Could not parse the options XML node" + thrown.Message, thrown);
throw newExcept;
}
}
}
}
<file_sep>/FreqSeqWPF/FinishPage.xaml.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Shapes;
namespace FreqSeqWPF
{
public partial class FinishPage
{
public FinishPage(List<string> Data)
{
this.InitializeComponent();
lstResults.ItemsSource = Data;
// Insert code required on object creation below this point.
}
private void Button_Click(object sender, RoutedEventArgs e)
{
Application.Current.Shutdown();
}
}
}<file_sep>/freqout/Program.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using NDesk.Options;
using FREQSeq;
using System.IO;
using System.Globalization;
using System.Diagnostics;
namespace freqout
{
sealed class Program
{
private const double KB = 1024;
private const double MB = KB * KB;
private const double GB = MB * KB;
static List<string> FileNames = new List<string> ();
static bool SetVerboseAfterLoad = false;
/// <summary>
/// Used to manually override the output file name. If not String.Empty, we change the name.
/// </summary>
static string OutputNameAfterLoad = String.Empty;
static string XMLFilename;
static void Main (string[] args)
{
Stopwatch sw = new Stopwatch ();
sw.Start ();
ParseCommandLine (args);
VerifySettings ();
RunAnalysis ();
Process p = Process.GetCurrentProcess ();
Console.WriteLine ("Peak Memory used " + FormatMemorySize (p.PeakWorkingSet64));
Console.WriteLine ("Total CPU time taken: {0}", p.TotalProcessorTime);
sw.Stop ();
// Get the elapsed time as a TimeSpan value.
TimeSpan ts = sw.Elapsed;
// Format and display the TimeSpan value.
string elapsedTime = String.Format ("{0:00}:{1:00}:{2:00}.{3:00}",
ts.Hours, ts.Minutes, ts.Seconds,
ts.Milliseconds / 10);
Console.WriteLine ("Clock Run Time: " + elapsedTime);
}
static void VerifySettings ()
{
try {
if (XMLFilename == null || XMLFilename == "")
throw new IOException ("XML file not set, be sure to use -xml= flag");
if (FileNames.Count == 0) {
throw new IOException ("No FASTQ files specified for analysis");
}
} catch (Exception thrown) {
Console.WriteLine ("Error: Could not verify settings");
Console.WriteLine ("Exception is: " + thrown.Message);
Console.WriteLine ("Stack Trace is: " + thrown.StackTrace);
System.Environment.Exit (-1);
}
}
private static string FormatMemorySize (long value)
{
string result = null;
if (value > GB) {
result = (Math.Round (value / GB, 2)).ToString (CultureInfo.InvariantCulture) + " GB";
} else if (value > MB) {
result = (Math.Round (value / MB, 2)).ToString (CultureInfo.InvariantCulture) + " MB";
} else if (value > KB) {
result = (Math.Round (value / KB, 2)).ToString (CultureInfo.InvariantCulture) + " KB";
} else {
result = value.ToString (CultureInfo.InvariantCulture) + " Bytes";
}
return result;
}
static void SetXMLFile (string fname)
{
try {
if (!File.Exists (fname)) {
throw new IOException ("Could not find XML file:" + fname);
} else if (!fname.EndsWith (".xml")) {
throw new IOException ("XML file: " + fname + "\nDoes not have a .xml extension");
} else {
Program.XMLFilename = fname;
}
} catch (Exception thrown) {
Console.WriteLine ("Error: Could not get XMLfile");
Console.WriteLine ("Exception is: " + thrown.Message);
System.Environment.Exit (-1);
}
}
static void LoadFastQForDirectory (string direc)
{
try {
DirectoryInfo DI = new DirectoryInfo (direc);
bool FileAdded = false;
foreach (FileInfo FI in DI.GetFiles()) {
if (FI.Extension == ".fastq" || FI.Extension == FREQSeq.Helper.ZippedFileExtension) {
FileNames.Add (FI.FullName);
FileAdded = true;
}
}
if (!FileAdded) {
throw new IOException ("Could not find any files with extension .fastq in the directory");
}
} catch (Exception thrown) {
Console.WriteLine ("Error: Could not get FASTQ files from directory: ");
Console.WriteLine (direc);
Console.WriteLine ("Exception is: " + thrown.Message);
System.Environment.Exit (-1);
}
}
static void AddFileToList (string fname)
{
try {
if (!File.Exists (fname)) {
throw new IOException ("Could not find file:" + fname);
} else {
FileNames.Add (fname);
}
} catch (Exception thrown) {
Console.WriteLine ("Error: Could not get FASTQ files");
Console.WriteLine ("Exception is: " + thrown.Message);
System.Environment.Exit (-1);
}
}
static void ParseCommandLine (string[] args)
{
OptionSet OS = new OptionSet () {
{ "h|help","Show Help",v => ShowHelp () },
{ "d=","Search Directory For FASTQ Files",v => LoadFastQForDirectory (v) },
{ "v","Show verbose output",v => Program.SetVerboseAfterLoad = true },
{ "xml=","Set XML File",v => SetXMLFile (v) },
{ "o=", "Set Output File Prefix", v => OutputNameAfterLoad = v },
{ "<>","Fastq file to analyze",v => AddFileToList (v) }
};
OS.Parse (args);
OS.WriteOptionDescriptions (new StreamWriter (Console.OpenStandardOutput ()));
}
static void ShowHelp ()
{
List<string> Help = new List<string> () {"", "freqout - Freq-Seq Console Application",
"Program must specify an XML file and at least one FASTQ file (or directory)",
"-xml\tThe XML file with the analysis settings",
"",
"Additional Options:",
"-d\tDirectory to find FASTQ files in (files must have .fastq extension)",
"-v\tVerbose (overrides XML)",
"-o\tSet Output File Name Prefix (overrides XML)",
"-help\tShow Help",
"\n",
"Example: PC",
"freqout.exe -xml=Simple.xml C:\\SeqData\\MyFile.fastq",
"\n",
"Example: Apple/Linux",
"mono freqout.exe -xml=Simple.xml C:\\SeqData\\MyFile.fastq",
"",
"Note that Apple/Linux use requires installation of Mono: http://www.mono-project.com/Main_Page",
"",
"More info: http://www.evolvedmicrobe.com/FreqSeq/index.html",
""
};
foreach (string str in Help) {
Console.WriteLine (str);
}
System.Environment.Exit (0);
}
static void RunAnalysis ()
{
try {
DateTime start = DateTime.Now;
Console.WriteLine ("FreqOut Analysis of " + FileNames.Count.ToString () + " files started.\n");
AlleleFinder AF = XML_Parser.CreateAlleleFinderFromXML (XMLFilename);
if (SetVerboseAfterLoad) {
if (AF.Verbose == false)
Console.WriteLine ("Overriding XML and setting verbose option to true,");
AF.Verbose = true;
}
if (AF.Verbose) {
AF.LoggerEvent += new LogEventHandler (AF_LoggerEvent);
}
if (OutputNameAfterLoad != String.Empty) {
AF.OutputFileNamePrefix = OutputNameAfterLoad;
}
AF.SetFileNames (FileNames);
AF.ParseFiles ();
AF.MakeReport ();
double totMinutes = DateTime.Now.Subtract (start).TotalMinutes;
Console.WriteLine ("Finished successfully");
Console.WriteLine ("Analysis took: " + totMinutes.ToString ("F") + " minutes");
} catch (Exception thrown) {
Console.WriteLine ("Error: Could not run analysis");
Console.WriteLine ("Exception is: " + thrown.Message);
Console.WriteLine ("Stack Trace is: " + thrown.StackTrace);
if (thrown is AggregateException)
{
var age=thrown as AggregateException;
foreach (var ex in age.InnerExceptions)
{
Console.WriteLine(ex.Message);
}
}
System.Environment.Exit (-1);
}
}
static void AF_LoggerEvent (object sender, string Message)
{
Console.WriteLine (Message);
}
}
}
<file_sep>/FreqSeqWPF/HostShell.xaml.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Shapes;
using System.Windows.Navigation;
using FREQSeq;
namespace FreqSeqWPF
{
/// <summary>
/// Interaction logic for HostShell.xaml
/// </summary>
public partial class HostShell : NavigationWindow
{
public HostShell()
{
this.InitializeComponent();
// App.AlleleSearcher = XML_Parser.CreateAlleleFinderFromXML("DefaultSettings.xml");
// Insert code required on object creation below this point.
}
}
}<file_sep>/FREQSeq/BarCodeAndAlleleGroups.cs
//#define DEBUG
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.IO;
namespace FREQSeq
{
/// <summary>
/// A collection of barcodes used in the analysis.
///
/// Also contains methods for assigning strings to groups and for writing the
/// summary report.
/// </summary>
public sealed class BarCodeCollection
{
    public readonly SimpleSubstitutionMatrix BarCodeSubstitutionMatrix;
    public List<BarCodeGroup> BarCodes = new List<BarCodeGroup>();
    // Set by FinishAndFreeze; no barcodes may be added afterwards.
    private bool Frozen;
    public long UnAssignedReadCount;
    public long TooShortCount;
    // Times each batch of reads between progress reports.
    private System.Diagnostics.Stopwatch sw = new System.Diagnostics.Stopwatch();
    public long TotalReads;
    public long LastReportValue = 1;
    public long M13_Excluded;
    // Guards the running totals updated from multiple worker threads.
    private object ReadCountLock = new object();
    public List<string> AllBarCodes = new List<string>();
    public LocusCollection AC;
    public event LogEventHandler LogEvent;

    /// <summary>
    /// When true, reads are assigned to a barcode only on an exact sequence match.
    /// Set by FinishAndFreeze based on the pairwise hamming distances between barcodes.
    /// </summary>
    public bool RequireExactMatchForAssignment = false;

    // Raises LogEvent if anyone is listening.
    private void FireLogEvent(string msg)
    {
        if (LogEvent != null) {
            LogEvent(this, msg);
        }
    }

    public BarCodeCollection()
    {
        BarCodeSubstitutionMatrix = new SimpleSubstitutionMatrix(2, -1, -4, -2);
        sw.Start();
    }

    /// <summary>
    /// Adds a barcode group; rejects duplicates and additions after freezing.
    /// </summary>
    public void AddBarCodeGroup(BarCodeGroup BCG)
    {
        if (Frozen)
            throw new Exception("Tried to add Barcode to collection after it was frozen.");
        if (this.AllBarCodes.Contains(BCG.Identifier))
            throw new IOException("Tried to add the same barcode twice: " + BCG.Identifier);
        this.BarCodes.Add(BCG);
        this.AllBarCodes.Add(BCG.Identifier);
    }

    /// <summary>
    /// Builds a fresh barcode -> (allele -> empty results) dictionary that a worker
    /// thread can fill independently and merge back via AddIdentifyingDictionary.
    /// </summary>
    public Dictionary<string, Dictionary<string, AssignmentResults>> ReturnIdentifyingDictionary()
    {
        var toRetu = new Dictionary<string, Dictionary<string, AssignmentResults>>();
        foreach (BarCodeGroup BCG in BarCodes) {
            toRetu[BCG.Identifier] = BCG.CreateAlleleCountDictionary();
        }
        return toRetu;
    }

    private BarCodeAssigner mainBCA;

    /// <summary>
    /// Returns the shared barcode assigner (spawning a separate assigner per caller
    /// measured slightly slower, so one instance is reused).
    /// </summary>
    public BarCodeAssigner SpawnBarCodeAssigner()
    {
        if (mainBCA == null) {
            mainBCA = new BarCodeAssigner(this.AllBarCodes.ToArray(), !this.RequireExactMatchForAssignment);
        }
        return mainBCA;
    }

    /// <summary>
    /// Merges one worker's per-barcode results and read tallies into the shared totals.
    /// The counters are updated under ReadCountLock; per-group merging locks inside
    /// BarCodeGroup.AddAssignmentResults.
    /// </summary>
    public void AddIdentifyingDictionary(Dictionary<string, Dictionary<string, AssignmentResults>> toAdd, int unassignedcount, int TotalProcessedCount, int noM13, int tooShortCount)
    {
        lock (ReadCountLock) {
            UnAssignedReadCount += unassignedcount;
            TotalReads += TotalProcessedCount;
            M13_Excluded += noM13;
            TooShortCount += tooShortCount;
            // Report progress roughly every 500,000 reads.
            if (TotalReads / 500000 > LastReportValue) {
                sw.Stop();
                LastReportValue++;
                Console.WriteLine("Processed: " + TotalReads.ToString() + " reads.");
                Console.WriteLine("Time elapsed for batch: {0}", sw.Elapsed);
                sw.Reset();
                sw.Start();
            }
        }
        foreach (KeyValuePair<string, Dictionary<string, AssignmentResults>> set in toAdd) {
            var BCG = BarCodes.First(x => x.Identifier == set.Key);
            BCG.AddAssignmentResults(set.Value);
        }
    }

    // Pairs a report column header with the function that extracts its value.
    private sealed class OutputColumn
    {
        public readonly string Name;
        public readonly Func<BarCodeGroup, string> outFunc;
        public OutputColumn(string Name, Func<BarCodeGroup, string> OutputFunction)
        {
            this.Name = Name;
            this.outFunc = OutputFunction;
        }
    }

    /// <summary>
    /// Writes the CSV summary report: overall tallies, one row of columns per barcode,
    /// then the per-locus allele frequencies (via LocusCollection.WriteReport).
    /// </summary>
    public void MakeReport(string outputFileName)
    {
        FireLogEvent("Writing output file: " + outputFileName);
        // BUGFIX: the writer was previously closed manually and leaked if any write
        // threw; "using" guarantees the file handle is released.
        using (StreamWriter SW = new StreamWriter(outputFileName)) {
            SW.WriteLine("Total Reads," + TotalReads.ToString());
            SW.WriteLine("Reads Too Short For Consideration," + TooShortCount.ToString());
            SW.WriteLine("Total Reads Not Assigned to Barcodes," + UnAssignedReadCount.ToString());
            SW.WriteLine("Percentage Not Assigned to Barcodes," + (UnAssignedReadCount / (double)TotalReads).ToString());
            SW.WriteLine("Reads Excluded for no M13," + M13_Excluded.ToString());
            var unAssignedWithin = BarCodes.Select(x => x.TotalUnassignedReads).Sum();
            SW.WriteLine("Total Reads Not Assigned within Barcodes," + unAssignedWithin.ToString());
            SW.WriteLine("Total Percentage Unassigned, " + ((unAssignedWithin + UnAssignedReadCount) / (double)TotalReads).ToString());
            SW.WriteLine();
            // BUGFIX: this section header used to be appended to a local string that
            // was never written to the file.
            SW.WriteLine("Read Assignment Counts By Barcode");
            // Create output functions.
            List<OutputColumn> Cols = new List<OutputColumn>() {
                new OutputColumn("Barcode", x => x.Identifier),
                new OutputColumn("Total Reads Assigned to Barcode", x => x.TotalReadsAssigned.ToString()),
                new OutputColumn("Total Reads Unassigned to Alleles in Barcode", x => x.AlleleCounts[AlleleFinder.unknownID].totalInexactAssignments.ToString()),
                new OutputColumn("Percentage UnAssigned", x => (x.TotalUnassignedReads / (double)x.TotalReadsAssigned).ToString()),
                new OutputColumn("Avg QC Score", x => x.AvgAllAssignedReadQuality.ToString()),
                new OutputColumn("Avg Exact QC Score", x => x.AvgExactAssignedReadQuality.ToString()),
                new OutputColumn("Avg Inexact QC Score", x => x.AvgInExactAssignedReadQuality.ToString()),
                new OutputColumn("Avg Unassigned QC Score", x => x.AvgUnassignedReadQCScore.ToString()),
                new OutputColumn("Exactly Assigned Reads", x => x.CountofReadsExactlyAssigned.ToString()),
                new OutputColumn("Inexactly Assigned Reads", x => (x.CountofReadsNotExactlyAssigned - x.TotalUnassignedReads).ToString())
            };
            foreach (OutputColumn c in Cols) {
                SW.Write(c.Name + ",");
            }
            SW.Write("\n");
            foreach (BarCodeGroup BCG in BarCodes) {
                foreach (OutputColumn c in Cols) {
                    SW.Write(c.outFunc(BCG) + ",");
                }
                SW.Write("\n");
            }
            SW.Write("\n");
            // Now for each of the allele groups.
            AC.WriteReport(SW, this);
        }
    }

    /// <summary>
    /// This method is called once all barcodes are loaded; it determines if the barcodes
    /// are far enough apart to allow inexact matches.
    /// </summary>
    public void FinishAndFreeze()
    {
        Frozen = true;
        if (AC == null) {
            throw new Exception("Tried to freeze barcode collection before specifying allele collection");
        }
        // BUGFIX: with fewer than two barcodes there are no pairs to compare and the
        // old code crashed on Min() over an empty array. A lone barcode can never be
        // confused with another, so inexact matching is safe.
        if (AllBarCodes.Count < 2) {
            RequireExactMatchForAssignment = false;
            FireLogEvent("Fewer than two barcodes present. Assigning barcodes if within a hamming distance of 1.");
            return;
        }
        // Compute the hamming distance for all N choose 2 barcode pairs; if the minimum
        // is at least 2, a read within hamming distance 1 of a barcode is unambiguous.
        int[] Difs = new int[(AllBarCodes.Count * (AllBarCodes.Count - 1)) / 2];
        int curValue = 0;
        for (int i = 0; i < AllBarCodes.Count; i++) {
            for (int j = (i + 1); j < AllBarCodes.Count; j++) {
                Difs[curValue] = CalculateHammingDistance(AllBarCodes[i], AllBarCodes[j]);
                curValue++;
            }
        }
        // BUGFIX (naming): this value is the minimum pairwise distance; it was
        // previously stored in a variable misleadingly named "maxDif".
        int minDif = Difs.Min();
        string msg = "Minimum hamming distance between barcodes is " + minDif.ToString();
        if (minDif >= 2) {
            RequireExactMatchForAssignment = false;
            msg += ". Assigning barcodes if within a hamming distance of 1.";
        } else {
            RequireExactMatchForAssignment = true;
            msg += ". Not accepting any inexact barcode matches.";
        }
        FireLogEvent(msg);
    }

    /// <summary>
    /// Number of positions at which two equal-length sequences differ.
    /// </summary>
    public static int CalculateHammingDistance(string seq1, string seq2)
    {
        int difs = 0;
        for (int i = 0; i < seq1.Length; i++) {
            if (seq1[i] != seq2[i])
                difs += 1;
        }
        return difs;
    }
}
/// <summary>
/// The outcome of assigning a read to a barcode or allele group.
/// </summary>
public struct Assignment
{
    /// <summary>
    /// Was this based on an exact match?
    /// </summary>
    public bool ExactAssignment;
    /// <summary>
    /// The group it was assigned to
    /// </summary>
    public string Group;

    /// <summary>
    /// Records that a read was placed in <paramref name="group"/>, noting whether
    /// the placement came from an exact sequence match.
    /// </summary>
    public Assignment(string group, bool exactMatch)
    {
        Group = group;
        ExactAssignment = exactMatch;
    }
}
/// <summary>
/// Tallies of exact and inexact read assignments, plus the summed per-read
/// average quality scores for each category.
/// </summary>
public sealed class AssignmentResults
{
    public long totalExactAssignments;
    public long totalInexactAssignments;
    /// <summary>
    /// The sum of all the avg quality scores for each read
    /// </summary>
    public double totalExactAvgQualityScore;
    public double totalInexactAvgQualityScore;

    /// <summary>
    /// Combined count of exact and inexact assignments.
    /// </summary>
    public long totalAssignments
    {
        get { return totalExactAssignments + totalInexactAssignments; }
    }
}
/// <summary>
/// A per-thread helper that assigns the leading bases of each read to one of the
/// known barcodes: exact hash lookup first, then (when permitted) the first
/// barcode within a hamming distance of 1.
/// </summary>
public sealed class BarCodeAssigner
{
    public readonly string[] BarCodes;
    public readonly bool UseInExactMatches;
    // Hash-set mirror of BarCodes for O(1) exact-match lookups.
    public readonly HashSet<string> hBarCodes;

    public BarCodeAssigner(string[] groups, bool AttemptInexactMatches)
    {
        hBarCodes = new HashSet<string>(groups);
        BarCodes = groups;
        UseInExactMatches = AttemptInexactMatches;
    }

    /// <summary>
    /// Assigns the read's first BARCODE_LENGTH bases to a barcode, or to
    /// AlleleFinder.unknownID when no acceptable match exists.
    /// </summary>
    public Assignment AssignToGroup(FastQRead read)
    {
        string candidate = read.Sequence.Substring(0, AlleleFinder.BARCODE_LENGTH);
        if (hBarCodes.Contains(candidate)) {
            return new Assignment(candidate, true);
        }
        if (UseInExactMatches) {
            // Accept the first barcode that differs by exactly one base.
            foreach (string known in BarCodes) {
                if (BarCodeCollection.CalculateHammingDistance(known, candidate) == 1) {
                    return new Assignment(known, false);
                }
            }
        }
        return new Assignment(AlleleFinder.unknownID, false);
    }
}
/// <summary>
/// A class that holds data from all samples with a
/// particular barcode used in the analysis.
/// </summary>
public sealed class BarCodeGroup
{
    public readonly string Identifier;
    // Maps each allele sequence (plus AlleleFinder.unknownID) to its assignment tallies.
    public Dictionary<string, AssignmentResults> AlleleCounts;
    public LocusCollection alleleCollection;

    /// <summary>
    /// Create a new barcode group.
    /// </summary>
    /// <param name="Barcode">The string in the Barcode</param>
    /// <param name="Name">An alias for the group name - NOT YET IMPLEMENTED</param>
    public BarCodeGroup(string Barcode, string Name = "None")
    {
        if (Barcode.Length != AlleleFinder.BARCODE_LENGTH) {
            throw new IOException("Barcode " + Barcode + " is not the correct length, check the XML file.");
        }
        MiscMethods.ValidateACGT(Barcode);
        this.Identifier = Barcode;
    }

    /// <summary>
    /// Attaches the locus collection and initializes the per-allele counters.
    /// </summary>
    public void AssignParentCollection(LocusCollection ac)
    {
        this.alleleCollection = ac;
        this.AlleleCounts = CreateAlleleCountDictionary();
    }

    // Lazily computed cache; -1 means "not yet computed".
    private long pTotalReadsAssigned = -1;
    /// <summary>
    /// Total reads (exact + inexact) assigned within this barcode. Cached on first use.
    /// </summary>
    public long TotalReadsAssigned
    {
        get {
            if (pTotalReadsAssigned == -1) {
                pTotalReadsAssigned = 0;
                foreach (var v in AlleleCounts.Values) {
                    pTotalReadsAssigned += v.totalExactAssignments + v.totalInexactAssignments;
                }
            }
            return pTotalReadsAssigned;
        }
    }

    /// <summary>
    /// Reads assigned to this barcode that could not be assigned to any allele.
    /// </summary>
    public long TotalUnassignedReads
    {
        get { return this.AlleleCounts[AlleleFinder.unknownID].totalInexactAssignments; }
    }

    private long pExactCount = -1;
    /// <summary>
    /// Reads assigned by exact sequence match. Cached on first use.
    /// </summary>
    public long CountofReadsExactlyAssigned
    {
        get {
            if (pExactCount == -1) {
                pExactCount = 0;
                foreach (var v in AlleleCounts.Values) {
                    pExactCount += v.totalExactAssignments;
                }
            }
            return pExactCount;
        }
    }

    /// <summary>
    /// Mean per-read average quality over all assigned reads.
    /// </summary>
    public double AvgAllAssignedReadQuality
    {
        get {
            long Count = 0;
            double curSum = 0;
            foreach (var v in AlleleCounts.Values) {
                curSum += v.totalExactAvgQualityScore + v.totalInexactAvgQualityScore;
                Count += v.totalExactAssignments + v.totalInexactAssignments;
            }
            // BUGFIX: an unreachable "return Count;" statement followed this return
            // in the original; it has been removed.
            return curSum / (double)Count;
        }
    }

    /// <summary>
    /// Mean per-read average quality of reads left in the unknown bin.
    /// </summary>
    public double AvgUnassignedReadQCScore
    {
        get {
            var res = AlleleCounts[AlleleFinder.unknownID];
            return res.totalInexactAvgQualityScore / (double)res.totalInexactAssignments;
        }
    }

    /// <summary>
    /// Mean per-read average quality of exactly assigned reads.
    /// </summary>
    public double AvgExactAssignedReadQuality
    {
        get {
            long Count = 0;
            double curSum = 0;
            foreach (var v in AlleleCounts.Values) {
                curSum += v.totalExactAvgQualityScore;
                Count += v.totalExactAssignments;
            }
            return curSum / (double)Count;
        }
    }

    const double UNASSIGNED_VALUE = -1.0;
    private double pAvgInExactAssignedReadQuality = UNASSIGNED_VALUE;
    /// <summary>
    /// Mean per-read average quality of inexactly assigned reads, excluding the
    /// unknown bin. Cached on first use.
    /// </summary>
    public double AvgInExactAssignedReadQuality
    {
        get {
            if (pAvgInExactAssignedReadQuality == UNASSIGNED_VALUE) {
                long Count = 0;
                double curSum = 0;
                foreach (KeyValuePair<string, AssignmentResults> v in AlleleCounts) {
                    if (v.Key == AlleleFinder.unknownID)
                        continue;
                    curSum += v.Value.totalInexactAvgQualityScore;
                    Count += v.Value.totalInexactAssignments;
                }
                pAvgInExactAssignedReadQuality = curSum / (double)Count;
            }
            return pAvgInExactAssignedReadQuality;
        }
    }

    /// <summary>
    /// Count of all inexact assignments, including the unknown bin.
    /// </summary>
    public long CountofReadsNotExactlyAssigned
    {
        get {
            long Count = 0;
            foreach (var v in AlleleCounts.Values) {
                Count += v.totalInexactAssignments;
            }
            return Count;
        }
    }

    /// <summary>
    /// Builds an empty results entry for every allele plus the unknown bin.
    /// </summary>
    public Dictionary<string, AssignmentResults> CreateAlleleCountDictionary()
    {
        List<string> outputs = alleleCollection.AllSequences.ToList();
        outputs.Add(AlleleFinder.unknownID);
        var toReturn = new Dictionary<string, AssignmentResults>(outputs.Count);
        foreach (string str in outputs) {
            toReturn[str] = new AssignmentResults();
        }
        return toReturn;
    }

    /// <summary>
    /// Merges a worker thread's results into this group's tallies; locks AlleleCounts
    /// because several workers can merge concurrently.
    /// </summary>
    public void AddAssignmentResults(Dictionary<string, AssignmentResults> toAdd)
    {
        lock (this.AlleleCounts) {
            foreach (KeyValuePair<string, AssignmentResults> bit in toAdd) {
                var v = AlleleCounts[bit.Key];
                v.totalInexactAvgQualityScore += bit.Value.totalInexactAvgQualityScore;
                v.totalInexactAssignments += bit.Value.totalInexactAssignments;
                v.totalExactAssignments += bit.Value.totalExactAssignments;
                v.totalExactAvgQualityScore += bit.Value.totalExactAvgQualityScore;
            }
        }
    }
}
/// <summary>
/// The set of loci (and their allele sequences) being typed, plus the alignment
/// settings and k-mer lookup structures shared by all worker threads.
/// </summary>
public sealed class LocusCollection
{
    /// <summary>
    /// Every allele sequence across all loci. Recomputed from Loci on each access.
    /// </summary>
    public List<string> AllSequences
    {
        get {
            return Loci.SelectMany(x => x.Alleles).ToList();
        }
    }

    public List<Locus> Loci = new List<Locus>();
    // Maps each k-mer to the allele types containing it; built by FinishAndFreeze.
    public Dictionary<string, List<TypeToAlign>> HashedKMERS;
    public SimpleSubstitutionMatrix SubMatForAlignment;
    private int pMaxAllowableSize;
    /// <summary>
    /// Longest allele plus 10 bp; reads longer than this are trimmed before alignment.
    /// </summary>
    public int MaxReadSize
    {
        get { return pMaxAllowableSize; }
    }
    public float MinAverageQCScoreForInexactMatches;
    public float MaxNPercForInexactMatches;
    public bool AttemptInExactMatches;
    // Set by FinishAndFreeze; no loci may be added afterwards.
    private bool Frozen;

    /// <summary>
    /// The class that holds the locus and the alleles present at that locus.
    /// </summary>
    public class Locus
    {
        /// <summary>
        /// This will be used to determine if this is a SNP variant or not.
        /// SNP variants have only two members, and are distinguished by a simple difference.
        /// </summary>
        public bool IsSNPVariant = false;
        /// <summary>
        /// The types of genetic variants present (usually only 2).
        /// </summary>
        public List<string> Alleles = new List<string>();

        public Locus()
        {
        }

        /// <summary>
        /// Adds allele sequences, enforcing equal lengths and uniqueness, and flags the
        /// locus as a SNP variant when exactly two alleles differ by a single base.
        /// </summary>
        public void AddTypes(List<string> types)
        {
            // Verify equal length requirement.
            for (int i = 0; i < types.Count; i++) {
                for (int j = (i + 1); j < types.Count; j++) {
                    if (types[j].Length != types[i].Length) {
                        throw new IOException("Two types have different lengths, which can skew assignment results. Please make all variants at a " +
                            "locus have the same input length. Problems with: \t" + types[j] + "\n" + types[i]);
                    }
                    if (types[j] == types[i]) {
                        throw new IOException("Two variants are the same, please fix this:\n " + types[j] + "\n" + types[i]);
                    }
                }
            }
            this.Alleles.AddRange(types);
            if (Alleles.Count == 2 && AlleleTypeAssigner.GetHammingDistance(Alleles[0], Alleles[1]) == 1) {
                IsSNPVariant = true;
            }
        }
    }

    public LocusCollection()
    {
    }

    /// <summary>
    /// Adds a locus after checking that none of its alleles collides (as a prefix)
    /// with an allele already present at another locus.
    /// </summary>
    public void AddLocus(Locus toAdd)
    {
        if (Frozen)
            throw new Exception("Tried to add allele after collection was frozen");
        // Snapshot once: AllSequences rebuilds its list on every access, so calling it
        // inside the loop would be quadratic.
        var existing = this.AllSequences;
        foreach (string type in toAdd.Alleles) {
            foreach (string alr in existing) {
                if (alr.StartsWith(type)) {
                    throw new IOException("Type: " + type + " appears in different variants!");
                }
            }
        }
        this.Loci.Add(toAdd);
        // BUGFIX: the old code also called this.AllSequences.AddRange(toAdd.Alleles),
        // which only mutated a temporary list returned by the computed property and
        // had no effect; AllSequences already derives from Loci.
    }

    /// <summary>
    /// Appends the per-locus allele-frequency tables for every barcode to the report.
    /// </summary>
    public void WriteReport(StreamWriter SW, BarCodeCollection BC)
    {
        int SetCount = 1;
        foreach (Locus A in Loci) {
            SW.WriteLine("Allele Group " + SetCount.ToString());
            int NumTypes = A.Alleles.Count;
            int i = 1;
            string header = "Barcode,";
            foreach (string t in A.Alleles) {
                SW.WriteLine("Type " + i.ToString() + ": " + t);
                header += "Type " + i.ToString() + "%,";
                i++;
            }
            header += "Total Allele Counts";
            SW.WriteLine(header);
            foreach (BarCodeGroup BCG in BC.BarCodes) {
                ulong[] counts = new ulong[NumTypes];
                int j = 0;
                foreach (var type in A.Alleles) {
                    counts[j] = (ulong)BCG.AlleleCounts[type].totalAssignments;
                    j++;
                }
                SW.Write(BCG.Identifier + ",");
                // Sum as double to avoid the int-overflow risk of the old
                // counts.Sum(x => (int)x) on very large counts.
                double total = counts.Sum(x => (double)x);
                for (j = 0; j < NumTypes; j++) {
                    SW.Write((counts[j] / total).ToString() + ",");
                }
                SW.Write(((long)total).ToString() + "\n");
            }
            SW.WriteLine();
            SetCount++;
        }
    }

    /// <summary>
    /// Locks the collection and precomputes the alignment support structures:
    /// the substitution matrix, each type's minimum assignment score, and the
    /// k-mer lookup table used to shortlist alignment candidates.
    /// </summary>
    public void FinishAndFreeze(AlleleFinder parentAF)
    {
        Frozen = true;
        this.AttemptInExactMatches = parentAF.AssignImperfectlyMatchedReads;
        this.SubMatForAlignment = new SimpleSubstitutionMatrix(parentAF.MatchScore, parentAF.MisMatchPenalty, parentAF.GapStartPenalty, parentAF.GapExtendPenalty);
        this.MaxNPercForInexactMatches = parentAF.MaxPercentageNForInexactMatches;
        this.MinAverageQCScoreForInexactMatches = parentAF.MinAverageQualityForInexactMatches;
        HashedKMERS = new Dictionary<string, List<TypeToAlign>>();
        foreach (Locus lc in Loci) {
            foreach (string str in lc.Alleles) {
                // Minimum score required for assignment, hard-coded at 75% of a perfect match.
                float MinScoreRequired = SubMatForAlignment.MatchScore * str.Length * .75F;
                TypeToAlign t = new TypeToAlign(str, MinScoreRequired, lc);
                foreach (string mer in AlleleTypeAssigner.CreateKMERS(str)) {
                    List<TypeToAlign> hits;
                    if (HashedKMERS.TryGetValue(mer, out hits)) {
                        hits.Add(t);
                    } else {
                        HashedKMERS[mer] = new List<TypeToAlign>() { t };
                    }
                }
            }
        }
        // Reads are trimmed to the longest allele plus 10 bp.
        pMaxAllowableSize = AllSequences.Max(x => x.Length) + 10;
    }

    /// <summary>
    /// Returns the shared type assigner (separate per-thread spawning measured slower).
    /// </summary>
    public AlleleTypeAssigner SpawnTypeAssigner()
    {
        if (mainATA == null)
            mainATA = new AlleleTypeAssigner(this);
        return mainATA;
    }

    private AlleleTypeAssigner mainATA;
}
/// <summary>
/// An allele sequence paired with the minimum alignment score a read must reach
/// to be assigned to it, plus the locus the allele belongs to.
/// </summary>
public sealed class TypeToAlign
{
    public readonly float MinScoreForAssignment;
    public readonly string TypeSeq;
    public readonly LocusCollection.Locus Locus;

    public TypeToAlign(string seq, float minScoreRequired, LocusCollection.Locus parentLocus)
    {
        TypeSeq = seq;
        MinScoreForAssignment = minScoreRequired;
        Locus = parentLocus;
    }
}
/// <summary>
/// Assigns individual reads to allele types: exact prefix match first, then an
/// optional inexact path that shortlists candidates by shared k-mers before
/// deciding with alignment scores. One shared instance is handed to worker
/// threads by LocusCollection.SpawnTypeAssigner.
/// </summary>
public sealed class AlleleTypeAssigner
{
//Kmers for reduced word search
public const int KMER_SIZE = 9;
// An inexact top hit must beat the runner-up by at least this much
// (one SNP's worth of score; see the constructor).
public readonly float MinScoreDifferenceRequired;
/// <summary>
/// Maps K-mers to possible sequences, used to avoid trying to align to all possible
/// sequences
/// </summary>
public readonly Dictionary<string,List<TypeToAlign>> HashedKMERS;
private readonly float MinAvgQCScoreToAttemptInexactMatch;
private readonly bool AssignInexactMatches;
private readonly float MaxNPercentageToAttemptInExactMatch;
// Every allele sequence across all loci, used for the exact-match pass.
private readonly string[] AllTypes;
private readonly SimpleSubstitutionMatrix subMat;
/// <summary>
/// If the reads are much larger than the alleles, we trim them down.
/// </summary>
private readonly int MaxAllowableSequenceLength;
/// <summary>
/// Copies the alignment settings and k-mer table from the parent collection so
/// each thread works against its own references.
/// </summary>
public AlleleTypeAssigner (LocusCollection parentLC)
{
this.MaxAllowableSequenceLength = parentLC.MaxReadSize;
this.AssignInexactMatches = parentLC.AttemptInExactMatches;
//not deep copying type to align, not sure what, if any, performace implications this has
this.HashedKMERS = parentLC.HashedKMERS.ToDictionary (x => x.Key, x => x.Value);
this.MaxNPercentageToAttemptInExactMatch = parentLC.MaxNPercForInexactMatches;
this.MinAvgQCScoreToAttemptInexactMatch = parentLC.MinAverageQCScoreForInexactMatches;
this.subMat = parentLC.SubMatForAlignment.Clone ();
this.AllTypes = parentLC.AllSequences.ToArray ();
//make minimum dif equal to a SNP
this.MinScoreDifferenceRequired = this.subMat.MatchScore - this.subMat.MisMatchScore;
}
#if DEBUG
// Debug-only tally of observed sequences, used to inspect common reads during development.
public TEMPCountDict tmp = new TEMPCountDict ();
public class TEMPCountDict
{
int tots = 0;
Dictionary<string, int> Counts = new Dictionary<string, int> ();
// Records one occurrence of seq; the tots > 100000 branch exists only as a
// breakpoint anchor for debugging.
public void addSeq (string seq)
{
tots++;
int cur = 0;
if (Counts.ContainsKey (seq)) {
cur = Counts [seq];
}
Counts [seq] = cur + 1;
if (tots > 100000) {
int jjj = 1;
jjj++;
}
}
// Dumps the observed sequence counts to Tmp.csv.
public void report ()
{
StreamWriter SQ = new StreamWriter ("Tmp.csv");
foreach (KeyValuePair<string, int> kvin in Counts) {
SQ.WriteLine (kvin.Key + "," + kvin.Value.ToString ());
}
SQ.Close ();
Console.WriteLine ("Outted");
}
}
#endif
/// <summary>
/// Assigns a read to an allele type. Pipeline: skip the leading
/// ALLELE_START_POS bases, trim to the maximum useful length, try an exact
/// prefix match against every known type, then (when enabled and the read
/// passes the quality / N-content gates) count shared k-mers to shortlist
/// candidates and decide by ungapped or Smith-Waterman alignment scores.
/// Returns AlleleFinder.unknownID when no candidate clears its minimum score.
/// </summary>
public Assignment AssignReadToType (FastQRead read)
{
//get sequence
string seq = read.Sequence.Substring (AlleleFinder.ALLELE_START_POS);
if (seq.Length > MaxAllowableSequenceLength) { seq = seq.Substring(0, MaxAllowableSequenceLength); }
//try for exact match
foreach (string s in AllTypes) {
if (seq.StartsWith (s)) {
return new Assignment (s, true);
}
}
//Are we going to assign inexact matches?
if (AssignInexactMatches) {
//Make sure quality is high enough to even bother with an inexact match
if (read.AvgQuality >= MinAvgQCScoreToAttemptInexactMatch && read.PercN <= MaxNPercentageToAttemptInExactMatch) {
//Get KMERS
string[] kmers = CreateKMERS (seq);
//Use kmers to get small set to align to
// Count, per candidate type, how many of the read's k-mers it shares.
Dictionary<TypeToAlign, int> countingDict = new Dictionary<TypeToAlign, int>();
foreach (string mer in kmers) {
List<TypeToAlign> att;
if (HashedKMERS.TryGetValue (mer, out att)) {
foreach (var at in att) {
if (countingDict.ContainsKey(at)) {
countingDict[at] += 1;
}
else {
countingDict[at] = 1;
}
}
}
}
kmers = null;
//Now decide between options based on counts.
//Compare based on scores and alignment.
var possibles=countingDict.ToList();
if (possibles.Count == 0)
{
return new Assignment(AlleleFinder.unknownID, false);
}
// Sort descending by shared k-mer count.
possibles.Sort((x, y) => -x.Value.CompareTo(y.Value));
//have to have at least 25% as many k-mer matches as top hit.
int topKmerMatchCountCutoff=(int)(possibles[0].Value*0.25);
var toAttempt = possibles.Where(z => (z.Value >= topKmerMatchCountCutoff)).ToList();
//TODO: Remove after experimental verification
if (possibles[0].Key != toAttempt[0].Key)
{
throw new InvalidOperationException("The best k-mer hit was not included as the top hit to attempt an inexact assignment. This is "
+ " a program bug, please report it to Nigel.");
}
possibles = null;
//If 1, see if the kmers indicate it is good enough, and if not, try an ungapped alignment
if (toAttempt.Count == 1)
{
var cur=toAttempt[0].Key;
{
// Cheap ungapped score first; fall back to Smith-Waterman only if needed.
if (
(GetUnGappedAlignmentScore(seq,cur.TypeSeq) >= cur.MinScoreForAssignment ) ||
(ScoreOnlySmithWatermanGotoh.GetSmithWatermanScore(cur.TypeSeq,seq,subMat) >= cur.MinScoreForAssignment))
{
return new Assignment(cur.TypeSeq, false);
}
else
{
return new Assignment(AlleleFinder.unknownID, false);
}
}
}
//see if it is a simple SNP, this means the locus (and kmer counts) are entirely the same except at that base
if (toAttempt.Count == 2 && toAttempt[0].Key.Locus == toAttempt[1].Key.Locus
&& toAttempt[0].Key.Locus.IsSNPVariant)
{
//let's just do a simple check to make sure we don't assign total garbage, min score has
//to be greater than 50% of max score
// NOTE(review): perfectScore is computed but never used, so the 50%-of-max
// check described above does not appear to be implemented; the comparison
// below uses MinScoreForAssignment instead. Confirm intent.
var best = toAttempt[0].Key;
var perfectScore = subMat.MatchScore * Math.Min(seq.Length, best.TypeSeq.Length);
var mustBeat = best.MinScoreForAssignment;
if (
(GetUnGappedAlignmentScore(best.TypeSeq, seq) >= mustBeat)
|| (ScoreOnlySmithWatermanGotoh.GetSmithWatermanScore(best.TypeSeq, seq, subMat) >= mustBeat))
{
return new Assignment(best.TypeSeq, false);
}
else
{
return new Assignment(AlleleFinder.unknownID, false);
}
}
else
{
//Otherwise time consuming pairwise global alignment
var Res = (from x in toAttempt
select new { type = x.Key, score = ScoreOnlySmithWatermanGotoh.GetSmithWatermanScore(x.Key.TypeSeq, seq, subMat) }).ToList();
if (Res.Count > 1)
{
// Sort descending by alignment score.
Res.Sort((x, y) => -x.score.CompareTo(y.score));
var top = Res[0];
float scoreDif = top.score - Res[1].score;
//check that it is much better than the last one
if (scoreDif >= MinScoreDifferenceRequired && top.score>top.type.MinScoreForAssignment)
{
return new Assignment(top.type.TypeSeq, false);
}
else{
return new Assignment(AlleleFinder.unknownID, false);
}
}
}
}
}
return new Assignment (AlleleFinder.unknownID, false);
}
/// <summary>
/// Position-by-position match/mismatch score over the shorter of the two
/// sequences; no gaps are considered.
/// </summary>
public float GetUnGappedAlignmentScore(string seq1, string seq2)
{
var size = Math.Min(seq1.Length, seq2.Length);
int dist = 0;
for (int i = 0; i < size; i++)
{
if (seq1[i] != seq2[i]) dist++;
}
return (size - dist) * subMat.MatchScore + dist * subMat.MisMatchScore;
}
/// <summary>
/// Gets the minimimum score based on the number of k-mers matching. Assuming all kmer hits are a in acontinguous line
/// </summary>
/// <param name="kmerBasedMatch"></param>
/// <returns></returns>
// NOTE(review): this private method is not called anywhere in this class —
// candidate for removal; confirm no external/debug use before deleting.
private float GetMinScore(KeyValuePair<TypeToAlign,int> kmerBasedMatch,int queryLength)
{
//Assume the lowest is all matches and the rest is a mismatch
//The most possible kmer hits
var alnLength=Math.Min(kmerBasedMatch.Key.TypeSeq.Length, queryLength);
int actualMers =Math.Max(kmerBasedMatch.Value, alnLength - KMER_SIZE + 1);
var minBPHit = actualMers + KMER_SIZE - 1;
var maxBPMissed = alnLength-minBPHit;
var minHitScore = minBPHit * subMat.MatchScore + (maxBPMissed) * subMat.MisMatchScore;
return minHitScore;
}
/// <summary>
/// Gets the maximum score based on the number of k-mers matching
/// </summary>
/// <param name="kmerBasedMatch"></param>
/// <returns></returns>
// NOTE(review): also unused within this class; see GetMinScore note above.
private float GetMaxScore(KeyValuePair<TypeToAlign, int> kmerBasedMatch,int queryLength)
{
//every gap or mismatch introduces a penalty of one, which would result in missing a
//a number of kmers equal to the kmer size
var minPenalty = Math.Max(subMat.gapExistPenalty, subMat.MisMatchScore);
var alnLength = Math.Min(kmerBasedMatch.Key.TypeSeq.Length, queryLength);
var missedHits =((alnLength-KMER_SIZE+1) - kmerBasedMatch.Value)/KMER_SIZE;
var maxScore = (alnLength-missedHits)* subMat.MatchScore + missedHits * minPenalty;
return maxScore;
}
/// <summary>
/// Returns every overlapping KMER_SIZE substring of seq; throws when seq is
/// shorter than KMER_SIZE.
/// </summary>
public static string[] CreateKMERS (string seq)
{
int totalMers = seq.Length - KMER_SIZE + 1;
if (totalMers > 0) {
string[] to = new string[totalMers];
for (int i = 0; i < to.Length; i++) {
to [i] = seq.Substring (i, KMER_SIZE);
}
return to;
} else {
Console.WriteLine ("Failure to hash: " + seq);
throw new Exception ("Tried to hash read of length: " + seq.Length.ToString () + " into KMERS of size " + KMER_SIZE.ToString ());
}
}
/// <summary>
/// Number of positions at which two equal-length sequences differ.
/// </summary>
public static int GetHammingDistance(string seq1, string seq2) {
int dist = 0;
for (int i = 0; i < seq1.Length; i++)
{
if (seq1[i] != seq2[i]) dist++;
}
return dist;
}
}
}
<file_sep>/MakeXML.py
# Read the barcode list file and print each barcode wrapped in a <Barcode> XML
# element, for pasting into the analysis settings XML file. (Python 2 script.)
direc=r'C:\Users\Clarity\Documents\My Dropbox\AFSeq\\'
fname=direc+"AF_BarCodes.txt"
data=open(fname).readlines()
for d in data:
print "<Barcode>"+d.strip()+"</Barcode>"<file_sep>/FreqSeqWPF/StartupPage.xaml.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Shapes;
using System.Diagnostics;
using Forms=System.Windows.Forms;
using FREQSeq;
namespace FreqSeqWPF
{
/// <summary>
/// First page shown to the user; lets them load a settings XML file and then
/// navigates to the sequence-import page.
/// </summary>
public partial class StartupPage
{
    /// <summary>
    /// Path of the selected file
    /// </summary>
    public string SelectedFilePath { get; set; }

    /// <summary>
    /// Flag to see if user requested to open a file from disk
    /// </summary>
    public bool ShowOpenFileDialog { get; set; }

    public StartupPage()
    {
        this.InitializeComponent();
    }

    /// <summary>
    /// Close this window and take the user to the empty workspace
    /// </summary>
    private void OnCloseClick(object sender, RoutedEventArgs e)
    {
        ShowOpenFileDialog = false;
        SelectedFilePath = null;
        Application.Current.Shutdown();
    }

    /// <summary>
    /// Raised when user clicks the open file button
    /// </summary>
    private void OnOpenClick(object sender, RoutedEventArgs e)
    {
        ShowOpenFileDialog = true;
    }

    // Wired from XAML; intentionally empty.
    private void BeginClick(object sender, RoutedEventArgs e)
    {
    }

    /// <summary>
    /// Opens the hyperlink target in the user's default browser.
    /// </summary>
    void HandleRequestNavigate(object sender, RoutedEventArgs e)
    {
        string navigateUri = hl.NavigateUri.ToString();
        // if the URI somehow came from an untrusted source, make sure to
        // validate it before calling Process.Start(), e.g. check to see
        // the scheme is HTTP, etc.
        try
        {
            Process.Start(new ProcessStartInfo(navigateUri));
        }
        catch (Exception)
        {
            // Best-effort: failure to launch a browser should not crash the GUI.
        }
        e.Handled = true;
    }

    // Wired from XAML; intentionally empty.
    private void button1_Click(object sender, RoutedEventArgs e)
    {
    }

    /// <summary>
    /// Lets the user pick a settings XML file and, when one is chosen, loads it
    /// and moves on to the import page.
    /// </summary>
    private void Button_Click(object sender, RoutedEventArgs e)
    {
        Forms.OpenFileDialog OFD = new System.Windows.Forms.OpenFileDialog();
        OFD.Filter = "XML Files|*.xml|All Files|*.*";
        Forms.DialogResult DR = OFD.ShowDialog();
        if (DR == Forms.DialogResult.OK)
        {
            App.AlleleSearcher = XML_Parser.CreateAlleleFinderFromXML(OFD.FileName);
            // BUGFIX: Continue() previously ran even when the dialog was cancelled,
            // navigating onward with no settings loaded.
            Continue();
        }
    }

    // Advances to the sequence import page once settings are loaded.
    private void Continue()
    {
        NavigationService.Navigate(new ImportSeqPage());
    }

    private void Button_Click_1(object sender, RoutedEventArgs e)
    {
        MessageBox.Show("Wait for this feature to appear soon! For now only XML file loading is supported.","Coming Soon",MessageBoxButton.OK);
    }
}
}<file_sep>/README.md
FreqSeq
=======
Programs used to analyze data from Freq Seq Data. See the main documentation pages at:
http://www.evolvedmicrobe.com/FreqSeq/index.html
The source code is divided into several folders.
* FreqSeqWPF - the WPF client on windows
* FreqSeq - The library containing the methods used by the GUI and command-line programs.
* freqout - The command line program.
<h2>New Version on 9/27/2015</h2>
Bugs Fixed:
* Warning message if unequal length alleles are input into the program.
Improvements:
* Compiled binary version released for Ubuntu 14.04
<h2>New Version on 12/4/2013</h2>
Bugs Fixed:
* Bug in Smith-Waterman-Gotoh alignment (the +/- sign on the penalty for gap creation and extension was switched, leading to incorrect alignments)
* Bug in barcode assigner – barcodes within hamming distance of 1 were not being assigned.
Improvements:
* Reads gzipped fastq files (ending with .gz)
* Changed command line executable name to freqout
* Trimmed long sequences to within only 10 bp longer than the expected read to improve alignment speed and accuracy
* Increased hashing k-mer length to 9 to reduce the number of alignment candidates identified
* Automatically skipping reads less than 75 bp (minimum length can be set in XML).
* Output column names changed
<file_sep>/freqout/README.txt
This file is the main command line executable.<file_sep>/FREQSeq/MiscMethods.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.IO;
namespace FREQSeq
{
// A delegate used to handle log events.
public delegate void LogEventHandler(object sender,string Message);
/// <summary>
/// Small shared utility routines.
/// </summary>
public class MiscMethods
{
    /// <summary>
    /// Verifies that a sequence contains only the bases A, C, G and T;
    /// throws an IOException naming the offending sequence otherwise.
    /// </summary>
    public static void ValidateACGT(string seq)
    {
        foreach (char c in seq)
        {
            if (c != 'A' && c != 'C' && c != 'G' && c != 'T')
            {
                throw new IOException("Sequence: " + seq + " contains non A, C, G or T elements.");
            }
        }
    }
}
}
<file_sep>/CreateOptionsXML.py
import re

"""This file generates XML for the options settings tag based on some C# Code"""

# C# primitive name -> fully qualified .NET type name used in the XML Type attribute.
typeToXML = {
    "float": "System.Single",
    "double": "System.Double",
    "bool": "System.Boolean",
    "string": "System.String",
    "int": "System.Int32",
}

# Matches "<primitive-type> <identifier>" pairs in the C# property declarations.
fieldRE = re.compile(r'((bool)|(float)|(double)|(string)|(int))\s+([^\s]*)')

# The C# source snippet whose properties are turned into XML nodes.
txt="""
public const string unknownID = "UNKNOWN";
//length of the barcode sequence, should always be 6
public const int BARCODE_LENGTH = 6;
public const int ALLELE_START_POS = 23;
/// <summary>
/// Should reads be quality filtered?
/// </summary>
public bool QualityFilter { get; set; }
/// <summary>
/// Filter reads with an average quality below this value
/// if the QualityFilter flag is set.
/// </summary>
public float MinAverageQualityForInexactMatches { get; set; }
/// <summary>
/// The highest percentage of "N" we will allow in a read before we will not use the read
/// </summary>
public float MaxPercentageNForInexactMatches { get; set; }
/// <summary>
/// Determines if we should try to align and assign inexact matches
/// </summary>
public bool AssignImperfectlyMatchedReads { get; set; }
/// <summary>
/// Reads with AvgQC less than this quantile cutoff will not be used for
/// inexact read assignments by alignment
/// </summary>
public float QuantileOfAvgReadQualityCutoff { get; set; }
private SimpleSubstitutionMatrix SubMat;
public float MisMatchPenalty { get; set; }
public float MatchScore { get; set; }
public float GapStartPenalty { get; set; }
public float GapExtendPenalty { get; set; }
/// <summary>
/// The name to output the file with
/// </summary>
public string OutputFileName { get; set; }
/// <summary>
/// Determines how much information is printed to the screen
/// </summary>
public bool Verbose { get; set; }
public int InitialReadsToParse { get; set; }
"""

# Emit one empty XML element per declared property, carrying its .NET type.
for groups in fieldRE.findall(txt):
    cs_type = groups[0]
    node_name = groups[-1]
    print("<" + node_name + ' Type="' + typeToXML[cs_type] + '"> </' + node_name + ">")
<file_sep>/AddBarcode.py
import sys
inName = sys.argv[1]
outName = sys.argv[2]
# Create data to add in front of each read
barcode = "CGTGAT"
m13 = "GTAAAACGACGGCCAGT"
seq_to_add = barcode + m13
qv_to_add = "F" * len(seq_to_add)
# In and out files
d = open(inName)
o = open(outName, 'w')
# Now add them in.
while True:
name = d.readline()
if len(name) ==0:
break
seq = seq_to_add + d.readline()
o.write(name)
o.write(seq)
o.write(d.readline()) # '+'
qvs = d.readline()
quals = qvs[:len(seq_to_add)] + qvs
print quals
o.write(quals)
d.close()
o.close()<file_sep>/FREQSeq/README.txt
This file is the main class library used by the console application and the GUI.<file_sep>/FreqSeqWPF/README.txt
This project is the front end GUI<file_sep>/FREQSeq/Helper.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace FREQSeq
{
public class Helper
{
/// <summary>
/// The .gz extension to indicate gzipped files
/// </summary>
public const string ZippedFileExtension = ".gz";
public static bool FileEndsWithZippedExtension(string fileName)
{
return fileName.EndsWith(ZippedFileExtension);
}
}
}
<file_sep>/build.sh
#!/bin/sh
rm -rf build; mkdir build
rm freqseq_ubuntu.tar.gz
rm freqseq_win.zip
### Build Bio dependency
## Note this requires having mono with the PCL assemblies installed into:
# MONO_PATH/lib/mono/xbuild-frameworks/
# if they aren't there, you will likely have to copy over from a machine that has them
# on Mac OSX they are located at the location below
# /Library/Frameworks/Mono.framework/Versions/4.0.0/lib/mono/xbuild-frameworks
xbuild /p:Configuration=Release FreqSeq/FREQSeq.csproj
cp FreqSeq/bin/Release/* build/
xbuild /p:Configuration=Release freqout/freqout.csproj
cp freqout/bin/Release/* build/
# Now make a bundled executable
cd build
export PKG_CONFIG_PATH=$HOME/mono64/lib/pkgconfig/
mkbundle --keeptemp --static --deps --config-dir /nothing --config ../config -o freqout freqout.exe FreqSeq.dll
cd ../
# Make ubuntu distribution
# See my stack overflow notes on how to deal with libMonoPosixHelper
# at http://stackoverflow.com/questions/18489272/how-can-i-convince-mkbundle-to-include-monoposixhelper/33023767#33023767
#
rm -fr freqseq_ubuntu; mkdir freqseq_ubuntu
cp build/freqout freqseq_ubuntu
cp Example.xml freqseq_ubuntu/
cp ExampleCMD.sh freqseq_ubuntu/
cp README_UBUNTU freqseq_ubuntu/README
cp /home/UNIXHOME/ndelaney/mono64/lib/libMonoPosixHelper.so freqseq_ubuntu/
cp -r TestData freqseq_ubuntu/TestData
tar -zcvf freqseq_ubuntu.tar.gz freqseq_ubuntu
# Make Windows distribution
rm -rf freqseq_win; mkdir freqseq_win
cp build/*.dll freqseq_win/
cp build/*.exe freqseq_win/
cp Example.xml freqseq_win/
cp -r TestData freqseq_win/TestData
zip -r freqseq_win.zip freqseq_win/
<file_sep>/ExampleCMD.sh
./freqout -xml=TestData/freq2.xml TestData/Sample_1_run1_R1.fastq.gz
<file_sep>/FreqSeqWPF/ImportSeqPage.xaml.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using System.Collections;
using System.Collections.ObjectModel;
using System.IO;
using System.Threading;
using FREQSeq;
using Forms=System.Windows.Forms;
namespace FreqSeqWPF
{
/// <summary>
/// Interaction logic for ImportSeqPage.xaml
/// </summary>
public partial class ImportSeqPage : Page
{
/// <summary>
/// Describes Molecule Type
/// </summary>
/// <summary>
/// Describes the selected filenames.
/// </summary>
private ObservableCollection<string> fileNames;
#region -- Constructor --
/// <summary>
/// Initializes the Opendialog.
/// </summary>
/// <param name="types">Supported file Types</param>
/// <param name="info">Collection of the files and the sequences parsed from them</param>
/// <param name="showFileBrowserAtStartup">Indicates whether to show file browse dialog by default</param>
public ImportSeqPage()
{
this.fileNames = new ObservableCollection<string>();
this.InitializeComponent();
this.btnImport.Click += new RoutedEventHandler(this.OnImportButtonClick);
this.btnImportCancel.Click += new RoutedEventHandler(this.OnCancelAnimationButtonClick);
this.btnBrowse.Click += new RoutedEventHandler(this.OnBrowseButtonClick);
this.btnBrowse.Focus();
}
#endregion
#region -- Public Events --
/// <summary>
/// Event to close the Pop up, It informs the
/// Controller that the pop is closed and to
/// close the Gray background.
/// </summary>
public event EventHandler ClosePopup;
/// <summary>
/// Event to cancel the import of files, It informs the
/// Controller to cancel the import of files.
/// </summary>
public event EventHandler CancelImport;
#endregion
#region -- Public Methods --
/// <summary>
/// Hides the animation and shows the
/// import and cancel button
/// </summary>
public void OnCancelImport()
{
buttonPanel.Visibility = Visibility.Visible;
animationPanel.Visibility = Visibility.Collapsed;
}
#endregion
#region -- Private Methods --
public List<string> LogData=new List<string>();
void AF_LoggerEvent(object sender, string Message)
{
LogData.Add(Message);
}
/// <summary>
/// On import button click would inform the controller to import files,
/// would also pass the list of filenames and the molecule type as event args.
/// </summary>
/// <param name="sender">Framework Element</param>
/// <param name="e">Routed event args</param>
private void OnImportButtonClick(object sender, RoutedEventArgs e)
{
//// Creates the collection of the File names.
buttonPanel.Visibility = Visibility.Collapsed;
animationPanel.Visibility = Visibility.Visible;
AlleleFinder AF= App.AlleleSearcher;
Thread.Sleep(100);
Thread t = new Thread(RunAnalysis);
t.Start();
}
private void RunAnalysis()
{
try
{
AlleleFinder AF = App.AlleleSearcher;
Thread.Sleep(100);
DateTime start = DateTime.Now;
AF.Verbose = true;
AF.LoggerEvent += new LogEventHandler(AF_LoggerEvent);
AF.OutputFileNamePrefix = Environment.GetFolderPath(Environment.SpecialFolder.Desktop)+"\\" + AF.OutputFileNamePrefix.Replace(".csv", "");
AF.SetFileNames(fileNames.ToList());
AF.ParseFiles();
AF.MakeReport();
double totMinutes = DateTime.Now.Subtract(start).TotalMinutes;
LogData.Add("Finished successfully");
LogData.Add("Analysis took: " + totMinutes.ToString("F") + " minutes");
Dispatcher.Invoke((Action)(() => { Continue(); }));
}
catch (Exception thrown)
{
MessageBox.Show("Error: Could not run analysis\nException is: " + thrown.Message,"Error",MessageBoxButton.OK,MessageBoxImage.Error);
Dispatcher.BeginInvoke((Action)(() => { Application.Current.Shutdown(); }));
}
}
private void Continue()
{
FinishPage FP = new FinishPage(LogData);
this.NavigationService.Navigate(FP);
}
/// <summary>
/// On cancel button click would close the Importing dialog and would
/// inform the controller
/// </summary>
/// <param name="sender">Framework Element</param>
/// <param name="e">Routed Event args</param>
private void OnCancelButtonClick(object sender, RoutedEventArgs e)
{
//// Raise the event to controller, inform closing of the pop up
if (this.ClosePopup != null)
{
this.ClosePopup(sender, e);
}
}
/// <summary>
/// On cancel button click of Importing of files would inform
/// the controller to cancel the import of files through events
/// </summary>
/// <param name="sender">Framework Element</param>
/// <param name="e">Routed events args</param>
private void OnCancelAnimationButtonClick(object sender, RoutedEventArgs e)
{
//// Raise the event
MessageBox.Show("Ending Program");
Dispatcher.Invoke((Action)(()=>{Application.Current.Shutdown();}));
}
/// <summary>
/// Handles the click on the Browse button,Launches the Windows Open File dialog
/// with custom File formats filters being set to the dialog.
/// On selection of files shows the paths of the selected files on the screen.
/// Gives option to import the files.
/// </summary>
/// <param name="sender">Framework Element</param>
/// <param name="e">Routed event args</param>
private void OnBrowseButtonClick(object sender, RoutedEventArgs e)
{
//// Launch the FileDialog
this.LaunchWindowFileDialog();
}
/// <summary>
/// Launches the File Dialog, creates the selected filenames list,
/// also validates the selected file name for import.
/// </summary>
/// <param name="fileDialog">OpenFiledialog instance to be launched</param>
private void LaunchWindowFileDialog()
{
//// Create and launch the Windows File Dialog, Set various validations
using (System.Windows.Forms.OpenFileDialog fileDialog = new System.Windows.Forms.OpenFileDialog())
{
fileDialog.Multiselect = true;
fileDialog.CheckFileExists = true;
fileDialog.CheckPathExists = true;
fileDialog.Filter = "Fastq Files|*.*";
// On SuccessFull selection of the files.
if (fileDialog.ShowDialog() == System.Windows.Forms.DialogResult.OK)
{
// Reset the file name collection
this.fileNames = new ObservableCollection<string>();
this.fileNameList.Items.Clear();
//// Validate the file type and create a list of file names to be displayed on the screen.
foreach (string file in fileDialog.FileNames)
{
fileNames.Add(file);
}
fileNameList.ItemsSource = fileNames;
this.btnImport.Focus();
btnImport.IsEnabled = true;
}
else
{
this.btnBrowse.Focus();
}
}
}
#endregion
private void btnClose_Click(object sender, RoutedEventArgs e)
{
Application.Current.Shutdown();
}
}
}
<file_sep>/FREQSeq/AlleleFinder.cs
//#define DEBUG
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.IO;
namespace FREQSeq
{
/// <summary>
/// A class that takes a FASTQ file and various settings and parses the files to count the amount of alleles
/// present at each of the barcodes. Should usually be passed a BarCodecollection and AlleleCollection when initialized.
/// </summary>
public class AlleleFinder
{
public const string unknownID = "UNKNOWN";
//length of the barcode sequence, should always be 6
public const int BARCODE_LENGTH = 6;
public const int ALLELE_START_POS = 23;
public const string M13_SEQUENCE = "GTAAAACGACGGCCAGT";
public int InitialReadsToParse { get; set; }
public int MinReadLength { get; set; }
public bool RequireM13Sequence{ get; set; }
public bool AllowReadsWithM13WithinHamming1ToBeAssigned { get; set; }
public bool AllowBarCodesWithinHamming1ToBeAssigned { get; set; }
/// <summary>
/// Filter reads with an average quality below this value
/// if the QualityFilter flag is set.
/// </summary>
public float MinAverageQualityForInexactMatches { get; set; }
/// <summary>
/// The highest percentage of "N" we will allow in a read before we will not use the read
/// </summary>
public float MaxPercentageNForInexactMatches { get; set; }
/// <summary>
/// Determines if we should try to align and assign inexact matches
/// </summary>
public bool AssignImperfectlyMatchedReads { get; set; }
/// <summary>
/// Reads with AvgQC less than this quantile cutoff will not be used for
/// inexact read assignments by alignment
/// </summary>
public float QuantileOfAvgReadQualityCutoff { get; set; }
public float MisMatchPenalty { get; set; }
public float MatchScore { get; set; }
public float GapStartPenalty { get; set; }
public float GapExtendPenalty { get; set; }
/// <summary>
/// The name to output the file with
/// </summary>
public string OutputFileNamePrefix { get; set; }
/// <summary>
/// Determines how much information is printed to the screen
/// </summary>
public bool Verbose { get; set; }
FastQParser FQP;
BarCodeCollection BC;
LocusCollection AC;
List<string> FileNames = new List<string> ();
public void SetFileNames (List<string> FNames)
{
foreach (string fn in FNames) {
if (!File.Exists (fn))
throw new IOException ("File: " + fn + " cannot be found");
}
this.FQP = new FastQParser (FNames);
if (Verbose) {
FQP.LogEvent += new LogEventHandler (ReceiveChildEvent);
}
FileNames.AddRange (FNames);
//Now to parse through and attempt to get the relevant Statistics
List<FastQRead> firstReads = FQP.GetFirstReadsFromFile ();
var QCAvg = (from x in firstReads
select x.AvgQuality).ToList ();
var percNAvg = (from x in firstReads
select x.PercN).ToList ();
float avgQC = (float)QCAvg.Average ();
QCAvg.Sort ();
FireLogEvent ("Average scaled QC values based on initial reads is: " + avgQC.ToString ());
int LowIndex = (int)(QuantileOfAvgReadQualityCutoff * (float)QCAvg.Count);
MinAverageQualityForInexactMatches = (float)QCAvg [LowIndex];
FireLogEvent ("Requiring an average scaled read QC value of " + MinAverageQualityForInexactMatches.ToString ("F") + " before attempting assignment based on alignment.");
double avgN = percNAvg.Average ();
FireLogEvent ("The average percentage of 'N' basepairs in initial reads is: " + avgN.ToString ("F"));
}
public AlleleFinder (BarCodeCollection BCC, LocusCollection AC, List<string> FNames = null)
{
this.BC = BCC;
this.AC = AC;
this.BC.AC = AC;
foreach (BarCodeGroup BC in BCC.BarCodes) {
BC.AssignParentCollection (AC);
}
this.BC.LogEvent += new LogEventHandler (ReceiveChildEvent);
if (FNames != null) {
SetFileNames (FNames);
}
}
public void SetDefaultOptions ()
{
this.AssignImperfectlyMatchedReads = true;
this.InitialReadsToParse = 10000;
this.GapExtendPenalty = -1;
this.GapStartPenalty = -2;
this.MatchScore = 1;
this.MisMatchPenalty = -2;
this.QuantileOfAvgReadQualityCutoff = 0.02F;
this.MaxPercentageNForInexactMatches = 0.2F;
this.OutputFileNamePrefix = "Results";
this.AllowBarCodesWithinHamming1ToBeAssigned = true;
this.RequireM13Sequence = true;
this.MinReadLength = 75;
this.AllowReadsWithM13WithinHamming1ToBeAssigned = true;
}
/// <summary>
/// Register with this event to get update messages
/// </summary>
public event LogEventHandler LoggerEvent;
public void FireLogEvent (string message)
{
if (LoggerEvent != null && Verbose) {
LoggerEvent (this, message + "\n");
}
}
/// <summary>
/// Receives messages from AlleleCollection, FastQParser, etc. and passes them on to the main event
/// </summary>
/// <param name="sender"></param>
/// <param name="msg"></param>
public void ReceiveChildEvent (object sender, string msg)
{
FireLogEvent (msg);
}
public void MakeReport ()
{
BC.MakeReport (this.OutputFileNamePrefix + ".csv");
// BC.MakeReport(outFileName,"");
}
/// <summary>
/// Used to determine if the sequence contains an M13 Tag
/// </summary>
/// <param name="read"></param>
/// <returns>Whether it does by the criteria</returns>
private bool ContainsM13Sequence (FastQRead read)
{
string M13 = read.Sequence.Substring (BARCODE_LENGTH, M13_SEQUENCE.Length);
if (M13 == M13_SEQUENCE) {
return true;
}
if (AllowReadsWithM13WithinHamming1ToBeAssigned && BarCodeCollection.CalculateHammingDistance (M13_SEQUENCE, M13) < 2) {
return true;
}
return false;
}
/// <summary>
/// The main work horse function, responsible for parsing the FASTQ files
/// </summary>
public void ParseFiles ()
{
AC.FinishAndFreeze (this);
BC.FinishAndFreeze ();
#if DEBUG
AlleleTypeAssigner ata = AC.SpawnTypeAssigner ();
foreach (StreamReader FR in FQP.GetStreamReaderForSequences())
// Parallel.ForEach(FQP.GetStreamReaderForSequences(700000000), FR =>
#else
ParallelOptions po = new ParallelOptions ();
po.MaxDegreeOfParallelism = Environment.ProcessorCount;
SimplePartitioner<StreamReader> sp = new SimplePartitioner<StreamReader> (FQP.GetStreamReaderForSequences (700000));
Parallel.ForEach (sp, po, FR =>
#endif
{
//First convert the lines of text (already in memory) to FastQReads;
//List<FastQRead> reads = FastQParser.GetFastQReadsFromStream (FR);
var reads = FastQParser.GetFastQReadsFromStream (FR);
#if !DEBUG
AlleleTypeAssigner ata = AC.SpawnTypeAssigner ();
#endif
BarCodeAssigner bca = BC.SpawnBarCodeAssigner ();
//now loop through them
int UnassignedReads = 0;
int ReadsTooShort = 0;
int NoM13Reads = 0;
var CountingDictionary = BC.ReturnIdentifyingDictionary ();
int totalReads = 0;
foreach (FastQRead read in reads) {
totalReads++;
if (read.Sequence.Length < MinReadLength) {
ReadsTooShort++;
continue;
}
if (RequireM13Sequence && !ContainsM13Sequence (read)) {
UnassignedReads++;
NoM13Reads++;
continue;
}
Assignment barCodeAssignment = bca.AssignToGroup (read);
if (barCodeAssignment.Group != AlleleFinder.unknownID) {
Assignment typeAssignment = ata.AssignReadToType (read);
//now match the assignment
var toUpdate = CountingDictionary [barCodeAssignment.Group];
var Counter = toUpdate [typeAssignment.Group];
if (typeAssignment.ExactAssignment) {
Counter.totalExactAssignments++;
Counter.totalExactAvgQualityScore += read.AvgQuality;
} else {
Counter.totalInexactAssignments++;
Counter.totalInexactAvgQualityScore += read.AvgQuality;
}
} else {
UnassignedReads++;
}
}
//drop reference after conversion so GC can free memory
FR.Dispose ();
FR = null;
//Now To update
BC.AddIdentifyingDictionary (CountingDictionary, UnassignedReads, totalReads, NoM13Reads, ReadsTooShort);
GC.Collect ();
#if DEBUG
}
ata.tmp.report ();
#else
}
);
#endif
//Program has a bad tendency to let stuff sit around eating up memory
GC.Collect ();
}
}
}
<file_sep>/FreqSeqWPF/App.xaml.cs
using System;
using System.Collections.Generic;
using System.Configuration;
using System.Data;
using System.Windows;
using FREQSeq;
namespace FreqSeqWPF
{
/// <summary>
/// Interaction logic for App.xaml
/// </summary>
public partial class App : Application
{
public static AlleleFinder AlleleSearcher;
public static string OutFileName;
public static void RunAnalysis()
{
}
}
}<file_sep>/FREQSeq/AlignmentClasses.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace FREQSeq
{
/// <summary>
/// Modified from SmithWatermanGotoh class in Bio C# library
///
/// This class is a simpler version, which doesn't do the traceback step, only reports the score
/// and is hopefully immutable for parrallel purposes
/// </summary>
public sealed class ScoreOnlySmithWatermanGotoh
{
private enum LastMove : byte
{
Diagonal = 1,
Insertion = 2,
Deletion = 3,
None = 4}
;
private struct MatrixPosition
{
public float score;
public LastMove lastMove;
public MatrixPosition (float score, LastMove lastMove)
{
this.score = score;
this.lastMove = lastMove;
}
}
public static float GetSmithWatermanScore (string databaseSequence, string querySequence, SimpleSubstitutionMatrix substitutionMatrix)
{
int sizeCutoff = Math.Max(databaseSequence.Length,querySequence.Length) + 10;
if (querySequence.Length > sizeCutoff) {
querySequence = querySequence.Substring (0, sizeCutoff);
}
if (databaseSequence.Length > sizeCutoff)
{
databaseSequence = databaseSequence.Substring(0, sizeCutoff);
}
//Get relevant variables
float penaltyGapExist = -substitutionMatrix.gapExistPenalty;
float penaltyGapExtend = -substitutionMatrix.gapExtendPenalty;
float MatchScore = substitutionMatrix.MatchScore;
float MisMatchScore = substitutionMatrix.MisMatchScore;
float[] Dk_1 = new float[databaseSequence.Length + 1];
float[] Qk_1 = new float[querySequence.Length + 1];
float highScore = -999;
//Initialize matrix
MatrixPosition[,] matrix = new MatrixPosition[databaseSequence.Length + 1, querySequence.Length + 1];
//First set all values in the horizontal (database) to zero
for (int i = 0; i < matrix.GetLength (0); i++) {
matrix [i, 0] = new MatrixPosition (0, LastMove.None);
Dk_1 [i] = 0 - penaltyGapExist;
}
//Second set all values in the vertical (query) to zero
for (int i = 0; i < matrix.GetLength (1); i++) {
matrix [0, i] = new MatrixPosition (0, LastMove.None);
Qk_1 [i] = 0 - penaltyGapExist;
}
//Go down use dimension of the query
for (int k = 1; k < matrix.GetLength (1); k++) {
for (int i = 1; i < matrix.GetLength (0); i++) {
//i=database sequence in the horizontal
//k=query sequence in the vertical
//the database sequence is in the horizontal, the query in the vertical axis of the matrix
//Diagonal score is the previous score and in addition the similarityValue;
float ScoreUpdate = databaseSequence [i - 1] == querySequence [k - 1] ? MatchScore : MisMatchScore;
float scoreDiagonal = matrix [i - 1, k - 1].score + ScoreUpdate;
//Find the highest scoring insertion, testing all matrix to the upper side;
float downScoreInsertion;
downScoreInsertion = Math.Max (matrix [i, k - 1].score - penaltyGapExist, Dk_1 [i] - penaltyGapExtend);
Dk_1 [i] = downScoreInsertion;
//Find the highest scroing deletion, testing all matrix entries to the left side
float rightScoreDeletion;
rightScoreDeletion = Math.Max (matrix [i - 1, k].score - penaltyGapExist, Qk_1 [k] - penaltyGapExtend);
Qk_1 [k] = rightScoreDeletion;
var current = GetMaximumPosition (scoreDiagonal, downScoreInsertion, rightScoreDeletion, 0);
matrix [i, k] = current;
//Updating the highest scoring matrix entry
if (current.score > highScore) {
//new highscore
highScore = current.score;
}
}
}
System.Diagnostics.Debug.Assert (highScore <= MatchScore * databaseSequence.Length && highScore <= MatchScore * querySequence.Length);
return highScore;
}
private static MatrixPosition GetMaximumPosition (float scoreDiagonal, float scoreInsertion, float scoreDeletion, float scoreNone)
{
MatrixPosition position;
if (scoreDiagonal > scoreNone) {
//exclude scoreNone
if (scoreDiagonal >= scoreInsertion) {
//exclude scoreNone & scoreInsertion
if (scoreDiagonal >= scoreDeletion) {
//exclude scoreNone & scoreInsertion & scoreDeletion => DIAGONAL
position = new MatrixPosition (scoreDiagonal, LastMove.Diagonal);
} else {
//exclude scoreNone & scoreInsertion & scoreDiagonal => DELETION
position = new MatrixPosition (scoreDeletion, LastMove.Deletion);
}
} else {
//exclude scoreNone & scoreDiagonal
if (scoreInsertion > scoreDeletion) {
//exclude scoreNone & scoreDiagonal & scoreDeletion => INSERTION
position = new MatrixPosition (scoreInsertion, LastMove.Insertion);
} else {
//exclude scoreNone &scoreDiagonal & scoreInsertion => DELETION
position = new MatrixPosition (scoreDeletion, LastMove.Deletion);
}
}
} else {
//exclude scoreDiagonal
if (scoreInsertion > scoreNone) {
//exclude scoreDiagonal & scoreNone
if (scoreInsertion > scoreDeletion) {
//exclude scoreDiagonal & scoreNone & scoreDeletion => INSERTION
position = new MatrixPosition (scoreInsertion, LastMove.Insertion);
} else {
//exclude scoreDiagonal & scoreNone & scoreInsertion => DELETION
position = new MatrixPosition (scoreDeletion, LastMove.Deletion);
}
} else {
//exclude scoreDiagonal & scoreInsertion
if (scoreDeletion > scoreNone) {
//exclude scoreDiagonal & scoreInsertion & scoreNone => DELETION
position = new MatrixPosition (scoreDeletion, LastMove.Deletion);
} else {
//exclude scoreDiagonal & scoreInsertion & scoreDeletion =>NONE
position = new MatrixPosition (scoreNone, LastMove.None);
}
}
}
return position; //That was annoying
}
private static MatrixPosition GetMaximumPosition (float scoreDiagonal, float scoreInsertion, float scoreDeletion)
{
MatrixPosition position;
//exclude scoreNone
if (scoreDiagonal >= scoreInsertion) {
//exclude scoreNone & scoreInsertion
if (scoreDiagonal >= scoreDeletion) {
//exclude scoreNone & scoreInsertion & scoreDeletion => DIAGONAL
position = new MatrixPosition (scoreDiagonal, LastMove.Diagonal);
} else {
//exclude scoreNone & scoreInsertion & scoreDiagonal => DELETION
position = new MatrixPosition (scoreDeletion, LastMove.Deletion);
}
} else {
//exclude scoreNone & scoreDiagonal
if (scoreInsertion > scoreDeletion) {
//exclude scoreNone & scoreDiagonal & scoreDeletion => INSERTION
position = new MatrixPosition (scoreInsertion, LastMove.Insertion);
} else {
//exclude scoreNone &scoreDiagonal & scoreInsertion => DELETION
position = new MatrixPosition (scoreDeletion, LastMove.Deletion);
}
}
return position; //That was annoying
}
}
/// <summary>
/// Substitution matrix, hopefully immutable
/// </summary>
public sealed class SimpleSubstitutionMatrix
{
public readonly float gapExistPenalty;
public readonly float gapExtendPenalty;
public readonly float MatchScore;
public readonly float MisMatchScore;
public SimpleSubstitutionMatrix (float match, float mismatch, float gapexist, float gapExtend)
{
gapExistPenalty = gapexist;
gapExtendPenalty = gapExtend;
MatchScore = match;
MisMatchScore = mismatch;
}
public SimpleSubstitutionMatrix Clone ()
{
return new SimpleSubstitutionMatrix (MatchScore, MisMatchScore, gapExistPenalty, gapExtendPenalty);
}
}
}
<file_sep>/FREQSeq/FastQParser.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.IO;
using System.IO.Compression;
namespace FREQSeq
{
public class FastQParser
{
string DirecName;
public static int LeadLength;
public static int ReadLength;
public static int totalLength;
int MaxReadSize;
List<string> FastQNames = new List<string>();
public event LogEventHandler LogEvent;
public FastQParser(List<string> FileNames)
{
this.FastQNames.AddRange(FileNames);
}
private void FireLogEvent(string msg)
{
if (LogEvent != null)
{
LogEvent(this, msg);
}
}
public List<FastQRead> GetFirstReadsFromFile(int SizeToGrab = 5000)
{
try
{
if (this.FastQNames.Count == 0)
throw new Exception("Tried to Parse FastQ files before they were set");
string FileName = this.FastQNames[0];
FireLogEvent("Pre-parsing " + SizeToGrab.ToString() + " reads from " + FileName + " to get initial statistics for filter");
StreamReader SR = GetFileStreamReader(FileName);
List<FastQRead> toReturn = new List<FastQRead>();
string line;
while ((line = SR.ReadLine()) != null)
{
///The array might be too big
//Not sure why below was in there, it is an igornable character
//if (line.StartsWith("\0",StringComparison.InvariantCulture)) { break; }
string line2, line3, line4;
line2 = SR.ReadLine();
//cheap way to skip lines
line3 = SR.ReadLine();
line4 = SR.ReadLine();
if (line2 == null || line3 == null || line4 ==null) {
throw new IOException("FASTQ entry did not have all four lines. Problem Entry starts with: " + line);
}
FastQRead fqr = new FastQRead(line, line2, line3, line4);
toReturn.Add(fqr);
if (toReturn.Count >= SizeToGrab) break;
}
FireLogEvent("Successfully grabbed " + toReturn.Count.ToString() + " reads.");
SR.Close();
return toReturn;
}
catch (Exception thrown)
{
FireLogEvent("Error: Could not parse the start of a FASTQ file to obtain statistics");
throw thrown;
}
}
public IEnumerable<StreamReader> GetStreamReaderForSequences(int SizeToReturn = 75000)
{
Encoding ENC = Encoding.Unicode;
Encoding AscEnc = Encoding.ASCII;
foreach (string FileName in this.FastQNames)
{
FireLogEvent("Parsing: " + FileName);
//This should be streamlined so it only happens once, kinda slow now
StreamReader SR = GetFileStreamReader(FileName);//
LeadLength = SR.ReadLine().Length + 1;
ReadLength = SR.ReadLine().Length;
totalLength = SR.ReadLine().Length + 1 + SR.ReadLine().Length + 1 + LeadLength + ReadLength + 1;
MaxReadSize = totalLength * 2;
SR.Close();
int MinLength = MaxReadSize * 200;
if (SizeToReturn < MinLength)
{
SizeToReturn = MinLength;
}
int SizeToRead = SizeToReturn - MaxReadSize*16;//Lot bigger to give room on the end
SR = GetFileStreamReader(FileName);// StreamReader(FileName, AscEnc, true, SizeToReturn);
char[] ReadsToReturn = new char[SizeToReturn];
while (SR.Peek() != -1)
{
int ReturnedCount = SR.Read(ReadsToReturn, 0, SizeToRead);
int CurrentPos = ReturnedCount;
//Bullshit because of use of '+' and '@' as QC quality score characters
//Here is the plan, going to add 8 more lines, figure out where I am
//based on that, then add an appropriate number of additional lines
//Position verified by '@' plus a '+' line a couple of lines after
if (ReturnedCount == SizeToRead)
{
List<string> toAdd=new List<string>();
//We are starting in the middle of a line so need to finish that off
string line = SR.ReadLine();
//are we not miraculously at the end?
if (line != null)
{
toAdd.Add(line);
for (int i = 0; i < 8; i++)
{
line = SR.ReadLine();
if (line == null) break;
//Just in case we land at "\n"
//if (line == "") { line = SR.ReadLine(); }
if (line == null) break;
toAdd.Add(line);
}
if (toAdd.Count == 9)
{
//Find out where I am
int ReadStart = -9;
for (int i = 0; i < 4; i++)
{
//Already need to add the first, line so offset by one
if (toAdd[i+1][0] == '@' && toAdd[(i +1)+ 2][0] == '+')
{
ReadStart = i;
break;
}
}
for (int i = 0; i < ReadStart; i++)
{
toAdd.Add(SR.ReadLine());
}
}
foreach (string str in toAdd)
{
char[] toCopy = (str + "\n").ToArray();
Array.Copy(toCopy, 0, ReadsToReturn, CurrentPos, toCopy.Length);
CurrentPos += toCopy.Length;
}
}
}
byte[] arr = ENC.GetBytes(ReadsToReturn);
Array.Resize (ref arr, CurrentPos*2);
MemoryStream MS = new MemoryStream(arr);
arr = null;
StreamReader SR2 = new StreamReader(MS, ENC, false);
Array.Clear(ReadsToReturn, 0, ReadsToReturn.Length);
yield return SR2;
}
}
}
private StreamReader GetFileStreamReader(string fname)
{
// int BufferSize = 1 << 17;
// StreamReader(FileName, Encoding.UTF8, false, BufferSize);
if(Helper.FileEndsWithZippedExtension(fname))
{
GZipStream gz = new GZipStream((new FileInfo(fname)).OpenRead(), CompressionMode.Decompress);
return new StreamReader(gz);
}
else
{
return new StreamReader(fname);
}
}
/// <summary>
/// Takes a StreamReader on a memory stream containing the raw unicode data for a portion of a FASTQ
/// file and converts them all to FASTQ read classes. The idea is that this conversion can be done
/// by multiple threads while the main thread reads off the disk and creates the streams.
/// </summary>
/// <returns>A collection of FASTQ Reads</returns>
public static IEnumerable<FastQRead> GetFastQReadsFromStream(StreamReader FastQPortionStream)
{
List<FastQRead> toReturn = new List<FastQRead>();
string line;
while ((line = FastQPortionStream.ReadLine()) != null)
{
///The array might be too big
//if (line.StartsWith('\0')) { break; } //This is always true in MONO
if (line[0]=='\0') break;
string line2, line3, line4;
line2 = FastQPortionStream.ReadLine();
//cheap way to skip lines
line3 = FastQPortionStream.ReadLine();
line4 = FastQPortionStream.ReadLine();
if (line2 == null || line3 == null || line4 == null) {
throw new IOException("FASTQ entry did not have all four lines. Problem Entry starts with: " + line);
}
FastQRead fqr = new FastQRead(line, line2, line3, line4);
yield return fqr;
//toReturn.Add(fqr);
}
FastQPortionStream.BaseStream.Dispose();
FastQPortionStream.Close();
//return toReturn;
}
}
/// <summary>
/// One FASTQ record: the id line, the sequence, and its per-base quality scores.
/// </summary>
public class FastQRead
{
    public readonly string Sequence,id;
    // Per-base quality scores decoded from the fourth FASTQ line.
    public readonly sbyte[] QCscores;
    // Cached mean quality; -999 is the "not yet computed" sentinel.
    // NOTE(review): the constructor always pre-computes this, so the lazy
    // branch in the getter below is effectively dead code.
    double pAvgQuality=-999;
    /// <summary>Mean of QCscores.</summary>
    public double AvgQuality
    {
        get
        {
            if (pAvgQuality == -999)
            {
                // Manual sum: avoids LINQ overload/overflow pitfalls with sbyte[].
                int t = 0;
                for (int j = 0; j < QCscores.Length; j++)
                    t += QCscores[j];
                pAvgQuality = t/(double) QCscores.Length;
            }
            return pAvgQuality;
        }
    }
    // Cached fraction of 'N' bases; -999 means "not yet computed".
    private float pPercN = -999;
    /// <summary>Fraction (0..1) of bases in the sequence that are 'N' (case-sensitive).</summary>
    public float PercN
    {
        get
        {
            if (pPercN == -999) CalcPercentageN();
            return pPercN;
        }
    }
    // Count 'N' characters and cache the fraction in pPercN.
    private void CalcPercentageN()
    {
        float count=0;
        foreach (char c in Sequence)
        {
            if (c == 'N') count = count + 1;
        }
        pPercN = count / (float)Sequence.Length;
    }
    /// <summary>
    /// Build a record from the four raw FASTQ lines, validating the '@'/'+'
    /// markers and that the quality line matches the sequence length.
    /// Throws IOException on any format violation.
    /// </summary>
    public FastQRead(string line1, string line2, string line3, string line4)
    {
        // First validate the '@' (id) and '+' (separator) marker lines.
        if (line1[0]!='@' || line3[0]!='+')
        {
            string ExceptionMessage = "FastQ Lines Formatted Poorly, missing @ or + symbol in correct spot\nData From File Below\n";
            ExceptionMessage += line1 + "\n" + line2 + "\n" + line3 + "\n" + line4;
            throw new IOException(ExceptionMessage);
        }
        // QC line must be the same length as the sequence line.
        if(line2.Length!=line4.Length)
        {
            //verify this isn't some dumb string issue, I should really just resize that array before handing it off
            int u = line2.Length;
            int k = line4.Length;
            string ExceptionMessage = "FastQ QC line length ("+k.ToString()+") does not equal to sequence length("+u.ToString()+")";
            ExceptionMessage += line1 + "\n" + line2 + "\n" + line3 + "\n" + line4;
            throw new IOException(ExceptionMessage);
        }
        this.Sequence = line2;
        this.id = line1;
        // Quality decode: subtract `dif` (32) from each ASCII character.
        // NOTE(review): an earlier comment claimed the offset was 64 (old
        // Illumina) while the code uses 32 -- neither is the common Sanger
        // offset of 33; confirm the intended encoding.
        QCscores = new sbyte[line2.Length];
        int i=0;
        int dif = 32; // offset subtracted from each ASCII char to get the score
        int sum = 0;
        foreach(char c in line4)
        {
            int tmp = (c - dif);
            sum += tmp;
            QCscores[i] = (sbyte) tmp;
            i++;
        }
        // Pre-compute the average so the AvgQuality getter is O(1).
        pAvgQuality = sum/(double) QCscores.Length;
    }
    public override string ToString()
    {
        return this.id +"\n"+ this.Sequence+"\n";
    }
}
}
| b0481357fcf414ad72d5b50dfedc5363b0e7a49e | [
"Markdown",
"C#",
"Python",
"Text",
"Shell"
] | 22 | C# | evolvedmicrobe/FreqSeq | 6d953139181497ab483ae3e6d46db091647bf128 | b0e885e89431cc118866c5284b8197080c5bbcd8 |
refs/heads/master | <file_sep>// Commander X16 Emulator
// Copyright (c) 2019 <NAME>
// All rights reserved. License: 2-clause BSD
#include <stdio.h>
#include <stdbool.h>
#include "ps2.h"
#define PS2_BUFFER_SIZE 32
// One outgoing byte ring buffer per PS/2 port (index 0 and 1).
// read == write means empty; one slot is sacrificed so that a full
// buffer (write + 1 == read) is distinguishable from an empty one.
struct
{
	uint8_t data[PS2_BUFFER_SIZE];
	uint8_t read;   // index of the next byte to dequeue
	uint8_t write;  // index of the next free slot
} ps2_buffer[2];
// Enqueue one byte on port i's ring buffer; the byte is silently
// dropped when the buffer is full.
void
ps2_buffer_add(int i, uint8_t byte)
{
	uint8_t next = (ps2_buffer[i].write + 1) % PS2_BUFFER_SIZE;
	if (next == ps2_buffer[i].read) {
		return; // full: one slot stays free to distinguish full from empty
	}
	ps2_buffer[i].data[ps2_buffer[i].write] = byte;
	ps2_buffer[i].write = next;
}
// Dequeue one byte from port i's ring buffer; returns 0 when empty
// (note: a queued 0x00 byte is therefore indistinguishable from "empty").
uint8_t
ps2_buffer_remove(int i)
{
	if (ps2_buffer[i].read == ps2_buffer[i].write) {
		return 0;
	}
	uint8_t byte = ps2_buffer[i].data[ps2_buffer[i].read];
	ps2_buffer[i].read = (ps2_buffer[i].read + 1) % PS2_BUFFER_SIZE;
	return byte;
}
// Transmit state used by the ps2_step() state machine.
// NOTE(review): these are single statics, but ps2_step() takes a port
// index -- both ports share one transmit state; confirm that only one
// port is expected to transmit at a time.
static bool sending = false;    // a frame is currently being clocked out
static bool has_byte = false;   // current_byte holds a byte to (re)send
static uint8_t current_byte;    // byte currently being transmitted
static int bit_index = 0;       // 0..10 position within the 11-bit frame
static int data_bits;           // frame bits: start, 8 data (LSB first), parity, stop
static int send_state = 0;      // sub-step counter for per-bit hold timing
#define HOLD 25 * 8 /* 25 x ~3 cycles at 8 MHz = 75µs */
ps2_port_t ps2_port[2];
/*
 * Advance the transmit state machine for PS/2 port i by one step.
 * Only device->host transmission is modelled.  An 11-bit frame is sent
 * LSB-first: start(0), 8 data bits, odd parity, stop(1) -- see the
 * data_bits construction below.  Each bit is held for HOLD steps with
 * the clock low, followed by HOLD steps with the clock high.
 */
void
ps2_step(int i)
{
	if (!ps2_port[i].clk_in && ps2_port[i].data_in) { // communication inhibited
		ps2_port[i].clk_out = 0;
		ps2_port[i].data_out = 0;
		sending = false; // abort any frame in progress
		return;
	} else if (ps2_port[i].clk_in && ps2_port[i].data_in) { // idle state
		if (!sending) {
			// Fetch the next byte to transmit, unless a previous
			// frame was interrupted and must be re-sent.
			if (!has_byte) {
				current_byte = ps2_buffer_remove(i);
				if (!current_byte) {
					// Nothing to send (a queued 0x00 byte is
					// indistinguishable from an empty buffer here).
					ps2_port[i].clk_out = 1;
					ps2_port[i].data_out = 0;
					return;
				}
				has_byte = true;
			}
			// start bit (0 in LSB) | 8 data bits | odd parity at bit 9 | stop at bit 10
			data_bits = current_byte << 1 | (1 - __builtin_parity(current_byte)) << 9 | (1 << 10);
			bit_index = 0;
			send_state = 0;
			sending = true;
		}
		if (send_state <= HOLD) {
			// First half of a bit cell: clock low, data bit on the wire.
			ps2_port[i].clk_out = 0; // data ready
			ps2_port[i].data_out = data_bits & 1;
			if (send_state == 0 && bit_index == 10) {
				// we have sent the last bit; if the host
				// inhibits now, we'll send the next byte
				has_byte = false;
			}
			if (send_state == HOLD) {
				// End of hold: shift to the next bit of the frame.
				data_bits >>= 1;
				bit_index++;
			}
			send_state++;
		} else if (send_state <= 2 * HOLD) {
			// Second half of a bit cell: clock high, line released.
			ps2_port[i].clk_out = 1; // not ready
			ps2_port[i].data_out = 0;
			if (send_state == 2 * HOLD) {
				if (bit_index < 11) {
					send_state = 0; // more bits to go: restart hold phase
				} else {
					sending = false; // frame complete
				}
			}
			if (send_state) {
				// advance unless the branch above just rewound to 0
				send_state++;
			}
		}
	} else {
		// DATA_IN low (presumably host request-to-send) is not
		// implemented -- treated as an unknown bus state.
		ps2_port[i].clk_out = 0;
		ps2_port[i].data_out = 0;
	}
}
// Fake mouse state, fed by the emulator event handlers below and
// exposed to the guest through mouse_read().
static uint8_t buttons;        // bitmask: bit n set = button n pressed
static uint16_t mouse_x = 0;   // absolute X position
static uint16_t mouse_y = 0;   // absolute Y position
// Latch button `num` (0-based bit index) as pressed in the bitmask.
void
mouse_button_down(int num)
{
	uint8_t mask = 1 << num;
	buttons |= mask;
}
// Clear button `num` (0-based bit index) from the pressed bitmask.
void
mouse_button_up(int num)
{
	buttons &= ~(1 << num);
}
// Record the mouse's new absolute position.
void
mouse_move(int x, int y)
{
	mouse_y = y;
	mouse_x = x;
}
// Register file of the fake mouse:
//   0/1 = X low/high byte, 2/3 = Y low/high byte, 4 = button bitmask.
// Any other register reads as 0xff.
uint8_t
mouse_read(uint8_t reg)
{
	if (reg == 0)
		return mouse_x & 0xff;
	if (reg == 1)
		return mouse_x >> 8;
	if (reg == 2)
		return mouse_y & 0xff;
	if (reg == 3)
		return mouse_y >> 8;
	if (reg == 4)
		return buttons;
	return 0xff;
}
| 1f0ed425dc204d7f5ab2cae4c5949f2fe270f37d | [
"C"
] | 1 | C | RUA71/x16-emulator | 91625a5bb57bb12cb087e9287e93971ed8ef4f06 | c838cee274203215c54c633205da32280b54c726 |
refs/heads/master | <repo_name>zigorrom/PyIV<file_sep>/backend.py
import numpy as np
import time
from PyQt4 import QtCore
class TimetraceMeasurement(QtCore.QThread):
    """Background acquisition thread.

    Generates one block of timetrace data roughly every 0.1 s and pushes
    it into the supplied data storage.  Currently emits random demo data.
    """

    TimetraceStarted = QtCore.pyqtSignal()
    TimetraceStopped = QtCore.pyqtSignal()

    def __init__(self, data_storage, parent=None):
        super().__init__(parent)
        self.data_storage = data_storage  # sink for generated samples
        self.alive = False                # run() loops while this is True
        self.process = None

    def stop(self):
        """Ask the acquisition loop to finish and block until it has."""
        self.alive = False
        self.wait()

    def setup(self):
        print("setup")

    def process_start(self):
        # Fixed typo in the printed message ("strart" -> "start").
        print("process start")

    def process_stop(self):
        print("process stop")

    def run(self):
        """Thread entry point: emit start/stop signals around the loop."""
        self.process_start()
        self.alive = True
        self.TimetraceStarted.emit()
        counter = 0.0
        length = 50000
        # Idiom fix: `while True` + immediate break replaced by a direct
        # condition; removed an unused `data = {}` pre-initialization.
        while self.alive:
            print("count {0}".format(counter))
            end = counter + length
            data = {
                "t": list(np.arange(counter, end, dtype=float)),
                "id": list(np.random.rand(length)),
                "ig": list(np.random.rand(length)),
                "vd": list(np.random.rand(length)),
                "vg": list(np.random.rand(length)),
            }
            self.data_storage.update(data)
            counter = end
            time.sleep(0.1)
        self.process_stop()
        self.alive = False
        self.TimetraceStopped.emit()
<file_sep>/pyiv.py
import PyCmdMessenger

# Open the Arduino board; generous timeout since motor moves can be slow.
# NOTE(review): the serial port is hard-coded -- confirm COM26 matches the rig.
arduino = PyCmdMessenger.ArduinoBoard("COM26",baud_rate = 115200,timeout=10.0)
# Command table mirroring the Arduino sketch: [name, argument format]
# where "s" = string, "i" = int, "?" = bool.
commands = [["Watchdog","s"],
            ["Acknowledge","s"],
            ["SwitchChannel","i?"],#"i?"],
            ["Error","s"],
            ["MotorCommand","ii"]]#"ii"]]
c = PyCmdMessenger.CmdMessenger(arduino,commands)
# Switch to channel 5 (flag False) and wait for the board's reply.
print("start sending")
c.send("SwitchChannel",5,False)
msg = c.receive()
print(msg)
# Command motor 1 with value 250 (units defined by the sketch -- verify).
print("start sending")
c.send("MotorCommand",1,250)
msg = c.receive()
print(msg)
##c.send("kSwitchChannel",1,True)
##msg = c.receive()
##print(msg)
<file_sep>/plot.py
import collections, math
from PyQt4 import QtCore
import pyqtgraph as pg
# Basic PyQtGraph settings
pg.setConfigOptions(antialias=True)
class IVplotWidget:
    """Thin wrapper that validates and stores a pyqtgraph layout widget."""

    def __init__(self, layout):
        if isinstance(layout, pg.GraphicsLayoutWidget):
            self.layout = layout
        else:
            raise ValueError("layout must be instance of pyqtgraph.GraphicsLayoutWidget")
class TimetraceIVplotWidget:
    """Dual-axis timetrace plot: current on the left Y axis, voltage on
    the right, sharing one time (X) axis.

    The axis names (x_axis, ly_axis, ry_axis) are the dictionary keys
    looked up in data_storage.data on each update.
    """

    def __init__(self, layout, x_axis="t", ly_axis="id", ry_axis="vd"):
        if not isinstance(layout, pg.GraphicsLayoutWidget):
            raise ValueError("layout must be instance of pyqtgraph.GraphicsLayoutWidget")
        self.layout = layout
        self.x_axis = x_axis
        self.ly_axis = ly_axis
        self.ry_axis = ry_axis
        self.current_curve = None
        self.current_color = pg.mkColor("y")
        self.voltage_curve = None
        self.voltage_color = pg.mkColor("g")
        # Kept as attributes so the right-axis ViewBox can track resizes.
        self.p1 = None
        self.p2 = None
        self.create_plot()

    def create_plot(self):
        """Build the plot item, the secondary (right-axis) ViewBox and
        the two curves."""
        self.posLabel = self.layout.addLabel(row=0, col=0, justify="right")
        self.plot = self.layout.addPlot(row=1, col=0)
        print(self.plot)
        p1 = self.plot
        p1.setLabel("left", "Current", units="A")
        p1.setLabel("bottom", 'Time', units="s")
        # Second ViewBox shares the X axis and drives the right Y axis.
        p2 = pg.ViewBox()
        p1.showAxis('right')
        p1.scene().addItem(p2)
        p1.getAxis('right').linkToView(p2)
        p2.setXLink(p1)
        p1.getAxis('right').setLabel("Voltage")
        self.current_curve = p1.plot(pen=self.current_color)
        self.current_curve.setZValue(900)   # current drawn above voltage
        self.voltage_curve = pg.PlotCurveItem(pen=self.voltage_color)
        self.voltage_curve.setZValue(800)
        p2.addItem(self.voltage_curve)
        self.p1 = p1
        self.p2 = p2
        # Keep the secondary ViewBox geometry in sync with the main one.
        self.p1.vb.sigResized.connect(self.updateViews)

    def updateViews(self):
        self.p2.setGeometry(self.p1.vb.sceneBoundingRect())

    def clear_plot(self):
        # BUG FIX: the original called self.curve.clear(), but no `curve`
        # attribute exists -- clear both curves create_plot() builds.
        self.current_curve.clear()
        self.voltage_curve.clear()

    def update_plot(self, data_storage, force=False):
        """Refresh both curves from data_storage.data; missing keys or
        empty series leave the plot unchanged."""
        try:
            time = data_storage.data[self.x_axis]
            current = data_storage.data[self.ly_axis]
            voltage = data_storage.data[self.ry_axis]
            if time and current and voltage:
                self.current_curve.setData(time, current)
                self.voltage_curve.setData(time, voltage)
                print("updating plot")
        except Exception as e:
            # Best effort: plotting must never kill the acquisition loop.
            print(str(e))
<file_sep>/data.py
import time, sys
from PyQt4 import QtCore
import numpy as np
class HistoryBuffer:
    """Fixed-size NumPy array ring buffer.

    Rows are kept oldest-first; the newest entry always lives in the
    last row of the underlying 2-D array.
    """

    def __init__(self, data_size, max_history_size, dtype=float):
        self.data_size = data_size
        self.max_history_size = max_history_size
        self.history_size = 0   # number of valid rows so far
        self.counter = 0        # total appends ever made
        self.buffer = np.empty(shape=(max_history_size, data_size), dtype=dtype)

    def append(self, data):
        """Append one row, evicting the oldest once the buffer is full."""
        self.counter += 1
        self.history_size = min(self.history_size + 1, self.max_history_size)
        self.buffer = np.roll(self.buffer, -1, axis=0)
        self.buffer[-1] = data

    def get_buffer(self):
        """Return only the rows that hold real data."""
        if self.history_size >= self.max_history_size:
            return self.buffer
        return self.buffer[-self.history_size:]

    def __getitem__(self, key):
        return self.buffer[key]
class TaskSignals(QtCore.QObject):
    """Task signals emitter"""
    # Emitted from the worker thread with the task's return value.
    result = QtCore.pyqtSignal(object)
class Task(QtCore.QRunnable):
    """A callable wrapped for execution on a QThreadPool worker thread.

    The callable's return value is published via ``signals.result``.
    """

    def __init__(self, task, *args, **kwargs):
        super().__init__()
        self.task = task
        self.args = args
        self.kwargs = kwargs
        self.signals = TaskSignals()

    def run(self):
        """Invoke the wrapped callable and emit its result."""
        outcome = self.task(*self.args, **self.kwargs)
        self.signals.result.emit(outcome)
class TimetraceDataStorage(QtCore.QObject):
    """Holds the most recent timetrace sweep and notifies listeners
    through Qt signals.

    Sweeps arrive via :meth:`update` as dicts with keys
    "t", "id", "ig", "vd", "vg".
    """

    history_updated = QtCore.pyqtSignal(object)
    data_updated = QtCore.pyqtSignal(object)
    data_recalculated = QtCore.pyqtSignal(object)
    average_updated = QtCore.pyqtSignal(object)

    def __init__(self, max_history_size=100, parent=None):
        super().__init__(parent)
        self.max_history_size = max_history_size
        self.data = {}
        # Use only one worker thread because it is not faster
        # with more threads (and memory consumption is much higher)
        self.threadpool = QtCore.QThreadPool()
        self.threadpool.setMaxThreadCount(1)
        self.reset()

    def reset(self):
        """Reset all data"""
        self.wait()
        # BUG FIX: the original assigned a local `data = {}` here, so
        # self.data kept its stale contents across resets.
        self.data = {}
        self.t = None
        self.history = None
        self.reset_data()

    def reset_data(self):
        """Reset current data"""
        self.wait()
        self.id = None
        self.ig = None
        self.vd = None
        self.vg = None
        self.average_counter = 0
        self.average = None

    def start_task(self, fn, *args, **kwargs):
        """Run function asynchronously in worker thread"""
        task = Task(fn, *args, **kwargs)
        self.threadpool.start(task)

    def wait(self):
        """Wait for worker threads to complete all running tasks"""
        self.threadpool.waitForDone()

    def update(self, data):
        """Accept one new sweep and schedule history/data updates."""
        self.average_counter += 1
        if self.t is None:
            self.t = data["t"]
        self.start_task(self.update_history, data.copy())
        self.start_task(self.update_data, data)

    def update_data(self, data):
        """Publish the new sweep and emit data_updated."""
        self.data = data
        self.id = data["id"]
        self.ig = data["ig"]
        self.vd = data["vd"]
        self.vg = data["vg"]
        self.data_updated.emit(self)

    def update_history(self, data):
        """Update measurement history (not implemented yet)."""
        pass

    def update_average(self, data):
        """Update average data (not implemented yet)."""
        pass
<file_sep>/iv_measurement.py
import visa
import keithley24xx
class IVmeasurement:
    """Skeleton driver for an IV measurement with a gate/drain SMU pair."""

    def __init__(self, data_storage):
        self.data_storage = data_storage

    def initialize(self, gateInstr_resource, drainInstr_resource):
        """Open both Keithley source-meters from their VISA resource strings.

        BUG FIX: `self` was missing from the original signature, so the
        instance was bound to the first resource parameter and any call
        raised a TypeError.
        """
        # NOTE(review): the file does `import keithley24xx`, so the bare
        # name Keithley24XX was a NameError; qualified with the module --
        # confirm the class name inside keithley24xx.
        self.gateInstr = keithley24xx.Keithley24XX(gateInstr_resource)
        self.drainInstr = keithley24xx.Keithley24XX(drainInstr_resource)

    def configure(self, config):
        pass

    def run(self):
        pass
<file_sep>/__main__.py
import sys, signal, time
from PyQt4 import QtCore, QtGui
from plot import TimetraceIVplotWidget
from backend import TimetraceMeasurement
from data import TimetraceDataStorage
from timetrace_view import Ui_TimetraceView
# Allow CTRL+C and/or SIGTERM to kill us (PyQt blocks it otherwise)
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
class TimetraceMainWindow(QtGui.QMainWindow, Ui_TimetraceView):
    """Main window: wires the generated UI to the acquisition thread,
    the data storage and the two timetrace plots."""

    def __init__(self,parent=None):
        super().__init__(parent)
        self.setupUi(self)
        # Two X-linked plots: drain current/voltage and gate current/voltage.
        self.DrainTimetracePlotWidget = TimetraceIVplotWidget(self.drain_current_plot,ly_axis = "id",ry_axis = "vd")
        self.GateTimetracePlotWidget = TimetraceIVplotWidget(self.gate_current_plot,ly_axis = "ig",ry_axis = "vg")
        self.DrainTimetracePlotWidget.plot.setXLink(self.GateTimetracePlotWidget.plot)
        self.data_storage = None
        self.timetrace_thread= None
        self.prev_timestamp = None   # wall-clock time of the last data update (for FPS)
        self.setup_timetrace_measurement()
        self.counter = 0             # number of data updates received so far
        self.update_buttons()
        self.load_settings()
        self.show()

    def setup_timetrace_measurement(self):
        """(Re)create the data storage and acquisition thread and connect
        their signals; any running thread is stopped first."""
        if self.timetrace_thread:
            self.stop()
        settings = QtCore.QSettings()
        self.data_storage = TimetraceDataStorage(max_history_size=settings.value("timetrace_history_size", 100, int))
        self.data_storage.data_updated.connect(self.update_data)
        self.data_storage.data_updated.connect(self.DrainTimetracePlotWidget.update_plot)
        ## self.data_storage.data_updated.connect(self.GateTimetracePlotWidget.update_plot)
        self.timetrace_thread = TimetraceMeasurement(self.data_storage)
        self.timetrace_thread.TimetraceStarted.connect(self.update_buttons)
        self.timetrace_thread.TimetraceStopped.connect(self.update_buttons)

    def save_settings(self):
        """Save spectrum analyzer settings and window geometry"""
        settings = QtCore.QSettings()
        settings.setValue("ds_voltage", self.dsVoltageSet.value())
        settings.setValue("gs_voltage", self.gsVoltageSet.value())
        settings.setValue("pulsed_gate", int(self.pulsedVoltageCheckBox.isChecked()))
        settings.setValue("pulse_width", self.pulseWidth.value())
        settings.setValue("pulse_delay", self.pulseDelay.value())
        settings.setValue("pulse_count", self.pulseCount.value())
        settings.setValue("gate_cutoff", self.gateIcutoff.value())
        settings.setValue("gate_cutoff_units", self.gateIcutoffUnits.currentIndex())
        # Save window state and geometry
        settings.setValue("window_geometry", self.saveGeometry())
        settings.setValue("window_state", self.saveState())
        settings.setValue("plotsplitter_state", self.plotSplitter.saveState())

    def load_settings(self):
        """Restore spectrum analyzer settings and window geometry"""
        settings = QtCore.QSettings()
        self.dsVoltageSet.setValue(settings.value("ds_voltage", 0, float))
        self.gsVoltageSet.setValue(settings.value("gs_voltage", 0, float))
        self.pulsedVoltageCheckBox.setChecked(settings.value("pulsed_gate",0 , int))
        self.setPulseParamsState(self.pulsedVoltageCheckBox.isChecked())
        self.pulseWidth.setValue(settings.value("pulse_width", 0, float))
        self.pulseDelay.setValue(settings.value("pulse_delay", 0, float))
        self.pulseCount.setValue(settings.value("pulse_count", 0, int))
        self.gateIcutoff.setValue(settings.value("gate_cutoff", 0, float))
        self.gateIcutoffUnits.setCurrentIndex(settings.value("gate_cutoff_units", 0, int))
        # Restore window state
        if settings.value("window_state"):
            self.restoreState(settings.value("window_state"))
        if settings.value("plotsplitter_state"):
            self.plotSplitter.restoreState(settings.value("plotsplitter_state"))
        # Window geometry has to be restored only after show(), because initial
        # maximization doesn't work otherwise (at least not in some window managers on X11)
        if settings.value("window_geometry"):
            self.restoreGeometry(settings.value("window_geometry"))

    def show_status(self,message, timeout = 2000):
        """Show `message` in the status bar for `timeout` milliseconds."""
        self.statusbar.showMessage(message,timeout)

    def start(self):
        """Start the acquisition thread unless it is already running."""
        self.prev_timestamp = time.time()
        if not self.timetrace_thread.alive:
            self.timetrace_thread.start()
            self.show_status("started")

    def stop(self):
        """Stop the acquisition thread if it is running."""
        if self.timetrace_thread.alive:
            self.timetrace_thread.stop()
            self.show_status("stopped")

    def update_buttons(self):
        """Update state of control buttons"""
        self.StartButton.setEnabled(not self.timetrace_thread.alive)
        self.StopButton.setEnabled(self.timetrace_thread.alive)

    def update_data(self):
        """Per-sweep bookkeeping: report sweep time / FPS in the status bar.
        NOTE(review): acquisition auto-stops after 50 updates -- this looks
        like a leftover debug limit; confirm it is intended."""
        self.counter += 1
        if self.counter>50:
            self.on_StopButton_clicked()
        timestamp = time.time()
        sweep_time = timestamp - self.prev_timestamp
        self.prev_timestamp = timestamp
        fps = 0
        try:
            fps= 1/sweep_time
        except ZeroDivisionError:
            pass
        finally:
            self.show_status("sweep time: {:10.5f}; FPS{:10.5f};".format(sweep_time,fps))

    @QtCore.pyqtSlot()
    def on_StartButton_clicked(self):
        print("start pressed\n")
        self.start()

    @QtCore.pyqtSlot()
    def on_StopButton_clicked(self):
        print("stop pressed\n")
        self.stop()

    @QtCore.pyqtSlot()
    def on_pulsButton_clicked(self):
        # Pulse trigger button -- currently only logs the click.
        print("pulse\n")

    def setPulseParamsState(self,state):
        """Enable/disable the pulse parameter controls as a group."""
        self.pulseWidth.setEnabled(state)
        self.pulseDelay.setEnabled(state)
        self.pulseCount.setEnabled(state)
        self.pulsButton.setEnabled(state)

    @QtCore.pyqtSlot(bool)
    def on_pulsedVoltageCheckBox_toggled(self,checked):
        print("pulse checked: {0}".format(checked))
        self.setPulseParamsState(checked)

    @QtCore.pyqtSlot(float)
    def on_gateIcutoff_valueChanged(self,value):
        print("gate {0}".format(value))

    @QtCore.pyqtSlot(float)
    def on_dsVoltageSet_valueChanged(self,value):
        print("ds changed")

    @QtCore.pyqtSlot(float)
    def on_gsVoltageSet_valueChanged(self,value):
        print("gs changed")

    def closeEvent(self, event):
        """Save settings when main window is closed"""
        self.stop()
        self.save_settings()
        print("close event")
def main():
    """Create the Qt application, show the main window and run the event loop."""
    application = QtGui.QApplication(sys.argv)
    application.setOrganizationName("TimetraceMeasurementModule")
    application.setOrganizationDomain("fz.juelich.de")
    application.setApplicationName("TimetraceMeasurementModule")
    window = TimetraceMainWindow()
    sys.exit(application.exec_())


if __name__ == "__main__":
    main()
<file_sep>/py_pulse.py
import visa
import time
class Keithley24XX:
    """Minimal SCPI wrapper for a Keithley 24xx SourceMeter reached via VISA.

    Setter methods silently ignore out-of-vocabulary arguments and clamp
    numeric settings to the instrument limits.
    """

    def __init__(self, resource):
        rm = visa.ResourceManager()
        self.instrument = rm.open_resource(resource)

    # ---------------------------------------------------------------- #
    # Source function shape (DC vs pulse)
    FUNCTION_SHAPES = ['DC', 'PULS']
    DC_SHAPE, PULSE_SHAPE = FUNCTION_SHAPES

    def SetFunctionShape(self, shape):
        """Select DC or pulsed output; unknown shapes are ignored."""
        if shape in self.FUNCTION_SHAPES:
            self.instrument.write("SOUR:FUNC:SHAP {0}".format(shape))

    def SetDC(self):
        self.SetFunctionShape(self.DC_SHAPE)

    def SetPulse(self):
        self.SetFunctionShape(self.PULSE_SHAPE)

    # ---------------------------------------------------------------- #
    # Source function (voltage vs current)
    SOURCE_FUNCTIONS = ['VOLT', 'CURR']
    VOLT_SOURCE_FUNCTION, CURR_SOURCE_FUNCTION = SOURCE_FUNCTIONS

    def SetSourceFunction(self, func):
        if func in self.SOURCE_FUNCTIONS:
            self.instrument.write("SOUR:FUNC {0}".format(func))

    def SetVoltageSourceFunction(self):
        self.SetSourceFunction(self.VOLT_SOURCE_FUNCTION)

    def SetCurrentSourceFunction(self):
        self.SetSourceFunction(self.CURR_SOURCE_FUNCTION)

    # ---------------------------------------------------------------- #
    # Sourcing mode (fixed / list / sweep)
    SOURSING_MODES = ['FIX', 'LIST', 'SWE']
    FIXED_SOURCING_MODE, lIST_SOURCING_MODE, SWEEP_SOURCING_MODE = SOURSING_MODES

    def SetSourceMode(self, func, mode):
        if (mode in self.SOURSING_MODES) and (func in self.SOURCE_FUNCTIONS):
            self.instrument.write("SOUR:{f}:MODE {m}".format(f=func, m=mode))

    # BUG FIX: the six helpers below called the undefined name
    # `SetSourcingMode` and referenced the class constants without
    # `self.`, so every one of them raised NameError when invoked.
    def SetFixedVoltageSourceMode(self):
        self.SetSourceMode(self.VOLT_SOURCE_FUNCTION, self.FIXED_SOURCING_MODE)

    def SetFixedCurrentSourceMode(self):
        self.SetSourceMode(self.CURR_SOURCE_FUNCTION, self.FIXED_SOURCING_MODE)

    def SetListVoltageSourceMode(self):
        self.SetSourceMode(self.VOLT_SOURCE_FUNCTION, self.lIST_SOURCING_MODE)

    def SetLisrCurrentSourceMode(self):
        self.SetSourceMode(self.CURR_SOURCE_FUNCTION, self.lIST_SOURCING_MODE)

    def SetSweepVoltageSourceMode(self):
        self.SetSourceMode(self.VOLT_SOURCE_FUNCTION, self.SWEEP_SOURCING_MODE)

    def SetSweepCurrentSourceMode(self):
        self.SetSourceMode(self.CURR_SOURCE_FUNCTION, self.SWEEP_SOURCING_MODE)

    # ---------------------------------------------------------------- #
    # Sourcing range
    DEFAULT_RANGES = ['DEF', 'MIN', 'MAX', 'UP', 'DOWN']
    DEFAULT_RANGE, MIN_RANGE, MAX_RANGE, UP_RANGE, DOWN_RANGE = DEFAULT_RANGES
    AUTO_RANGE_STATES = AUTO_RANGE_ON, AUTO_RANGE_OFF = ['ON', 'OFF']
    ALL_VOLTAGE_RANGES = ['200E-3', '2', '20', '100']
    VOLT_RANGE_200mV, VOLT_RANGE_2V, VOLT_RANGE_20V, VOLT_RANGE_100V = ALL_VOLTAGE_RANGES
    ALL_CURRENT_RANGES = ['10E-6', '100E-6', '1E-3', '10E-3', '100E-3', '1']
    CURR_RANGE_10uA, CURR_RANGE_100uA, CURR_RANGE_1mA, CURR_RANGE_10mA, CURR_RANGE_100mA, CURR_RANGE_1A = ALL_CURRENT_RANGES

    def SetSourceRange(self, func, range):
        if func in self.SOURCE_FUNCTIONS:
            if (range in self.DEFAULT_RANGES) or (range in self.ALL_VOLTAGE_RANGES) or (range in self.ALL_CURRENT_RANGES):
                self.instrument.write("SOUR:{f}:RANG {r}".format(f=func, r=range))

    def SetVoltageSourceRange(self, range):
        # Generalized: also accept the DEF/MIN/MAX/UP/DOWN mnemonics, which
        # SetSourceRange supports and the __main__ script passes (MIN_RANGE);
        # the original wrapper silently dropped them.
        if range in self.ALL_VOLTAGE_RANGES or range in self.DEFAULT_RANGES:
            self.SetSourceRange(self.VOLT_SOURCE_FUNCTION, range)

    def SetCurrentSourceRange(self, range):
        if range in self.ALL_CURRENT_RANGES or range in self.DEFAULT_RANGES:
            self.SetSourceRange(self.CURR_SOURCE_FUNCTION, range)

    def SetAutoRange(self, func, state):
        if func in self.SOURCE_FUNCTIONS:
            if state in self.AUTO_RANGE_STATES:
                self.instrument.write("SOUR:{f}:RANG:AUTO {s}".format(f=func, s=state))

    # ---------------------------------------------------------------- #
    # Pulse timing
    MIN_PULSE_WIDTH = 0.00015
    MAX_PULSE_WIDTH = 0.005

    def SetPulseWidth(self, seconds):
        """Set the pulse width in seconds, clamped to the instrument limits."""
        if seconds < self.MIN_PULSE_WIDTH:
            seconds = self.MIN_PULSE_WIDTH
        elif seconds > self.MAX_PULSE_WIDTH:
            seconds = self.MAX_PULSE_WIDTH
        self.instrument.write("SOUR:PULS:WIDT {0}".format(seconds))

    MIN_DELAY = 0
    MAX_DELAY = 9999.99872

    def SetPulseDelay(self, delay):
        """Set the pulse delay in seconds, clamped to the instrument limits."""
        if delay < self.MIN_DELAY:
            delay = self.MIN_DELAY
        elif delay > self.MAX_DELAY:
            delay = self.MAX_DELAY
        # BUG FIX: the original wrote SOUR:PULS:WIDT here, silently
        # overwriting the pulse width instead of setting the delay.
        self.instrument.write("SOUR:PULS:DEL {0}".format(delay))

    # ---------------------------------------------------------------- #
    # Output level and state
    def SetVoltageAmplitude(self, voltage):
        self.instrument.write("SOUR:VOLT:LEV {0}".format(voltage))

    def SetCurrentAmplitude(self, current):
        self.instrument.write("SOUR:CURR:LEV {0}".format(current))

    def OutputOff(self):
        """Switch the output off; returns True when the readback confirms it."""
        self.instrument.write("OUTP:STAT OFF")
        # NOTE(review): many 24xx firmwares answer "0"/"1" to OUTP:STAT?,
        # not "OFF"/"ON" -- verify this comparison against the instrument.
        if self.instrument.ask("OUTP:STAT?") == 'OFF':
            return True
        else:
            return False

    def StartOutput(self):
        self.instrument.write(":INIT")

    def OutputOn(self):
        """Switch the output on; returns True when the readback confirms it."""
        self.instrument.write("OUTP:STAT ON")
        if self.instrument.ask("OUTP:STAT?") == 'ON':
            return True
        else:
            return False

    def StartOutputAndRead(self):
        return self.instrument.ask(":READ?")

    def DisablePulseMeasurements(self):
        self.instrument.write(":SENSe:FUNCtion:OFF:ALL")

    MIN_TRIG_COUNT = 1
    MAX_TRIG_COUNT = 2500

    def SetTriggerCount(self, count):
        """Set the trigger count, clamped to [1, 2500]."""
        if count < self.MIN_TRIG_COUNT:
            count = self.MIN_TRIG_COUNT
        elif count > self.MAX_TRIG_COUNT:
            count = self.MAX_TRIG_COUNT
        self.instrument.write(":TRIG:COUN {0}".format(count))

    def IDN(self):
        if self.instrument:
            return self.instrument.ask("*IDN?")

    def Reset(self):
        # BUG FIX: *RST is a command with no reply; ask() would block
        # waiting for a response that never arrives.
        self.instrument.write("*RST")
if __name__ == "__main__":
    # Smoke test against a real instrument on GPIB address 5.
    k = Keithley24XX('GPIB0::5::INSTR')
##    k.Reset()
    print(k.IDN())
    # Exercise source-function switching with settling pauses.
    k.SetCurrentSourceFunction()
    time.sleep(1)
    k.SetVoltageSourceFunction()
    time.sleep(1)
    # Configure a single short pulse with measurements disabled.
    k.SetPulse()
    k.DisablePulseMeasurements()
    k.SetPulseWidth(0.005)
    k.SetTriggerCount(1)
    time.sleep(1)
    k.SetVoltageSourceRange(k.MIN_RANGE)
    time.sleep(1)
    k.SetAutoRange(k.VOLT_SOURCE_FUNCTION,k.AUTO_RANGE_ON)
    print(k.AUTO_RANGE_ON)
##    k.SetSourcingRange(k.VOLT_SOURCE_FUNCTION,k.VOLT_RANGE_200mV)
    # NOTE(review): 100 here is the source level in volts -- confirm the
    # device under test tolerates 100 V before running this script.
    k.SetVoltageAmplitude(100)
    time.sleep(1)
##    k.StartOutput()
    k.OutputOn()
##    print(k.StartOutputAndRead())
    time.sleep(2)
    k.OutputOff()
    k.SetDC()
    time.sleep(1)
<file_sep>/timetrace_view.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'timetrace_view.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# pyuic4-generated compatibility shims: on PyQt4/API v1 QString.fromUtf8
# exists; on API v2 (or Python 3) plain str is used instead.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

# Translation helper: older PyQt4 builds take an explicit encoding argument.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_TimetraceView(object):
def setupUi(self, TimetraceView):
TimetraceView.setObjectName(_fromUtf8("TimetraceView"))
TimetraceView.resize(950, 755)
self.centralwidget = QtGui.QWidget(TimetraceView)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.plotSplitter = QtGui.QSplitter(self.centralwidget)
self.plotSplitter.setOrientation(QtCore.Qt.Vertical)
self.plotSplitter.setObjectName(_fromUtf8("plotSplitter"))
self.drain_current_plot = GraphicsLayoutWidget(self.plotSplitter)
self.drain_current_plot.setObjectName(_fromUtf8("drain_current_plot"))
self.gate_current_plot = GraphicsLayoutWidget(self.plotSplitter)
self.gate_current_plot.setObjectName(_fromUtf8("gate_current_plot"))
self.horizontalLayout.addWidget(self.plotSplitter)
TimetraceView.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(TimetraceView)
self.menubar.setGeometry(QtCore.QRect(0, 0, 950, 21))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuWindow = QtGui.QMenu(self.menubar)
self.menuWindow.setObjectName(_fromUtf8("menuWindow"))
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
TimetraceView.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(TimetraceView)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
TimetraceView.setStatusBar(self.statusbar)
self.voltagesDock = QtGui.QDockWidget(TimetraceView)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.voltagesDock.sizePolicy().hasHeightForWidth())
self.voltagesDock.setSizePolicy(sizePolicy)
self.voltagesDock.setMaximumSize(QtCore.QSize(300, 300))
self.voltagesDock.setObjectName(_fromUtf8("voltagesDock"))
self.dockWidgetContents_2 = QtGui.QWidget()
self.dockWidgetContents_2.setObjectName(_fromUtf8("dockWidgetContents_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.dockWidgetContents_2)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.groupBox = QtGui.QGroupBox(self.dockWidgetContents_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
self.dsVoltageSet = QtGui.QDoubleSpinBox(self.groupBox)
self.dsVoltageSet.setDecimals(3)
self.dsVoltageSet.setSingleStep(0.001)
self.dsVoltageSet.setObjectName(_fromUtf8("dsVoltageSet"))
self.gridLayout_2.addWidget(self.dsVoltageSet, 0, 1, 1, 1)
self.verticalLayout_2.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(self.dockWidgetContents_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_3 = QtGui.QLabel(self.groupBox_2)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
self.label_4 = QtGui.QLabel(self.groupBox_2)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
self.pulseDelay = QtGui.QDoubleSpinBox(self.groupBox_2)
self.pulseDelay.setObjectName(_fromUtf8("pulseDelay"))
self.gridLayout.addWidget(self.pulseDelay, 3, 1, 1, 2)
self.label_5 = QtGui.QLabel(self.groupBox_2)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout.addWidget(self.label_5, 4, 0, 1, 1)
self.pulsedVoltageCheckBox = QtGui.QCheckBox(self.groupBox_2)
self.pulsedVoltageCheckBox.setObjectName(_fromUtf8("pulsedVoltageCheckBox"))
self.gridLayout.addWidget(self.pulsedVoltageCheckBox, 1, 0, 1, 3)
self.label_2 = QtGui.QLabel(self.groupBox_2)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.pulseWidth = QtGui.QDoubleSpinBox(self.groupBox_2)
self.pulseWidth.setObjectName(_fromUtf8("pulseWidth"))
self.gridLayout.addWidget(self.pulseWidth, 2, 1, 1, 2)
self.gsVoltageSet = QtGui.QDoubleSpinBox(self.groupBox_2)
self.gsVoltageSet.setDecimals(3)
self.gsVoltageSet.setSingleStep(0.001)
self.gsVoltageSet.setObjectName(_fromUtf8("gsVoltageSet"))
self.gridLayout.addWidget(self.gsVoltageSet, 0, 1, 1, 2)
self.pulseCount = QtGui.QSpinBox(self.groupBox_2)
self.pulseCount.setObjectName(_fromUtf8("pulseCount"))
self.gridLayout.addWidget(self.pulseCount, 4, 1, 1, 2)
self.pulsButton = QtGui.QPushButton(self.groupBox_2)
self.pulsButton.setObjectName(_fromUtf8("pulsButton"))
self.gridLayout.addWidget(self.pulsButton, 5, 0, 1, 3)
self.verticalLayout_2.addWidget(self.groupBox_2)
self.voltagesDock.setWidget(self.dockWidgetContents_2)
TimetraceView.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.voltagesDock)
self.measuredDock = QtGui.QDockWidget(TimetraceView)
self.measuredDock.setMaximumSize(QtCore.QSize(524287, 200))
self.measuredDock.setObjectName(_fromUtf8("measuredDock"))
self.dockWidgetContents_3 = QtGui.QWidget()
self.dockWidgetContents_3.setObjectName(_fromUtf8("dockWidgetContents_3"))
self.gridLayout_3 = QtGui.QGridLayout(self.dockWidgetContents_3)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.label_7 = QtGui.QLabel(self.dockWidgetContents_3)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_3.addWidget(self.label_7, 1, 0, 1, 1)
self.label_6 = QtGui.QLabel(self.dockWidgetContents_3)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout_3.addWidget(self.label_6, 0, 0, 1, 3)
self.label_9 = QtGui.QLabel(self.dockWidgetContents_3)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_3.addWidget(self.label_9, 3, 0, 1, 1)
self.label_8 = QtGui.QLabel(self.dockWidgetContents_3)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_3.addWidget(self.label_8, 2, 0, 1, 1)
self.measureDrainV = QtGui.QLineEdit(self.dockWidgetContents_3)
self.measureDrainV.setObjectName(_fromUtf8("measureDrainV"))
self.gridLayout_3.addWidget(self.measureDrainV, 0, 3, 1, 1)
self.measuredDrainI = QtGui.QLineEdit(self.dockWidgetContents_3)
self.measuredDrainI.setObjectName(_fromUtf8("measuredDrainI"))
self.gridLayout_3.addWidget(self.measuredDrainI, 1, 3, 1, 1)
self.measureGateV = QtGui.QLineEdit(self.dockWidgetContents_3)
self.measureGateV.setObjectName(_fromUtf8("measureGateV"))
self.gridLayout_3.addWidget(self.measureGateV, 2, 3, 1, 1)
self.measuredGateI = QtGui.QLineEdit(self.dockWidgetContents_3)
self.measuredGateI.setObjectName(_fromUtf8("measuredGateI"))
self.gridLayout_3.addWidget(self.measuredGateI, 3, 3, 1, 1)
self.measuredDock.setWidget(self.dockWidgetContents_3)
TimetraceView.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.measuredDock)
self.settingsDock = QtGui.QDockWidget(TimetraceView)
self.settingsDock.setMaximumSize(QtCore.QSize(524287, 100))
self.settingsDock.setObjectName(_fromUtf8("settingsDock"))
self.dockWidgetContents_4 = QtGui.QWidget()
self.dockWidgetContents_4.setObjectName(_fromUtf8("dockWidgetContents_4"))
self.gridLayout_4 = QtGui.QGridLayout(self.dockWidgetContents_4)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.label_10 = QtGui.QLabel(self.dockWidgetContents_4)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_4.addWidget(self.label_10, 0, 0, 1, 1)
self.gateIcutoff = QtGui.QDoubleSpinBox(self.dockWidgetContents_4)
self.gateIcutoff.setObjectName(_fromUtf8("gateIcutoff"))
self.gridLayout_4.addWidget(self.gateIcutoff, 0, 1, 1, 1)
self.gateIcutoffUnits = QtGui.QComboBox(self.dockWidgetContents_4)
self.gateIcutoffUnits.setObjectName(_fromUtf8("gateIcutoffUnits"))
self.gateIcutoffUnits.addItem(_fromUtf8(""))
self.gateIcutoffUnits.addItem(_fromUtf8(""))
self.gateIcutoffUnits.addItem(_fromUtf8(""))
self.gateIcutoffUnits.addItem(_fromUtf8(""))
self.gateIcutoffUnits.addItem(_fromUtf8(""))
self.gridLayout_4.addWidget(self.gateIcutoffUnits, 0, 2, 1, 1)
self.settingsDock.setWidget(self.dockWidgetContents_4)
TimetraceView.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.settingsDock)
self.controlDock = QtGui.QDockWidget(TimetraceView)
self.controlDock.setMaximumSize(QtCore.QSize(524287, 200))
self.controlDock.setObjectName(_fromUtf8("controlDock"))
self.dockWidgetContents_6 = QtGui.QWidget()
self.dockWidgetContents_6.setObjectName(_fromUtf8("dockWidgetContents_6"))
self.gridLayout_5 = QtGui.QGridLayout(self.dockWidgetContents_6)
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.StopButton = QtGui.QPushButton(self.dockWidgetContents_6)
self.StopButton.setObjectName(_fromUtf8("StopButton"))
self.gridLayout_5.addWidget(self.StopButton, 7, 1, 1, 1)
self.StartButton = QtGui.QPushButton(self.dockWidgetContents_6)
self.StartButton.setObjectName(_fromUtf8("StartButton"))
self.gridLayout_5.addWidget(self.StartButton, 7, 0, 1, 1)
self.experiment_name = QtGui.QLineEdit(self.dockWidgetContents_6)
self.experiment_name.setInputMask(_fromUtf8(""))
self.experiment_name.setDragEnabled(False)
self.experiment_name.setObjectName(_fromUtf8("experiment_name"))
self.gridLayout_5.addWidget(self.experiment_name, 2, 0, 1, 2)
self.label_11 = QtGui.QLabel(self.dockWidgetContents_6)
self.label_11.setAlignment(QtCore.Qt.AlignCenter)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout_5.addWidget(self.label_11, 1, 0, 1, 2)
self.controlDock.setWidget(self.dockWidgetContents_6)
TimetraceView.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.controlDock)
self.actionWorkinFolder = QtGui.QAction(TimetraceView)
self.actionWorkinFolder.setObjectName(_fromUtf8("actionWorkinFolder"))
self.actionExit = QtGui.QAction(TimetraceView)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
self.actionRestore_windows = QtGui.QAction(TimetraceView)
self.actionRestore_windows.setObjectName(_fromUtf8("actionRestore_windows"))
self.actionAbout = QtGui.QAction(TimetraceView)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.menuFile.addAction(self.actionWorkinFolder)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuWindow.addAction(self.actionRestore_windows)
self.menuHelp.addAction(self.actionAbout)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuWindow.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(TimetraceView)
QtCore.QMetaObject.connectSlotsByName(TimetraceView)
def retranslateUi(self, TimetraceView):
TimetraceView.setWindowTitle(_translate("TimetraceView", "TMM (Timetrace Measurement Module)", None))
self.menuFile.setTitle(_translate("TimetraceView", "File", None))
self.menuWindow.setTitle(_translate("TimetraceView", "Window", None))
self.menuHelp.setTitle(_translate("TimetraceView", "Help", None))
self.voltagesDock.setWindowTitle(_translate("TimetraceView", "Voltages", None))
self.groupBox.setTitle(_translate("TimetraceView", "Drain-Source Voltage", None))
self.label.setText(_translate("TimetraceView", "DS_Voltage", None))
self.groupBox_2.setTitle(_translate("TimetraceView", "Gate Voltage", None))
self.label_3.setText(_translate("TimetraceView", "Pulse width", None))
self.label_4.setText(_translate("TimetraceView", "Pulse delay", None))
self.label_5.setText(_translate("TimetraceView", "Pulse count", None))
self.pulsedVoltageCheckBox.setText(_translate("TimetraceView", "Pulsed", None))
self.label_2.setText(_translate("TimetraceView", "GS_Voltage", None))
self.pulsButton.setText(_translate("TimetraceView", "Pulse", None))
self.measuredDock.setWindowTitle(_translate("TimetraceView", "Measured", None))
self.label_7.setText(_translate("TimetraceView", "Drain I", None))
self.label_6.setText(_translate("TimetraceView", "Drain V", None))
self.label_9.setText(_translate("TimetraceView", "Gate I", None))
self.label_8.setText(_translate("TimetraceView", "Gate V", None))
self.settingsDock.setWindowTitle(_translate("TimetraceView", "Settings", None))
self.label_10.setText(_translate("TimetraceView", "Gate Current Cutoff", None))
self.gateIcutoffUnits.setItemText(0, _translate("TimetraceView", "A", None))
self.gateIcutoffUnits.setItemText(1, _translate("TimetraceView", "mA", None))
self.gateIcutoffUnits.setItemText(2, _translate("TimetraceView", "uA", None))
self.gateIcutoffUnits.setItemText(3, _translate("TimetraceView", "nA", None))
self.gateIcutoffUnits.setItemText(4, _translate("TimetraceView", "pA", None))
self.controlDock.setWindowTitle(_translate("TimetraceView", "Controls", None))
self.StopButton.setText(_translate("TimetraceView", "Stop", None))
self.StartButton.setText(_translate("TimetraceView", "Start", None))
self.label_11.setText(_translate("TimetraceView", "Experimant name", None))
self.actionWorkinFolder.setText(_translate("TimetraceView", "WorkinFolder", None))
self.actionExit.setText(_translate("TimetraceView", "Exit", None))
self.actionRestore_windows.setText(_translate("TimetraceView", "Restore windows", None))
self.actionAbout.setText(_translate("TimetraceView", "About", None))
from pyqtgraph import GraphicsLayoutWidget
if __name__ == "__main__":
    # Manual smoke test: instantiate the generated UI inside a bare
    # QMainWindow and run the Qt event loop until the window is closed.
    import sys
    app = QtGui.QApplication(sys.argv)
    TimetraceView = QtGui.QMainWindow()
    ui = Ui_TimetraceView()
    ui.setupUi(TimetraceView)
    TimetraceView.show()
    sys.exit(app.exec_())  # propagate Qt's exit code to the shell
<file_sep>/keithley24xx.py
import visa
import time
class Keithley24XX:
    """SCPI driver wrapper for Keithley 24xx-series SourceMeters over VISA.

    All Set* methods validate their arguments against the class-level
    constant lists and silently ignore invalid values (the original
    driver's behaviour, preserved here).
    """

    def __init__(self, resource):
        # `resource` is a VISA resource string, e.g. 'GPIB0::5::INSTR'.
        rm = visa.ResourceManager()
        self.instrument = rm.open_resource(resource)

    ##################################################################################
    ## FUNCTION SHAPE (SOUR:FUNC:SHAP)
    FUNCTION_SHAPES = ['DC', 'PULS']
    DC_SHAPE, PULSE_SHAPE = FUNCTION_SHAPES

    def SetFunctionShape(self, shape):
        """Select DC or pulsed output shape."""
        if shape in self.FUNCTION_SHAPES:
            self.instrument.write("SOUR:FUNC:SHAP {0}".format(shape))

    def SetDC(self):
        self.SetFunctionShape(self.DC_SHAPE)

    def SetPulse(self):
        self.SetFunctionShape(self.PULSE_SHAPE)

    ##################################################################################
    ## SOURCE FUNCTION (SOUR:FUNC)
    SOURCE_FUNCTIONS = ['VOLT', 'CURR']
    VOLT_SOURCE_FUNCTION, CURR_SOURCE_FUNCTION = SOURCE_FUNCTIONS

    def SetSourceFunction(self, func):
        """Source voltage or current."""
        if func in self.SOURCE_FUNCTIONS:
            self.instrument.write("SOUR:FUNC {0}".format(func))

    def SetVoltageSourceFunction(self):
        self.SetSourceFunction(self.VOLT_SOURCE_FUNCTION)

    def SetCurrentSourceFunction(self):
        self.SetSourceFunction(self.CURR_SOURCE_FUNCTION)

    ##################################################################################
    ## SOURCING MODE (SOUR:<func>:MODE)
    # NOTE: the misspelled attribute names (SOURSING_MODES, lIST_SOURCING_MODE)
    # are kept verbatim for backward compatibility with existing callers.
    SOURSING_MODES = ['FIX', 'LIST', 'SWE']
    FIXED_SOURCING_MODE, lIST_SOURCING_MODE, SWEEP_SOURCING_MODE = SOURSING_MODES

    def SetSourceMode(self, func, mode):
        """Select fixed / list / sweep sourcing for the given function."""
        if (mode in self.SOURSING_MODES) and (func in self.SOURCE_FUNCTIONS):
            self.instrument.write("SOUR:{f}:MODE {m}".format(f=func, m=mode))

    # BUG FIX: the helpers below originally called a non-existent
    # self.SetSourcingMode() and referenced the class constants without
    # `self.`, so every call raised AttributeError/NameError.
    def SetFixedVoltageSourceMode(self):
        self.SetSourceMode(self.VOLT_SOURCE_FUNCTION, self.FIXED_SOURCING_MODE)

    def SetFixedCurrentSourceMode(self):
        self.SetSourceMode(self.CURR_SOURCE_FUNCTION, self.FIXED_SOURCING_MODE)

    def SetListVoltageSourceMode(self):
        self.SetSourceMode(self.VOLT_SOURCE_FUNCTION, self.lIST_SOURCING_MODE)

    def SetLisrCurrentSourceMode(self):
        # Name typo ("Lisr") preserved for compatibility; alias added below.
        self.SetSourceMode(self.CURR_SOURCE_FUNCTION, self.lIST_SOURCING_MODE)

    # Correctly spelled alias for new code.
    SetListCurrentSourceMode = SetLisrCurrentSourceMode

    def SetSweepVoltageSourceMode(self):
        self.SetSourceMode(self.VOLT_SOURCE_FUNCTION, self.SWEEP_SOURCING_MODE)

    def SetSweepCurrentSourceMode(self):
        self.SetSourceMode(self.CURR_SOURCE_FUNCTION, self.SWEEP_SOURCING_MODE)

    ##################################################################################
    ## SOURCING RANGE (SOUR:<func>:RANG)
    DEFAULT_RANGES = ['DEF', 'MIN', 'MAX', 'UP', 'DOWN']
    DEFAULT_RANGE, MIN_RANGE, MAX_RANGE, UP_RANGE, DOWN_RANGE = DEFAULT_RANGES
    SWITCH_STATES = STATE_ON, STATE_OFF = ['ON', 'OFF']
    ALL_VOLTAGE_RANGES = ['200E-3', '2', '20', '100']
    VOLT_RANGE_200mV, VOLT_RANGE_2V, VOLT_RANGE_20V, VOLT_RANGE_100V = ALL_VOLTAGE_RANGES
    ALL_CURRENT_RANGES = ['10E-6', '100E-6', '1E-3', '10E-3', '100E-3', '1']
    CURR_RANGE_10uA, CURR_RANGE_100uA, CURR_RANGE_1mA, CURR_RANGE_10mA, CURR_RANGE_100mA, CURR_RANGE_1A = ALL_CURRENT_RANGES

    def SetSourceRange(self, func, rang):
        """Select a fixed source range (named or numeric) for `func`."""
        if func in self.SOURCE_FUNCTIONS:
            if (rang in self.DEFAULT_RANGES) or (rang in self.ALL_VOLTAGE_RANGES) or (rang in self.ALL_CURRENT_RANGES):
                self.instrument.write("SOUR:{f}:RANG {r}".format(f=func, r=rang))

    def SetVoltageSourceRange(self, rang):
        self.SetSourceRange(self.VOLT_SOURCE_FUNCTION, rang)

    def SetCurrentSourceRange(self, rang):
        self.SetSourceRange(self.CURR_SOURCE_FUNCTION, rang)

    def SetAutoRange(self, func, state):
        """Enable or disable source auto-ranging for `func`."""
        if func in self.SOURCE_FUNCTIONS:
            if state in self.SWITCH_STATES:
                self.instrument.write("SOUR:{f}:RANG:AUTO {s}".format(f=func, s=state))

    ##################################################################################
    ## SOURCING AMPLITUDE (SOUR:<func> <ampl>)
    DEFAULT_AMPLITUDES = ['DEF', 'MIN', 'MAX']
    DEFAULT_AMPLITUDE, MIN_AMPLITUDE, MAX_AMPLITUDE = DEFAULT_AMPLITUDES
    MAX_VOLT_AMPL_VALUE, MIN_VOLT_AMPL_VALUE = [105, -105]
    MAX_CURR_AMPL_VALUE, MIN_CURR_AMPL_VALUE = [10.5, -10.5]

    def SetFixedModeAmplitude(self, func, ampl):
        """Program the fixed-mode source level; named or bounded numeric."""
        if func in self.SOURCE_FUNCTIONS:
            strFmt = "SOUR:{f} {a}"
            if ampl in self.DEFAULT_AMPLITUDES:
                self.instrument.write(strFmt.format(f=func, a=ampl))
            elif func == self.VOLT_SOURCE_FUNCTION:
                if (ampl >= self.MIN_VOLT_AMPL_VALUE) and (ampl <= self.MAX_VOLT_AMPL_VALUE):
                    self.instrument.write(strFmt.format(f=func, a=ampl))
            elif func == self.CURR_SOURCE_FUNCTION:
                if (ampl >= self.MIN_CURR_AMPL_VALUE) and (ampl <= self.MAX_CURR_AMPL_VALUE):
                    self.instrument.write(strFmt.format(f=func, a=ampl))

    def SetVoltageAmplitude(self, volt):
        self.SetFixedModeAmplitude(self.VOLT_SOURCE_FUNCTION, volt)

    def SetCurrentAmplitude(self, curr):
        self.SetFixedModeAmplitude(self.CURR_SOURCE_FUNCTION, curr)

    ##################################################################################
    ## FIXED AMPLITUDE WHEN TRIGGERED (SOUR:<func>:TRIG)
    def SetFixedModeAmplitudeWhenTriggered(self, func, ampl):
        """Program the level applied when the trigger fires."""
        if func in self.SOURCE_FUNCTIONS:
            strFmt = "SOUR:{f}:TRIG {a}"
            if ampl in self.DEFAULT_AMPLITUDES:
                self.instrument.write(strFmt.format(f=func, a=ampl))
            elif func == self.VOLT_SOURCE_FUNCTION:
                if (ampl >= self.MIN_VOLT_AMPL_VALUE) and (ampl <= self.MAX_VOLT_AMPL_VALUE):
                    self.instrument.write(strFmt.format(f=func, a=ampl))
            elif func == self.CURR_SOURCE_FUNCTION:
                if (ampl >= self.MIN_CURR_AMPL_VALUE) and (ampl <= self.MAX_CURR_AMPL_VALUE):
                    self.instrument.write(strFmt.format(f=func, a=ampl))

    def SetVoltageAmplitudeWhenTriggered(self, volt):
        self.SetFixedModeAmplitudeWhenTriggered(self.VOLT_SOURCE_FUNCTION, volt)

    def SetCurrentAmplitudeWhenTriggered(self, curr):
        self.SetFixedModeAmplitudeWhenTriggered(self.CURR_SOURCE_FUNCTION, curr)

    ##################################################################################
    ## VOLTAGE COMPLIANCE LIMIT (SOUR:VOLT:PROT)
    def SetVoltageSourceLimit(self, level):
        """Set the voltage protection (compliance) limit."""
        strFmt = "SOUR:VOLT:PROT {l}"
        if level in self.DEFAULT_AMPLITUDES:
            self.instrument.write(strFmt.format(l=level))
        # BUG FIX: the bounds were referenced without `self.`, raising
        # NameError on any numeric level.
        elif (level <= self.MAX_VOLT_AMPL_VALUE) and (level >= self.MIN_VOLT_AMPL_VALUE):
            self.instrument.write(strFmt.format(l=level))

    ##################################################################################
    ## SOURCE DELAY (SOUR:DEL) -- not used for pulse mode
    DELAY_VALUES = [0, 999.9999]
    MIN_DELAY, MAX_DELAY = DELAY_VALUES

    def SetDelay(self, delay):
        """Set the source delay in seconds (named value or 0..999.9999)."""
        strFmt = "SOUR:DEL {d}"
        if delay in self.DEFAULT_AMPLITUDES:
            self.instrument.write(strFmt.format(d=delay))
        elif (delay >= self.MIN_DELAY) and (delay <= self.MAX_DELAY):
            self.instrument.write(strFmt.format(d=delay))

    ##################################################################################
    ## PULSE WIDTH (SOUR:PULS:WIDT) -- pulse mode only
    MIN_PULSE_WIDTH = 0.00015
    MAX_PULSE_WIDTH = 0.005

    def SetPulseWidth(self, seconds):
        """Set the pulse width in seconds; out-of-range values are clamped."""
        if seconds < self.MIN_PULSE_WIDTH:
            seconds = self.MIN_PULSE_WIDTH
        elif seconds > self.MAX_PULSE_WIDTH:
            seconds = self.MAX_PULSE_WIDTH
        self.instrument.write("SOUR:PULS:WIDT {0}".format(seconds))

    ##################################################################################
    ## PULSE DELAY (SOUR:PULS:DEL) -- pulse mode only
    # BUG FIX: these bounds were named MIN_DELAY/MAX_DELAY, silently
    # overwriting the source-delay bounds above and breaking SetDelay's
    # range check (it accepted up to 9999.99872 instead of 999.9999).
    MIN_PULSE_DELAY = 0
    MAX_PULSE_DELAY = 9999.99872

    def SetPulseDelay(self, delay):
        """Set the pulse delay in seconds; out-of-range values are clamped."""
        if delay < self.MIN_PULSE_DELAY:
            delay = self.MIN_PULSE_DELAY
        elif delay > self.MAX_PULSE_DELAY:
            delay = self.MAX_PULSE_DELAY
        self.instrument.write("SOUR:PULS:DEL {0}".format(delay))

    ### SENSE1 SUBSYSTEM
    ##################################################################################
    ## CONCURRENT MEASUREMENT (SENS:FUNC:CONC)
    ## For the Model 2430 Pulse Mode, concurrent measurements are always
    ## disabled; sending this command then results in error +831.
    def SetConcurrentMeasurement(self, state):
        if state in self.SWITCH_STATES:
            self.instrument.write("SENS:FUNC:CONC {0}".format(state))

    ##################################################################################
    ## SENSE FUNCTION ON/OFF (FUNC:ON / FUNC:OFF)
    SENSE_FUNCTIONS = ['VOLT', 'CURR', 'RES']
    VOLT_SENSE_FUNCTION, CURR_SENSE_FUNCTION, RES_SENSE_FUNCTION = SENSE_FUNCTIONS

    def SwitchFunction(self, state, func_list):
        """Enable/disable the given list of sense functions.

        BUG FIX: the parameter was spelled func_List while the body used
        func_list (NameError), and `x is list` never type-checks a value;
        both replaced with a consistent name and isinstance().
        """
        if isinstance(func_list, list) and (state in self.SWITCH_STATES):
            if all(item in self.SENSE_FUNCTIONS for item in func_list):
                self.instrument.write("FUNC:{0} \"{1}\"".format(state, "\",\"".join(func_list)))

    def ON_Function(self, func_list):
        self.SwitchFunction(self.STATE_ON, func_list)

    def OFF_Function(self, func_list):
        self.SwitchFunction(self.STATE_OFF, func_list)

    def SwitchAllFunctions(self, state):
        if state in self.SWITCH_STATES:
            self.instrument.write("FUNC:{0}:ALL".format(state))

    ##################################################################################
    ## OUTPUT CONTROL
    def OutputOff(self):
        """Turn the output off; returns True when the readback confirms it."""
        self.instrument.write("OUTP:STAT OFF")
        # Readback may be '0'/'1' or 'OFF'/'ON' depending on firmware;
        # accept both forms (the original only matched 'OFF').
        return self.instrument.ask("OUTP:STAT?").strip() in ('OFF', '0')

    def StartOutput(self):
        """Arm/initiate the trigger model (:INIT)."""
        self.instrument.write(":INIT")

    def OutputOn(self):
        """Turn the output on; returns True when the readback confirms it."""
        self.instrument.write("OUTP:STAT ON")
        return self.instrument.ask("OUTP:STAT?").strip() in ('ON', '1')

    def StartOutputAndRead(self):
        """Initiate and fetch a reading in one query (:READ?)."""
        return self.instrument.ask(":READ?")

    def DisablePulseMeasurements(self):
        self.instrument.write(":SENSe:FUNCtion:OFF:ALL")

    ##################################################################################
    ## TRIGGER COUNT (TRIG:COUN)
    MIN_TRIG_COUNT = 1
    MAX_TRIG_COUNT = 2500

    def SetTriggerCount(self, count):
        """Set the trigger count; out-of-range values are clamped."""
        if count < self.MIN_TRIG_COUNT:
            count = self.MIN_TRIG_COUNT
        elif count > self.MAX_TRIG_COUNT:
            count = self.MAX_TRIG_COUNT
        self.instrument.write(":TRIG:COUN {0}".format(count))

    def IDN(self):
        """Return the instrument identification string (*IDN?)."""
        if self.instrument:
            return self.instrument.ask("*IDN?")

    def Reset(self):
        """Restore instrument defaults (*RST)."""
        self.instrument.write("*RST")
if __name__ == "__main__":
    # Manual smoke test against real hardware on GPIB address 5:
    # configures pulse mode, fires a short 100 V pulse train, then
    # switches the instrument back to DC.
    k = Keithley24XX('GPIB0::5::INSTR')
    k.Reset()
    time.sleep(1)  # give the instrument time to finish *RST
    print(k.IDN())
    k.SetCurrentSourceFunction()
    time.sleep(1)
    k.SetVoltageSourceFunction()
##    k.SwitchAllFunctions(k.STATE_ON)
##    time.sleep(1)
    k.SetPulse()
    k.DisablePulseMeasurements()
    print("pw")
    k.SetPulseWidth(0.005)
    print("pd")
    k.SetPulseDelay(1)
    print("pc")
    k.SetTriggerCount(3)
    print("a")
    time.sleep(1)
    k.SetVoltageSourceRange(k.MAX_RANGE)#VOLT_RANGE_100V)
    time.sleep(1)
    k.SetVoltageAmplitude(100)
    time.sleep(1)
    k.StartOutput()
##    k.OutputOn()
##    print(k.StartOutputAndRead())
    time.sleep(2)  # let the pulse train complete before shutting down
    k.OutputOff()
    k.SetDC()
    time.sleep(1)
| e4ddebbe36d4ea06b3565b113c9530cd267bad47 | [
"Python"
] | 9 | Python | zigorrom/PyIV | 178ba0c75e65b72daeabb6a7538c83e972ce6b2f | a66b0b352369c955ef062d190fa693cb5c7d8f14 |
refs/heads/main | <file_sep>package com.solvd.projectUnicen.gui.pages;
import java.util.List;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.support.FindBy;
import com.qaprosoft.carina.core.foundation.webdriver.decorator.ExtendedWebElement;
import com.solvd.projectUnicen.gui.components.Footer;
import com.solvd.projectUnicen.gui.components.Header;
/**
 * Page object for the tutorials content area.
 */
public class TutorialsPage extends UnicenAbstractPage {

    @FindBy(xpath = "//*[@class='content']//h3/span/strong")
    private ExtendedWebElement sectionTitle;

    @FindBy(xpath = "//*[@class='panel-pane pane-token pane-node-body']/div[1]")
    private ExtendedWebElement firstParagraph;

    @FindBy(xpath = "//*[@class='panel-pane pane-token pane-node-body']/div[2]")
    private ExtendedWebElement secondParagraph;

    public TutorialsPage(WebDriver driver) {
        super(driver);
    }

    /** @return the visible text of the section heading */
    public String getSectionTitleTitle() {
        return this.sectionTitle.getText();
    }

    /** @return the visible text of the first body paragraph */
    public String getFirstParagraph() {
        return this.firstParagraph.getText();
    }

    /** @return the visible text of the second body paragraph */
    public String getSecondParagraph() {
        return this.secondParagraph.getText();
    }
}
<file_sep>package com.solvd.projectUnicen.gui.pages;
import java.util.List;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.support.FindBy;
import com.solvd.projectUnicen.gui.components.SearchResultItem;
/**
 * Page object for the search results listing.
 */
public class SearchPage extends UnicenAbstractPage {

    @FindBy(xpath = "//*[@id='content']/ol")
    private List<SearchResultItem> items;

    public SearchPage(WebDriver driver) {
        super(driver);
    }

    /** @return every result entry currently listed on the page */
    public List<SearchResultItem> getResultItems() {
        return this.items;
    }
}
<file_sep>package com.solvd.projectUnicen.gui.components;
import java.util.List;
import org.openqa.selenium.SearchContext;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.support.FindBy;
import com.qaprosoft.carina.core.foundation.webdriver.decorator.ExtendedWebElement;
import com.qaprosoft.carina.core.gui.AbstractUIObject;
/**
 * UI component wrapping the shared page footer.
 */
public class Footer extends AbstractUIObject {

    @FindBy(xpath = "//*[@id='views_slideshow_cycle_main_carrousel-block']")
    private ExtendedWebElement imageLinks;

    @FindBy(xpath = "//*[@id='block-block-1']/p")
    private ExtendedWebElement contactInformationList;

    public Footer(WebDriver driver, SearchContext searchContext) {
        super(driver, searchContext);
    }

    /** @return the carousel element holding the footer image links */
    public ExtendedWebElement getImageLinks() {
        return this.imageLinks;
    }

    /** @return the footer contact block rendered as plain text */
    public String getContactInformationList() {
        return this.contactInformationList.getText();
    }
}
<file_sep>package com.solvd.projectUnicen.gui.pages;
import java.util.List;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.support.FindBy;
import com.qaprosoft.carina.core.foundation.webdriver.decorator.ExtendedWebElement;
import com.solvd.projectUnicen.gui.components.Footer;
import com.solvd.projectUnicen.gui.components.Header;
/**
 * Page object for the PPS section.
 */
public class PPSPage extends UnicenAbstractPage {

    @FindBy(xpath = "//*[@id='content']//p/strong")
    private ExtendedWebElement sectionTitle;

    @FindBy(xpath = "//*[@class='panel-pane pane-token pane-node-body']/div/div")
    private ExtendedWebElement paragraph;

    @FindBy(xpath = "//*[@class='panel-pane pane-token pane-node-body']//ul")
    private List<ExtendedWebElement> listLinks;

    public PPSPage(WebDriver driver) {
        super(driver);
    }

    /** @return the visible text of the section heading */
    public String getSectionTitle() {
        return this.sectionTitle.getText();
    }

    /** @return the visible text of the body paragraph */
    public String getParagraph() {
        return this.paragraph.getText();
    }

    /**
     * Not implemented yet; returns null to match the original behaviour.
     * TODO: extract the link texts from {@code listLinks}.
     */
    public List<String> getListLinks() {
        return null;
    }
}
<file_sep>package com.solvd.projectUnicen.gui.pages;
import java.util.List;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.support.FindBy;
import com.qaprosoft.carina.core.foundation.webdriver.decorator.ExtendedWebElement;
import com.qaprosoft.carina.core.gui.AbstractPage;
import com.solvd.projectUnicen.gui.components.*;
/**
 * Common base for all UNICEN page objects; exposes the shared header,
 * footer, side menu and page title elements.
 */
public abstract class UnicenAbstractPage extends AbstractPage {

    @FindBy(xpath = "//*[@id=\"footer\"]")
    private Footer footer;

    @FindBy(xpath = "//*[@id=\"header\"]")
    private Header header;

    @FindBy(xpath = "//*[@id=\"content\"]/div/div/div[2]/div/div[1]/div/div/div/ul")
    private List<ExtendedWebElement> menu;

    @FindBy(xpath = "//*[@id=\"content\"]/div/div/div[1]/div/div[1]/div/div/div")
    private ExtendedWebElement menuTitle;

    @FindBy(xpath = "//*[@id=\"content\"]/div/div/div[1]/div/div[2]/div/div[2]/h1")
    private ExtendedWebElement title;

    public UnicenAbstractPage(WebDriver driver) {
        super(driver);
    }

    /** @return the shared page footer component */
    public Footer getFooter() {
        return this.footer;
    }

    /** @return the shared page header component */
    public Header getHeader() {
        return this.header;
    }

    /** @return the side menu entries */
    public List<ExtendedWebElement> getMenu() {
        return this.menu;
    }

    /** @return the side menu title element */
    public ExtendedWebElement getMenuTitle() {
        return this.menuTitle;
    }

    /** @return the main page title element */
    public ExtendedWebElement getTitle() {
        return this.title;
    }
}
| a5b8c50d19a5f59504fc99cdd2fd73f85ceaef4c | [
"Java"
] | 5 | Java | magaliboulanger/Solvd-course-test | 3ed745840724dab8a60ed9c061faef5e013cb83c | c1f0014aee0b22a15258dd2bb11d9b5ffc0c0824 |
refs/heads/master | <repo_name>TentacleSoft/workshop-symfony2-volcanica<file_sep>/src/TS/Bundle/MinesweeperBundle/Controller/GameController.php
<?php
namespace TS\Bundle\MinesweeperBundle\Controller;
use Symfony\Component\HttpKernel\Exception\BadRequestHttpException;
use TS\Bundle\MinesweeperBundle\Entity\Game;
use TS\Bundle\MinesweeperBundle\Entity\User;
use Symfony\Bundle\FrameworkBundle\Controller\Controller;
use Symfony\Component\HttpFoundation\JsonResponse;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Method;
/**
 * REST endpoints for minesweeper games: create a game, read its state,
 * open a cell and post chat messages.
 *
 * @Route("/games")
 */
class GameController extends Controller
{
    /**
     * Creates a new game from the comma-separated "players" ids in the
     * request body; the authenticated user must be one of the players.
     *
     * @Route("/")
     * @Method("POST")
     */
    public function newGameAction()
    {
        $playerIds = $this->getRequest()->get('players');
        $activePlayer = $this->getRequest()->get('activePlayer');

        if (is_null($playerIds)) {
            throw new BadRequestHttpException('Player ids missing');
        }

        $gameManager = $this->get('ts_minesweeper.game_manager');
        $userRepository = $this->getDoctrine()->getRepository('TSMinesweeperBundle:User');
        $players = array();
        $playerIdsArray = explode(',', $playerIds);

        // The creator must take part in the game he creates.
        if (!in_array($this->getUser()->getId(), $playerIdsArray)) {
            throw new BadRequestHttpException('Player is not in the new game');
        }

        foreach ($playerIdsArray as $playerId) {
            $players[] = $userRepository->findOneById($playerId);
        }

        try {
            $gameId = $gameManager->create($players, $activePlayer);
        } catch (\Exception $e) {
            throw new BadRequestHttpException('Failed to create game: ' . $e->getMessage());
        }

        return new JsonResponse($this->getGameInfo($gameId));
    }

    /**
     * Returns the full state of one game as JSON.
     *
     * @Route("/{gameId}")
     * @Method("GET")
     */
    public function gameAction($gameId)
    {
        return new JsonResponse($this->getGameInfo($gameId));
    }

    /**
     * Opens the cell at (row, col) for the authenticated user and
     * returns the updated game state.
     *
     * @Route("/{gameId}")
     * @Method("POST")
     */
    public function openCellAction($gameId)
    {
        $request = $this->getRequest();
        $row = $request->get('row');
        $col = $request->get('col');

        if (is_null($row) || is_null($col)) {
            throw new BadRequestHttpException('Row or col empty');
        }

        $gameManager = $this->get('ts_minesweeper.game_manager');
        $gameManager->open($gameId, $this->getUser(), $row, $col);

        return new JsonResponse($this->getGameInfo($gameId));
    }

    /**
     * Appends a chat message to the game and returns the chat log.
     *
     * @Route("/{gameId}/chat")
     * @Method("POST")
     */
    public function sendChatAction($gameId)
    {
        $request = $this->getRequest();
        $message = $request->get('message');

        if (empty($message)) {
            throw new BadRequestHttpException('Empty text');
        }

        $gameManager = $this->get('ts_minesweeper.game_manager');
        $gameManager->sendUserChat($gameId, $this->getUser(), $message);

        /** @var Game $game */
        $game = $this->get('ts_minesweeper.game_manager')->get($gameId);

        return new JsonResponse(
            array('chat' => $game->getChat()),
            200
        );
    }

    /**
     * Builds the serializable game-state array shared by all actions.
     *
     * @param int $gameId
     *
     * @return array
     */
    private function getGameInfo($gameId)
    {
        /** @var Game $game */
        $game = $this->get('ts_minesweeper.game_manager')->get($gameId);
        $players = array();

        /** @var User $player */
        foreach ($game->getPlayers() as $player) {
            $players[] = array(
                'id' => $player->getId(),
                'name' => $player->getName(),
                'username' => $player->getUsername()
            );
        }

        return array(
            'id' => $gameId,
            'players' => $players,
            'activePlayer' => $game->getActivePlayer(),
            'scores' => $game->getScores(),
            'board' => $game->getVisibleBoard(),
            'chat' => $game->getChat()
        );
    }
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Controller/LobbyController.php
<?php
namespace TS\Bundle\MinesweeperBundle\Controller;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Method;
use Symfony\Component\HttpFoundation\JsonResponse;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Bundle\FrameworkBundle\Controller\Controller as BaseController;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\HttpKernel\Exception\AccessDeniedHttpException;
use TS\Bundle\MinesweeperBundle\Entity\User;
/**
 * Lobby endpoints: lobby info, joining the lobby, and lobby chat.
 *
 * @Route("/lobby")
 */
class LobbyController extends BaseController
{
    /**
     * Returns the lobby chat log and the list of all users.
     *
     * @Route("/")
     * @Method("GET")
     */
    public function lobbyInfoAction()
    {
        $lobby = $this->getLobby();
        $userManager = $this->get('ts_minesweeper.user_manager');

        return new JsonResponse(
            array(
                'chat' => $lobby->getChat(),
                'users' => $userManager->getAllUsersInfo()
            ),
            200
        );
    }

    /**
     * Adds the authenticated user to the lobby.
     *
     * Responds 201 when the user joined, 204 when he was already there.
     *
     * @Route("/users/{userId}", requirements={"userId"="\d+"})
     * @Method("PUT")
     */
    public function lobbyJoinAction($userId)
    {
        $user = $this->getUser();

        // BUG FIX: route parameters arrive as strings, so the original
        // strict comparison ($userId !== $user->getId()) was true for
        // every request and nobody could ever join. Cast before comparing.
        if ((int) $userId !== $user->getId()) {
            throw new AccessDeniedHttpException('This is not your user Id');
        }

        if (null === $user->getLobby()) {
            $user->setLobby($this->getLobby());
            // BUG FIX: the entity change was never flushed, so joining
            // the lobby was silently lost on every request.
            $this->getDoctrine()->getManager()->flush();

            return new Response('', 201);
        }

        return new Response('', 204);
    }

    /**
     * Appends a chat line authored by the current user and returns the
     * updated chat log.
     *
     * @Route("/chat")
     * @Method("POST")
     */
    public function chatPostAction(Request $request)
    {
        if (!$request->request->has('message')) {
            // BUG FIX: \HttpRequestException only exists with the pecl_http
            // extension loaded; use the Symfony HTTP exception instead so a
            // missing parameter yields a 400 rather than a fatal error.
            throw new \Symfony\Component\HttpKernel\Exception\BadRequestHttpException('Missing "message" parameter');
        }

        $lobby = $this->getLobby();
        /** @var User $user */
        $user = $this->getUser();
        $username = $user->getUsername();
        $message = $request->request->get('message');
        $lobby->addChatLine($username, $message);
        $this->getDoctrine()->getManager()->flush();

        return new JsonResponse(
            array(
                'chat' => $lobby->getChat(),
            ),
            200
        );
    }

    /**
     * Fetches the single Lobby row; assumes exactly one lobby exists
     * (created by fixtures) -- TODO confirm.
     *
     * @return \TS\Bundle\MinesweeperBundle\Entity\Lobby
     */
    private function getLobby()
    {
        $lobbies = $this->getDoctrine()->getRepository('TSMinesweeperBundle:Lobby')->findAll();

        return $lobbies[0];
    }
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Repository/UserRepository.php
<?php
namespace TS\Bundle\MinesweeperBundle\Repository;
use Doctrine\ORM\EntityRepository;
/**
 * Doctrine repository for the User entity.
 *
 * No custom queries yet; inherits the standard EntityRepository finders.
 */
class UserRepository extends EntityRepository
{
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Listener/GameManagerNotFoundExceptionListener.php
<?php
namespace TS\Bundle\MinesweeperBundle\Listener;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\HttpKernel\Event\GetResponseForExceptionEvent;
use TS\Bundle\MinesweeperBundle\Exception\GameNotFoundException;
/**
 * Kernel exception listener that maps GameNotFoundException to a 404.
 */
class GameManagerNotFoundExceptionListener
{
    /**
     * Replaces the response with a plain 404 carrying the exception
     * message; any other exception type is left for Symfony to handle.
     */
    public function onKernelException(GetResponseForExceptionEvent $event)
    {
        $exception = $event->getException();

        if (!$exception instanceof GameNotFoundException) {
            return;
        }

        $event->setResponse(new Response($exception->getMessage(), 404));
    }
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/TSMinesweeperBundle.php
<?php
namespace TS\Bundle\MinesweeperBundle;
use Symfony\Component\HttpKernel\Bundle\Bundle;
/**
 * Bundle registration class for the minesweeper application.
 */
class TSMinesweeperBundle extends Bundle
{
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Exception/GameNotFoundException.php
<?php
namespace TS\Bundle\MinesweeperBundle\Exception;
/**
 * Thrown when a game id cannot be resolved; converted to a 404 by
 * GameManagerNotFoundExceptionListener.
 */
class GameNotFoundException extends \Exception
{
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Service/Symbols.php
<?php
namespace TS\Bundle\MinesweeperBundle\Service;
/**
 * Shared magic values used in the stored game state.
 */
final class Symbols
{
    // Cell markers used in the board arrays.
    const MINE = 'M';
    const UNKNOWN = '';
    // Sentinel value (appears to mark a finished game -- verify against GameManager).
    const GAME_OVER = -1;
    // Pseudo author ids used for system-generated chat lines (see the game fixtures).
    const CHAT_INFO = -1;
    const CHAT_ERROR = -2;
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Exception/UserNotFoundException.php
<?php
namespace TS\Bundle\MinesweeperBundle\Exception;
/**
 * Thrown when a user id cannot be resolved to a User entity.
 */
class UserNotFoundException extends \Exception
{
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Tests/Controller/DefaultControllerTest.php
<?php
namespace TS\Bundle\MinesweeperBundle\Tests\Controller;
use Symfony\Bundle\FrameworkBundle\Test\WebTestCase;
/**
 * Functional test scaffold for the bundle controllers; no tests yet.
 */
class DefaultControllerTest extends WebTestCase
{
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/DataFixtures/ORM/LoadGameData.php
<?php
namespace TS\Bundle\MinesweeperBundle\DataFixtures\ORM;
use Doctrine\Common\DataFixtures\AbstractFixture;
use Doctrine\Common\Persistence\ObjectManager;
use Doctrine\Common\DataFixtures\OrderedFixtureInterface;
use TS\Bundle\MinesweeperBundle\Entity\Game;
use TS\Bundle\MinesweeperBundle\Service\BoardFactory;
use TS\Bundle\MinesweeperBundle\Service\Symbols;
/**
 * Fixture that seeds one two-player game between the users created by
 * LoadUserData (referenced as "user1" and "user2").
 */
class LoadGameData extends AbstractFixture implements OrderedFixtureInterface
{
    public function load(ObjectManager $manager)
    {
        $boardSize = 16;

        $game = new Game();
        $game->setBoard(BoardFactory::create($boardSize, 50));

        // Every cell starts hidden: a 16x16 grid of UNKNOWN markers.
        $hiddenRow = array_fill(0, $boardSize, Symbols::UNKNOWN);
        $game->setVisibleBoard(array_fill(0, $boardSize, $hiddenRow));

        $playerOne = $this->getReference('user1');
        $playerTwo = $this->getReference('user2');

        $game->addChatLine(
            Symbols::CHAT_INFO,
            sprintf('Players: %s, %s', $playerOne->getUsername(), $playerTwo->getUsername())
        );
        $game->setActivePlayer($playerOne->getId());
        $game->setScores(array(0, 0));
        $game->addPlayer($playerOne)->addPlayer($playerTwo);

        $manager->persist($game);
        $manager->flush();
    }

    public function getOrder()
    {
        // Runs after LoadUserData (order 1) so the user references exist.
        return 2;
    }
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Entity/User.php
<?php
// src/Acme/UserBundle/Entity/User.php
namespace TS\Bundle\MinesweeperBundle\Entity;
use FOS\UserBundle\Model\User as BaseUser;
use Doctrine\ORM\Mapping as ORM;
/**
 * Application user; extends the FOSUserBundle base user and links users
 * to the games they participate in.
 *
 * @ORM\Entity
 * @ORM\Table(name="fos_user")
 */
class User extends BaseUser
{
    /**
     * @ORM\Id
     * @ORM\Column(type="integer")
     * @ORM\GeneratedValue(strategy="AUTO")
     */
    protected $id;

    /**
     * Display name shown alongside the FOSUserBundle username.
     *
     * @var string
     *
     * @ORM\Column(name="name", type="string", length=255, nullable=true)
     */
    private $name;

    /**
     * Games this user takes part in (owning side is Game::$players).
     *
     * @ORM\ManyToMany(targetEntity="Game", mappedBy="players")
     */
    private $games;

    /**
     * Set name
     *
     * @param string $name
     * @return User
     */
    public function setName($name)
    {
        $this->name = $name;
        return $this;
    }

    /**
     * Get name
     *
     * @return string
     */
    public function getName()
    {
        return $this->name;
    }

    /**
     * Constructor
     */
    public function __construct()
    {
        parent::__construct();
        $this->games = new \Doctrine\Common\Collections\ArrayCollection();
    }

    /**
     * Get id
     *
     * @return integer
     */
    public function getId()
    {
        return $this->id;
    }

    /**
     * Add games
     *
     * @param \TS\Bundle\MinesweeperBundle\Entity\Game $games
     * @return User
     */
    public function addGame(\TS\Bundle\MinesweeperBundle\Entity\Game $games)
    {
        $this->games[] = $games;
        return $this;
    }

    /**
     * Remove games
     *
     * @param \TS\Bundle\MinesweeperBundle\Entity\Game $games
     */
    public function removeGame(\TS\Bundle\MinesweeperBundle\Entity\Game $games)
    {
        $this->games->removeElement($games);
    }

    /**
     * Get games
     *
     * @return \Doctrine\Common\Collections\Collection
     */
    public function getGames()
    {
        return $this->games;
    }
}<file_sep>/src/TS/Bundle/MinesweeperBundle/DataFixtures/ORM/LoadUserData.php
<?php
namespace TS\Bundle\MinesweeperBundle\DataFixtures\ORM;
use Doctrine\Common\DataFixtures\AbstractFixture;
use Doctrine\Common\DataFixtures\OrderedFixtureInterface;
use Doctrine\Common\Persistence\ObjectManager;
use TS\Bundle\MinesweeperBundle\Entity\User;
class LoadUserData extends AbstractFixture implements OrderedFixtureInterface
{
    // Seed data for two demo accounts. (Password/email values were
    // scrubbed from this copy of the source.)
    private $userData = array(
        array(
            'id' => 1,
            'name' => 'Generated User 1',
            'username' => 'genUser1',
            'password' => '<PASSWORD>',
            'email' => '<EMAIL>',
        ),
        array(
            'id' => 2,
            'name' => 'Generated User 2',
            'username' => 'genUser2',
            'password' => '<PASSWORD>',
            'email' => '<EMAIL>',
        )
    );

    /**
     * NOTE(review): this is an empty stub — $userData is never persisted and
     * no "user1"/"user2" references are registered, which LoadGameData
     * relies on. Confirm whether user creation happens elsewhere.
     *
     * @param ObjectManager $manager
     */
    public function load(ObjectManager $manager)
    {
    }

    /**
     * @return int Fixture load order (users must exist before games).
     */
    public function getOrder()
    {
        return 1;
    }
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Entity/Game.php
<?php
namespace TS\Bundle\MinesweeperBundle\Entity;
use Doctrine\ORM\Mapping as ORM;
use TS\Bundle\MinesweeperBundle\Service\Symbols;
/**
 * A two-player minesweeper match.
 *
 * Holds the full solution board (mines + neighbour counts), the board as
 * currently revealed to the players, per-player scores, the in-game chat
 * log, whose turn it is, and the eventual winner.
 *
 * @ORM\Table()
 * @ORM\Entity
 */
class Game
{
    /**
     * @var integer
     *
     * @ORM\Column(name="id", type="integer")
     * @ORM\Id
     * @ORM\GeneratedValue(strategy="AUTO")
     */
    private $id;

    /**
     * Participants (owning side; inverse side is User::$games).
     *
     * @ORM\ManyToMany(targetEntity="User", inversedBy="games")
     * @ORM\JoinTable(name="games_players")
     */
    private $players;

    /**
     * Full solution board: mines and neighbour counts for every cell.
     *
     * @var array
     *
     * @ORM\Column(name="board", type="array")
     */
    private $board;

    /**
     * The board as revealed to the players so far (unopened cells hold
     * Symbols::UNKNOWN).
     *
     * @var array
     *
     * @ORM\Column(name="visibleBoard", type="array")
     */
    private $visibleBoard;

    /**
     * Mines found per player, indexed by player position.
     *
     * @var array
     *
     * @ORM\Column(name="scores", type="array")
     */
    private $scores;

    /**
     * Chat log: list of {from, message} entries (see addChatLine()).
     *
     * @var array
     *
     * @ORM\Column(name="chat", type="json_array")
     */
    private $chat = array();

    /**
     * Id of the user whose turn it is, or Symbols::GAME_OVER when finished
     * (see isOver()).
     *
     * @var integer
     *
     * @ORM\Column(name="activePlayer", type="integer")
     */
    private $activePlayer;

    /**
     * Winning user, set when the game ends.
     *
     * @var User|null
     *
     * @ORM\ManyToOne(targetEntity="User")
     */
    private $winner;

    /**
     * Get id
     *
     * @return integer
     */
    public function getId()
    {
        return $this->id;
    }

    /**
     * Set players
     *
     * @param array $players
     * @return Game
     */
    public function setPlayers($players)
    {
        $this->players = $players;
        return $this;
    }

    /**
     * Get players
     *
     * @return array
     */
    public function getPlayers()
    {
        return $this->players;
    }

    /**
     * Set board
     *
     * @param array $board
     * @return Game
     */
    public function setBoard($board)
    {
        $this->board = $board;
        return $this;
    }

    /**
     * Get board
     *
     * @return array
     */
    public function getBoard()
    {
        return $this->board;
    }

    /**
     * Set visibleBoard
     *
     * @param array $visibleBoard
     * @return Game
     */
    public function setVisibleBoard($visibleBoard)
    {
        $this->visibleBoard = $visibleBoard;
        return $this;
    }

    /**
     * Get visibleBoard
     *
     * @return array
     */
    public function getVisibleBoard()
    {
        return $this->visibleBoard;
    }

    /**
     * Set scores
     *
     * @param array $scores
     * @return Game
     */
    public function setScores($scores)
    {
        $this->scores = $scores;
        return $this;
    }

    /**
     * Get scores
     *
     * @return array
     */
    public function getScores()
    {
        return $this->scores;
    }

    /**
     * Set chat
     *
     * @param array $chat
     * @return Game
     */
    public function setChat(array $chat)
    {
        $this->chat = $chat;
        return $this;
    }

    /**
     * Get chat
     *
     * @return array
     */
    public function getChat()
    {
        return $this->chat;
    }

    /**
     * Set activePlayer
     *
     * @param integer $activePlayer
     * @return Game
     */
    public function setActivePlayer($activePlayer)
    {
        $this->activePlayer = $activePlayer;
        return $this;
    }

    /**
     * Get activePlayer
     *
     * @return integer
     */
    public function getActivePlayer()
    {
        return $this->activePlayer;
    }

    /**
     * Constructor
     */
    public function __construct()
    {
        $this->players = new \Doctrine\Common\Collections\ArrayCollection();
    }

    /**
     * Add players
     *
     * @param \TS\Bundle\MinesweeperBundle\Entity\User $players
     * @return Game
     */
    public function addPlayer(\TS\Bundle\MinesweeperBundle\Entity\User $players)
    {
        $this->players[] = $players;
        return $this;
    }

    /**
     * Remove players
     *
     * @param \TS\Bundle\MinesweeperBundle\Entity\User $players
     */
    public function removePlayer(\TS\Bundle\MinesweeperBundle\Entity\User $players)
    {
        $this->players->removeElement($players);
    }

    /**
     * Whether the game has finished (activePlayer holds the GAME_OVER marker).
     *
     * @return bool
     */
    public function isOver()
    {
        return $this->activePlayer === Symbols::GAME_OVER;
    }

    /**
     * Append a line to the chat log.
     *
     * @param int $from user id, or a Symbols::CHAT_* marker for system lines
     * @param string $message
     */
    public function addChatLine($from, $message)
    {
        $this->chat[] = array(
            'from' => $from,
            'message' => $message,
        );
    }

    /**
     * Set winner
     *
     * @param \TS\Bundle\MinesweeperBundle\Entity\User $winner
     * @return Game
     */
    public function setWinner(\TS\Bundle\MinesweeperBundle\Entity\User $winner = null)
    {
        $this->winner = $winner;
        return $this;
    }

    /**
     * Get winner
     *
     * @return \TS\Bundle\MinesweeperBundle\Entity\User
     */
    public function getWinner()
    {
        return $this->winner;
    }
}
<?php
namespace TS\Bundle\MinesweeperBundle\Controller;
use Symfony\Component\HttpKernel\Exception\NotFoundHttpException;
use TS\Bundle\MinesweeperBundle\Entity\Game;
use TS\Bundle\MinesweeperBundle\Entity\User;
use Symfony\Bundle\FrameworkBundle\Controller\Controller;
use Symfony\Component\HttpFoundation\JsonResponse;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Method;
/**
 * Read-only JSON endpoints about users, mounted under /users.
 *
 * @Route("/users")
 */
class UserController extends Controller
{
    /**
     * List summary info for every user (delegates to the UserManager service).
     *
     * @Route("/")
     * @Method("GET")
     */
    public function usersAction()
    {
        $userManager = $this->get('ts_minesweeper.user_manager');
        return new JsonResponse($userManager->getAllUsersInfo());
    }

    /**
     * Single-user detail endpoint.
     * NOTE(review): unimplemented stub — returns nothing.
     *
     * @Route("/{userId}", requirements={"userId"="\d+"})
     * @Method("GET")
     */
    public function userAction($userId)
    {
    }

    /**
     * Games of a given user.
     * NOTE(review): unimplemented stub — returns nothing.
     *
     * @Route("/{userId}/games")
     * @Method("GET")
     */
    public function userGamesAction($userId)
    {
    }
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Service/UserManager.php
<?php
namespace TS\Bundle\MinesweeperBundle\Service;
use Doctrine\ORM\EntityManager;
use TS\Bundle\MinesweeperBundle\Entity\Game;
use TS\Bundle\MinesweeperBundle\Entity\User;
use Doctrine\ORM\EntityRepository;
use TS\Bundle\MinesweeperBundle\Exception\UserNotFoundException;
/**
 * Service producing JSON-friendly summaries of users and their games.
 */
class UserManager
{
    /**
     * @var EntityRepository
     */
    private $userRepository;

    /**
     * @var EntityManager
     */
    private $entityManager;

    public function __construct(EntityRepository $userRepository, EntityManager $entityManager)
    {
        $this->userRepository = $userRepository;
        $this->entityManager = $entityManager;
    }

    /**
     * Build the public info array for every user.
     *
     * @return array
     */
    public function getAllUsersInfo()
    {
        $users = $this->userRepository->findAll();

        return array_map(
            function ($user) {
                return $this->processUserInfo($user);
            },
            $users
        );
    }

    /**
     * Public info for one user.
     *
     * @param int $userId
     *
     * @return array
     */
    public function getUserInfo($userId)
    {
        $user = $this->getUser($userId);

        return $this->processUserInfo($user);
    }

    /**
     * Full game payloads (players, board, chat, ...) for one user.
     *
     * @param int $userId
     *
     * @return array
     */
    public function getUserGames($userId)
    {
        $user = $this->getUser($userId);
        $games = array_map(function (Game $game) {
            $players = array();
            /** @var User $player */
            foreach ($game->getPlayers() as $player) {
                $players[] = array(
                    'id' => $player->getId(),
                    'name' => $player->getName(),
                    'username' => $player->getUsername()
                );
            }

            // Only the *visible* board is exposed; the solution stays server-side.
            return array(
                'id' => $game->getId(),
                'players' => $players,
                'activePlayer' => $game->getActivePlayer(),
                'scores' => $game->getScores(),
                'board' => $game->getVisibleBoard(),
                'chat' => $game->getChat()
            );
        }, $user->getGames()->toArray());

        return $games;
    }

    /**
     * Fetch a user or fail loudly.
     *
     * @param $userId
     *
     * @return User
     *
     * @throws \TS\Bundle\MinesweeperBundle\Exception\UserNotFoundException
     */
    private function getUser($userId)
    {
        $user = $this->userRepository->findOneById($userId);

        if (!$user) {
            // BUGFIX: the method previously threw unconditionally (it never
            // queried the repository) and passed $userId *outside* sprintf(),
            // leaving the "%s" placeholder unfilled. Mirrors GameManager::get().
            throw new UserNotFoundException(sprintf('User %s not found', $userId));
        }

        return $user;
    }

    /**
     * Split a user's games into active/won/lost summaries.
     *
     * @param User $user
     *
     * @return array
     */
    private function processUserInfo(User $user)
    {
        $active = array();
        $won = array();
        $lost = array();
        /** @var Game $game */
        foreach ($user->getGames() as $game) {
            if ($game->isOver()) {
                // NOTE(review): assumes finished games always have a winner set.
                if ($game->getWinner()->getId() === $user->getId()) {
                    $won[] = $this->getGameInfo($game);
                } else {
                    $lost[] = $this->getGameInfo($game);
                }
            } else {
                $active[] = $this->getGameInfo($game);
            }
        }

        return array(
            'id' => $user->getId(),
            'username' => $user->getUsername(),
            'name' => $user->getName(),
            'games' => array(
                'active' => $active,
                'won' => $won,
                'lost' => $lost,
            )
        );
    }

    /**
     * Minimal per-game summary used inside user info.
     */
    private function getGameInfo(Game $game)
    {
        return array(
            'id' => $game->getId(),
            'scores' => $game->getScores(),
        );
    }
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Service/BoardFactory.php
<?php
namespace TS\Bundle\MinesweeperBundle\Service;
/**
 * Builds square minesweeper boards: cells hold Symbols::MINE or the count
 * of neighbouring mines.
 */
class BoardFactory
{
    /**
     * @param int $size  Board edge length.
     * @param int $mines Number of mines to place (0 <= mines < size*size).
     *
     * @throws \InvalidArgumentException When the mine count cannot fit the board.
     *
     * @return array
     */
    public static function create($size, $mines)
    {
        // Guard: more mines than free cells would make the placement loop
        // below spin forever; a negative count is meaningless.
        if ($mines < 0 || $mines >= $size * $size) {
            throw new \InvalidArgumentException(sprintf('Cannot place %s mines on a %sx%s board', $mines, $size, $size));
        }

        $board = array();
        for ($i = 0; $i < $size; $i++) {
            for ($j = 0; $j < $size; $j++) {
                $board[$i][$j] = 0;
            }
        }

        // Populate mines.
        // BUGFIX: the old foreach over range(1, $mines) counted *down* for
        // $mines == 0 (PHP range(1, 0) is [1, 0]) and placed two mines.
        for ($mine = 0; $mine < $mines; $mine++) {
            // Retry until a mine-free cell is drawn.
            do {
                $row = mt_rand(0, $size - 1);
                $col = mt_rand(0, $size - 1);
            } while ($board[$row][$col] === Symbols::MINE);
            $board[$row][$col] = Symbols::MINE;
        }

        // Calculate cell numbers
        foreach ($board as $row => $rowCells) {
            foreach ($rowCells as $col => $value) {
                if (Symbols::MINE !== $value) {
                    $board[$row][$col] = static::borderingMines($board, $row, $col);
                }
            }
        }

        return $board;
    }

    /**
     * Count mines in the (up to eight) cells around ($row, $col).
     *
     * @param array $board
     * @param int $row
     * @param int $col
     *
     * @return int
     */
    private static function borderingMines(array $board, $row, $col)
    {
        $mines = 0;
        for ($i = $row - 1; $i <= $row + 1; $i++) {
            for ($j = $col - 1; $j <= $col + 1; $j++) {
                // isset() silently skips out-of-board neighbours.
                if (isset($board[$i][$j]) && Symbols::MINE === $board[$i][$j]) {
                    $mines++;
                }
            }
        }

        return $mines;
    }
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Resources/public/js/game.js
'use strict';

// --- Client-side state ----------------------------------------------------
var gameId = 1,          // id of the game shown in the "game" section
    game = {},           // last game payload received from the server
    pollingRate = 1000,  // milliseconds between server polls
    section = 'lobby';   // visible UI section: 'lobby' or 'game'
$(document).ready(function () {
    // Poll the server for whatever section is on screen. `globals`
    // (current user info) is injected by the page template; skip polling
    // until it exists.
    setInterval(function () {
        if (typeof globals != 'undefined') {
            if (section == 'lobby') {
                $.getJSON('/lobby/', function (data) {
                    updateLobbyInfo(data);
                });
                $.getJSON('/users/' + globals.user.id + '/games', function (data) {
                    updateGamesInfo(data);
                });
            } else if (section == 'game') {
                $.getJSON('/games/' + gameId, function (data) {
                    updateGameInfo(data);
                });
            }
        }
    }, pollingRate);

    // Title link returns to the lobby.
    $('#title').find('a').click(function (event) {
        event.preventDefault();
        changeSection('lobby');
    });

    // Click on a hidden board cell: only allowed on this user's turn.
    // NOTE(review): bound once at load to cells carrying .enabled at that
    // moment; the handler itself does not re-check the class.
    $('.board-cell.enabled').click(function () {
        if (game.activePlayer != globals.user.id) {
            return;
        }
        var row = $(this).data('row'),
            col = $(this).data('col');
        // Server applies the move and replies with the updated game state.
        $.post('/games/' + gameId, {row: row, col: col}, function (data) {
            updateGameInfo(data);
        });
        console.log('Click (' + row + ', ' + col + ')');
    });

    // Shared chat form; the target URL depends on the visible section.
    $('#form-chat').submit(function () {
        var $message = $('#message'),
            message = $message.val();
        if (message != '') {
            var url;
            $message.val('');
            if (section == 'lobby') {
                url = '/lobby/chat';
            } else if (section == 'game') {
                url = '/games/' + gameId + '/chat';
            }
            $.post(url, {message: message}, function (data) {
                updateChatInfo(data.chat);
            });
        }
        return false;
    });

    // Static links in the lists (dynamically added ones are wired in
    // updateGamesInfo/updateUsersInfo via onGameClick/onUserClick).
    $('#game-list').find('a').on('click', function (event) {
        event.preventDefault();
        $.getJSON($(this).attr('href'), function (data) {
            updateGameInfo(data);
            changeSection('game');
        });
        return false;
    });
    $('#user-list').find('a').on('click', function (event) {
        event.preventDefault();
        $.post($(this).attr('href'), function (data) {
            updateGameInfo(data);
            changeSection('game');
        });
        return false;
    });
});
// Refresh the lobby section: chat history plus the online-user list.
function updateLobbyInfo(data) {
    updateUsersInfo(data.users);
    updateChatInfo(data.chat);
}
// Render a full game payload: chat, board, scoreboard and turn indicator.
// Also remembers the payload in the `game` global and its id in `gameId`.
function updateGameInfo(data) {
    gameId = data.id;
    updateChatInfo(data.chat);
    drawBoard(data.board);

    // Scoreboard slots are addressed by player position (0/1).
    var scoreboard = $('#scoreboard');
    scoreboard.find('div[data-player="0"] .score').text(data.scores[0]);
    scoreboard.find('div[data-player="1"] .score').text(data.scores[1]);
    scoreboard.find('div[data-player="0"] .username').text(data.players[0].username);
    scoreboard.find('div[data-player="1"] .username').text(data.players[1].username);

    // Turn indicator: colour-coded when it is our turn, plain text otherwise.
    var turn = $('#turn');
    if (data.activePlayer == globals.user.id) {
        turn.text('Your turn');
        if (data.players[0].id == globals.user.id) {
            turn.addClass('player0');
        } else {
            turn.addClass('player1');
        }
    } else {
        if (data.players[0].id == data.activePlayer) {
            turn.text(data.players[0].name + '\'s turn');
        } else {
            turn.text(data.players[1].name + '\'s turn');
        }
        turn.removeClass('player0').removeClass('player1');
    }
    game = data;
}
// Re-render the chat pane from an array of {from, message} lines.
// from === -1 marks an info line, -2 an error line; any other value is a
// user name/id shown as the message prefix.
function updateChatInfo(chatData) {
    var pane = $('#chat').html('');
    for (var key in chatData) {
        var entry = chatData[key];
        var line = $('<p>').text(entry.message);
        if (entry.from === -1) {
            line.addClass('info');
        } else if (entry.from === -2) {
            line.addClass('error');
        } else {
            line.prepend($('<span>').text(entry.from).addClass('username'));
        }
        pane.append(line);
    }
    // Keep the newest line in view.
    pane.scrollTop(pane[0].scrollHeight);
}
// Rebuild the lobby user list: every user except ourselves gets a
// "<username> (won-lost)" entry plus a "New game" link that posts to the
// game-creation endpoint (handled by onUserClick).
function updateUsersInfo(usersData) {
    var userList = $('#user-list').html('');
    for (var userKey in usersData) {
        var user = usersData[userKey];
        if (user.id != globals.user.id) {
            var userElement = $('<li>').text(user.username + ' (' + user.games.won.length + '-' + user.games.lost.length + ')'),
                link = $('<a>').attr('href', '/games/' + '?players=' + globals.user.id + ',' + user.id).text('New game');
            // .add() builds a jQuery collection [link, userElement] which is
            // appended as a pair; the click handler is bound before appending.
            userList.append(link.click(onUserClick).add(userElement));
        }
    }
}
// Rebuild the lobby game list: one "<p1> s1 - s2 <p2>" entry per game,
// wrapped in a link that opens the game (via onGameClick).
function updateGamesInfo(gamesData) {
    var list = $('#game-list').html('');
    for (var key in gamesData) {
        var entry = gamesData[key];
        var label = entry.players[0].username + ' ' + entry.scores[0] +
            ' - ' + entry.scores[1] + ' ' + entry.players[1].username;
        var item = $('<li>').text(label);
        var anchor = $('<a>').attr('href', '/games/' + entry.id);
        list.append(anchor.append(item).click(onGameClick));
    }
}
// Paint the visible board onto the static grid of .board-cell elements.
// Cell values: 'M0'/'M1' = mine found by player 0/1, '' = still hidden,
// 0 = opened empty cell, any other number = opened cell with that count.
// Note: switch compares with === , so the string '' and the number 0 hit
// different cases on purpose.
function drawBoard(board) {
    var cells = $('.board-cell');
    for (var pos = 0; pos < cells.length; pos++) {
        var cell = $(cells[pos]),
            cellValue = board[cell.data('row')][cell.data('col')];
        // Only cells still marked .enabled (unopened) ever change.
        if (cell.hasClass('enabled')) {
            switch (cellValue) {
                case 'M0':
                    cell.removeClass('enabled').addClass('mine').attr('data-player', 0);
                    break;
                case 'M1':
                    cell.removeClass('enabled').addClass('mine').attr('data-player', 1);
                    break;
                case '':
                    // Still hidden: leave untouched.
                    break;
                case 0:
                    cell.removeClass('enabled').addClass('open');
                    break;
                default: // if it's a number
                    cell.removeClass('enabled').addClass('open');
                    cell.attr('data-number', cellValue);
                    cell.html(cellValue);
            }
        }
    }
}
// Reset every cell back to its initial hidden ("enabled") state.
function clearBoard() {
    var cells = $('.board-cell');
    cells.removeClass().addClass('board-cell enabled').html('');
}
// Switch the visible UI section ('lobby' or 'game'), wiping state that
// belonged to the previous one.
function changeSection(target) {
    section = target;
    $('#chat').html('');
    clearBoard();
    $('.section').hide();
    $('.section-' + target).show();
}
// Open an existing game: fetch its state and switch to the game view.
var onGameClick = function (event) {
    event.preventDefault();
    var href = $(this).attr('href');
    $.getJSON(href, function (payload) {
        updateGameInfo(payload);
        changeSection('game');
    });
};
// Start a new game against the clicked user (POST creates it server-side),
// then switch to the game view.
var onUserClick = function (event) {
    event.preventDefault();
    var href = $(this).attr('href');
    $.post(href, function (payload) {
        updateGameInfo(payload);
        changeSection('game');
    });
};
<file_sep>/src/TS/Bundle/MinesweeperBundle/Entity/Lobby.php
<?php
namespace TS\Bundle\MinesweeperBundle\Entity;
use Doctrine\ORM\Mapping as ORM;
use Doctrine\Common\Collections\ArrayCollection;
/**
 * The single shared lobby: global chat log plus the set of online users.
 *
 * @ORM\Table()
 * @ORM\Entity
 */
class Lobby
{
    /**
     * @var integer
     *
     * @ORM\Column(name="id", type="integer")
     * @ORM\Id
     * @ORM\GeneratedValue(strategy="AUTO")
     */
    private $id;

    /**
     * Lobby-wide chat: list of {from, message} entries (see addChatLine()).
     *
     * @var array
     *
     * @ORM\Column(name="chat", type="json_array")
     */
    private $chat = array();

    /**
     * Users currently online.
     * NOTE(review): mappedBy="lobby" expects a User::$lobby property — not
     * visible in the User entity; confirm the mapping is complete.
     *
     * @var ArrayCollection
     *
     * @ORM\OneToMany(targetEntity="User", mappedBy="lobby")
     */
    private $onlineUsers;

    /**
     * Get id
     *
     * @return integer
     */
    public function getId()
    {
        return $this->id;
    }

    /**
     * Set chat
     *
     * @param array $chat
     * @return Lobby
     */
    public function setChat($chat)
    {
        $this->chat = $chat;
        return $this;
    }

    /**
     * Get chat
     *
     * @return array
     */
    public function getChat()
    {
        return $this->chat;
    }

    /**
     * Constructor
     */
    public function __construct()
    {
        $this->onlineUsers = new \Doctrine\Common\Collections\ArrayCollection();
    }

    /**
     * Remove onlineUsers
     *
     * @param \TS\Bundle\MinesweeperBundle\Entity\User $onlineUsers
     */
    public function removeOnlineUser(\TS\Bundle\MinesweeperBundle\Entity\User $onlineUsers)
    {
        $this->onlineUsers->removeElement($onlineUsers);
    }

    /**
     * Append a line to the lobby chat.
     *
     * @param int $from user id, or a Symbols::CHAT_* marker for system lines
     * @param string $message
     */
    public function addChatLine($from, $message)
    {
        $this->chat[] = array(
            'from' => $from,
            'message' => $message,
        );
    }
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Repository/GameRepository.php
<?php
namespace TS\Bundle\MinesweeperBundle\Repository;
use Doctrine\ORM\EntityRepository;
class GameRepository extends EntityRepository
{
    // Intentionally empty: inherits the generic finders (find, findOneById,
    // findAll, ...) from EntityRepository. Custom game queries belong here.
}
<file_sep>/src/TS/Bundle/MinesweeperBundle/Service/GameManager.php
<?php
namespace TS\Bundle\MinesweeperBundle\Service;
use Doctrine\ORM\EntityManager;
use Doctrine\ORM\EntityRepository;
use TS\Bundle\MinesweeperBundle\Entity\Game;
use TS\Bundle\MinesweeperBundle\Entity\User;
use TS\Bundle\MinesweeperBundle\Exception\GameNotFoundException;
/**
 * Game lifecycle service: creation, moves (cell opening), and chat.
 */
class GameManager
{
    /** Board edge length (boards are square). */
    const BOARD_SIZE = 16;
    /** Mines per board; odd, so one player must collect strictly more than half. */
    const MINES = 49;

    /**
     * @var EntityRepository
     */
    private $gameRepository;

    /**
     * @var EntityManager
     */
    private $entityManager;

    public function __construct(EntityRepository $gameRepository, EntityManager $entityManager)
    {
        $this->gameRepository = $gameRepository;
        $this->entityManager = $entityManager;
    }

    /**
     * @param int $gameId
     *
     * @throws GameNotFoundException
     *
     * @return Game
     */
    public function get($gameId)
    {
        $game = $this->gameRepository->findOneById($gameId);
        if (!$game) {
            throw new GameNotFoundException(sprintf('Game %s not found', $gameId));
        }

        return $game;
    }

    /**
     * Create and persist a new game between the given players.
     *
     * @param User[]   $players
     * @param int|null $activePlayer User id of the starting player; picked at random when null.
     *
     * @return int Id of the newly created game.
     */
    public function create(array $players, $activePlayer = null)
    {
        $game = new Game();
        $scores = array();
        foreach ($players as $player) {
            $game->addPlayer($player);
            $player->addGame($game);
            $this->entityManager->persist($player);
            $scores[] = 0;
        }
        $game->setScores($scores);

        if (null === $activePlayer) {
            // BUGFIX: array_rand() returns a random *key*, so the old code
            // stored an array index as the active player instead of a user id.
            $ids = array_map(function (User $player) {
                return $player->getId();
            }, $players);
            $activePlayer = $ids[array_rand($ids)];
        }
        $game->setActivePlayer($activePlayer);
        $game->setBoard(BoardFactory::create(static::BOARD_SIZE, static::MINES));

        // All cells start hidden (uses BOARD_SIZE instead of a hard-coded 16).
        $visibleBoard = array();
        for ($row = 0; $row < static::BOARD_SIZE; $row++) {
            for ($col = 0; $col < static::BOARD_SIZE; $col++) {
                $visibleBoard[$row][$col] = Symbols::UNKNOWN;
            }
        }
        $game->setVisibleBoard($visibleBoard);

        $this->entityManager->persist($game);
        $this->entityManager->flush();

        return $game->getId();
    }

    /**
     * Open cell on behalf of a player.
     *
     * @param int  $gameId
     * @param User $player
     * @param int  $row
     * @param int  $col
     *
     * @throws \Exception When the player is not the active player, or the
     *                    game state is inconsistent.
     */
    public function open($gameId, User $player, $row, $col)
    {
        $game = $this->get($gameId);
        $activePlayer = $game->getActivePlayer();

        if ($player->getId() !== $activePlayer) {
            throw new \Exception(sprintf('User %s is not currently active or game is already over', $activePlayer));
        }

        $players = $game->getPlayers();
        // Distinct loop variable so the $player argument is not clobbered.
        foreach ($players as $pos => $candidate) {
            if ($candidate->getId() === $activePlayer) {
                $this->openCell($game, $pos, $row, $col);
                if ($game->isOver()) {
                    // The move that ended the game decides the winner.
                    $game->setWinner($candidate);
                }
                $this->entityManager->flush();

                return;
            }
        }

        throw new \Exception('Corrupt game, active player not in players');
    }

    /**
     * Post a chat line on behalf of a user.
     *
     * @param int    $gameId
     * @param User   $user
     * @param string $message
     */
    public function sendUserChat($gameId, User $user, $message)
    {
        $this->sendChat($gameId, $user->getUsername(), $message);
    }

    /**
     * Post a system chat line ('error' type maps to CHAT_ERROR, anything
     * else to CHAT_INFO).
     */
    public function sendSystemChat($gameId, $message, $type)
    {
        $from = Symbols::CHAT_INFO;
        if ($type == 'error') {
            $from = Symbols::CHAT_ERROR;
        }
        $this->sendChat($gameId, $from, $message);
    }

    /**
     * @param int $gameId
     * @param int $from user id or system id
     * @param $message
     */
    private function sendChat($gameId, $from, $message)
    {
        /** @var Game $game */
        $game = $this->get($gameId);
        $game->addChatLine($from, $message);
        $this->entityManager->flush();
    }

    /**
     * Reveal one cell, update scores/turn, and flood-fill empty regions.
     *
     * @param Game $game
     * @param int  $playerPos
     * @param int  $row
     * @param int  $col
     *
     * @return string|null Symbol opened (if any)
     */
    private function openCell(Game $game, $playerPos, $row, $col)
    {
        $board = $game->getBoard();
        $visibleBoard = $game->getVisibleBoard();
        // Out of bounds or already revealed: nothing to do.
        if (!isset($board[$row][$col]) || $visibleBoard[$row][$col] !== Symbols::UNKNOWN) {
            return null;
        }

        $visibleBoard[$row][$col] = $board[$row][$col];
        if ($board[$row][$col] === Symbols::MINE) {
            // Tag the mine with its finder's position and award a point.
            // Finding a mine does NOT pass the turn.
            $visibleBoard[$row][$col] .= $playerPos;
            $scores = $game->getScores();
            $scores[$playerPos] += 1;
            // End game (no next turn, a player has already won)
            if ($scores[$playerPos] > static::MINES / 2) {
                $game->setActivePlayer(Symbols::GAME_OVER);
            }
            $game->setScores($scores);
        } else {
            // A safe cell passes the turn to the next player.
            $players = $game->getPlayers();
            $nextPlayerPos = ($playerPos + 1) % count($players);
            $game->setActivePlayer($players[$nextPlayerPos]->getId());
        }
        // Persist visibility before recursing so neighbours see this cell as opened.
        $game->setVisibleBoard($visibleBoard);

        // Flood-fill: a zero cell auto-opens all eight neighbours.
        if (0 === $board[$row][$col]) {
            $this->openCell($game, $playerPos, $row - 1, $col - 1);
            $this->openCell($game, $playerPos, $row - 1, $col    );
            $this->openCell($game, $playerPos, $row - 1, $col + 1);
            $this->openCell($game, $playerPos, $row    , $col - 1);
            $this->openCell($game, $playerPos, $row    , $col + 1);
            $this->openCell($game, $playerPos, $row + 1, $col - 1);
            $this->openCell($game, $playerPos, $row + 1, $col    );
            $this->openCell($game, $playerPos, $row + 1, $col + 1);
        }

        return $board[$row][$col];
    }
}
| b66ea8bb90dcde55af58fbbac1c20364cafb2af9 | [
"JavaScript",
"PHP"
] | 20 | PHP | TentacleSoft/workshop-symfony2-volcanica | 40233998ca132441631f55c6bbb33fe8934daa0f | 599b581706015d8fd1836f5cdfc43316b7a8b360 |
refs/heads/master | <repo_name>jebbates/cms-workshop<file_sep>/geometry_analysis.py
"""
geometry_analysis.py
This module contains the geometry analysis project from MolSSI workshop.
Author : JEBates
"""
import numpy
import os
import sys
########################################################################
# HEPLER FUNCTIONS
def open_xyz(input_file):
    """
    function to open and process an xyz coordinate file.
    Input :
        input_file - name of the file to process
    Output :
        symbols - numpy array of chemical symbols
        coords - numpy array of 3D coordinates
    """
    # since we assume xyz, skip the first two lines (atom count + comment)
    data = numpy.genfromtxt(fname=input_file, dtype='unicode', skip_header=2)
    symbols = data[:,0]
    # BUGFIX: the deprecated ``numpy.float`` alias (removed in NumPy 1.24)
    # is replaced with the builtin ``float``; the result dtype is identical.
    coords = data[:,1:].astype(float)
    return symbols, coords
# add a default value for min and max distance
# add a default value for min and max distance
def bond_check(distance, min_distance=0, max_distance=1.5):
    """
    Check if a given distance is between two cutoffs for a bond length.
    Input :
        distance : bond length to check
        min_distance : minimum distance allowed - default 0. Angstroms
        max_distance : maximum distance allowed - default 1.5 Angstroms
    Output :
        is_a_bond : Boolean (True or False) indicating if distance is between the cutoffs
    """
    # reject physically impossible input before testing the cutoffs
    if distance < 0.:
        raise ValueError("bond_check has detected a NEGATIVE distance! Check your inputs.")
    # a bond must lie strictly between the two cutoffs
    return min_distance < distance < max_distance
# calculate a distance for two atom positions
def calculate_distance(atom1_coord, atom2_coord):
"""
function to calculate the distance between two atoms.
Inputs :
atom1_coord - 3D coordinates of atom 1
atom2_coord - 3D coordinates of atom 2
Outputs :
distance between atom1 and atom2
"""
# check the inputs
if (len(atom1_coord) != 3) or (len(atom2_coord) != 3):
raise ValueError("The shape of an atom's coordinates are incorrect. Double check your inputs")
x_distance = atom1_coord[0] - atom2_coord[0]
y_distance = atom1_coord[1] - atom2_coord[1]
z_distance = atom1_coord[2] - atom2_coord[2]
distance = numpy.sqrt(x_distance**2. + y_distance**2. + z_distance**2.)
return distance
########################################################################
if __name__ == "__main__":
    # Require the xyz file path as the first CLI argument.
    if len(sys.argv) < 2:
        raise NameError("Did NOT specify an input file. Add an argument for the file you want to analyze.")

    # get file name
    file_location = sys.argv[1]
    # extract symbols and coordinates
    symbols, coords = open_xyz(file_location)

    # Print every bonded pair exactly once: jj starts past ii, so each
    # unordered pair is visited a single time and self-pairs are skipped.
    # BUGFIX: the original enumerated the *sliced* array coords[ii:], which
    # re-indexes from 0, so symbols[jj] named the wrong atom and the ii==jj
    # self-pair was included.
    num_atoms = len(symbols)
    for ii in range(num_atoms):
        for jj in range(ii + 1, num_atoms):
            bond_length12 = calculate_distance(coords[ii], coords[jj])
            if bond_check(bond_length12) is True:
                print(F'{symbols[ii]} to {symbols[jj]} : {bond_length12:.3f}')
| 4d817cacc12c40127933b65d1f94baefad622fd0 | [
"Python"
] | 1 | Python | jebbates/cms-workshop | c288f336514f06166ed007d20c6c486a8f0be73d | 280453cbb9ff4d77c11b9be7c8e872e7c913fcfd |
refs/heads/master | <file_sep>import jwt
from flask import request, url_for, jsonify
from flask_restplus import Resource
from functools import wraps
from app import api, app
from ..database.models import User
from app.parsers import pagination_parser
from ..endpoints.post import init_post
from ..endpoints.user import init_user
from ..endpoints.login import init_login
from ..endpoints.reset_password import reset_password
def token_required(f):
    """Decorator for views that require a JWT in the ``x-access-token`` header.

    Decodes the token with the app's SECRET_KEY, loads the matching User and
    passes it to the wrapped view as the first positional argument.
    Responds 401 when the token is missing or invalid.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = None
        if "x-access-token" in request.headers:
            token = request.headers["x-access-token"]
        if not token:
            return {"message": "Token is missing!"}, 401
        try:
            data = jwt.decode(
                token, app.config["SECRET_KEY"], algorithms=["HS256"])
            current_user = User.query.filter_by(
                id=data["id"]).first()
        except Exception:
            # BUGFIX: the 401 tuple was previously built but never returned,
            # so an invalid token fell through and crashed on the unbound
            # current_user. Also narrowed the bare except and removed a
            # leftover debug print.
            return {"message": "Token is invalid!"}, 401
        return f(current_user, *args, **kwargs)
    return decorated
# Register every endpoint namespace on the shared Api instance; each init_*
# wires its routes, and the protected ones receive the token_required
# decorator defined above.
init_post(api, pagination_parser, Resource, request, token_required)
init_user(api, pagination_parser, Resource, request, token_required)
reset_password(api, Resource)
init_login(api, Resource)
<file_sep>FLASK_DEBUG = True
PORT = 5000<file_sep>aniso8601==3.0.2
astroid==1.6.5
autopep8==1.3.5
blinker==1.4
click==6.7
Flask==1.0.2
Flask-Cors==3.0.6
Flask-Mail==0.9.1
flask-restplus==0.11.0
Flask-SQLAlchemy==2.3.2
gunicorn==19.8.1
isort==4.3.4
itsdangerous==0.24
Jinja2==2.10
jsonschema==2.6.0
lazy-object-proxy==1.3.1
MarkupSafe==1.0
mccabe==0.6.1
pycodestyle==2.4.0
PyJWT==1.6.4
pylint==1.9.2
pytz==2018.4
six==1.11.0
SQLAlchemy==1.2.8
Werkzeug==0.14.1
wrapt==1.10.11
<file_sep>from flask_restplus import fields, Model
from app import api
# Response/request marshalling models shared by the endpoint modules.

pagination_model = api.model("A page of results", {
    "page": fields.Integer(description="Number of this page of results"),
    "pages": fields.Integer(description="Total number of pages of results"),
    "per_page": fields.Integer(description="Number of items per page of results"),
    "total": fields.Integer(description="Total number of results"),
    # NOTE(review): these two are boolean flags but declared Integer.
    "has_prev": fields.Integer(description="Is prev page?"),
    "has_next": fields.Integer(description="Is next page?"),
})

user_model = api.model("user_model", {
    "id": fields.Integer,
    "username": fields.String(required=True, description="User name"),
    "email": fields.String(description="User email"),
    "password": fields.String(description="<PASSWORD>"),
    "role_id": fields.Integer,
    # BUGFIX: the flask-restplus keyword is ``attribute`` (``attributes`` was
    # silently ignored), and the Role model's string column is ``role``,
    # not ``name``.
    "role": fields.String(attribute="role.role"),
})

user_model_list = api.inherit("Page of blog posts", pagination_model,
                              {"items": fields.List(
                                  fields.Nested(user_model))},
                              )

post_model = api.model("post_model", {
    "id": fields.String,
    "title": fields.String(required=True),
    "content": fields.String,
    "author": fields.String,
    "create_at": fields.DateTime(dt_format='rfc822'),
    "category_id": fields.Integer,
    # BUGFIX: ``attribute`` keyword, as above.
    "category": fields.String(attribute="category.category_name")
})

category_model = api.model("category_model", {
    "id": fields.Integer,
    "category_name": fields.String
})

post_model_list = api.inherit("Page of posts", pagination_model,
                              {"items": fields.List(
                                  fields.Nested(post_model))},
                              )

login_model = api.model("login_model", {
    "username": fields.String(required=True),
    "password": fields.String(required=True)
})

send_email_model = api.model("reset_password_model", {
    "email": fields.String(required=True)
})

reset_password_model = api.model("reset_password_model", {
    "password": fields.String(required=True),
    "token": fields.String(required=True)
})
<file_sep>from flask import Flask
from flask_restplus import Api
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
# Application singletons shared across the package.
app = Flask(__name__)
# Allow cross-origin requests (the frontend runs on a different port).
CORS(app)
# DB URI, SECRET_KEY, mail settings etc. live in config.cfg.
app.config.from_pyfile("config.cfg")
api = Api(app)
db = SQLAlchemy(app)
# Imported last, and for side effects only, to avoid a circular import:
# the endpoint modules themselves import `app` and `api` from here.
from app.endpoints import post, user
<file_sep>from flask_sqlalchemy import SQLAlchemy
from app import db
from datetime import datetime
class User(db.Model):
    """Account record for an application user."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80))
    email = db.Column(db.String(120))
    # Stores a werkzeug sha256 hash, never the plain-text password.
    password = db.Column(db.String(200))
    # Every user has exactly one Role (role id 1 is assigned on signup).
    role_id = db.Column(db.Integer, db.ForeignKey("role.id"), nullable=False)
    role = db.relationship("Role", backref=db.backref("user", lazy=True))

    def __repr__(self):
        return '<User %r>' % self.username
class Role(db.Model):
    """Authorization role lookup table, referenced by User.role_id."""
    id = db.Column(db.Integer, primary_key=True)
    role = db.Column(db.String(50))

    def __repr__(self):
        return "Role {}".format(self.role)
class Post(db.Model):
    """A blog post belonging to one Category."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(120))
    content = db.Column(db.Text)
    # Denormalized author display name (not a foreign key to User).
    author = db.Column(db.String(120))
    # Creation timestamp, defaulted server-side to UTC now.
    create_at = db.Column(db.DateTime, nullable=False,
                          default=datetime.utcnow)
    category_id = db.Column(db.Integer, db.ForeignKey("category.id"), nullable=False)
    category = db.relationship("Category", backref=db.backref("post", lazy=True))
class Category(db.Model):
    """Post category lookup table."""
    id = db.Column(db.Integer, primary_key=True)
    category_name = db.Column(db.String(120))
from flask_sqlalchemy import SQLAlchemy
from app import db, app
from werkzeug.security import check_password_hash
from flask import jsonify
import datetime
import jwt
from flask_mail import Mail, Message
from itsdangerous import URLSafeTimedSerializer, SignatureExpired
from werkzeug.security import generate_password_hash, check_password_hash
# Serializer producing the time-limited tokens embedded in reset-password links.
s = URLSafeTimedSerializer(app.config["SECRET_KEY"])
# Mail transport, configured from the app settings.
mail = Mail(app)
def create_new_user(data):
    """Create a user from {username, email, password}.

    The password is stored as a sha256 hash and the default role (id 1)
    is assigned to every new account.
    """
    role = Role.query.filter_by(id=1).first()
    hashed_password = generate_password_hash(data["password"], method="sha256")
    # BUGFIX: store the freshly computed hash (the previous line referenced
    # a broken placeholder name); removed a leftover debug print.
    user = User(username=data["username"], email=data["email"],
                password=hashed_password, role=role)
    db.session.add(user)
    db.session.commit()
def edit_user(id, data):
    """Overwrite username, email and role of the user with the given id."""
    user = User.query.filter_by(id=id).first()
    new_role = Role.query.filter_by(id=data["role_id"]).first()
    user.username = data["username"]
    user.email = data["email"]
    user.role = new_role
    db.session.add(user)
    db.session.commit()
def delete_user(id):
    """Remove the user with the given id from the database."""
    target = User.query.filter_by(id=id).first()
    db.session.delete(target)
    db.session.commit()
def create_new_post(data):
    """Create a post from {title, content, category_id}.

    NOTE(review): the author is hard-coded; it should come from the
    authenticated user once the endpoint passes one through.
    """
    category = Category.query.filter_by(id=data["category_id"]).first()
    post = Post(title=data["title"], content=data["content"],
                author="tom 5", category=category)
    # Removed a leftover debug print of the post object.
    db.session.add(post)
    db.session.commit()
def edit_post(id, data):
    """Update title, content, author and category of an existing post."""
    post = Post.query.filter_by(id=id).first()
    post.title = data["title"]
    post.content = data["content"]
    post.author = data["author"]
    post.category = Category.query.filter_by(id=data["category_id"]).first()
    db.session.add(post)
    db.session.commit()
def get_post(id):
    """Return the post with the given id, or None when it does not exist."""
    query = Post.query.filter_by(id=id)
    return query.first()
def delete_post(id):
    """Remove the post with the given id from the database."""
    target = Post.query.filter_by(id=id).first()
    db.session.delete(target)
    db.session.commit()
def login(data):
    """Validate credentials and return a JSON response carrying a JWT.

    Returns (None, 401) when the username is unknown or the password
    does not match. The token expires after 280 minutes.
    """
    user = User.query.filter_by(username=data["username"]).first()
    # BUGFIX: an unknown username previously crashed with an AttributeError
    # on user.password instead of returning 401.
    if user is None or not check_password_hash(user.password, data["password"]):
        return None, 401
    payload = {"id": user.id,
               "exp": datetime.datetime.utcnow() + datetime.timedelta(minutes=280)}
    token = jwt.encode(payload, app.config["SECRET_KEY"], "HS256").decode("utf-8")
    return jsonify({"token": token})
def send_link_to_email(data):
    """Email a time-limited password-reset link to the given address.

    Expects {"email": "..."}. Returns ("Email has been sent successfully",
    250) on success and (None, 400) for a missing or blank address.
    """
    email = data.get("email")
    if email and email.strip():
        # The token embeds the address and is validated (with an expiry)
        # in reset_password().
        token = s.dumps(email, salt="check_email")
        link = "http://localhost:8080/#/reset_password/" + token
        # BUGFIX: send to the requesting address instead of a hard-coded
        # recipient; also removed debug prints and the KeyError on a
        # missing "email" field.
        msg = Message("Link to reset password", sender="<EMAIL>",
                      recipients=[email])
        msg.body = link
        mail.send(msg)
        return "Email has been sent successfully", 250
    else:
        return None, 400
def reset_password(data):
    """Set a new password for the user identified by a reset token.

    The token (issued by send_link_to_email) must be no older than 120
    seconds; an expired token yields a JSON error message instead.
    """
    try:
        email = s.loads(data["token"], salt="check_email", max_age=120)
        user = User.query.filter_by(email=email).first()
        hashed_password = generate_password_hash(
            data["password"], method="sha256")
        # Fix: store the freshly hashed password (the original assignment
        # was mangled and did not reference hashed_password).
        user.password = hashed_password
        db.session.add(user)
        db.session.commit()
    except SignatureExpired:
        return jsonify({"message": "Token is expired!"})
<file_sep>def init_user(api, pagination_parser, Resource, request, token_required):
    """Register the /user REST namespace (paginated list, create, update, delete)."""
    # Imports deferred to call time -- presumably to avoid circular imports
    # at module load; confirm against the app factory.
    from app.serializers import user_model_list, user_model
    from app.database.models import User
    from app.controllers import create_new_user, edit_user, delete_user
    ns_user = api.namespace("user", description="Operations related to user")
    @ns_user.route("/")
    class UserList(Resource):
        @api.expect(pagination_parser)
        @ns_user.marshal_with(user_model_list)
        @token_required
        def get(current_user, self):
            # NOTE(review): the (current_user, self) parameter order mirrors
            # how token_required injects its argument -- confirm the
            # decorator's calling convention.
            args = pagination_parser.parse_args(request)
            users = User.query.paginate(
                args.get("page", 1), args.get("per_page", 7), error_out=False)
            return users
        @api.expect(user_model)
        def post(self):
            create_new_user(api.payload)
            return None, 201
    @ns_user.route("/<int:id>")
    @api.response(404, "Page not found!")
    class UserOne(Resource):
        @api.expect(user_model)
        @api.response(204, "User has been updated!")
        def put(self, id):
            edit_user(id, api.payload)
            return None, 204
        @api.response(204, "User was deleted!")
        def delete(self, id):
            delete_user(id)
            return None, 204
<file_sep>def reset_password(api, Resource):
    """Register the /email namespace: send a reset link and apply a new password.

    NOTE(review): this registration function shares its name with the
    app.controllers.reset_password imported below; the function-scope import
    makes the nested handler resolve to the controller, but the duplicate
    naming is confusing and worth renaming.
    """
    from app.serializers import send_email_model, reset_password_model
    from app.controllers import send_link_to_email, reset_password
    ns = api.namespace("email", description="Operations related to send email")
    @ns.route("/send_email")
    class SendEmail(Resource):
        @ns.expect(send_email_model)
        @ns.response(250, "Everything went well and your email was delivered to the recipient server")
        def post(self):
            return send_link_to_email(api.payload)
    @ns.route("/reset_password/")
    class ResetPassword(Resource):
        @ns.expect(reset_password_model)
        def post(self):
            # Result of the controller call is discarded; the route always
            # responds with None.
            reset_password(api.payload)
            return None
<file_sep>def init_login(api, Resource):
    """Register the /login namespace with a single POST endpoint."""
    from app.serializers import login_model
    from app.controllers import login
    ns_login = api.namespace("login", description="Operations related to login")
    @ns_login.route("/")
    class Login(Resource):
        @api.expect(login_model)
        @api.response(200, "You are loggin!")
        @api.response(401, "Not authorized!")
        def post(self):
            # Delegates entirely to the login controller, which returns
            # either a token response or a 401 tuple.
            return login(api.payload)
<file_sep>from ..database.models import User, Role, Post, Category
from app import db
from werkzeug.security import generate_password_hash, check_password_hash
def init_data():
    """Drop and recreate all tables, then seed roles, categories, users and posts.

    Destructive: every existing row is deleted before seeding.
    """
    db.drop_all()
    db.create_all()
    # Two roles: odd-numbered demo users become admins, even-numbered plain users.
    roleAdmin = Role(role="admin")
    db.session.add(roleAdmin)
    roleUser = Role(role="user")
    db.session.add(roleUser)
    db.session.commit()
    category1 = Category(category_name="Technology")
    category2 = Category(category_name="Sc-Fi")
    db.session.add_all([category1, category2])
    db.session.commit()
    for i in range(1, 20):
        # Fix: the password keyword argument had been mangled; it must be the
        # hash of "p<i>" computed just above.
        hashed_password = generate_password_hash("p" + str(i), method="sha256")
        role = roleAdmin if i % 2 != 0 else roleUser
        user = User(username="tom" + str(i), email="tom" + str(i) + "@tom.pl",
                    password=hashed_password, role=role)
        db.session.add(user)
    db.session.commit()
    for i in range(1, 30):
        category = category1 if i % 2 != 0 else category2
        # Fix: content used str(1) where the title used str(i) -- a typo that
        # gave every post identical content.
        post = Post(title="Some title " + str(i), content="Some content " + str(i),
                    author="Tom4", category=category)
        db.session.add(post)
    db.session.commit()
# init_data()
<file_sep>from flask_restplus import reqparse
# Shared query-string parser for the paginated list endpoints.
pagination_parser = reqparse.RequestParser()
pagination_parser.add_argument("page", type=int, default=1, help="Select page number")
# Typo fix in user-facing help text: "schould" -> "should".
pagination_parser.add_argument("per_page", type=int, default=10, choices=[2, 10, 20, 30, 40, 50], help="How many items should be on the page?")
<file_sep>def init_post(api, pagination_parser, Resource, request, token_required):
    """Register the /post namespace (paginated list, create, fetch, update, delete)."""
    from app.serializers import post_model_list, post_model
    from app.database.models import Post
    from app.controllers import create_new_post, edit_post, delete_post, get_post
    ns_post = api.namespace("post", description="Operations related to post")
    @ns_post.route("/")
    class PostList(Resource):
        @api.expect(pagination_parser)
        @ns_post.marshal_with(post_model_list)
        def get(self):
            args = pagination_parser.parse_args(request)
            posts = Post.query.paginate(
                args.get("page", 1), args.get("per_page", 7), error_out=False)
            return posts
        @api.expect(post_model)
        def post(self):
            create_new_post(api.payload)
            return None, 201
    @ns_post.route("/<int:id>")
    @api.response(404, "Page not found!")
    class PostOne(Resource):
        @ns_post.marshal_with(post_model)
        @api.response(204, "Post has been updated!")
        def get(self, id):
            # NOTE(review): get_post is called twice (debug print + return);
            # one database hit could be saved by binding the result once.
            print(get_post(id))
            return get_post(id)
        @api.expect(post_model)
        @api.response(204, "Post has been updated!")
        def put(self, id):
            edit_post(id, api.payload)
            return None, 204
        @api.response(204, "Post has been deleted!")
        def delete(self, id):
            delete_post(id)
            return None, 204
"Python",
"Text"
] | 13 | Python | grzeslaws/backend-flask-api | ad26a111c98c990507bc17e8aeb093489ffcf943 | dfb299103e7a23a9055ad062e9f295dd2e365952 |
refs/heads/master | <file_sep>package class14.servlet;
import class14.dao.UsersDao;
import class14.domain.User;
import com.alibaba.fastjson.JSON;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.List;
@WebServlet(name = "UserSelectAllServlet", urlPatterns = "/UserSelectAllServlet")
public class UserSelectAllServlet extends HttpServlet {
    /** Returns every user as a UTF-8 JSON array in the response body. */
    protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        // Load all users and serialize the list to a JSON string.
        List<User> allUsers = new UsersDao().selectAll();
        String json = JSON.toJSONString(allUsers);
        response.setContentType("text/json;charset=utf-8");
        response.getWriter().println(json);
    }
    /** GET is handled identically to POST. */
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        doPost(request, response);
    }
}
<file_sep>package class14.servlet;
import class14.dao.UsersDao;
import class14.domain.User;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.sql.Date;
// Fix: servlet url-patterns must begin with '/' (Servlet spec); the bare
// "UserDeleteServlet" mapping was invalid and inconsistent with the sibling
// servlets, so it is corrected to "/UserDeleteServlet".
@WebServlet(name = "UserDeleteServlet", urlPatterns = "/UserDeleteServlet")
public class UserDeleteServlet extends HttpServlet {
    /** Deletes the user whose "id" parameter is posted and reports the outcome. */
    protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        // Read the target id from the request.
        int id = Integer.parseInt(request.getParameter("id"));
        // Delete via the DAO.
        UsersDao usersDao = new UsersDao();
        boolean result = usersDao.delete(id);
        // Report the outcome as plain text.
        if (result) {
            response.getWriter().println("delete success");
        } else {
            response.getWriter().println("delete failed");
        }
    }
    /** GET delegates to POST. */
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        doPost(request, response);
    }
}
<file_sep>package class14.servlet;
import class14.dao.UsersDao;
import class14.domain.User;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.sql.Date;
@WebServlet(name = "UserInsertServlet", urlPatterns = "/UserInsertServlet")
public class UserInsertServlet extends HttpServlet {
    /** Builds a User from the request parameters, persists it, and reports the result. */
    protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        // Populate the entity straight from the submitted form fields.
        User newUser = new User();
        newUser.setId(Integer.valueOf(request.getParameter("id")));
        newUser.setName(request.getParameter("name"));
        newUser.setPassword(request.getParameter("password"));
        newUser.setSex(request.getParameter("sex"));
        newUser.setAge(Integer.valueOf(request.getParameter("age")));
        newUser.setBirthday(Date.valueOf(request.getParameter("birthday")));
        boolean saved = new UsersDao().insert(newUser);
        response.getWriter().println(saved ? "Insert success" : "Insert failed");
    }
    /** GET delegates to POST. */
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        doPost(request, response);
    }
}
| 0d91430fea046940a80d4aee69986e3d918bdff2 | [
"Java"
] | 3 | Java | Drx17002090107/javaweb_test2 | 4f651f92f037acba81ae89161eef66b9bf0e2810 | 6269b6cb3a9f83ac8213771ed1524d2553fceb6f |
refs/heads/master | <repo_name>wf2016/springboot-soap-webservice-mysql<file_sep>/src/main/java/com/mikaila/springbootsoapwebservice/service/IArticleService.java
package com.mikaila.springbootsoapwebservice.service;
import java.util.List;
import com.mikaila.springbootsoapwebservice.entity.Article;
/** CRUD contract for Article persistence. */
public interface IArticleService {
    /** @return all stored articles */
    List<Article> getAllArticles();
    /** @return the article identified by articleId */
    Article getArticleById(long articleId);
    /** @return true when the article was stored successfully */
    boolean addArticle(Article article);
    /** Updates an existing article. */
    void updateArticle(Article article);
    /** Removes the given article. */
    void deleteArticle(Article article);
}
<file_sep>/README.md
# springboot-soap-webservice-mysql
<file_sep>/src/main/resources/data.sql
-- Seed script: create the demo database and one `articles` table.
CREATE DATABASE IF NOT EXISTS `concretepage`;
USE `concretepage`;
-- Articles: auto-incremented id plus title/category text columns.
CREATE TABLE IF NOT EXISTS articles (
  article_id bigint(5) NOT NULL AUTO_INCREMENT,
  title varchar(200) NOT NULL,
  category varchar(100) NOT NULL,
  PRIMARY KEY (article_id)
);
-- Two starter rows so list endpoints return data immediately.
INSERT INTO articles (article_id, title, category) VALUES
(1, 'Java Concurrency', 'Java'),
(2, 'Spring Boot Getting Started', 'Spring Boot');
"Markdown",
"Java",
"SQL"
] | 3 | Java | wf2016/springboot-soap-webservice-mysql | 1cfcc0a08a762b53448f87bcb003ae38473fe94e | b2044d10a36317b435f2bc70a2db498eb82f890f |
refs/heads/master | <repo_name>minhpqn/Algorithm-Dojo<file_sep>/code/count_inversions.py
""" Count the number of inversions in the array
Read integers from
"""
def load(filename='IntegerArray.txt'):
    """Read one integer per line from *filename*, skipping blank lines.

    The original shadowed a dead first filename assignment and printed the
    wrong file name on failure; the file is now a parameter (defaulting to
    the original 'IntegerArray.txt', so existing callers are unaffected)
    and the error message names the actual file.
    Returns [] when the file cannot be opened.
    """
    numbers = []
    try:
        with open(filename, 'r') as f:
            for line in f:
                line = line.strip()
                if line == '':
                    continue
                numbers.append(int(line))
    except IOError:
        print('Could not open file %s to read' % filename)
    return numbers
def brute_force(numbers):
    """Count inversions by examining every pair; O(n^2) reference implementation."""
    n = len(numbers)
    return sum(
        1
        for i in range(n - 1)
        for j in range(i + 1, n)
        if numbers[i] > numbers[j]
    )
def sort_and_count(numbers):
    """Recursively sort *numbers* and count its inversions (divide and conquer).

    Returns (sorted_list, inversion_count). Assumes a non-empty input, as
    the original did.
    """
    n = len(numbers)
    if n == 1:
        return (numbers, 0)
    mid = n // 2
    left_sorted, left_inv = sort_and_count(numbers[:mid])
    right_sorted, right_inv = sort_and_count(numbers[mid:])
    merged, split_inv = count_split_inv(left_sorted, right_sorted)
    return (merged, left_inv + right_inv + split_inv)
def count_split_inv(b, c):
    """Merge two sorted lists and count split inversions.

    A split inversion is a pair (x, y) with x in *b*, y in *c* and x > y.
    Returns (merged_sorted_list, split_inversion_count).

    Fixes over the original:
    * equal elements no longer stall the merge (the original advanced
      neither pointer when b[i] == c[j], leaving zeros in the output);
      ties take the left element and count no inversion, the standard rule;
    * no sentinel value, so inputs at or above the old BIG_INT work;
    * the inputs are no longer mutated (the original appended sentinels).
    """
    merged = []
    count = 0
    i = j = 0
    while i < len(b) and j < len(c):
        if b[i] <= c[j]:
            merged.append(b[i])
            i += 1
        else:
            # c[j] jumps ahead of every element still waiting in b.
            merged.append(c[j])
            count += len(b) - i
            j += 1
    merged.extend(b[i:])
    merged.extend(c[j:])
    return (merged, count)
if __name__ == '__main__':
    # Load the assignment input and report its inversion count using the
    # O(n log n) divide-and-conquer routine (brute force kept for reference).
    numbers = load()
    # count = brute_force(numbers)
    # print('Number of inversions (by brute-force) = %d' % count)
    _, count = sort_and_count(numbers)
    print('Number of integers in the array = %d' % len(numbers))
    print('Number of inversions = %d' % count)
<file_sep>/code/inversion.cpp
/*
inversion.cpp
Calculate the number of inversions in an array
array of integer numbers is read from a file
See definition of inversion on:
[1] https://en.wikipedia.org/wiki/Inversion_%28discrete_mathematics%29
[2] https://www.cs.umd.edu/class/fall2009/cmsc451/lectures/Lec08-inversions.pdf
Date: 2015/11/09
Author: minhpham
*/
#include <iostream>
#include <fstream>
#include <vector>
#include <cstring>
#include <cmath>
using namespace std;
// Append every whitespace-separated integer read from fileName to intArray
// (the expected input files hold one number per line).
void readIntArray(std::vector<int>& intArray, const std::string& fileName)
{
    std::ifstream fin(fileName);
    for (int value; fin >> value; ) {
        intArray.push_back(value);
    }
    // The stream closes itself when it goes out of scope (RAII).
}
// O(n^2) reference count of inversions: pairs (i, j) with i < j and
// intArray[i] > intArray[j].
// Fix: the original evaluated intArray.size() - 1 on an unsigned value, so
// an empty vector wrapped around to a huge loop bound; inputs with fewer
// than two elements are now handled explicitly.
long int naiveMethod(const std::vector<int>& intArray)
{
    const std::size_t n = intArray.size();
    if (n < 2) {
        return 0;
    }
    long int numInversions = 0;
    for (std::size_t i = 0; i + 1 < n; i++) {
        for (std::size_t j = i + 1; j < n; j++) {
            if (intArray[i] > intArray[j]) {
                numInversions++;
            }
        }
    }
    return numInversions;
}
// merge and count the number of inversions for two sorted array
// intArray[p..q] and intArray[q+1..r]
long int mergeCount(vector<int>& intArray, int p, int q, int r)
{
    // Copy the working range into an auxiliary buffer; aux[k - p] mirrors intArray[k].
    vector<int> aux;
    for (unsigned int k = p; k <= r; k++) {
        aux.push_back(intArray[k]);
    }
    long int count = 0;
    // i walks the left half [p..q], j the right half [q+1..r]; both are
    // translated into aux via the -p offset.
    int i = p, j = q+1;
    for (unsigned int k = p; k <= r; k++) {
        if (i > q) {
            // Left half exhausted: drain the right half.
            intArray[k] = aux[j-p];
            j++;
        }
        else if (j > r) {
            // Right half exhausted: drain the left half.
            intArray[k] = aux[i-p];
            i++;
        }
        else if (aux[j-p] < aux[i-p]) {
            // The right element precedes every remaining left element, so
            // each of those q - i + 1 leftovers forms one inversion with it.
            intArray[k] = aux[j-p];
            j++;
            count += q - i + 1;
        }
        else {
            // Ties take the left element first, so equal values add no inversion.
            intArray[k] = aux[i-p];
            i++;
        }
    }
    return count;
}
// Divide-and-conquer inversion count over intArray[p..r]; sorts the range
// in place as a side effect of the merge step.
long int countInversions(vector<int>& intArray, int p, int r)
{
    if (p >= r) {
        return 0;   // a range of length 0 or 1 holds no inversions
    }
    const int q = (p + r) / 2;   // same as floor((p + r) / 2) for non-negative indices
    long int inversions = countInversions(intArray, p, q);
    inversions += countInversions(intArray, q + 1, r);
    inversions += mergeCount(intArray, p, q, r);
    return inversions;
}
// Debug helper: prints "(l, r): " followed by intArray[l..r], one line total.
void printArray(const vector<int>& intArray, int l, int r) {
    cout << "(" << l << ", " << r << "): ";
    unsigned int k = l;
    while (k <= r) {
        cout << intArray[k] << " ";
        ++k;
    }
    cout << endl;
}
int main()
{
    // Read the assignment input (one integer per line) and report how many
    // inversions the divide-and-conquer algorithm finds.
    vector<int> intArray;
    readIntArray(intArray, "./IntegerArray.txt");
    // NOTE(review): size()-1 underflows for an empty file -- this assumes
    // IntegerArray.txt exists and is non-empty.
    long int numInversions = countInversions(intArray, 0, intArray.size()-1);
    cout << "Number of inversions: " << numInversions << endl;
    return 0;
}
<file_sep>/README.md
# Learning about algorithms and efficient coding techniques
Author: <NAME>
<file_sep>/code/int_mul_algo_trivial.py
""" Programming Assignment 1: Integer multiplication algorithm
A trivial solution in python
"""
int_num1 = 3141592653589793238462643383279502884197169399375105820974944592
int_num2 = 2718281828459045235360287471352662497757247093699959574966967627
prod = int_num1 * int_num2
print('Product of two 64-digit integer numbers: %s' % prod)
| 0f9b4427aba7822e03b135db6af9e984a6105a25 | [
"Markdown",
"Python",
"C++"
] | 4 | Python | minhpqn/Algorithm-Dojo | 69d1069889284dc53d0aff6a0f47419b1559cb57 | 3280481cb033751c65899c5865645820a22ae3e2 |
refs/heads/master | <repo_name>AlexAntoine/Curry-Restaurant-Windows-Form<file_sep>/Curry Restaurant/Curry Restaurant/PizzaMenu.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Curry_Restaurant
{
class PizzaMenu
{
    // Option catalogues an order's choices can be checked against.
    // NOTE(review): "Meduim" is presumably a typo for "Medium" -- left
    // unchanged because these strings are runtime data other code may
    // compare against.
    private string[] _size = { "Small", "Large", "Meduim" };
    private string[] _toppings = { "Bacon", "Sausage", "Chicken", "Pepperoni", "Onions" };
    private string[] _crust = { "Stuffed Crust", "Thick Crust", "Thin Crust" };
    private string[] _variety = { "Baked", "Browned Top" };
    private string[] _stuffing = { "Plain", "Cheesy Coated", "Cheese Stuffed" };

    public PizzaMenu()
    {
    }

    // NOTE(review): each method below scans its catalogue and, on a match,
    // assigns the (equal) catalogue string back to the parameter, so every
    // method currently returns its argument unchanged whether or not it
    // matched. Possibly intended as input canonicalization or validation --
    // TODO confirm the intended behavior.
    public string pizzaSize(string size)
    {
        for (int a = 0; a < _size.Length; a++)
        {
            if (size.Equals(_size[a]))
            {
                size = _size[a];
            }
        }
        return size;
    }

    public string pizzaTopping(string toppings)
    {
        for (int a = 0; a < _toppings.Length; a++)
        {
            if (toppings.Equals(_toppings[a]))
            {
                toppings = _toppings[a];
            }
        }
        return toppings;
    }

    public string pizzaCrust(string crust)
    {
        for (int a = 0; a < _crust.Length; a++)
        {
            if (crust.Equals(_crust[a]))
            {
                crust = _crust[a];
            }
        }
        return crust;
    }

    public string pizzaVariety(string variety)
    {
        for (int a = 0; a < _variety.Length; a++)
        {
            if (variety.Equals(_variety[a]))
            {
                variety = _variety[a];
            }
        }
        return variety;
    }

    public string pizzaStuffing(string stuffing)
    {
        for (int a = 0; a < _stuffing.Length; a++)
        {
            if (stuffing.Equals(_stuffing[a]))
            {
                stuffing = _stuffing[a];
            }
        }
        return stuffing;
    }
}
}
<file_sep>/README.md
# Curry-Restaurant-Windows-Form
Windows Form of Curry Restaurant
| e143dd95d6c5b4092391956a1568ded53298f6ef | [
"Markdown",
"C#"
] | 2 | C# | AlexAntoine/Curry-Restaurant-Windows-Form | ebc27e6648b31146cbe7b5998bacdd46461fe5ae | 56d4c38b4e8c9f0aa2383c59d74fc7c45c2ba47f |
refs/heads/master | <repo_name>accolite/AU2021-PDFFormGen<file_sep>/README.md
# AU2021-PDFFormGen
Our schema-design approach is pushed in the DSschema directory.
# SpringJpa
- This Spring Maven project contains the implementation of a field group mapped to text fields and number fields with a **ManyToMany** relation mapping.
<file_sep>/formapp/src/app/models/ListItem.ts
/** A single selectable option (submitted value plus display text) for list controls. */
export class ListItem {
    // Parameter properties declare and assign val/text exactly as the
    // original explicit fields and constructor body did.
    constructor(public val: string, public text: string) {}
}
package com.greatlearning.springrest.entity;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.Table;
@Entity
@Table(name = "boxitem")
public class BoxItem {
    // NOTE(review): @GeneratedValue references a generator named "increment"
    // but no @GenericGenerator definition is visible in this file -- confirm
    // it is declared elsewhere, otherwise Hibernate cannot resolve it.
    @Id
    @GeneratedValue(generator = "increment")
    @Column(name = "id")
    private long id;
    // Option value submitted by the form and the label shown to the user.
    String value;
    String text;
    // Owning checkbox group; "boxid" is the foreign-key column.
    @ManyToOne
    @JoinColumn(name="boxid")
    private CheckBoxList CB;
    /** No-arg constructor required by JPA. */
    public BoxItem() {
    }
    public BoxItem(String val, String text) {
        this.value = val;
        this.text = text;
    }
    public long getId() {
        return id;
    }
    public void setId(long id) {
        this.id = id;
    }
    public String getValue() {
        return value;
    }
    public void setValue(String value) {
        this.value = value;
    }
    public String getText() {
        return text;
    }
    public void setText(String text) {
        this.text = text;
    }
    public CheckBoxList getCB() {
        return CB;
    }
    public void setCB(CheckBoxList cB) {
        CB = cB;
    }
}
<file_sep>/day12springjpa/springrestdemo/springrest/src/main/java/com/greatlearning/springrest/entity/FieldGroup.java
package com.greatlearning.springrest.entity;
import java.util.ArrayList;
import java.util.List;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import javax.persistence.Table;
/**
 * JPA entity for a reusable group of form fields. It owns lazy one-to-many
 * collections of Text, Num, Password, Email and CheckBoxList children, all
 * mapped by their "FG" back-reference and cascading every operation.
 */
@Entity
@Table(name = "fieldgroup")
public class FieldGroup
{
    public List<CheckBoxList> getFTcheckboxlist() {
        return FTcheckboxlist;
    }
    public void setFTcheckboxlist(List<CheckBoxList> fTcheckboxlist) {
        FTcheckboxlist = fTcheckboxlist;
    }
    public void setFTtext(List<Text> fTtext) {
        FTtext = fTtext;
    }
    public void setFTNum(List<Num> fTNum) {
        FTNum = fTNum;
    }
    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY) // auto increment
    @Column(name ="id")
    private long id;
    private String name;
    /** No-arg constructor required by JPA. */
    public FieldGroup() {
    }
    public FieldGroup(String name) {
        this.name = name;
    }
    // @OneToMany(mappedBy = "FG", fetch = FetchType.LAZY, cascade = CascadeType.ALL)
    // private List<Num> FTnum = new ArrayList<>();
    @OneToMany (mappedBy = "FG", fetch = FetchType.LAZY, cascade = CascadeType.ALL)
    private List<Text> FTtext = new ArrayList<>();
    @OneToMany (mappedBy = "FG", fetch = FetchType.LAZY, cascade = CascadeType.ALL)
    private List<Num> FTNum = new ArrayList<>();
    @OneToMany (mappedBy = "FG", fetch = FetchType.LAZY, cascade = CascadeType.ALL)
    private List<Password> FTPassword = new ArrayList<>();
    public List<Password> getFTPassword() {
        return FTPassword;
    }
    public void setFTPassword(List<Password> fTPassword) {
        FTPassword = fTPassword;
    }
    public List<Email> getFTemail() {
        return FTemail;
    }
    public void setFTemail(List<Email> fTemail) {
        FTemail = fTemail;
    }
    public List<Num> getFTNum() {
        return FTNum;
    }
    @OneToMany (mappedBy = "FG", fetch = FetchType.LAZY, cascade = CascadeType.ALL)
    private List<Email> FTemail = new ArrayList<>();
    @OneToMany (mappedBy = "FG", fetch = FetchType.LAZY, cascade = CascadeType.ALL)
    private List<CheckBoxList> FTcheckboxlist = new ArrayList<>();
    public long getId() {
        return id;
    }
    public void setId(long id) {
        this.id = id;
    }
    public String getName() {
        return name;
    }
    public void setName(String name) {
        this.name = name;
    }
    // public List<Num> getFTnum() {
    //     return FTnum;
    // }
    //
    // public void setFTnum(ArrayList<Num> fTnum) {
    //     FTnum = fTnum;
    // }
    public List<Text> getFTtext() {
        return FTtext;
    }
    // NOTE(review): this overloads the List<Text> setter above with an
    // ArrayList-typed variant -- one of the two is probably redundant.
    public void setFTtext(ArrayList<Text> fTtext) {
        FTtext = fTtext;
    }
    @Override
    public String toString() {
        return "FieldGroup [id=" + id + ", name=" + name + "]";
    }
}
<file_sep>/day12springjpa/springrestdemo/springrest/src/main/java/com/greatlearning/Repsitory/FieldGroupRepository.java
//package com.greatlearning.Repsitory;
//
//import org.springframework.data.repository.CrudRepository;
//
//import com.greatlearning.springrest.entity.FieldGroup;
//
//public interface FieldGroupRepository extends CrudRepository<FieldGroup, Long>{
//
//}
<file_sep>/day12springjpa/springrestdemo/springrest/src/main/java/com/greatlearning/springrest/SpringrestApplication.java
package com.greatlearning.springrest;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.boot.web.servlet.support.SpringBootServletInitializer;
//import com.greatlearning.Repsitory.FieldGroupRepository;
//import com.greatlearning.Repsitory.TextRepository;
import com.greatlearning.springrest.entity.FieldGroup;
import com.greatlearning.springrest.entity.Text;
@SpringBootApplication
public class SpringrestApplication extends SpringBootServletInitializer {
protected SpringApplicationBuilder configure(SpringApplicationBuilder appliation) {
return appliation.sources(SpringrestApplication.class);
}
public static void main(String[] args) {
SpringApplication.run(SpringrestApplication.class, args);
}
}
<file_sep>/formapp/src/app/models/FG.ts
import {Textfield} from './Text';
/** Lightweight view model for a field group: database id plus display name. */
export class FG {
    // Parameter properties declare and assign id/name exactly as the
    // original explicit fields and constructor body did.
    constructor(public id: number, public name: string) {}
}
import { Injectable, Input } from '@angular/core';
import{HttpClient, HttpHeaders, HttpParams}from '@angular/common/http'
import { Textfield } from '../models/Text';
import {FG} from '../models/FG';
import { FgId } from '../models/FgId';
@Injectable({
  providedIn: 'root'
})
export class Service {
  // Backend base URL, factored out of the three endpoint calls so it is
  // defined in exactly one place.
  private readonly baseUrl = "http://localhost:8080/api";

  constructor(private httpClient: HttpClient) { }

  /** Persist a new text-field definition (fire-and-forget subscribe). */
  // Removed a leftover debug console.log.
  // NOTE(review): the <Text> generic resolves to the DOM Text type here;
  // Textfield was probably intended -- confirm before changing.
  postTEXTf(t: Textfield) {
    this.httpClient.post<Text>(this.baseUrl + "/addtext", t).subscribe();
  }

  /** Observable of every field group. */
  getfgs() {
    return this.httpClient.get<FG[]>(this.baseUrl + "/getfgs");
  }

  /** Observable of the text fields belonging to one field group. */
  getfg(id: FgId) {
    return this.httpClient.post<Textfield[]>(this.baseUrl + "/getfg", id);
  }
}
<file_sep>/day12springjpa/springrestdemo/springrest/src/main/java/com/greatlearning/springrest/dao/EmployeeDAOImpl.java
package com.greatlearning.springrest.dao;
import java.util.List;
import org.hibernate.*;
import javax.persistence.EntityManager;
import javax.transaction.Transactional;
import org.hibernate.Session;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.greatlearning.springrest.entity.BoxItem;
import com.greatlearning.springrest.entity.CheckBoxList;
import com.greatlearning.springrest.entity.FieldGroup;
import com.greatlearning.springrest.entity.Form;
import com.greatlearning.springrest.entity.Num;
import com.greatlearning.springrest.entity.Password;
import com.greatlearning.springrest.entity.Text;
import org.json.*;
@Repository
public class EmployeeDAOImpl implements EmployeeDAO {
@Autowired
private EntityManager entityManager;
/**
 * Appends a new Text field to the field group identified by fgid and
 * persists the change (FieldGroup cascades ALL to its FTtext children).
 * Removed two leftover debug printlns.
 */
@Override
@Transactional
public void addText(int fgid, String name, int is_required, int max_length, int min_length) {
    Session session = entityManager.unwrap(Session.class);
    FieldGroup group = session.get(FieldGroup.class, (long) fgid);
    Text text = new Text(name, is_required, max_length, min_length);
    // Wire both sides of the bidirectional association before updating.
    text.setFG(group);
    group.getFTtext().add(text);
    session.update(group);
}
/**
 * Creates a CheckBoxList field (with its BoxItem children) and attaches it
 * to the field group identified by fgid.
 * Removed dead commented-out code and the redundant second Session unwrap.
 */
@Override
@Transactional
public void addCheckBoxList(int fgid, String name, int is_required, List<BoxItem> boxlist) {
    Session session = entityManager.unwrap(Session.class);
    FieldGroup group = session.get(FieldGroup.class, (long) fgid);
    CheckBoxList checkBoxList = new CheckBoxList(name, is_required);
    // Wire each item to its parent list (both sides of the association).
    for (BoxItem item : boxlist) {
        item.setCB(checkBoxList);
        checkBoxList.getBoxItems().add(item);
    }
    checkBoxList.setFG(group);
    session.save(checkBoxList);
}
/**
 * Persists a brand-new, empty field group with the given name.
 * Added the Transactional annotation for consistency with every other
 * write method in this DAO -- a save issued outside a transaction is not
 * flushed to the database.
 */
@Override
@Transactional
public void addFieldGroup(String string) {
    FieldGroup fieldGroup = new FieldGroup(string);
    Session session = entityManager.unwrap(Session.class);
    session.save(fieldGroup);
}
/**
 * Serializes the Text fields of one field group as a JSON array string.
 * Each element carries the keys fgid/name/is_required/max_length/min_length.
 * NOTE(review): "fgid" actually holds the Text row's own id and "name"
 * holds its value -- kept as-is because clients parse these exact keys.
 * Removed roughly 75 lines of commented-out dead code, the unused "res"
 * JSONObject it referenced, and a debug println.
 */
@Override
@Transactional
public String getfg(int i) {
    Session session = entityManager.unwrap(Session.class);
    FieldGroup group = session.get(FieldGroup.class, (long) i);
    JSONArray fields = new JSONArray();
    for (Text t : group.getFTtext()) {
        JSONObject obj = new JSONObject();
        try {
            obj.put("fgid", t.getId());
            obj.put("name", t.getValue());
            obj.put("is_required", t.getIs_required());
            obj.put("max_length", t.getMax_length());
            obj.put("min_length", t.getMin_length());
            fields.put(obj);
        } catch (JSONException e) {
            // Skip a field that cannot be serialized; keep the rest.
            e.printStackTrace();
        }
    }
    return fields.toString();
}
/** No-op stub required by the EmployeeDAO interface; not implemented yet. */
@Override
@Transactional
public void addCheckBox(int fgid, String name, int is_required, String value) {
    // TODO Auto-generated method stub
}
/** No-op stub required by the EmployeeDAO interface; not implemented yet. */
@Override
@Transactional
public void addDate(int fgid, String name, int is_required, String value) {
    // TODO Auto-generated method stub
}
/** No-op stub required by the EmployeeDAO interface; not implemented yet. */
@Override
@Transactional
public void addDateTime(int fgid, String name, int is_required, String value) {
    // TODO Auto-generated method stub
}
/** No-op stub required by the EmployeeDAO interface; not implemented yet. */
@Override
@Transactional
public void addEmail(int fgid, String name, int is_required, String value) {
}
/**
 * Appends a new Password field to the field group identified by fgid and
 * persists the change via the cascading update.
 * Removed the stale auto-generated TODO (the method is fully implemented)
 * and two leftover debug printlns.
 */
@Override
@Transactional
public void addPassword(int fgid, String name, int is_required, int max_length, int min_length) {
    Session session = entityManager.unwrap(Session.class);
    FieldGroup group = session.get(FieldGroup.class, (long) fgid);
    Password password = new Password(name, is_required, max_length, min_length);
    // Wire both sides of the bidirectional association before updating.
    password.setFG(group);
    group.getFTPassword().add(password);
    session.update(group);
}
/** No-op stub required by the EmployeeDAO interface; not implemented yet. */
@Override
@Transactional
public void addTextarea(int fgid, String name, int is_required, int max_length, int min_length) {
    // TODO Auto-generated method stub
}
/**
 * Appends a new Num (number) field to the field group identified by fgid
 * and persists the change via the cascading update.
 * Removed the stale auto-generated TODO and a leftover debug println.
 */
@Override
@Transactional
public void addNum(int fgid, String name, int is_required, int max_length, int min_length) {
    Session session = entityManager.unwrap(Session.class);
    FieldGroup group = session.get(FieldGroup.class, (long) fgid);
    Num num = new Num(name, is_required, max_length, min_length);
    // Wire both sides of the bidirectional association before updating.
    num.setFG(group);
    group.getFTNum().add(num);
    session.update(group);
}
/**
 * Builds a Form with the given name that references already-persisted
 * Text, CheckBoxList and Password fields by their ids, then saves it.
 * The Session is now unwrapped once instead of once per loop iteration,
 * and the dead commented-out line was removed.
 */
@Override
@Transactional
public void createform(String string, List<Integer> textl, List<Integer> checkboxlistl, List<Integer> passwordl) {
    Session session = entityManager.unwrap(Session.class);
    Form form = new Form(string);
    for (Integer id : textl) {
        form.getFTtext().add(session.get(Text.class, id.longValue()));
    }
    for (Integer id : checkboxlistl) {
        form.getFTcheckboxlist().add(session.get(CheckBoxList.class, id.longValue()));
    }
    for (Integer id : passwordl) {
        form.getFTPassword().add(session.get(Password.class, id.longValue()));
    }
    session.save(form);
}
@Override
@Transactional
public JSONObject getform(int i) {
Session currentSession1 = entityManager.unwrap(Session.class);
long id = i;
Form fg = currentSession1.get(Form.class, id);
List<Text> FTtext = fg.getFTtext();
System.out.println(FTtext);
JSONObject res = new JSONObject();
JSONArray jsonArray = new JSONArray();
for(Text t: FTtext) {
JSONObject obj = new JSONObject();
try {
obj.put("id", t.getId());
obj.put("value", t.getValue());
obj.put("is_required", t.getIs_required());
obj.put("max_length", t.getMax_length());
obj.put("min_length", t.getMin_length());
jsonArray.put(obj);
} catch (JSONException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
// System.out.println(res);
JSONArray cblarr = new JSONArray();
List<CheckBoxList> cbl = fg.getFTcheckboxlist();
try {
for(CheckBoxList cb: cbl) {
JSONObject cbli = new JSONObject();
List<BoxItem> bi = cb.getBoxItems();
JSONObject valobj = new JSONObject();
for(BoxItem b: bi) {
valobj.put(b.getValue(), b.getText());
}
cbli.put("id", cb.getId());
cbli.put("name", cb.getValue());
cbli.put("is_required", cb.getIs_required());
cbli.put("val", valobj);
cblarr.put(cbli);
}
}
catch(Exception e) {
}
JSONArray numarr = new JSONArray();
List<Num> numl = fg.getFTNum();
for(Num n: numl) {
JSONObject obj = new JSONObject();
try {
obj.put("id", n.getId());
obj.put("value", n.getValue());
obj.put("is_required", n.getIs_required());
obj.put("max_length", n.getMax_length());
obj.put("min_length", n.getMin_length());
numarr.put(obj);
} catch (JSONException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
JSONArray passwordarr = new JSONArray();
List<Password> passl = fg.getFTPassword();
for(Password n: passl) {
JSONObject obj = new JSONObject();
try {
obj.put("id", n.getId());
obj.put("value", n.getValue());
obj.put("is_required", n.getIs_required());
obj.put("max_length", n.getMax_length());
obj.put("min_length", n.getMin_length());
passwordarr.put(obj);
} catch (JSONException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
try {
res.put("text", jsonArray);
res.put("checkboxlist", cblarr);
res.put("number", numarr);
res.put("password", <PASSWORD>);
} catch (JSONException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return res;
}
@Override
public String getfgs() {
	// Lists every FieldGroup as a JSON array string of {id, name} entries.
	// NOTE(review): unlike the other DAO methods this one is not annotated
	// @Transactional -- confirm the unwrapped session is valid outside a
	// transaction before relying on it.
	Session currentSession = entityManager.unwrap(Session.class);
	Query<FieldGroup> theQuery =
			currentSession.createQuery("from com.greatlearning.springrest.entity.FieldGroup");
	List<FieldGroup> items = theQuery.getResultList();
	JSONArray jsonArray = new JSONArray();
	try {
		for (FieldGroup fieldGroup : items) {
			JSONObject obj = new JSONObject();
			try {
				obj.put("id", fieldGroup.getId());
				obj.put("name", fieldGroup.getName());
				jsonArray.put(obj);
			} catch (Exception e) {
				// FIX: was an empty catch; log so a failed row is visible
				// while the remaining groups are still emitted.
				e.printStackTrace();
			}
		}
	} catch (Exception e) {
		// FIX: was an empty catch; log and fall through to return whatever
		// was serialized so far (preserves the original best-effort shape).
		e.printStackTrace();
	}
	return jsonArray.toString();
}
}<file_sep>/day12springjpa/springrestdemo/springrest/src/main/java/com/greatlearning/Repsitory/TextRepository.java
//package com.greatlearning.Repsitory;
//import org.springframework.data.repository.CrudRepository;
//
//import com.greatlearning.springrest.entity.Text;
//public interface TextRepository extends CrudRepository<Text, Long> {
//
//}
<file_sep>/formapp/src/app/models/Text.ts
/**
 * Model for a text field definition sent to the backend.
 *
 * Constructor parameter properties declare and assign all five fields,
 * matching the original explicit-assignment constructor exactly.
 */
export class Textfield {
  constructor(
    public fgid: number,
    public name: string,
    public is_required: number,
    public max_length: number,
    public min_length: number,
  ) {}
}
//package com.greatlearning.springrest.entity;
//
//import javax.persistence.Column;
//import javax.persistence.Entity;
//import javax.persistence.GeneratedValue;
//import javax.persistence.Id;
//import javax.persistence.JoinColumn;
//import javax.persistence.ManyToOne;
//import javax.persistence.Table;
//
//@Entity
//@Table(name = "Textarea")
//public class Textarea {
//
// @Id
// @GeneratedValue(generator = "increment")
// @Column(name = "id")
// private long id;
// private String text;
// private int max_length;
// private int min_length;
// public long getId() {
// return id;
// }
// public void setId(long id) {
// this.id = id;
// }
// private int is_required;
// @ManyToOne
// @JoinColumn(name="fg_id")
// private FieldGroup FG;
//
// @ManyToOne
// @JoinColumn(name="formid")
// private Form FID;
//
// public Textarea() {
//
// }
//
// public Textarea(String text, int is_required, int max_length, int min_length) {
// this.text = text;
// this.is_required = is_required;
// this.max_length = max_length;
// this.min_length = min_length;
// }
// public String getText() {
// return text;
// }
// public void setText(String text) {
// this.text = text;
// }
// public int getIs_required() {
// return is_required;
// }
// public void setIs_required(int is_required) {
// this.is_required = is_required;
// }
// public int getMax_length() {
// return max_length;
// }
// public void setMax_length(int max_length) {
// this.max_length = max_length;
// }
// public int getMin_length() {
// return min_length;
// }
// public void setMin_length(int min_length) {
// this.min_length = min_length;
// }
// public FieldGroup getFG() {
// return FG;
// }
// public void setFG(FieldGroup fG) {
// FG = fG;
// }
//}
<file_sep>/formapp/src/app/components/rightbody/rightbody.component.ts
import { Component, OnInit } from '@angular/core';
import { Textfield } from 'src/app/models/Text';
import {ListItem} from 'src/app/models/ListItem';
import {FG} from 'src/app/models/FG';
import { FgId } from 'src/app/models/FgId';
import { Service } from 'src/app/service/service';
@Component({
  selector: 'app-rightbody',
  templateUrl: './rightbody.component.html',
  styleUrls: ['./rightbody.component.css']
})
export class RightbodyComponent implements OnInit {
  // Template-bound state. Names must stay as-is: the HTML template binds
  // to every public member of this class.
  sfg: boolean = false;
  showlist: boolean = false;
  fgs: FG[] = [];
  listItems: ListItem[] = [];
  liststatus: boolean = false;
  text: Textfield = new Textfield(1, "", 1, 0, 0);
  texts: Textfield[] = [];
  id: FgId = new FgId(0);
  fieldtitle: string = "";

  constructor(private service: Service) {}

  /** Seeds one empty list item and loads the available field groups. */
  ngOnInit(): void {
    this.listItems.push(new ListItem("", ""));
    this.service.getfgs().subscribe(
      (response) => { this.fgs = response; },
      (error) => { console.log(error); }
    );
  }

  /** Flips the is_required flag between 0 and 1. */
  onCheckboxChange(): void {
    this.text.is_required = (this.text.is_required + 1) % 2;
  }

  /** Submits the current text field definition to the backend. */
  addtext(): void {
    this.service.postTEXTf(this.text);
  }

  /** Resets the editable text field inputs to their blank state. */
  cleartext(): void {
    this.text.name = "";
    this.text.max_length = 0;
    this.text.min_length = 0;
  }

  /**
   * Shows the list-item editor only for the checkbox-list option.
   * NOTE(review): "chechboxlist" looks misspelled but must match the
   * option value emitted by the template -- confirm before renaming.
   */
  onOptionsSelected(val: string) {
    this.liststatus = (val == "chechboxlist");
  }

  addListItem() {
    this.listItems.push(new ListItem("", ""));
  }

  removeListItem() {
    this.listItems.pop();
  }

  /** Toggles the field-group panel. */
  showfgs() {
    this.sfg = !this.sfg;
  }

  closefgs() {
    this.showlist = false;
  }

  /** Loads the fields of the selected group and opens the list view. */
  getfg(id: number, name: string) {
    this.fieldtitle = name;
    this.showlist = true;
    this.id.fgid = id;
    this.service.getfg(this.id).subscribe(
      (response) => { this.texts = response; },
      (error) => { console.log(error); }
    );
  }
}
<file_sep>/formapp/src/app/models/FgId.ts
/** Wrapper carrying a single field-group id, used as a request payload. */
export class FgId {
  fgid: number;

  constructor(id: number) {
    this.fgid = id;
  }
}
package com.greatlearning.springrest.service;
import java.util.List;
/**
 * DAO/service contract for persisting form field definitions.
 */
public interface EmployeeService {
	/**
	 * Persists a new text field definition attached to the field group
	 * with the given id.
	 *
	 * @param fgid        id of the owning field group
	 * @param name        display name of the text field
	 * @param is_required 1 if the field is mandatory, 0 otherwise
	 *                    (presumably -- confirm against callers)
	 * @param max_length  maximum allowed input length
	 * @param min_length  minimum allowed input length
	 */
	void addText(int fgid, String name, int is_required, int max_length, int min_length);
}
| 856f424732716d80c2a3261f65fddd54ac5d393f | [
"Markdown",
"Java",
"TypeScript"
] | 15 | Markdown | accolite/AU2021-PDFFormGen | aa0dcdad381b8cecd4d47b9f18921f664bdf6aaa | b6f8cf35fadfa3d6dc89117e93da2382267fc37a |
refs/heads/master | <repo_name>Benjamin-eecs/Python-Tello-Control<file_sep>/README.md
# Python-Tello-Control
Controlling the Tello drone with Python
Based on the works from (https://bitbucket.org/PingguSoft/pytello)
## Requirements
For running the drone with video stream [MPlayer](http://www.mplayerhq.hu/design7/news.html) is needed.
## Running the program with video output
```
python runWithVideostream.py
```
Starts the programm and directly outputs video stream to MPlayer.
You can then simply control the drone and view the video file.
## Running the drone with face tracking
```
python runWithFaceTracking.py
```
MPlayer is still used for generating the video, but every frame is saved as a picture
to the local drive. Image analysis then works on the PC, finds the first face and
orders the drone to follow it. Currently only turning left/right and flying up/down is
supported for safety reasons.<file_sep>/tello2.lua
-- License.
--
-- Copyright 2018 PingguSoft <<EMAIL>>
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation, either version 3 of the License, or
-- (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
--
-- load the udp.port table
udp_table = DissectorTable.get("udp.port")
dissector = udp_table:get_dissector(8889)
if dissector ~= nil then
udp_table:remove(8889, dissector)
message("8889 dissector removed")
end
dissector = udp_table:get_dissector(6038)
if dissector ~= nil then
udp_table:remove(6038, dissector)
message("6038 dissector removed")
end
-- tello command
-- ts = os.time(os.date("!*t"))
proto_name = "TELLO_CMD"
local tello_cmd = Proto(proto_name, proto_name)
local cmd_names = {
[17] = "TELLO_CMD_SSID",
[18] = "TELLO_CMD_SET_SSID",
[19] = "TELLO_CMD_SSID_PASS",
[20] = "TELLO_CMD_SET_SSID_PASS",
[21] = "TELLO_CMD_REGION",
[22] = "TELLO_CMD_SET_REGION",
[37] = "TELLO_CMD_VIDEO_REQ_SPS_PPS",
[48] = "TELLO_CMD_TAKE_PICTURE",
[49] = "TELLO_CMD_SWITCH_PICTURE_VIDEO",
[50] = "TELLO_CMD_START_RECORDING",
[52] = "TELLO_CMD_EV",
[70] = "TELLO_CMD_DATE_TIME",
[80] = "TELLO_CMD_STICK",
[4176] = "TELLO_CMD_LOG_HEADER_WRITE",
[4177] = "TELLO_CMD_LOG_DATA_WRITE",
[4178] = "TELLO_CMD_LOG_CONFIGURATION",
[26] = "TELLO_CMD_WIFI_SIGNAL",
[40] = "TELLO_CMD_RATE",
[53] = "TELLO_CMD_LIGHT_STRENGTH",
[69] = "TELLO_CMD_VERSION_STRING",
[71] = "TELLO_CMD_ACTIVATION_TIME",
[73] = "TELLO_CMD_LOADER_VERSION",
[86] = "TELLO_CMD_STATUS",
[4182] = "TELLO_CMD_ALT_LIMIT",
[4183] = "TELLO_CMD_LOW_BATT_PARAM",
[4185] = "TELLO_CMD_ATT_ANGLE",
[55] = "TELLO_CMD_JPEG_QUALITY",
[84] = "TELLO_CMD_TAKEOFF",
[85] = "TELLO_CMD_LANDING",
[88] = "TELLO_CMD_SET_HEIHT",
[92] = "TELLO_CMD_FLIP",
[93] = "TELLO_CMD_THROW_FLY",
[94] = "TELLO_CMD_PALM_LANDING",
[4180] = "TELLO_CMD_PLANE_CALIBRATION",
[4181] = "TELLO_CMD_LOW_BATTERY_THRESHOLD",
[4184] = "TELLO_CMD_SET_ATTITUDE_ANGLE",
[67] = "TELLO_CMD_ERROR1",
[68] = "TELLO_CMD_ERROR2",
[98] = "TELLO_CMD_FILE_SIZE",
[99] = "TELLO_CMD_FILE_DATA",
[100 ] = "TELLO_CMD_FILE_COMPLETE",
[90] = "TELLO_CMD_HANDLE_IMU_ANGLE",
[32] = "TELLO_CMD_SET_VIDEO_BIT_RATE",
[33] = "TELLO_CMD_SET_DYN_ADJ_RATE",
[36] = "TELLO_CMD_EIS_SETTING",
[128 ] = "TELLO_CMD_SMART_VIDEO_START",
[129 ] = "TELLO_CMD_SMART_VIDEO_STATUS",
[4179] = "TELLO_CMD_BOUNCE",
}
local cmd_fields =
{
pf_sop = ProtoField.uint8("tello.sop", "SOP ", base.HEX, nil),
pf_size = ProtoField.uint16("tello.sz", "SIZE "),
pf_crc8 = ProtoField.uint8("tello.crc8", "CRC8 ", base.HEX, nil),
pf_pacType = ProtoField.uint8("tello.pac", "PACT ", base.HEX, nil),
pf_dir = ProtoField.string("tello.dir", "DIR "),
pf_cmdID = ProtoField.uint16("tello.cmd", "CMD ", base.DEC, cmd_names),
pf_seqID = ProtoField.uint16("tello.seq", "SEQ "),
pf_dataSize= ProtoField.uint16("tello.datasz", "DATASZ"),
pf_data = ProtoField.bytes("tello.data", "DATA ", base.SPACE, nil),
pf_crc16 = ProtoField.uint16("tello.crc16", "CRC16 ", base.HEX, nil),
}
tello_cmd.fields = cmd_fields
function tello_cmd.dissector(tvb, pinfo, root)
pinfo.cols.protocol = "TELLO_CMD"
local i = 0
local size = 0
local stick = 0
local pktlen = tvb:reported_length_remaining()
local tree = root:add(tello_cmd, tvb:range(0, pktlen))
local data_tree;
sop = tvb(i,1):le_uint()
if sop == 0xCC then
tree:add(cmd_fields.pf_sop, tvb(i,1))
i = i + 1
tree:add_le(cmd_fields.pf_size, tvb(i,2))
i = i + 2
tree:add(cmd_fields.pf_crc8, tvb(i,1))
i = i + 1
tree:add(cmd_fields.pf_pacType, tvb(i,1))
pact = tvb(i,1):le_uint()
i = i + 1
if bit.band(pact, 0x80) == 0x80 then
dest = " <- FROM DRONE"
from_drone = 1
else
dest = " -> TO DRONE"
from_drone = 0
end
tree:add(cmd_fields.pf_dir, dest)
cmd = tvb(i,2):le_uint()
tree:add_le(cmd_fields.pf_cmdID, tvb(i,2))
i = i + 2
tree:add_le(cmd_fields.pf_seqID, tvb(i,2))
i = i + 2
size = tvb:len() - i - 2
tree:add_le(cmd_fields.pf_dataSize, size)
if size ~= 0 then
data_tree = tree:add(cmd_fields.pf_data, tvb(i,size))
ii = i
-- stick command
if cmd == 80 then
stick = tvb(i,6):le_uint64()
axis1 = stick:band(0x7ff):lower()
stick = stick:rshift(11)
axis2 = stick:band(0x7ff):lower()
stick = stick:rshift(11)
axis3 = stick:band(0x7ff):lower()
stick = stick:rshift(11)
axis4 = stick:band(0x7ff):lower()
stick = stick:rshift(11)
axis5 = stick:band(0x7ff):lower()
stick_str = string.format("roll:%4d, pitch:%4d, thr:%4d, yaw:%4d, fastmode:%d", axis1, axis2, axis3, axis4, axis5)
data_tree:add(tvb(i,size), "STICK - " .. stick_str)
elseif cmd == 98 and from_drone == 1 then
fileType = tvb(ii,1):le_uint()
ii = ii + 1
fileSize = tvb(ii,4):le_uint()
ii = ii + 4
fileID = tvb(ii,2):le_uint()
ii = ii + 2
file_str = string.format("fileID:%d, fileType:%d, fileSize:%d", fileID, fileType, fileSize)
data_tree:add(tvb(i,size), "FILE INFO - " .. file_str)
elseif cmd == 99 then
if from_drone == 1 then
fileID = tvb(ii,2):le_uint()
ii = ii + 2
pieceNum = tvb(ii,4):le_uint()
ii = ii + 4
seqNum = tvb(ii,4):le_uint()
ii = ii + 4
length = tvb(ii,2):le_uint()
file_str = string.format("fileID:%d, pieceNum:%d, seqNum:%d, len:%d", fileID, pieceNum, seqNum, length)
data_tree:add(tvb(i,size), "FILE DATA - " .. file_str)
else
ii = ii + 1
fileID = tvb(ii,2):le_uint()
ii = ii + 2
pieceNum = tvb(ii,4):le_uint()
ii = ii + 4
file_str = string.format("fileID:%d, pieceNum:%d", fileID, pieceNum)
data_tree:add(tvb(i,size), "FILE ACK - " .. file_str)
end
elseif cmd == 128 then
if from_drone == 0 then
code = tvb(ii, 1):le_uint()
start = bit.band(code, 0x01)
code = bit.rshift(code, 2)
mode = bit.band(code, 0x07)
data_tree:add(tvb(i,size), "SMART_REC_CMD - " .. string.format("mode:%d, start:%d", mode, start))
end
elseif cmd == 129 then
if from_drone == 1 then
code = tvb(ii, 1):le_uint()
dummy = bit.band(code, 0x07)
code = bit.rshift(code, 3)
start = bit.band(code, 0x03)
code = bit.rshift(code, 2)
mode = bit.band(code, 0x07)
data_tree:add(tvb(i,size), "SMART_REC_ACK - " .. string.format("dummy:%d, mode:%d, start:%d", dummy, mode, start))
end
end
i = i + size
end
tree:add_le(cmd_fields.pf_crc16, tvb(i,2))
i = i + 2
end
end
-- tello video
proto_name = "TELLO_VIDEO"
local tello_video = Proto(proto_name, proto_name)
function tello_video.dissector(tvb, pinfo, root)
pinfo.cols.protocol = "TELLO_VIDEO"
local i = 0
local size = 0;
local tree = root:add(tello_video, tvb(), "TELLO_VIDEO : " .. tvb:len())
tree:add(tvb(i,1), "SEQ: " .. string.format("%d", tvb(i,1):le_uint()))
i = i + 1
subseq = tvb(i,1):le_uint()
if bit.band(subseq, 0x80) == 0x80 then
tree:add(tvb(i,1), "SUB: " .. string.format("%d", bit.band(subseq, 0x7f)) .. " Last")
else
tree:add(tvb(i,1), "SUB: " .. string.format("%d", bit.band(subseq, 0x7f)))
end
i = i + 1
mark = tvb(i,4):le_int();
i = i + 4
if mark == 0x01000000 then
nal_type = bit.band(tvb(i,1):le_uint(), 0x1f)
tree:add(tvb(i,1), "NAL TYPE: " .. string.format("%d", nal_type))
end
end
-- register our protocol to handle
udp_table:add(8889, tello_cmd)
udp_table:add(6038, tello_video) | b191495d984244a141130eeec4f140efbe2cf94f | [
"Markdown",
"Lua"
] | 2 | Markdown | Benjamin-eecs/Python-Tello-Control | 050ebb2c6e07b64c7ef50aceccab5b822611bb75 | 0f56da5a66feccb72c0b9e82456f92c8ed55d341 |
refs/heads/master | <file_sep>import BrowserRouter from 'react-router-dom/BrowserRouter';
import React from 'react';
import { hydrate } from 'react-dom';
import { ensureReady, After } from '@jaredpalmer/after';
import { Provider } from 'react-redux';
import configureStore from '../common/store/configureStore';
import routes from '../common/routes';
// hydrate(
// <BrowserRouter>
// <App />
// </BrowserRouter>,
// document.getElementById('root')
// );
const store = configureStore(window.__PRELOADED_STATE__ || {});
ensureReady(routes).then(data =>
hydrate(
<Provider store={store}>
<BrowserRouter>
<After data={data} routes={routes} />
</BrowserRouter>
</Provider>,
document.getElementById('root')
)
);
if (module.hot) {
module.hot.accept();
}
<file_sep>import React from 'react';
import { Link } from 'react-router-dom';
import logo from './react.svg';
import './Home.css';
class Home extends React.Component {
static async getInitialProps({ req, res, match}) {
// console.log('store ', store)
return { };
}
render() {
return (
<div className="Home">
<div className="Home-header">
<img src={logo} className="Home-logo" alt="logo" />
<h2>Welcome to Razzle</h2>
</div>
<ul className="Home-resources">
<li>
<Link to={`/about`}>About</Link>
</li>
<li>
<Link to={`/detail/5`}>Detail</Link>
</li>
<li>
<a href="https://github.com/jaredpalmer/razzle">Docs</a>
</li>
<li>
<a href="https://github.com/jaredpalmer/razzle/issues">Issues</a>
</li>
<li>
<a href="https://palmer.chat">Community Slack</a>
</li>
</ul>
</div>
);
}
}
export default Home;
<file_sep>import React from 'react'
import { asyncComponent } from '@jaredpalmer/after';
// Internally these will become:
// <Route path={path} exact={exact} render={props => <component {...props} data={data} />} />
const routes = [
{
path: '/',
exact: true,
component: asyncComponent({
loader: () => import('../client/Home'), // required
Placeholder: () => <div>...LOADING...</div>, // this is optional, just returns null by default
}),
},
{
path: '/about',
component: asyncComponent({
loader: () => import('../client/About'), // required
Placeholder: () => <div>...LOADING...</div>, // this is optional, just returns null by default
}),
},
{
path: '/detail/:id',
component: asyncComponent({
loader: () => import('../client/Detail'), // required
Placeholder: () => <div>...LOADING...</div>, // this is optional, just returns null by default
}),
},
];
export default routes; | 4aa2e5cbb29d361afc4f3b920d14d4bbeed05555 | [
"JavaScript"
] | 3 | JavaScript | fadiquader/try_after.js | dac444221dffbf11834dd932453e0df2baad662d | a91fe81d444e02644e53edbf7c38f2645b7b02bd |
refs/heads/master | <repo_name>kupavcevdenis/smeta3d<file_sep>/source/render/engine/Engine.cpp
#include "Engine.h"
#include "gl_render_system/GLContext.h"
namespace smeta3d
{
/////////////////////////////////////////////////////////////////////
///
SP_IEngine GetSingltonEngine()
{
static SP_IEngine ptrEngine = nullptr;
if (!ptrEngine)
ptrEngine.reset(new CEngine());
return ptrEngine;
}
/////////////////////////////////////////////////////////////////////
///
CEngine::CEngine()
{
}
/////////////////////////////////////////////////////////////////////
///
CEngine::~CEngine()
{
}
/////////////////////////////////////////////////////////////////////
///
bool CEngine::Init(const HWND& HWnd)
{
m_ptrContext = std::make_shared<GLContext>(HWnd);
return true;
}
/////////////////////////////////////////////////////////////////////
///
bool CEngine::IsInit() const
{
return true;
}
/////////////////////////////////////////////////////////////////////
///
void CEngine::DeInit()
{
m_ptrContext = nullptr;
}
/////////////////////////////////////////////////////////////////////
///
void CEngine::Update(float fTimePerSec)
{
m_ptrContext->Clear();
}
/////////////////////////////////////////////////////////////////////
///
void CEngine::BeginRender()
{
}
/////////////////////////////////////////////////////////////////////
///
void CEngine::Render()
{
}
/////////////////////////////////////////////////////////////////////
///
void CEngine::EndRender()
{
m_ptrContext->Swap();
}
/////////////////////////////////////////////////////////////////////
///
void CEngine::Resize(int w, int h)
{
m_ptrContext->Resize(w, h);
}
}<file_sep>/include/kernel/ifc/ICore.h
#ifndef ICORE_H
#define ICORE_H
#include <memory>
#include "render/engine/ifc/IEngine.h"
namespace smeta3d
{
class ICore
{
public:
~ICore() {};
virtual bool Init(const HWND& HWnd) = 0;
virtual bool IsInit() const = 0;
virtual void DeInit() = 0;
virtual const smeta3d::SP_IEngine& GetEngine() const = 0;
virtual void Simulate(float fTimePerSec) = 0;
virtual void Resize(int w, int h) = 0;
};
using SP_ICore = std::shared_ptr<ICore>;
SP_ICore __declspec(dllexport) GetSingltonCore();
}
#define DECL_PUBLIC_ICORE \
bool Init(const HWND& HWnd); \
bool IsInit() const; \
void DeInit(); \
const smeta3d::SP_IEngine& GetEngine() const; \
void Simulate(float fTimePerSec); \
void Resize(int w, int h);
#endif<file_sep>/source/render/GLExt.cpp
#include "GLExt.h"
/******************************************************************************\
*
* Extensions
*
\******************************************************************************/
// OpenGL 1.2
PFNGLBLENDCOLORPROC glBlendColor = NULL;
PFNGLBLENDEQUATIONPROC glBlendEquation = NULL;
PFNGLDRAWRANGEELEMENTSPROC glDrawRangeElements = NULL;
PFNGLTEXIMAGE3DPROC glTexImage3D = NULL;
PFNGLTEXSUBIMAGE3DPROC glTexSubImage3D = NULL;
PFNGLCOPYTEXSUBIMAGE3DPROC glCopyTexSubImage3D = NULL;
// OpenGL 1.3
PFNGLACTIVETEXTUREPROC glActiveTexture = NULL;
PFNGLSAMPLECOVERAGEPROC glSampleCoverage = NULL;
PFNGLCOMPRESSEDTEXIMAGE3DPROC glCompressedTexImage3D = NULL;
PFNGLCOMPRESSEDTEXIMAGE2DPROC glCompressedTexImage2D = NULL;
PFNGLCOMPRESSEDTEXIMAGE1DPROC glCompressedTexImage1D = NULL;
PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC glCompressedTexSubImage3D = NULL;
PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glCompressedTexSubImage2D = NULL;
PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glCompressedTexSubImage1D = NULL;
PFNGLGETCOMPRESSEDTEXIMAGEPROC glGetCompressedTexImage = NULL;
// OpenGL 1.4
PFNGLBLENDFUNCSEPARATEPROC glBlendFuncSeparate = NULL;
PFNGLMULTIDRAWARRAYSPROC glMultiDrawArrays = NULL;
PFNGLMULTIDRAWELEMENTSPROC glMultiDrawElements = NULL;
PFNGLPOINTPARAMETERFPROC glPointParameterf = NULL;
PFNGLPOINTPARAMETERFVPROC glPointParameterfv = NULL;
PFNGLPOINTPARAMETERIPROC glPointParameteri = NULL;
PFNGLPOINTPARAMETERIVPROC glPointParameteriv = NULL;
// OpenGL 1.5
PFNGLGENQUERIESPROC glGenQueries = NULL;
PFNGLDELETEQUERIESPROC glDeleteQueries = NULL;
PFNGLISQUERYPROC glIsQuery = NULL;
PFNGLBEGINQUERYPROC glBeginQuery = NULL;
PFNGLENDQUERYPROC glEndQuery = NULL;
PFNGLGETQUERYIVPROC glGetQueryiv = NULL;
PFNGLGETQUERYOBJECTIVPROC glGetQueryObjectiv = NULL;
PFNGLGETQUERYOBJECTUIVPROC glGetQueryObjectuiv = NULL;
PFNGLBINDBUFFERPROC glBindBuffer = NULL;
PFNGLDELETEBUFFERSPROC glDeleteBuffers = NULL;
PFNGLGENBUFFERSPROC glGenBuffers = NULL;
PFNGLISBUFFERPROC glIsBuffer = NULL;
PFNGLBUFFERDATAPROC glBufferData = NULL;
PFNGLBUFFERSUBDATAPROC glBufferSubData = NULL;
PFNGLGETBUFFERSUBDATAPROC glGetBufferSubData = NULL;
PFNGLMAPBUFFERPROC glMapBuffer = NULL;
PFNGLUNMAPBUFFERPROC glUnmapBuffer = NULL;
PFNGLGETBUFFERPARAMETERIVPROC glGetBufferParameteriv = NULL;
PFNGLGETBUFFERPOINTERVPROC glGetBufferPointerv = NULL;
// OpenGL 2.0
PFNGLBLENDEQUATIONSEPARATEPROC glBlendEquationSeparate = NULL;
PFNGLDRAWBUFFERSPROC glDrawBuffers = NULL;
PFNGLSTENCILOPSEPARATEPROC glStencilOpSeparate = NULL;
PFNGLSTENCILFUNCSEPARATEPROC glStencilFuncSeparate = NULL;
PFNGLSTENCILMASKSEPARATEPROC glStencilMaskSeparate = NULL;
PFNGLATTACHSHADERPROC glAttachShader = NULL;
PFNGLBINDATTRIBLOCATIONPROC glBindAttribLocation = NULL;
PFNGLCOMPILESHADERPROC glCompileShader = NULL;
PFNGLCREATEPROGRAMPROC glCreateProgram = NULL;
PFNGLCREATESHADERPROC glCreateShader = NULL;
PFNGLDELETEPROGRAMPROC glDeleteProgram = NULL;
PFNGLDELETESHADERPROC glDeleteShader = NULL;
PFNGLDETACHSHADERPROC glDetachShader = NULL;
PFNGLDISABLEVERTEXATTRIBARRAYPROC glDisableVertexAttribArray = NULL;
PFNGLENABLEVERTEXATTRIBARRAYPROC glEnableVertexAttribArray = NULL;
PFNGLGETACTIVEATTRIBPROC glGetActiveAttrib = NULL;
PFNGLGETACTIVEUNIFORMPROC glGetActiveUniform = NULL;
PFNGLGETATTACHEDSHADERSPROC glGetAttachedShaders = NULL;
PFNGLGETATTRIBLOCATIONPROC glGetAttribLocation = NULL;
PFNGLGETPROGRAMIVPROC glGetProgramiv = NULL;
PFNGLGETPROGRAMINFOLOGPROC glGetProgramInfoLog = NULL;
PFNGLGETSHADERIVPROC glGetShaderiv = NULL;
PFNGLGETSHADERINFOLOGPROC glGetShaderInfoLog = NULL;
PFNGLGETSHADERSOURCEPROC glGetShaderSource = NULL;
PFNGLGETUNIFORMLOCATIONPROC glGetUniformLocation = NULL;
PFNGLGETUNIFORMFVPROC glGetUniformfv = NULL;
PFNGLGETUNIFORMIVPROC glGetUniformiv = NULL;
PFNGLGETVERTEXATTRIBDVPROC glGetVertexAttribdv = NULL;
PFNGLGETVERTEXATTRIBFVPROC glGetVertexAttribfv = NULL;
PFNGLGETVERTEXATTRIBIVPROC glGetVertexAttribiv = NULL;
PFNGLGETVERTEXATTRIBPOINTERVPROC glGetVertexAttribPointerv = NULL;
PFNGLISPROGRAMPROC glIsProgram = NULL;
PFNGLISSHADERPROC glIsShader = NULL;
PFNGLLINKPROGRAMPROC glLinkProgram = NULL;
PFNGLSHADERSOURCEPROC glShaderSource = NULL;
PFNGLUSEPROGRAMPROC glUseProgram = NULL;
PFNGLUNIFORM1FPROC glUniform1f = NULL;
PFNGLUNIFORM2FPROC glUniform2f = NULL;
PFNGLUNIFORM3FPROC glUniform3f = NULL;
PFNGLUNIFORM4FPROC glUniform4f = NULL;
PFNGLUNIFORM1IPROC glUniform1i = NULL;
PFNGLUNIFORM2IPROC glUniform2i = NULL;
PFNGLUNIFORM3IPROC glUniform3i = NULL;
PFNGLUNIFORM4IPROC glUniform4i = NULL;
PFNGLUNIFORM1FVPROC glUniform1fv = NULL;
PFNGLUNIFORM2FVPROC glUniform2fv = NULL;
PFNGLUNIFORM3FVPROC glUniform3fv = NULL;
PFNGLUNIFORM4FVPROC glUniform4fv = NULL;
PFNGLUNIFORM1IVPROC glUniform1iv = NULL;
PFNGLUNIFORM2IVPROC glUniform2iv = NULL;
PFNGLUNIFORM3IVPROC glUniform3iv = NULL;
PFNGLUNIFORM4IVPROC glUniform4iv = NULL;
PFNGLUNIFORMMATRIX2FVPROC glUniformMatrix2fv = NULL;
PFNGLUNIFORMMATRIX3FVPROC glUniformMatrix3fv = NULL;
PFNGLUNIFORMMATRIX4FVPROC glUniformMatrix4fv = NULL;
PFNGLVALIDATEPROGRAMPROC glValidateProgram = NULL;
PFNGLVERTEXATTRIB1DPROC glVertexAttrib1d = NULL;
PFNGLVERTEXATTRIB1DVPROC glVertexAttrib1dv = NULL;
PFNGLVERTEXATTRIB1FPROC glVertexAttrib1f = NULL;
PFNGLVERTEXATTRIB1FVPROC glVertexAttrib1fv = NULL;
PFNGLVERTEXATTRIB1SPROC glVertexAttrib1s = NULL;
PFNGLVERTEXATTRIB1SVPROC glVertexAttrib1sv = NULL;
PFNGLVERTEXATTRIB2DPROC glVertexAttrib2d = NULL;
PFNGLVERTEXATTRIB2DVPROC glVertexAttrib2dv = NULL;
PFNGLVERTEXATTRIB2FPROC glVertexAttrib2f = NULL;
PFNGLVERTEXATTRIB2FVPROC glVertexAttrib2fv = NULL;
PFNGLVERTEXATTRIB2SPROC glVertexAttrib2s = NULL;
PFNGLVERTEXATTRIB2SVPROC glVertexAttrib2sv = NULL;
PFNGLVERTEXATTRIB3DPROC glVertexAttrib3d = NULL;
PFNGLVERTEXATTRIB3DVPROC glVertexAttrib3dv = NULL;
PFNGLVERTEXATTRIB3FPROC glVertexAttrib3f = NULL;
PFNGLVERTEXATTRIB3FVPROC glVertexAttrib3fv = NULL;
PFNGLVERTEXATTRIB3SPROC glVertexAttrib3s = NULL;
PFNGLVERTEXATTRIB3SVPROC glVertexAttrib3sv = NULL;
PFNGLVERTEXATTRIB4NBVPROC glVertexAttrib4Nbv = NULL;
PFNGLVERTEXATTRIB4NIVPROC glVertexAttrib4Niv = NULL;
PFNGLVERTEXATTRIB4NSVPROC glVertexAttrib4Nsv = NULL;
PFNGLVERTEXATTRIB4NUBPROC glVertexAttrib4Nub = NULL;
PFNGLVERTEXATTRIB4NUBVPROC glVertexAttrib4Nubv = NULL;
PFNGLVERTEXATTRIB4NUIVPROC glVertexAttrib4Nuiv = NULL;
PFNGLVERTEXATTRIB4NUSVPROC glVertexAttrib4Nusv = NULL;
PFNGLVERTEXATTRIB4BVPROC glVertexAttrib4bv = NULL;
PFNGLVERTEXATTRIB4DPROC glVertexAttrib4d = NULL;
PFNGLVERTEXATTRIB4DVPROC glVertexAttrib4dv = NULL;
PFNGLVERTEXATTRIB4FPROC glVertexAttrib4f = NULL;
PFNGLVERTEXATTRIB4FVPROC glVertexAttrib4fv = NULL;
PFNGLVERTEXATTRIB4IVPROC glVertexAttrib4iv = NULL;
PFNGLVERTEXATTRIB4SPROC glVertexAttrib4s = NULL;
PFNGLVERTEXATTRIB4SVPROC glVertexAttrib4sv = NULL;
PFNGLVERTEXATTRIB4UBVPROC glVertexAttrib4ubv = NULL;
PFNGLVERTEXATTRIB4UIVPROC glVertexAttrib4uiv = NULL;
PFNGLVERTEXATTRIB4USVPROC glVertexAttrib4usv = NULL;
PFNGLVERTEXATTRIBPOINTERPROC glVertexAttribPointer = NULL;
// OpenGL 2.1
PFNGLUNIFORMMATRIX2X3FVPROC glUniformMatrix2x3fv = NULL;
PFNGLUNIFORMMATRIX3X2FVPROC glUniformMatrix3x2fv = NULL;
PFNGLUNIFORMMATRIX2X4FVPROC glUniformMatrix2x4fv = NULL;
PFNGLUNIFORMMATRIX4X2FVPROC glUniformMatrix4x2fv = NULL;
PFNGLUNIFORMMATRIX3X4FVPROC glUniformMatrix3x4fv = NULL;
PFNGLUNIFORMMATRIX4X3FVPROC glUniformMatrix4x3fv = NULL;
// OpenGL 3.0
PFNGLCOLORMASKIPROC glColorMaski = NULL;
PFNGLGETBOOLEANI_VPROC glGetBooleani_v = NULL;
PFNGLGETINTEGERI_VPROC glGetIntegeri_v = NULL;
PFNGLENABLEIPROC glEnablei = NULL;
PFNGLDISABLEIPROC glDisablei = NULL;
PFNGLISENABLEDIPROC glIsEnabledi = NULL;
PFNGLBEGINTRANSFORMFEEDBACKPROC glBeginTransformFeedback = NULL;
PFNGLENDTRANSFORMFEEDBACKPROC glEndTransformFeedback = NULL;
PFNGLBINDBUFFERRANGEPROC glBindBufferRange = NULL;
PFNGLBINDBUFFERBASEPROC glBindBufferBase = NULL;
PFNGLTRANSFORMFEEDBACKVARYINGSPROC glTransformFeedbackVaryings = NULL;
PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glGetTransformFeedbackVarying = NULL;
PFNGLCLAMPCOLORPROC glClampColor = NULL;
PFNGLBEGINCONDITIONALRENDERPROC glBeginConditionalRender = NULL;
PFNGLENDCONDITIONALRENDERPROC glEndConditionalRender = NULL;
PFNGLVERTEXATTRIBIPOINTERPROC glVertexAttribIPointer = NULL;
PFNGLGETVERTEXATTRIBIIVPROC glGetVertexAttribIiv = NULL;
PFNGLGETVERTEXATTRIBIUIVPROC glGetVertexAttribIuiv = NULL;
PFNGLVERTEXATTRIBI1IPROC glVertexAttribI1i = NULL;
PFNGLVERTEXATTRIBI2IPROC glVertexAttribI2i = NULL;
PFNGLVERTEXATTRIBI3IPROC glVertexAttribI3i = NULL;
PFNGLVERTEXATTRIBI4IPROC glVertexAttribI4i = NULL;
PFNGLVERTEXATTRIBI1UIPROC glVertexAttribI1ui = NULL;
PFNGLVERTEXATTRIBI2UIPROC glVertexAttribI2ui = NULL;
PFNGLVERTEXATTRIBI3UIPROC glVertexAttribI3ui = NULL;
PFNGLVERTEXATTRIBI4UIPROC glVertexAttribI4ui = NULL;
PFNGLVERTEXATTRIBI1IVPROC glVertexAttribI1iv = NULL;
PFNGLVERTEXATTRIBI2IVPROC glVertexAttribI2iv = NULL;
PFNGLVERTEXATTRIBI3IVPROC glVertexAttribI3iv = NULL;
PFNGLVERTEXATTRIBI4IVPROC glVertexAttribI4iv = NULL;
PFNGLVERTEXATTRIBI1UIVPROC glVertexAttribI1uiv = NULL;
PFNGLVERTEXATTRIBI2UIVPROC glVertexAttribI2uiv = NULL;
PFNGLVERTEXATTRIBI3UIVPROC glVertexAttribI3uiv = NULL;
PFNGLVERTEXATTRIBI4UIVPROC glVertexAttribI4uiv = NULL;
PFNGLVERTEXATTRIBI4BVPROC glVertexAttribI4bv = NULL;
PFNGLVERTEXATTRIBI4SVPROC glVertexAttribI4sv = NULL;
PFNGLVERTEXATTRIBI4UBVPROC glVertexAttribI4ubv = NULL;
PFNGLVERTEXATTRIBI4USVPROC glVertexAttribI4usv = NULL;
PFNGLGETUNIFORMUIVPROC glGetUniformuiv = NULL;
PFNGLBINDFRAGDATALOCATIONPROC glBindFragDataLocation = NULL;
PFNGLGETFRAGDATALOCATIONPROC glGetFragDataLocation = NULL;
PFNGLUNIFORM1UIPROC glUniform1ui = NULL;
PFNGLUNIFORM2UIPROC glUniform2ui = NULL;
PFNGLUNIFORM3UIPROC glUniform3ui = NULL;
PFNGLUNIFORM4UIPROC glUniform4ui = NULL;
PFNGLUNIFORM1UIVPROC glUniform1uiv = NULL;
PFNGLUNIFORM2UIVPROC glUniform2uiv = NULL;
PFNGLUNIFORM3UIVPROC glUniform3uiv = NULL;
PFNGLUNIFORM4UIVPROC glUniform4uiv = NULL;
PFNGLTEXPARAMETERIIVPROC glTexParameterIiv = NULL;
PFNGLTEXPARAMETERIUIVPROC glTexParameterIuiv = NULL;
PFNGLGETTEXPARAMETERIIVPROC glGetTexParameterIiv = NULL;
PFNGLGETTEXPARAMETERIUIVPROC glGetTexParameterIuiv = NULL;
PFNGLCLEARBUFFERIVPROC glClearBufferiv = NULL;
PFNGLCLEARBUFFERUIVPROC glClearBufferuiv = NULL;
PFNGLCLEARBUFFERFVPROC glClearBufferfv = NULL;
PFNGLCLEARBUFFERFIPROC glClearBufferfi = NULL;
PFNGLGETSTRINGIPROC glGetStringi = NULL;
// Runtime-resolved OpenGL entry points. Each pointer starts NULL and is
// filled in by GLExt::init() via the platform's GetProcAddress mechanism;
// calling any of them before init() dereferences a NULL pointer.
// OpenGL 3.1
PFNGLDRAWARRAYSINSTANCEDPROC glDrawArraysInstanced = NULL;
PFNGLDRAWELEMENTSINSTANCEDPROC glDrawElementsInstanced = NULL;
PFNGLTEXBUFFERPROC glTexBuffer = NULL;
PFNGLPRIMITIVERESTARTINDEXPROC glPrimitiveRestartIndex = NULL;
// OpenGL 3.2
PFNGLGETINTEGER64I_VPROC glGetInteger64i_v = NULL;
PFNGLGETBUFFERPARAMETERI64VPROC glGetBufferParameteri64v = NULL;
PFNGLFRAMEBUFFERTEXTUREPROC glFramebufferTexture = NULL;
// OpenGL 3.3
PFNGLVERTEXATTRIBDIVISORPROC glVertexAttribDivisor = NULL;
// OpenGL 4.0
PFNGLMINSAMPLESHADINGPROC glMinSampleShading = NULL;
PFNGLBLENDEQUATIONIPROC glBlendEquationi = NULL;
PFNGLBLENDEQUATIONSEPARATEIPROC glBlendEquationSeparatei = NULL;
PFNGLBLENDFUNCIPROC glBlendFunci = NULL;
PFNGLBLENDFUNCSEPARATEIPROC glBlendFuncSeparatei = NULL;
// ARB framebuffer object
PFNGLISRENDERBUFFERPROC glIsRenderbuffer = NULL;
PFNGLBINDRENDERBUFFERPROC glBindRenderbuffer = NULL;
PFNGLDELETERENDERBUFFERSPROC glDeleteRenderbuffers = NULL;
PFNGLGENRENDERBUFFERSPROC glGenRenderbuffers = NULL;
PFNGLRENDERBUFFERSTORAGEPROC glRenderbufferStorage = NULL;
PFNGLGETRENDERBUFFERPARAMETERIVPROC glGetRenderbufferParameteriv = NULL;
PFNGLISFRAMEBUFFERPROC glIsFramebuffer = NULL;
PFNGLBINDFRAMEBUFFERPROC glBindFramebuffer = NULL;
PFNGLDELETEFRAMEBUFFERSPROC glDeleteFramebuffers = NULL;
PFNGLGENFRAMEBUFFERSPROC glGenFramebuffers = NULL;
PFNGLCHECKFRAMEBUFFERSTATUSPROC glCheckFramebufferStatus = NULL;
PFNGLFRAMEBUFFERTEXTURE1DPROC glFramebufferTexture1D = NULL;
PFNGLFRAMEBUFFERTEXTURE2DPROC glFramebufferTexture2D = NULL;
PFNGLFRAMEBUFFERTEXTURE3DPROC glFramebufferTexture3D = NULL;
PFNGLFRAMEBUFFERRENDERBUFFERPROC glFramebufferRenderbuffer = NULL;
PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glGetFramebufferAttachmentParameteriv = NULL;
PFNGLGENERATEMIPMAPPROC glGenerateMipmap = NULL;
PFNGLBLITFRAMEBUFFERPROC glBlitFramebuffer = NULL;
PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glRenderbufferStorageMultisample = NULL;
PFNGLFRAMEBUFFERTEXTURELAYERPROC glFramebufferTextureLayer = NULL;
// ARB map buffer range
PFNGLMAPBUFFERRANGEPROC glMapBufferRange = NULL;
PFNGLFLUSHMAPPEDBUFFERRANGEPROC glFlushMappedBufferRange = NULL;
// ARB vertex array object
PFNGLBINDVERTEXARRAYPROC glBindVertexArray = NULL;
PFNGLDELETEVERTEXARRAYSPROC glDeleteVertexArrays = NULL;
PFNGLGENVERTEXARRAYSPROC glGenVertexArrays = NULL;
PFNGLISVERTEXARRAYPROC glIsVertexArray = NULL;
// ARB uniform buffer object
PFNGLGETUNIFORMINDICESPROC glGetUniformIndices = NULL;
PFNGLGETACTIVEUNIFORMSIVPROC glGetActiveUniformsiv = NULL;
PFNGLGETACTIVEUNIFORMNAMEPROC glGetActiveUniformName = NULL;
PFNGLGETUNIFORMBLOCKINDEXPROC glGetUniformBlockIndex = NULL;
PFNGLGETACTIVEUNIFORMBLOCKIVPROC glGetActiveUniformBlockiv = NULL;
PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glGetActiveUniformBlockName = NULL;
PFNGLUNIFORMBLOCKBINDINGPROC glUniformBlockBinding = NULL;
// ARB copy buffer
PFNGLCOPYBUFFERSUBDATAPROC glCopyBufferSubData = NULL;
// ARB blend func extended
PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glBindFragDataLocationIndexed = NULL;
// ARB draw elements base vertex
PFNGLDRAWELEMENTSBASEVERTEXPROC glDrawElementsBaseVertex = NULL;
PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glDrawRangeElementsBaseVertex = NULL;
PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC glDrawElementsInstancedBaseVertex = NULL;
PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC glMultiDrawElementsBaseVertex = NULL;
// ARB texture multisample
PFNGLTEXIMAGE2DMULTISAMPLEPROC glTexImage2DMultisample = NULL;
PFNGLTEXIMAGE3DMULTISAMPLEPROC glTexImage3DMultisample = NULL;
PFNGLGETMULTISAMPLEFVPROC glGetMultisamplefv = NULL;
PFNGLSAMPLEMASKIPROC glSampleMaski = NULL;
// ARB tessellation shader
PFNGLPATCHPARAMETERIPROC glPatchParameteri = NULL;
PFNGLPATCHPARAMETERFVPROC glPatchParameterfv = NULL;
// ARB debug output
PFNGLDEBUGMESSAGECONTROLARBPROC glDebugMessageControlARB = NULL;
PFNGLDEBUGMESSAGEINSERTARBPROC glDebugMessageInsertARB = NULL;
PFNGLDEBUGMESSAGECALLBACKARBPROC glDebugMessageCallbackARB = NULL;
PFNGLGETDEBUGMESSAGELOGARBPROC glGetDebugMessageLogARB = NULL;
// ARB compute shader
PFNGLDISPATCHCOMPUTEPROC glDispatchCompute = NULL;
PFNGLDISPATCHCOMPUTEINDIRECTPROC glDispatchComputeIndirect = NULL;
/******************************************************************************\
*
* GLExt
*
\******************************************************************************/
/*
 */
// Number of texture units tracked by the binding cache below.
#define GL_EXT_NUM_TEXTURES 32
/*
 */
// Context identification strings captured by GLExt::init(). The driver
// owns all of them except gl_extensions, which init() allocates on the
// heap (space-separated list) and shutdown() frees.
static const char *gl_vendor = NULL;
static const char *gl_renderer = NULL;
static const char *gl_version = NULL;
static const char *gl_shading_language = NULL;
static char *gl_extensions = NULL;
//static GLFfp *gl_ffp = NULL;
/*
 */
// Shadow copies of GL state, used to filter out redundant state changes
// (see setProgramID/setTexture/setViewport below).
static int gl_multisample = 0;
static int gl_viewport[4] = { 0, };
static GLuint gl_program_id = 0;
static GLuint gl_texture_target[GL_EXT_NUM_TEXTURES] = { 0, };
static GLuint gl_texture_id[GL_EXT_NUM_TEXTURES] = { 0, };
static int gl_texture_unit = 0;
/*
 */
#ifndef USE_GL_WRAPPER
#ifdef _MACOS
static const char *gl_name = "/System/Library/Frameworks/OpenGL.framework/OpenGL";
static void *gl_handle = NULL;
// Resolve a GL entry point from the OpenGL framework via dlopen()/dlsym().
// Bug fix: the guard tested gl_name (a string constant that is never NULL)
// instead of gl_handle, so a failed dlopen() fell through into
// dlsym(NULL, name), which searches the global namespace rather than
// failing cleanly.
static void *glGetProcAddress(const char *name) {
	if(gl_handle == NULL) gl_handle = dlopen(gl_name,RTLD_LAZY);
	if(gl_handle != NULL) return dlsym(gl_handle,name);
	return NULL;
}
#endif
#endif
/*
 */
// Trivial constructor: all GLExt state lives in file-scope statics, so the
// real setup is deferred to GLExt::init().
GLExt::GLExt() {
}
/*
 */
// Resolve all required OpenGL entry points at runtime and capture the
// context's vendor/renderer/version strings plus a heap-allocated,
// space-separated copy of the extension list. A GL context must be
// current when this runs. Always returns 1 (the original contract).
int GLExt::init() {
	// assert(gl_ffp == NULL && "GLExt::init(): is already initialized");
	// load functions: try the core name first, then the ARB/EXT suffixes
#ifdef _WIN32
#define GET_PROC_ADDRESS(NAME) { \
*((unsigned char**)&NAME) = (unsigned char*)wglGetProcAddress(#NAME); \
if(NAME == NULL) *((unsigned char**)&NAME) = (unsigned char*)wglGetProcAddress(#NAME "ARB"); \
if(NAME == NULL) *((unsigned char**)&NAME) = (unsigned char*)wglGetProcAddress(#NAME "EXT"); \
}
#elif _LINUX
#define GET_PROC_ADDRESS(NAME) { \
*((unsigned char**)&NAME) = (unsigned char*)glXGetProcAddressARB((const GLubyte*)#NAME); \
if(NAME == NULL) *((unsigned char**)&NAME) = (unsigned char*)glXGetProcAddressARB((const GLubyte*)#NAME "ARB"); \
if(NAME == NULL) *((unsigned char**)&NAME) = (unsigned char*)glXGetProcAddressARB((const GLubyte*)#NAME "EXT"); \
}
#elif _MACOS
#define GET_PROC_ADDRESS(NAME) { \
*((unsigned char**)&NAME) = (unsigned char*)glGetProcAddress(#NAME); \
if(NAME == NULL) *((unsigned char**)&NAME) = (unsigned char*)glGetProcAddress(#NAME "ARB"); \
if(NAME == NULL) *((unsigned char**)&NAME) = (unsigned char*)glGetProcAddress(#NAME "EXT"); \
}
#endif
	// OpenGL 1.2
	GET_PROC_ADDRESS(glBlendColor)
	GET_PROC_ADDRESS(glBlendEquation)
	GET_PROC_ADDRESS(glDrawRangeElements)
	GET_PROC_ADDRESS(glTexImage3D)
	GET_PROC_ADDRESS(glTexSubImage3D)
	GET_PROC_ADDRESS(glCopyTexSubImage3D)
	// OpenGL 1.3
	GET_PROC_ADDRESS(glActiveTexture)
	GET_PROC_ADDRESS(glSampleCoverage)
	GET_PROC_ADDRESS(glCompressedTexImage3D)
	GET_PROC_ADDRESS(glCompressedTexImage2D)
	GET_PROC_ADDRESS(glCompressedTexImage1D)
	GET_PROC_ADDRESS(glCompressedTexSubImage3D)
	GET_PROC_ADDRESS(glCompressedTexSubImage2D)
	GET_PROC_ADDRESS(glCompressedTexSubImage1D)
	GET_PROC_ADDRESS(glGetCompressedTexImage)
	// OpenGL 1.4
	GET_PROC_ADDRESS(glBlendFuncSeparate)
	GET_PROC_ADDRESS(glMultiDrawArrays)
	GET_PROC_ADDRESS(glMultiDrawElements)
	GET_PROC_ADDRESS(glPointParameterf)
	GET_PROC_ADDRESS(glPointParameterfv)
	GET_PROC_ADDRESS(glPointParameteri)
	GET_PROC_ADDRESS(glPointParameteriv)
	// OpenGL 1.5
	GET_PROC_ADDRESS(glGenQueries)
	GET_PROC_ADDRESS(glDeleteQueries)
	GET_PROC_ADDRESS(glIsQuery)
	GET_PROC_ADDRESS(glBeginQuery)
	GET_PROC_ADDRESS(glEndQuery)
	GET_PROC_ADDRESS(glGetQueryiv)
	GET_PROC_ADDRESS(glGetQueryObjectiv)
	GET_PROC_ADDRESS(glGetQueryObjectuiv)
	GET_PROC_ADDRESS(glBindBuffer)
	GET_PROC_ADDRESS(glDeleteBuffers)
	GET_PROC_ADDRESS(glGenBuffers)
	GET_PROC_ADDRESS(glIsBuffer)
	GET_PROC_ADDRESS(glBufferData)
	GET_PROC_ADDRESS(glBufferSubData)
	GET_PROC_ADDRESS(glGetBufferSubData)
	GET_PROC_ADDRESS(glMapBuffer)
	GET_PROC_ADDRESS(glUnmapBuffer)
	GET_PROC_ADDRESS(glGetBufferParameteriv)
	GET_PROC_ADDRESS(glGetBufferPointerv)
	// OpenGL 2.0
	GET_PROC_ADDRESS(glBlendEquationSeparate)
	GET_PROC_ADDRESS(glDrawBuffers)
	GET_PROC_ADDRESS(glStencilOpSeparate)
	GET_PROC_ADDRESS(glStencilFuncSeparate)
	GET_PROC_ADDRESS(glStencilMaskSeparate)
	GET_PROC_ADDRESS(glAttachShader)
	GET_PROC_ADDRESS(glBindAttribLocation)
	GET_PROC_ADDRESS(glCompileShader)
	GET_PROC_ADDRESS(glCreateProgram)
	GET_PROC_ADDRESS(glCreateShader)
	GET_PROC_ADDRESS(glDeleteProgram)
	GET_PROC_ADDRESS(glDeleteShader)
	GET_PROC_ADDRESS(glDetachShader)
	GET_PROC_ADDRESS(glDisableVertexAttribArray)
	GET_PROC_ADDRESS(glEnableVertexAttribArray)
	GET_PROC_ADDRESS(glGetActiveAttrib)
	GET_PROC_ADDRESS(glGetActiveUniform)
	GET_PROC_ADDRESS(glGetAttachedShaders)
	GET_PROC_ADDRESS(glGetAttribLocation)
	GET_PROC_ADDRESS(glGetProgramiv)
	GET_PROC_ADDRESS(glGetProgramInfoLog)
	GET_PROC_ADDRESS(glGetShaderiv)
	GET_PROC_ADDRESS(glGetShaderInfoLog)
	GET_PROC_ADDRESS(glGetShaderSource)
	GET_PROC_ADDRESS(glGetUniformLocation)
	GET_PROC_ADDRESS(glGetUniformfv)
	GET_PROC_ADDRESS(glGetUniformiv)
	GET_PROC_ADDRESS(glGetVertexAttribdv)
	GET_PROC_ADDRESS(glGetVertexAttribfv)
	GET_PROC_ADDRESS(glGetVertexAttribiv)
	GET_PROC_ADDRESS(glGetVertexAttribPointerv)
	GET_PROC_ADDRESS(glIsProgram)
	GET_PROC_ADDRESS(glIsShader)
	GET_PROC_ADDRESS(glLinkProgram)
	GET_PROC_ADDRESS(glShaderSource)
	GET_PROC_ADDRESS(glUseProgram)
	GET_PROC_ADDRESS(glUniform1f)
	GET_PROC_ADDRESS(glUniform2f)
	GET_PROC_ADDRESS(glUniform3f)
	GET_PROC_ADDRESS(glUniform4f)
	GET_PROC_ADDRESS(glUniform1i)
	GET_PROC_ADDRESS(glUniform2i)
	GET_PROC_ADDRESS(glUniform3i)
	GET_PROC_ADDRESS(glUniform4i)
	GET_PROC_ADDRESS(glUniform1fv)
	GET_PROC_ADDRESS(glUniform2fv)
	GET_PROC_ADDRESS(glUniform3fv)
	GET_PROC_ADDRESS(glUniform4fv)
	GET_PROC_ADDRESS(glUniform1iv)
	GET_PROC_ADDRESS(glUniform2iv)
	GET_PROC_ADDRESS(glUniform3iv)
	GET_PROC_ADDRESS(glUniform4iv)
	GET_PROC_ADDRESS(glUniformMatrix2fv)
	GET_PROC_ADDRESS(glUniformMatrix3fv)
	GET_PROC_ADDRESS(glUniformMatrix4fv)
	GET_PROC_ADDRESS(glValidateProgram)
	GET_PROC_ADDRESS(glVertexAttrib1d)
	GET_PROC_ADDRESS(glVertexAttrib1dv)
	GET_PROC_ADDRESS(glVertexAttrib1f)
	GET_PROC_ADDRESS(glVertexAttrib1fv)
	GET_PROC_ADDRESS(glVertexAttrib1s)
	GET_PROC_ADDRESS(glVertexAttrib1sv)
	GET_PROC_ADDRESS(glVertexAttrib2d)
	GET_PROC_ADDRESS(glVertexAttrib2dv)
	GET_PROC_ADDRESS(glVertexAttrib2f)
	GET_PROC_ADDRESS(glVertexAttrib2fv)
	GET_PROC_ADDRESS(glVertexAttrib2s)
	GET_PROC_ADDRESS(glVertexAttrib2sv)
	GET_PROC_ADDRESS(glVertexAttrib3d)
	GET_PROC_ADDRESS(glVertexAttrib3dv)
	GET_PROC_ADDRESS(glVertexAttrib3f)
	GET_PROC_ADDRESS(glVertexAttrib3fv)
	GET_PROC_ADDRESS(glVertexAttrib3s)
	GET_PROC_ADDRESS(glVertexAttrib3sv)
	GET_PROC_ADDRESS(glVertexAttrib4Nbv)
	GET_PROC_ADDRESS(glVertexAttrib4Niv)
	GET_PROC_ADDRESS(glVertexAttrib4Nsv)
	GET_PROC_ADDRESS(glVertexAttrib4Nub)
	GET_PROC_ADDRESS(glVertexAttrib4Nubv)
	GET_PROC_ADDRESS(glVertexAttrib4Nuiv)
	GET_PROC_ADDRESS(glVertexAttrib4Nusv)
	GET_PROC_ADDRESS(glVertexAttrib4bv)
	GET_PROC_ADDRESS(glVertexAttrib4d)
	GET_PROC_ADDRESS(glVertexAttrib4dv)
	GET_PROC_ADDRESS(glVertexAttrib4f)
	GET_PROC_ADDRESS(glVertexAttrib4fv)
	GET_PROC_ADDRESS(glVertexAttrib4iv)
	GET_PROC_ADDRESS(glVertexAttrib4s)
	GET_PROC_ADDRESS(glVertexAttrib4sv)
	GET_PROC_ADDRESS(glVertexAttrib4ubv)
	GET_PROC_ADDRESS(glVertexAttrib4uiv)
	GET_PROC_ADDRESS(glVertexAttrib4usv)
	GET_PROC_ADDRESS(glVertexAttribPointer)
	// OpenGL 2.1
	GET_PROC_ADDRESS(glUniformMatrix2x3fv)
	GET_PROC_ADDRESS(glUniformMatrix3x2fv)
	GET_PROC_ADDRESS(glUniformMatrix2x4fv)
	GET_PROC_ADDRESS(glUniformMatrix4x2fv)
	GET_PROC_ADDRESS(glUniformMatrix3x4fv)
	GET_PROC_ADDRESS(glUniformMatrix4x3fv)
	// OpenGL 3.0
	GET_PROC_ADDRESS(glColorMaski)
	GET_PROC_ADDRESS(glGetBooleani_v)
	GET_PROC_ADDRESS(glGetIntegeri_v)
	GET_PROC_ADDRESS(glEnablei)
	GET_PROC_ADDRESS(glDisablei)
	GET_PROC_ADDRESS(glIsEnabledi)
	GET_PROC_ADDRESS(glBeginTransformFeedback)
	GET_PROC_ADDRESS(glEndTransformFeedback)
	GET_PROC_ADDRESS(glBindBufferRange)
	GET_PROC_ADDRESS(glBindBufferBase)
	GET_PROC_ADDRESS(glTransformFeedbackVaryings)
	GET_PROC_ADDRESS(glGetTransformFeedbackVarying)
	GET_PROC_ADDRESS(glClampColor)
	GET_PROC_ADDRESS(glBeginConditionalRender)
	GET_PROC_ADDRESS(glEndConditionalRender)
	GET_PROC_ADDRESS(glVertexAttribIPointer)
	GET_PROC_ADDRESS(glGetVertexAttribIiv)
	GET_PROC_ADDRESS(glGetVertexAttribIuiv)
	GET_PROC_ADDRESS(glVertexAttribI1i)
	GET_PROC_ADDRESS(glVertexAttribI2i)
	GET_PROC_ADDRESS(glVertexAttribI3i)
	GET_PROC_ADDRESS(glVertexAttribI4i)
	GET_PROC_ADDRESS(glVertexAttribI1ui)
	GET_PROC_ADDRESS(glVertexAttribI2ui)
	GET_PROC_ADDRESS(glVertexAttribI3ui)
	GET_PROC_ADDRESS(glVertexAttribI4ui)
	GET_PROC_ADDRESS(glVertexAttribI1iv)
	GET_PROC_ADDRESS(glVertexAttribI2iv)
	GET_PROC_ADDRESS(glVertexAttribI3iv)
	GET_PROC_ADDRESS(glVertexAttribI4iv)
	GET_PROC_ADDRESS(glVertexAttribI1uiv)
	GET_PROC_ADDRESS(glVertexAttribI2uiv)
	GET_PROC_ADDRESS(glVertexAttribI3uiv)
	GET_PROC_ADDRESS(glVertexAttribI4uiv)
	GET_PROC_ADDRESS(glVertexAttribI4bv)
	GET_PROC_ADDRESS(glVertexAttribI4sv)
	GET_PROC_ADDRESS(glVertexAttribI4ubv)
	GET_PROC_ADDRESS(glVertexAttribI4usv)
	GET_PROC_ADDRESS(glGetUniformuiv)
	GET_PROC_ADDRESS(glBindFragDataLocation)
	GET_PROC_ADDRESS(glGetFragDataLocation)
	GET_PROC_ADDRESS(glUniform1ui)
	GET_PROC_ADDRESS(glUniform2ui)
	GET_PROC_ADDRESS(glUniform3ui)
	GET_PROC_ADDRESS(glUniform4ui)
	GET_PROC_ADDRESS(glUniform1uiv)
	GET_PROC_ADDRESS(glUniform2uiv)
	GET_PROC_ADDRESS(glUniform3uiv)
	GET_PROC_ADDRESS(glUniform4uiv)
	GET_PROC_ADDRESS(glTexParameterIiv)
	GET_PROC_ADDRESS(glTexParameterIuiv)
	GET_PROC_ADDRESS(glGetTexParameterIiv)
	GET_PROC_ADDRESS(glGetTexParameterIuiv)
	GET_PROC_ADDRESS(glClearBufferiv)
	GET_PROC_ADDRESS(glClearBufferuiv)
	GET_PROC_ADDRESS(glClearBufferfv)
	GET_PROC_ADDRESS(glClearBufferfi)
	GET_PROC_ADDRESS(glGetStringi)
	// OpenGL 3.1
	GET_PROC_ADDRESS(glDrawArraysInstanced)
	GET_PROC_ADDRESS(glDrawElementsInstanced)
	GET_PROC_ADDRESS(glTexBuffer)
	GET_PROC_ADDRESS(glPrimitiveRestartIndex)
	// OpenGL 3.2
	GET_PROC_ADDRESS(glGetInteger64i_v)
	GET_PROC_ADDRESS(glGetBufferParameteri64v)
	GET_PROC_ADDRESS(glFramebufferTexture)
	// OpenGL 3.3
	GET_PROC_ADDRESS(glVertexAttribDivisor)
	// OpenGL 4.0
	GET_PROC_ADDRESS(glMinSampleShading)
	GET_PROC_ADDRESS(glBlendEquationi)
	GET_PROC_ADDRESS(glBlendEquationSeparatei)
	GET_PROC_ADDRESS(glBlendFunci)
	GET_PROC_ADDRESS(glBlendFuncSeparatei)
	// ARB framebuffer object
	GET_PROC_ADDRESS(glIsRenderbuffer)
	GET_PROC_ADDRESS(glBindRenderbuffer)
	GET_PROC_ADDRESS(glDeleteRenderbuffers)
	GET_PROC_ADDRESS(glGenRenderbuffers)
	GET_PROC_ADDRESS(glRenderbufferStorage)
	GET_PROC_ADDRESS(glGetRenderbufferParameteriv)
	GET_PROC_ADDRESS(glIsFramebuffer)
	GET_PROC_ADDRESS(glBindFramebuffer)
	GET_PROC_ADDRESS(glDeleteFramebuffers)
	GET_PROC_ADDRESS(glGenFramebuffers)
	GET_PROC_ADDRESS(glCheckFramebufferStatus)
	GET_PROC_ADDRESS(glFramebufferTexture1D)
	GET_PROC_ADDRESS(glFramebufferTexture2D)
	GET_PROC_ADDRESS(glFramebufferTexture3D)
	GET_PROC_ADDRESS(glFramebufferRenderbuffer)
	GET_PROC_ADDRESS(glGetFramebufferAttachmentParameteriv)
	GET_PROC_ADDRESS(glGenerateMipmap)
	GET_PROC_ADDRESS(glBlitFramebuffer)
	GET_PROC_ADDRESS(glRenderbufferStorageMultisample)
	GET_PROC_ADDRESS(glFramebufferTextureLayer)
	// ARB map buffer range
	GET_PROC_ADDRESS(glMapBufferRange)
	GET_PROC_ADDRESS(glFlushMappedBufferRange)
	// ARB vertex array object
	GET_PROC_ADDRESS(glBindVertexArray)
	GET_PROC_ADDRESS(glDeleteVertexArrays)
	GET_PROC_ADDRESS(glGenVertexArrays)
	GET_PROC_ADDRESS(glIsVertexArray)
	// ARB uniform buffer object
	GET_PROC_ADDRESS(glGetUniformIndices)
	GET_PROC_ADDRESS(glGetActiveUniformsiv)
	GET_PROC_ADDRESS(glGetActiveUniformName)
	GET_PROC_ADDRESS(glGetUniformBlockIndex)
	GET_PROC_ADDRESS(glGetActiveUniformBlockiv)
	GET_PROC_ADDRESS(glGetActiveUniformBlockName)
	GET_PROC_ADDRESS(glUniformBlockBinding)
	// ARB copy buffer
	GET_PROC_ADDRESS(glCopyBufferSubData)
	// ARB blend func extended
	GET_PROC_ADDRESS(glBindFragDataLocationIndexed)
	// ARB draw elements base vertex
	GET_PROC_ADDRESS(glDrawElementsBaseVertex)
	GET_PROC_ADDRESS(glDrawRangeElementsBaseVertex)
	GET_PROC_ADDRESS(glDrawElementsInstancedBaseVertex)
	GET_PROC_ADDRESS(glMultiDrawElementsBaseVertex)
	// ARB texture multisample
	GET_PROC_ADDRESS(glTexImage2DMultisample)
	GET_PROC_ADDRESS(glTexImage3DMultisample)
	GET_PROC_ADDRESS(glGetMultisamplefv)
	GET_PROC_ADDRESS(glSampleMaski)
	// ARB tessellation shader
	GET_PROC_ADDRESS(glPatchParameteri)
	GET_PROC_ADDRESS(glPatchParameterfv)
	// ARB debug output
	GET_PROC_ADDRESS(glDebugMessageControlARB)
	GET_PROC_ADDRESS(glDebugMessageInsertARB)
	GET_PROC_ADDRESS(glDebugMessageCallbackARB)
	GET_PROC_ADDRESS(glGetDebugMessageLogARB)
	// ARB compute shader
	GET_PROC_ADDRESS(glDispatchCompute)
	GET_PROC_ADDRESS(glDispatchComputeIndirect)
#undef GET_PROC_ADDRESS
	// version
	gl_vendor = (const char*)glGetString(GL_VENDOR);
	gl_renderer = (const char*)glGetString(GL_RENDERER);
	gl_version = (const char*)glGetString(GL_VERSION);
	gl_shading_language = (const char*)glGetString(GL_SHADING_LANGUAGE_VERSION);
	// extensions: build one space-separated, NUL-terminated string
	int size = 0;
	int capacity = 128;
	int num_extensions = 0;
	gl_extensions = new char[capacity];
	gl_extensions[0] = '\0';	// keep getExtensions() valid even with zero extensions
	glGetIntegerv(GL_NUM_EXTENSIONS,&num_extensions);
	for(int i = 0; i < num_extensions; i++) {
		const char *extension = (const char*)glGetStringi(GL_EXTENSIONS,i);
		if(extension == NULL) continue;
		int length = (int)strlen(extension);
		if(size + length + 2 > capacity) {
			// Bug fix: doubling the capacity once did not guarantee room
			// for a long extension name; grow until name + ' ' + '\0' fit.
			// Also copy exactly `size` valid bytes (the old code copied
			// size + 1 bytes, reading one uninitialized byte).
			while(size + length + 2 > capacity) capacity = capacity * 2 + 1;
			char *extensions = new char[capacity];
			memcpy(extensions,gl_extensions,sizeof(char) * size);
			delete [] gl_extensions;
			gl_extensions = extensions;
		}
		memcpy(gl_extensions + size,extension,length);
		size += length;
		gl_extensions[size++] = ' ';
		gl_extensions[size] = '\0';
	}
	// current state: reset the shadow caches to match a fresh context
	gl_multisample = 0;
	gl_viewport[0] = 0;
	gl_viewport[1] = 0;
	gl_viewport[2] = 0;
	gl_viewport[3] = 0;
	gl_program_id = 0;
	clearTextures();
	//gl_ffp = new GLFfp();
	// set default options
	glDepthFunc(GL_LEQUAL);
	glEnable(GL_DEPTH_TEST);
	glCullFace(GL_BACK);
	glEnable(GL_CULL_FACE);
	return 1;
}
// Release everything init() acquired and reset the cached state so a
// later init() starts from a clean slate. Always returns 1.
int GLExt::shutdown() {
	// Drop the driver-owned string pointers and free the only
	// heap-allocated one (the extensions copy).
	gl_vendor = NULL;
	gl_renderer = NULL;
	gl_version = NULL;
	gl_shading_language = NULL;
	delete [] gl_extensions;
	gl_extensions = NULL;
	// Reset the shadow state caches.
	gl_multisample = 0;
	for(int i = 0; i < 4; i++) gl_viewport[i] = 0;
	gl_program_id = 0;
	clearTextures();
	return 1;
}
/*
*/
int GLExt::isInitialized() {
return (/*gl_ffp != */NULL);
}
/*
 */
// Read-only accessors for the driver identification strings captured by
// GLExt::init(); each returns NULL before init() and after shutdown().
const char *GLExt::getVendor() {
return gl_vendor;
}
const char *GLExt::getRenderer() {
return gl_renderer;
}
const char *GLExt::getVersion() {
return gl_version;
}
const char *GLExt::getShadingLanguage() {
return gl_shading_language;
}
/*
 */
// Space-separated extension list built by init(); owned by GLExt
// (freed in shutdown()), NULL when not initialized.
const char *GLExt::getExtensions() {
return gl_extensions;
}
// Return 1 if `extension` is present in the context's extension list.
// Bug fix: the original used a bare strstr(), which reports false
// positives when one extension name is a prefix of another (e.g. asking
// for "GL_ARB_texture_float" matched "GL_ARB_texture_float_linear").
// The list built by init() is space-separated, so require the match to
// be bounded by the string start/end or a space on both sides.
int GLExt::checkExtension(const char *extension) {
	if(gl_extensions == NULL || extension == NULL) return 0;
	size_t length = strlen(extension);
	if(length == 0) return 0;
	const char *s = gl_extensions;
	while((s = strstr(s,extension)) != NULL) {
		int starts = (s == gl_extensions || s[-1] == ' ');
		int ends = (s[length] == ' ' || s[length] == '\0');
		if(starts && ends) return 1;
		s += length;
	}
	return 0;
}
/*
 */
// Cache the requested multisample state and mirror it into the context.
void GLExt::setMultisample(int multisample) {
	gl_multisample = multisample;
	if(gl_multisample == 0) glDisable(GL_MULTISAMPLE);
	else glEnable(GL_MULTISAMPLE);
}
// Last value passed to setMultisample() (not queried from GL).
int GLExt::getMultisample() {
	return gl_multisample;
}
/*
 */
// Apply a viewport rectangle and cache it so getViewport() can answer
// without a glGetIntegerv() round-trip.
void GLExt::setViewport(int x,int y,int width,int height) {
	const int rect[4] = { x, y, width, height };
	for(int i = 0; i < 4; i++) gl_viewport[i] = rect[i];
	glViewport(x,y,width,height);
}
// Copy the cached rectangle (x, y, width, height) into the caller's int[4].
void GLExt::getViewport(int *viewport) {
	for(int i = 0; i < 4; i++) viewport[i] = gl_viewport[i];
}
/*
 */
// Bind a shader program, filtering out redundant glUseProgram() calls
// through the cached id.
void GLExt::setProgramID(GLuint program_id) {
	if(gl_program_id == program_id) return;
	gl_program_id = program_id;
	glUseProgram(program_id);
}
// Program object bound by the last effective setProgramID() call.
GLuint GLExt::getProgramID() {
	return gl_program_id;
}
/*
 */
// Reset the texture-binding cache to "unit 0 active, nothing bound";
// every slot falls back to the GL_TEXTURE_2D target.
void GLExt::clearTextures() {
	gl_texture_unit = 0;
	glActiveTexture(GL_TEXTURE0);
	for(int unit = 0; unit < GL_EXT_NUM_TEXTURES; unit++) {
		gl_texture_target[unit] = GL_TEXTURE_2D;
		gl_texture_id[unit] = 0;
	}
}
// Delete a texture object and purge it from the binding cache.
// Note: `target` is unused but kept for interface compatibility.
void GLExt::deleteTexture(GLuint target,GLuint texture_id) {
	if(glIsTexture(texture_id)) glDeleteTextures(1,&texture_id);
	// Bug fix: the same texture can be cached on several units; the
	// original stopped at the first match (break), leaving stale cache
	// entries that would suppress a later bind of a recycled texture id.
	for(int i = 0; i < GL_EXT_NUM_TEXTURES; i++) {
		if(gl_texture_id[i] == texture_id) {
			gl_texture_target[i] = GL_TEXTURE_2D;
			gl_texture_id[i] = 0;
		}
	}
}
// Bind texture_id to the given unit only if the cache says it is not
// already bound there. When a bind is needed, the active unit is switched
// first (if necessary). NOTE(review): if the texture is already cached on
// the unit, the active unit is NOT switched — callers that need the unit
// active regardless should use setTextureUnit() instead; confirm this
// asymmetry is intentional.
void GLExt::setTexture(int unit,GLuint target,GLuint texture_id) {
//assert((unsigned int)unit < GL_EXT_NUM_TEXTURES && "GLExt::setTexture(): bad texture unit");
if(gl_texture_id[unit] != texture_id) {
gl_texture_target[unit] = target;
gl_texture_id[unit] = texture_id;
if(gl_texture_unit != unit) {
gl_texture_unit = unit;
glActiveTexture(GL_TEXTURE0 + unit);
}
glBindTexture(target,texture_id);
}
}
// Make `unit` the active texture unit (always, even if the texture is
// already bound there), then bind texture_id unless the cache shows it is
// already bound on that unit. Unlike setTexture(), this guarantees the
// unit is active on return.
void GLExt::setTextureUnit(int unit,GLuint target,GLuint texture_id) {
//assert((unsigned int)unit < GL_EXT_NUM_TEXTURES && "GLExt::setTextureUnit(): bad texture unit");
if(gl_texture_unit != unit) {
gl_texture_unit = unit;
glActiveTexture(GL_TEXTURE0 + unit);
}
if(gl_texture_id[unit] != texture_id) {
gl_texture_target[unit] = target;
gl_texture_id[unit] = texture_id;
glBindTexture(target,texture_id);
}
}
// Cached binding target (e.g. GL_TEXTURE_2D) for the given unit.
// No bounds check in release builds — unit must be < GL_EXT_NUM_TEXTURES.
GLuint GLExt::getTextureTarget(int unit) {
// assert((unsigned int)unit < GL_EXT_NUM_TEXTURES && "GLExt::getTextureTarget(): bad texture unit");
return gl_texture_target[unit];
}
// Cached texture object id for the given unit (0 when nothing is bound).
GLuint GLExt::getTextureID(int unit) {
// assert((unsigned int)unit < GL_EXT_NUM_TEXTURES && "GLExt::getTextureID(): bad texture unit");
return gl_texture_id[unit];
}
/*
 */
// Classify a glGetError() code: 0 for GL_NO_ERROR, 1 for any error code.
// Bug fix: the original returned 0 from every branch (including real
// error codes), which made the trailing `return 1` unreachable and the
// queue-draining error() overload below always report success.
int GLExt::error(GLenum result) {
	if(result == GL_NO_ERROR) return 0;
	if(result == GL_INVALID_ENUM) return 1;
	else if(result == GL_INVALID_VALUE) return 1;
	else if(result == GL_INVALID_OPERATION) return 1;
	else if(result == GL_INVALID_FRAMEBUFFER_OPERATION) return 1;
	else if(result == GL_STACK_OVERFLOW) return 1;
	else if(result == GL_STACK_UNDERFLOW) return 1;
	else if(result == GL_OUT_OF_MEMORY) return 1;
	// Unknown non-zero code: still treat as an error.
	return 1;
}
// Drain the GL error queue, classifying each code via error(GLenum).
// Returns 1 if any drained code was classified as an error, 0 otherwise.
int GLExt::error() {
int ret = 0;
while(error(glGetError())) ret = 1;
return ret;
}
<file_sep>/source/basis/base_kernel_impl/Core.h
#ifndef CORE_H
#define CORE_H
#include "kernel/ifc/ICore.h"
namespace smeta3d
{
// Concrete implementation of the ICore kernel interface.
// The ICore methods themselves are declared by the DECL_PUBLIC_ICORE macro.
class CCore : public ICore
{
public:
CCore();
~CCore();
DECL_PUBLIC_ICORE;
public:
smeta3d::SP_IEngine m_ptrEngine; // engine instance held by this core
bool m_bInit; // presumably set once initialization succeeds — confirm in Core.cpp
};
}
#endif
######################################################################################
# #
# Smeta3d Project #
# #
# #
# All rights reserved. #
# Welcome to the CMake build #
######################################################################################
cmake_minimum_required(VERSION 2.8.1 FATAL_ERROR)
cmake_policy(VERSION 2.8.1)
######################################################################################
set(CMAKE_VERBOSE_MAKEFILE false) # high verbosity setting in make files and VS
set(CMAKE_USE_RELATIVE_PATHS true) # Use relative paths
set(CMAKE_SUPPRESS_REGENERATION false) # hide CMakeLists.txt
set(CMAKE_ALLOW_LOOSE_LOOP_CONSTRUCTS true) # Allow empty endif() and such with CMake 2.4.
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
set_property(GLOBAL PROPERTY PREDEFINED_TARGETS_FOLDER "Predefined (Cmake) targets folder" )
######################################################################
# Just debug / release since that's all that's included in SDK
set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "" )
#####################################################################
# Setup version build
set(SMT3D_VERSION_MAJOR 1) #Version MAJOR
set(SMT3D_VERSION_MINOR 0) #Version MINOR
set(SMT3D_VERSION_PATCH 0) #Version PATCH
set(SMT3D_VERSION_NAME "Summer") #Version NAME
set(SMT3D_VERSION_SUFFIX "devel") #Version SUFFIX
# NOTE(review): the composed version uses MAJOR.PATCH, skipping MINOR —
# confirm whether MAJOR.MINOR was intended here.
set( SMT3D_VERSION ${SMT3D_VERSION_MAJOR}.${SMT3D_VERSION_PATCH} )
set( PROJECT_NAME_SMT3D smeta3d-${SMT3D_VERSION_SUFFIX}-${SMT3D_VERSION} )
set( CURRENT_VERSION ${SMT3D_VERSION} )
set( CURRENT_VERSION_MAJOR ${SMT3D_VERSION_MAJOR} )
message(STATUS "Configure Root project: ${PROJECT_NAME_SMT3D}")
project( ${PROJECT_NAME_SMT3D} )
set(SMT3D_SRC_DIR ${${PROJECT_NAME_SMT3D}_SOURCE_DIR})
set(SMT3D_BUILD_DIR ${${PROJECT_NAME_SMT3D}_BINARY_DIR})
# Project-local CMake modules live under cmake/ at the source root.
set( CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake"
"${CMAKE_MODULE_PATH}" )
enable_language(C)
enable_language(CXX)
include(make_default_output_directory)
make_default_output_directory( ${PROJECT_BINARY_DIR} )
## set(WINVER "0x0501" CACHE STRING "WINVER version (see MSDN documentation)") #Windows XP and newer
add_definitions(-DPSAPI_VERSION=1) #compatibility Windows XP for build in Windows 7
# Replace /INCREMENTAL:NO[YES] by /INCREMENTAL:NO.
string(REGEX REPLACE "/INCREMENTAL[0-Y]*" "" CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG}" )
string(REGEX REPLACE "/INCREMENTAL[0-Y]*" "" CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" )
string(REGEX REPLACE "/INCREMENTAL[0-Y]*" "" CMAKE_MODULE_LINKER_FLAGS_DEBUG "${CMAKE_MODULE_LINKER_FLAGS_DEBUG}" )
set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} /INCREMENTAL:NO")
set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} /INCREMENTAL:NO")
set(CMAKE_MODULE_LINKER_FLAGS_DEBUG "${CMAKE_MODULE_LINKER_FLAGS_DEBUG} /INCREMENTAL:NO")
# /MP (multi-processor compilation) is understood by VS2010 (MSVC 1600) and newer.
if (MSVC_VERSION GREATER 1599)
	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP") # Multi-processor Compilation
endif()
# Bug fix: the x64 defines were chained with elseif() to the MSVC-version
# test above, so -D_WIN64 was silently skipped on VS2010+ 64-bit builds;
# the bitness check is independent of the compiler version.
if(CMAKE_SIZEOF_VOID_P EQUAL 8) # x64
	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_WIN64")
	set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_WIN64")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zm300") #error C3859: virtual memory range for PCH exceeded
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4 /EHa /fp:except-") #high warning level with C++ and SEH exceptions
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Oi") # Enable intrinsics on MSVC in debug mode
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj") # increases the number of sections that an object file can contain.
if (CMAKE_SIZEOF_VOID_P EQUAL 8) #x64
	set( GLOB_LINKER_FLAGS "${GLOB_LINKER_FLAGS} /MACHINE:X64" )
else()
	set( GLOB_LINKER_FLAGS "${GLOB_LINKER_FLAGS} /MACHINE:X86 /LARGEADDRESSAWARE" )
endif()
# Debug leftover tidied into a normal STATUS message.
message(STATUS "Boost_DEBUG: ${Boost_DEBUG}")
# Locate third-party dependencies (project-local module under cmake/).
include(find_dependencies)
include_directories(${CMAKE_SOURCE_DIR}/include)
add_subdirectory( "source/system" "source/system" ) #Library's location
add_subdirectory( "source/main" "source/main" ) #Library's location
add_subdirectory( "source/basis/base_kernel_impl" "source/basis/base_kernel_impl" ) #Library's location
add_subdirectory( "source/gui_basis" "source/gui_basis" ) #Library's location
add_subdirectory( "source/render/engine" "source/render/engine" ) #Library's location
add_subdirectory( "source/render/extern/glew" "source/render/extern/glew" ) #Library's location
add_subdirectory( "source/tests/test_signals" "source/tests/test_signals" ) #Library's location
add_subdirectory( "source/tests/test_event_manager" "source/tests/test_event_manager" ) #Library's location
###################################################################
## determine if we are compiling for a 32bit or 64bit system
## it's did not work ???
#if(CMAKE_SIZEOF_VOID_P EQUAL 8)
# set(MSVS_PLATFORM_X64 TRUE)
#endif()
#
#if(MSVS_PLATFORM_X64)
# set( GLOB_LINKER_FLAGS "${GLOB_LINKER_FLAGS} /MACHINE:X64" )
# message("----------------------------- use MS VS x64 ----------------------------")
#else()
# set( GLOB_LINKER_FLAGS "${GLOB_LINKER_FLAGS} /MACHINE:X86 /LARGEADDRESSAWARE" )
# message("----------------------------- use MS VS x86 ----------------------------")
#endif()
#
#if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
# # We don't want to install in default system location, install is really for the SDK, so call it that
# set( CMAKE_INSTALL_PREFIX "${IRIS_PLUGINS_BUILD_DIR}/${PROJECT_NAME_IRIS_PLUGINS}" CACHE PATH "EIMS install prefix" FORCE )
#endif ()
#
#if(MSVS_PLATFORM_X64)
# set(3DSMAX_INSTALL_PATH $ENV{ADSK_3DSMAX_x64_2012} CACHE STRING "" FORCE)
#else()
# set(3DSMAX_INSTALL_PATH $ENV{ADSK_3DSMAX_x32_2012} CACHE STRING "" FORCE)
#endif()
#
#set(3DSMAX_INSTALL_PATH "C:\\Program Files (x86)\\Autodesk\\3ds Max 2012")
#
#######################################################################################
#if (MSVC_VERSION GREATER 1499 AND MSVC_VERSION LESS 1600)
#
# # default installation path for 3ds Max SDK
# set(3DSMAX_SDK_2010_SDK_PATH "C:/Program Files/Autodesk/3ds Max 2010 SDK/")
#
# # system environment for install 3ds max
# # 3DSMAX_2011_SDK_PATH - x86 or 3DSMAX_2011x64_PATH - x64 build IDE: Visual C++ 9.0
# # ADSK_3DSMAX_SDK_2012 - x86 or ADSK_3DSMAX_x64_2012 - x64 build IDE: Visual C++ 9.0
#
# if(!3DSMAX_XXXX_SDK_PATH)
# message( "!!!Please set 3dsMax version!!!" )
# return()
# endif()
#
# find_path(3DSMAX10_SDK_INCLUDE_DIR NAMES max.h HINTS ${3DSMAX_SDK_2010_SDK_PATH} PATH_SUFFIXES "maxsdk/include")
# find_path(3DSMAX11_SDK_INCLUDE_DIR NAMES max.h HINTS ${3DSMAX_2011_SDK_PATH} PATH_SUFFIXES "maxsdk/include")
# find_path(3DSMAX12_SDK_INCLUDE_DIR NAMES max.h HINTS ${3DSMAX_INSTALL_PATH} PATH_SUFFIXES "maxsdk/include")
#
# if(3DSMAX10_SDK_INCLUDE_DIR)
# set( 3DSMAX10 3dsMax2010 )
# list( APPEND CUSTOM_BUILD_TYPES ${3DSMAX10} )
# message("3DSMAX10_SDK_INCLUDE_DIR-FOUND")
# else()
# message( "${3DSMAX10_SDK_INCLUDE_DIR}" )
# endif()
#
# if(3DSMAX11_SDK_INCLUDE_DIR)
# set( 3DSMAX11 3dsMax2011 )
# list( APPEND CUSTOM_BUILD_TYPES ${3DSMAX11} )
# message("3DSMAX11_SDK_INCLUDE_DIR-FOUND")
# else()
# message( "${3DSMAX11_SDK_INCLUDE_DIR}" )
# endif()
#
# if(3DSMAX12_SDK_INCLUDE_DIR)
# set( 3DSMAX12 3dsMax2012 )
# list( APPEND CUSTOM_BUILD_TYPES ${3DSMAX12} )
# message("3DSMAX12_SDK_INCLUDE_DIR-FOUND")
# else()
# message( "${3DSMAX12_SDK_INCLUDE_DIR}" )
# endif()
#
# list(LENGTH CUSTOM_BUILD_TYPES CUSTOM_BUILD_TYPES_NUM_ELEM)
# if(NOT CUSTOM_BUILD_TYPES_NUM_ELEM) # есть ли в списке найденные пути к версиям 3ds Max SDK
# message( "not found 3ds Max SDK in default installation path for this version of MS Visual Studio" )
# return()
# endif()
#
# #set(CMAKE_BUILD_TYPE "Release" CACHE STRING "" FORCE)
# #set(CMAKE_CONFIGURATION_TYPES "Release" CACHE STRING "Configurations" FORCE) #"${CUSTOM_BUILD_TYPES}"
# set(CMAKE_CONFIGURATION_TYPES "Release" CACHE STRING "" FORCE)
#
# include(${CMAKE_SOURCE_DIR}/cmake/configuration_3ds_max.cmake)
#
#endif()
#
#
#######################################################################################
## find 3ds Max sdk
#set( CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake" )
#find_package(3DSMAX_SDK)
#
#######################################################################################
##
##if(NOT 3DSMAX_SDK_FOUND)
# #message("----------------------------- 3DSMAX_SDK not found ----------------------------")
# #return()
##endif()
#
#
#######################################################################################
## Create the IrisExportUtility library for 3ds max.
#add_subdirectory(export)
#add_subdirectory(import)
#add_subdirectory(utility)
#
#
#
####################################################################
## install
#
#install(FILES "${IRIS_PLUGINS_SRC_DIR}/scripts/IrisMenu.ms" DESTINATION "Scripts/Startup" CONFIGURATIONS Release)
##install(FILES "IrisExport.dle" DESTINATION "plugins" CONFIGURATIONS Release)
#if(MSVS_PLATFORM_X64)
# install(FILES "${IRIS_PLUGINS_BUILD_DIR}/export/${CMAKE_CONFIGURATION_TYPES}/IrisExport.dle" DESTINATION "plugins" CONFIGURATIONS Release)
# install(FILES "${IRIS_PLUGINS_BUILD_DIR}/import/${CMAKE_CONFIGURATION_TYPES}/IrisImport.dli" DESTINATION "plugins" CONFIGURATIONS Release)
# install(FILES "${IRIS_PLUGINS_BUILD_DIR}/utility/${CMAKE_CONFIGURATION_TYPES}/IrisExportUtility.dlu" DESTINATION "plugins" CONFIGURATIONS Release)
#else()
# install(FILES "${IRIS_PLUGINS_BUILD_DIR}/export/${CMAKE_CONFIGURATION_TYPES}/IrisExport.dle" DESTINATION "plugins" CONFIGURATIONS Release)
# install(FILES "${IRIS_PLUGINS_BUILD_DIR}/import/${CMAKE_CONFIGURATION_TYPES}/IrisImport.dli" DESTINATION "plugins" CONFIGURATIONS Release)
# install(FILES "${IRIS_PLUGINS_BUILD_DIR}/utility/${CMAKE_CONFIGURATION_TYPES}/IrisExportUtility.dlu" DESTINATION "plugins" CONFIGURATIONS Release)
#endif()
#
#
####################################################################
## Provide CPack packaging target
#set(CPACK_PACKAGE_NAME "iris 3ds Max plugins")
#set(CPACK_PACKAGE_VERSION ${IRIS_PLUGINS_VERSION})
#set(CPACK_PACKAGE_VERSION_MAJOR ${IRIS_PLUGINS_VERSION_MAJOR})
#set(CPACK_PACKAGE_VERSION_MINOR ${IRIS_PLUGINS_VERSION_MINOR})
#set(CPACK_PACKAGE_VERSION_PATCH ${IRIS_PLUGINS_VERSION_PATCH})
#set(CPACK_PACKAGE_VENDOR "JSC RPA RusBITech")
#set(CPACK_PACKAGE_CONTACT "<<EMAIL>>")
#set(CPACK_NSIS_URL_INFO_ABOUT "www.rusbitech.ru")
#set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Plugins reflect 3d")
#set(CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES\\\\Autodesk\\\\")
#set(CPACK_PACKAGE_INSTALL_DIRECTORY "${PROJECT_NAME_IRIS_PLUGINS}")
#set(CPACK_RESOURCE_FILE_LICENSE "${IRIS_PLUGINS_SRC_DIR}/LICENSE.TXT")
#set(CPACK_NSIS_DISPLAY_NAME ${PROJECT_NAME_IRIS_PLUGINS})
#
##set(CPACK_INSTALL_CMAKE_PROJECTS "${WS3D_BINARY_DIR}" "WS3D" "ALL" "/")
##set(CPACK_PACKAGE_ICON "${WS3D_SRC_DIR}\\\\ws3dlogo.gif")
##set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "WriteRegStr \\\${WriteEnvStr_RegKey} \\\"WS3D_HOME\\\" $INSTDIR")
#
#include(CPack)
###################################################################
<file_sep>/source/render/engine/Engine.h
#ifndef ENGINE_H_
#define ENGINE_H_
#include "render/engine/ifc/IEngine.h"
#include "render/engine/ifc/IContext.h"
namespace smeta3d
{
// Concrete implementation of the render-engine interface.
// The IEngine methods themselves are declared by the DECL_PUBLIC_IENGINE macro.
class CEngine : public IEngine
{
public:
CEngine();
~CEngine();
DECL_PUBLIC_IENGINE
private:
std::shared_ptr<IContext> m_ptrContext; // rendering context owned by the engine
};
}
#endif /* ENGINE_H_ */
<file_sep>/source/system/CMakeLists.txt
######################################################################################
# Build the "system" shared library from this directory's sources plus the
# public headers under include/system.
######################################################################################
file(GLOB PRIVATE_HEADERS *.h)
file(GLOB PUBLIC_HEADERS ${CMAKE_SOURCE_DIR}/include/system/*.h)
file(GLOB PUBLIC_IFC_HEADERS ${CMAKE_SOURCE_DIR}/include/system/ifc/*.h)
file(GLOB SOURCES *.cpp)
list(SORT PRIVATE_HEADERS)
list(SORT SOURCES)
add_library(system SHARED ${PUBLIC_HEADERS} ${PUBLIC_IFC_HEADERS} ${PRIVATE_HEADERS} ${SOURCES})
##set_executable_output_postfix( kernel)
#set_version_and_label( ${CURRENT_LIBRARY_NAME} )
# NOTE(review): ADDITION_COMPILE_FLAGS is not set anywhere visible (and the
# glew CMakeLists spells it ADDITIONAL_COMPILE_FLAGS) — confirm which
# variable name is intended project-wide.
set_target_properties( system PROPERTIES
COMPILE_FLAGS "${ADDITION_COMPILE_FLAGS}"
)
include_directories(${Boost_INCLUDE_DIR})
#message(Qt5Widgets_LIBRARIES ": ${Qt5Widgets_LIBRARIES}")
#set(LIBRARIES engine)
#target_link_libraries(system ${LIBRARIES})
<file_sep>/source/render/extern/glew/CMakeLists.txt
######################################################################################
# Builds the bundled GLEW (OpenGL Extension Wrangler) as a shared library.
######################################################################################
file(GLOB HEADERS include/GL/*.h)
file(GLOB SOURCES src/*.c)
list(SORT HEADERS)
list(SORT SOURCES)
# FIX: this was previously set as ADDITIONAL_COMPILE_FLAGS while
# set_target_properties below reads ADDITION_COMPILE_FLAGS (the spelling used
# by every other CMakeLists in the project), so -DGLEW_BUILD was silently
# dropped and the shared library was built without its export macro.
set(ADDITION_COMPILE_FLAGS "${ADDITION_COMPILE_FLAGS} -DGLEW_BUILD")
add_library(glew SHARED ${HEADERS} ${SOURCES})
# Bundled GLEW headers; applies to all targets in this directory.
include_directories(./include)
set_target_properties( glew PROPERTIES
COMPILE_FLAGS "${ADDITION_COMPILE_FLAGS}"
)
set(LIBRARIES ${OPENGL_gl_LIBRARY} )
target_link_libraries(glew ${LIBRARIES})
<file_sep>/include/system/event_manager.h
#ifndef _EVENT_MANAGE_
#define _EVENT_MANAGE_
#include <map>
#include <memory.h>
#include "system/ifc/ievent.h"
#define EVENT_CALLBACK(Class, Metod, Arg) Arg, Class, &Class::Metod
namespace smeta3d
{
/**
\brief Helper class that type-erases a member-function event handler
(fast-delegate idiom). Stores the listener object as void* together with a
static "stub" function that restores the concrete types and forwards the event.
*/
class CEventCallback
{
public:
CEventCallback() : object_ptr(0), stub_ptr(0){}
/// Builds a callback bound to object_ptr and the member function Method.
template <class C, class E, void (C::*Method)(E*)>
static CEventCallback from_method(C* object_ptr)
{
CEventCallback d;
d.object_ptr = object_ptr;
d.stub_ptr = &method_stub<C, E, Method>; // #1 instantiate the typed trampoline
return d;
}
/// Raw pointer to the listener object (used as the map key by CEventManager).
void* Listener() const {return object_ptr;}
/// Invokes the bound handler. stub_ptr must have been set via from_method().
void operator()(IEvent* value) const
{
(*stub_ptr)(object_ptr, value);
}
private:
typedef void (*stub_type)(void*, IEvent*);
void* object_ptr;
stub_type stub_ptr;
/// Static trampoline: casts object and event back to their concrete types
/// and calls the bound member function.
template <class C, class E, void (C::*Method)(E*)>
static void method_stub(void* object_ptr, IEvent* value)
{
C* p = static_cast<C*>(object_ptr);
E* e = static_cast<E*>(value);
(p->*Method)(e); // #2 NOTE(review): static_cast assumes value really is an E* -- registration guarantees this via connectEvent's Conversion check
}
};
////////////////////////////////////////////////////////////////////////////////
/**
\brief Event manager.
Stores handler registrations per event id and dispatches events to the
matching subscribers.
NOTE(review): __declspec(dllexport) is hard-coded; importing translation
units would normally need dllimport -- confirm against the build setup.
*/
class __declspec(dllexport) CEventManager
{
public:
CEventManager();
typedef CEventCallback TEventCallBack;
/// Per-listener registration record.
struct SListenerData
{
SListenerData(){memset(this, 0, sizeof(SListenerData));} // zero-fill; members are pointers/bools and a pointer-only callback
TEventCallBack handler_func; // type-erased callback to invoke
void* m_pSender; // sender filter (only used when m_bIgnoreSender is false)
void* m_pReceiver; // receiver filter (only used when m_bIgnoreReceiver is false)
bool m_bIgnoreSender; // true: accept events from any sender
bool m_bIgnoreReceiver; // true: accept events addressed to any receiver
/// Flag introduced to fix disconnecting subscribers while a dispatch is in progress
bool m_bConnected; /// Subscriber is connected and ready to receive events
};
/// Emits an event.
/// Looks up the handlers registered for @see pEvent_ and invokes their
/// handler function (method).
/// @param pSender_ event source
/// @param pReceiver_ event addressee
/// @param pEvent_ event interface
void sendEvent(const void *pSender_, const void *pReceiver_, IEvent* pEvent_);
/// Compile-time check that ES* converts to ER* (no runtime effect).
template<class ES, class ER>
void Conversion(ER*){}
/// Subscribes pListener's Method to events of type ES (delivered as ER*).
template <class ES, class ER, class C, void (C::*Method)(ER*) >
void connectEvent(C *pListener, void *pSender, void *pReceiver, bool anySender=true, bool anyReceiver=true)
{
Conversion<ES,ER>((ES*)0); // verify ES converts to ER at compile time
TEventCallBack handler_func = TEventCallBack::from_method<C, ER, Method>(pListener);
connectEvent(ES::GetCurrentID(), anySender, pSender, anyReceiver, pReceiver, handler_func);
}
/// Unsubscribes pListener from events of type ES.
template <class ES, class C >
void disconnectEvent(C *pListener)
{
disconnectEvent(ES::GetCurrentID(), pListener);
}
protected:
void connectEvent(const event_id& id, bool ignoreSender, void *pSender, bool ignoreReceiver, void *pReceiver, const TEventCallBack& handler_func);
void disconnectEvent(const event_id& id, void *pListener);
private:
typedef std::map<void*, SListenerData> TListenerMap;     // listener -> registration data
typedef std::map<event_id, TListenerMap> TEventHandlerMap; // event id -> its listeners
TEventHandlerMap m_EventHandlers;
};
//////////////////////////////////////
// Менеджер по умолчанию (глобальный)
// DECL_IN_DLL CEventMgr& EventMgr();
} // end namespace smeta3d
/* Пример использования см. в "system/ifc/ievent.h" */
#endif // _EVENT_MANAGE_
<file_sep>/source/tests/test_event_manager/main.cpp
#include <system/event_manager.h>
#include <string>
#include <iostream>
#define CEventTest_STR "59032DEA-497F-4480-A2BB-2D240DB052DD"
// Minimal test event; DECL_EVENT wires GetID()/GetCurrentID() to the
// CEventTest_STR GUID defined above.
class CEventTest : public smeta3d::IEvent
{
DECL_EVENT(CEventTest)
CEventTest() {}
};
smeta3d::CEventManager eventMng;
// Subscribes to CEventTest for its whole lifetime: connects in the ctor and
// disconnects in the dtor.
class CReceiver
{
public:
CReceiver()
{
// anySender/anyReceiver are true, so the (NULL, this) filters are ignored.
eventMng.connectEvent<CEventTest, EVENT_CALLBACK(CReceiver, testEvent, CEventTest)>(this, NULL, this, true, true);
}
~CReceiver()
{
eventMng.disconnectEvent<CEventTest, CReceiver>(this);
}
// Handler invoked by the manager for each dispatched CEventTest.
void testEvent(CEventTest* event_)
{
std::cout << "event!!!" << std::endl;
}
};
// Smoke test: a stack receiver subscribes on construction, one event is
// broadcast (both filters NULL), and the handler should print once.
int main()
{
CReceiver receiver;
CEventTest test;
eventMng.sendEvent(NULL, NULL, &test);
}<file_sep>/source/basis/base_kernel_impl/CMakeLists.txt
######################################################################################
# Builds the "kernel" shared library (core implementation behind ICore).
######################################################################################
file(GLOB PRIVATE_HEADERS *.h)
file(GLOB PUBLIC_HEADERS ${CMAKE_SOURCE_DIR}/include/kernel/ifc/*.h)
file(GLOB SOURCES *.cpp)
list(SORT PRIVATE_HEADERS)
list(SORT SOURCES)
add_library(kernel SHARED ${PUBLIC_HEADERS} ${PRIVATE_HEADERS} ${SOURCES})
##set_executable_output_postfix( kernel)
#set_version_and_label( ${CURRENT_LIBRARY_NAME} )
set_target_properties( kernel PROPERTIES
COMPILE_FLAGS "${ADDITION_COMPILE_FLAGS}"
)
#message(Qt5Widgets_LIBRARIES ": ${Qt5Widgets_LIBRARIES}")
# kernel drives the engine and uses the system library's event manager.
set(LIBRARIES engine system)
target_link_libraries(kernel ${LIBRARIES})
<file_sep>/source/system/event_manager.cpp
#include "system/event_manager.h"
#include <cassert>
namespace smeta3d
{
//-----------------------------------------------------------------------------
/// Constructs an empty manager: no event types, no listeners registered.
CEventManager::CEventManager()
{
}
//-----------------------------------------------------------------------------
/// Registers (or re-registers) an event handler.
/// @param id             event type id the listener subscribes to
/// @param ignoreSender   when true, events from any sender are delivered
/// @param pSender        sender filter (used only when ignoreSender is false)
/// @param ignoreReceiver when true, events for any receiver are delivered
/// @param pReceiver      receiver filter (used only when ignoreReceiver is false)
/// @param handler_func   type-erased callback; its Listener() is the map key
void CEventManager::connectEvent(const event_id& id, bool ignoreSender, void *pSender,
                                 bool ignoreReceiver, void *pReceiver,
                                 const TEventCallBack& handler_func)
{
    void *pListener = handler_func.Listener();
    assert(pListener);
    if (!pListener)
        return;

    SListenerData data;
    data.handler_func = handler_func;
    data.m_bIgnoreSender = ignoreSender;
    data.m_bIgnoreReceiver = ignoreReceiver;
    data.m_pSender = pSender;
    data.m_pReceiver = pReceiver;
    data.m_bConnected = true; // the listener is now live

    // operator[] default-constructs the inner map for a new event id and
    // either inserts a new entry or overwrites an existing one for this
    // listener -- exactly what the previous find/insert/update branches did,
    // in one statement and with a single lookup per map.
    m_EventHandlers[id][pListener] = data;
}
//-----------------------------------------------------------------------------
///
void CEventManager::disconnectEvent(const event_id& id, void *pListener)
{
TEventHandlerMap::iterator it = m_EventHandlers.find(id);
if (it != m_EventHandlers.end())
{
TListenerMap::iterator lit = it->second.find(pListener);
if (lit != it->second.end())
{
lit->second.m_bConnected = false; // Удаление будет произведено в момент ближайшей рассылки события текущего типа
return;
}
}
}
//-----------------------------------------------------------------------------
/// Dispatches pEvent to every listener registered for its event id whose
/// sender/receiver filters match, erasing entries that were lazily
/// disconnected (m_bConnected == false) along the way.
void CEventManager::sendEvent(const void *pSender, const void *pReceiver, IEvent* pEvent)
{
assert(pEvent && "Null event interface");
TEventHandlerMap::iterator itHandler = pEvent ? m_EventHandlers.find(pEvent->GetID()) : m_EventHandlers.end();
if (itHandler == m_EventHandlers.end())
return;
TListenerMap::iterator itListener = itHandler->second.begin(), itEndListener = itHandler->second.end();
while (itListener != itEndListener)
{
SListenerData &ldata = itListener->second; // registration data for this listener
if (!ldata.m_bConnected) // deferred removal requested by disconnectEvent()
{
itHandler->second.erase(itListener++); // post-increment keeps the iterator valid across erase
continue;
}
if ((ldata.m_bIgnoreSender || ldata.m_pSender == pSender) && (ldata.m_bIgnoreReceiver || ldata.m_pReceiver == pReceiver))
ldata.handler_func(pEvent);
++itListener;
}
}
//-----------------------------------------------------------------------------
/// Менеджер по умолчанию (глобальный)
/*DECL_IN_DLL CEventMgr& bm::EventMgr()
{
static CEventMgr Mgr;
return Mgr;
}*/
}<file_sep>/source/tests/test_signals/main.cpp
#include <iostream>
#include <boost/signals2/signal.hpp>
#include <boost/signals2/shared_connection_block.hpp>
// Slot functor used by the signal examples below; prints a greeting.
struct HelloWorld
{
void operator()() const
{
std::cout << "Hello, World!" << std::endl;
}
};
// Demonstrates explicit disconnection through boost::signals2::connection.
void disconnect_example()
{
boost::signals2::signal<void()> sig;
//[ disconnect_code_snippet
boost::signals2::connection c = sig.connect(HelloWorld());
std::cout << "c is connected\n";
sig(); // Prints "Hello, World!"
c.disconnect(); // Disconnect the HelloWorld object
std::cout << "c is disconnected\n";
sig(); // Does nothing: there are no connected slots
//]
}
// Demonstrates temporarily muting a slot with shared_connection_block.
void block_example()
{
boost::signals2::signal<void()> sig;
//[ block_code_snippet
boost::signals2::connection c = sig.connect(HelloWorld());
std::cout << "c is not blocked.\n";
sig(); // Prints "Hello, World!"
{
boost::signals2::shared_connection_block block(c); // block the slot
std::cout << "c is blocked.\n";
sig(); // No output: the slot is blocked
} // shared_connection_block going out of scope unblocks the slot
std::cout << "c is not blocked.\n";
sig(); // Prints "Hello, World!"
//]
}
// Slot functor for the scoped_connection example below.
struct ShortLived
{
void operator()() const
{
std::cout << "Life is short!" << std::endl;
}
};
// Demonstrates RAII disconnection via boost::signals2::scoped_connection.
void scoped_connection_example()
{
boost::signals2::signal<void()> sig;
//[ scoped_connection_code_snippet
{
boost::signals2::scoped_connection c(sig.connect(ShortLived()));
sig(); // will call ShortLived function object
} // scoped_connection goes out of scope and disconnects
sig(); // ShortLived function object no longer connected to sig
//]
}
//[ disconnect_by_slot_def_code_snippet
// Free-function slots for the disconnect-by-slot example below.
void foo() { std::cout << "foo"; }
void bar() { std::cout << "bar\n"; }
//]
// Demonstrates disconnecting by passing the slot itself to sig.disconnect().
void disconnect_by_slot_example()
{
boost::signals2::signal<void()> sig;
//[ disconnect_by_slot_usage_code_snippet
sig.connect(&foo);
sig.connect(&bar);
sig();
// disconnects foo, but not bar
sig.disconnect(&foo);
sig();
//]
}
// Runs each boost::signals2 demo in turn.
int main()
{
std::cout << "Disconnect example:\n";
disconnect_example();
std::cout << "\nBlock example:\n";
block_example();
std::cout << "\nScoped connection example:\n";
scoped_connection_example();
std::cout << "\nDisconnect by slot example:\n";
disconnect_by_slot_example();
return 0;
}<file_sep>/source/main/main.cpp
#include <QApplication>
#include "gui_basis/mainwindow.h"
#include "kernel/ifc/ICore.h"
#include "Context.h"
using namespace smeta3d;
/// Application entry point: creates the Qt main window, embeds the render
/// context widget as its central widget, and hands the widget's native
/// window handle to the core before entering the Qt event loop.
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
SP_ICore ptrCore = GetSingltonCore();
CMainWindow w;
CContext* context = new CContext(&w); // parented to w, deleted by Qt
w.setCentralWidget(context);
ptrCore->Init((HWND)context->winId()); // NOTE(review): HWND cast is Windows-only -- confirm no other platforms are targeted
w.show();
int nRet = a.exec();
ptrCore->DeInit(); // shut the core down after the event loop exits
return nRet;
}
<file_sep>/cmake/find_dependencies.cmake
####################################################################################
# Find Qt5
####################################################################################
if(MSVC)# look for user-registry pointing to qtcreator
# Derive the Qt install root from QtCreator's registry file association.
get_filename_component(QT_BIN [HKEY_CURRENT_USER\\Software\\Classes\\Applications\\QtProject.QtCreator.cpp\\shell\\Open\\Command] PATH)# get root path so we can search for 5.3, 5.4, 5.5, etc
string(REPLACE "/Tools" ";" QT_BIN "${QT_BIN}")
list(GET QT_BIN 0 QT_BIN)
file(GLOB QT_VERSIONS "${QT_BIN}/5.*")
list(SORT QT_VERSIONS)# assume the latest version will be last alphabetically
list(REVERSE QT_VERSIONS)
list(GET QT_VERSIONS 0 QT_VERSION)
# FIX: the two REPLACE arguments were fused together ("//""/"), which CMake
# only tolerates as legacy unseparated-argument syntax (with a warning);
# write them as two properly separated arguments.
string(REPLACE "//" "/" QT_VERSION "${QT_VERSION}")# fix any double slashes which seem to be common
# Map the MSVC toolchain version to Qt's compiler folder naming
# (e.g. MSVC_VERSION 1910..1999 -> "msvc2017"); see
# https://cmake.org/cmake/help/v3.0/variable/MSVC_VERSION.html
# NOTE(review): toolchains older than VS2017 leave QT_MSVC unset -- confirm
# whether additional mappings are needed.
message(MSVC_VERSION " : ${MSVC_VERSION}")
if(MSVC_VERSION GREATER 1999)
message(WARNING "MSVC ${MSVC_VERSION} not yet supported.")
elseif(MSVC_VERSION GREATER 1909)
set(QT_MSVC 2017)
endif()
message(QT_MSVC " : ${QT_MSVC}")
if(CMAKE_SYSTEM_PROCESSOR MATCHES 64)
set(QT_MSVC "${QT_MSVC}_64")
endif()
set(QT_PATH "${QT_VERSION}/msvc${QT_MSVC}")
message(QT_PATH " : ${QT_PATH}")
set(CMAKE_PREFIX_PATH ${QT_PATH} ${CMAKE_PREFIX_PATH})
endif()# use Qt_DIR approach so you can find Qt after cmake has been invoked
# Qt is located via CMAKE_PREFIX_PATH prepared above (MSVC case).
find_package(Qt5 COMPONENTS Core Gui Widgets) # REQUIRED QUIET
# Boost components required by the project.
set(REQ_MOD thread system iostreams filesystem program_options date_time regex)
find_package( Boost 1.68.0 COMPONENTS ${REQ_MOD} REQUIRED)
find_package(OpenGL)
####################################################################################
# Find Qt5
####################################################################################
# if (Qt5_FOUND)
# set(QT_COMPONENTS Core Gui Widgets Xml Sql Network OpenGL Svg Quick Qml PrintSupport Designer)
#
# if(UNIX)
# if(Qt5_VERSION VERSION_GREATER "5.6.0")
# set(QT_COMPONENTS ${QT_COMPONENTS} X11Extras WebEngineCore WebEngineWidgets WebEngine WebKitWidgets WebKit)
# else()
# set(QT_COMPONENTS ${QT_COMPONENTS} X11Extras WebKitWidgets WebKit)
# endif()
# else()
# if(MSVC12 AND (QT_VER VERSION_GREATER "5.6.0") )
# set(QT_COMPONENTS ${QT_COMPONENTS} WebKitWidgets WebKit)
# else()
# set(QT_COMPONENTS ${QT_COMPONENTS} WebEngineCore WebEngineWidgets WebEngine)
# endif()
# endif()
#
# set(QT_OPTIONAL_COMPONENTS LinguistTools Help) #
#
# find_package(Qt5 5.5.1 REQUIRED COMPONENTS ${QT_COMPONENTS} OPTIONAL_COMPONENTS ${QT_OPTIONAL_COMPONENTS} ) # REQUIRED QUIET
# endif()
#message(FATAL_ERROR "Qt5_FOUND" = ${Qt5_FOUND})
# if (Qt5_FOUND)
# message(STATUS "Configure with Qt5")
#
# set(QT5_FOUND TRUE)
#
# foreach(module ${QT_COMPONENTS} ${QT_OPTIONAL_COMPONENTS})
# set( QT_INCLUDES ${QT_INCLUDES} ${Qt5${module}_INCLUDE_DIRS} )
# string(TOUPPER ${module} module_up)
# set(QT_QT${module_up}_LIBRARY ${Qt5${module}_LIBRARIES})
# endforeach()
#
# list(REMOVE_DUPLICATES QT_INCLUDES)
# get_filename_component(QT_LIBRARY_DIR "${Qt5Core_DIR}/../../" ABSOLUTE)
# if(NOT UNIX)
# get_filename_component(QT_PLUGINS_DIR "${_qt5Core_install_prefix}/plugins" ABSOLUTE)
# else()
# get_filename_component(QT_PLUGINS_DIR "${_qt5Core_install_prefix}/lib/x86_64-linux-gnu/qt5/plugins" ABSOLUTE)
# endif()
# set(QT_USE_FILE ${CMAKE_CURRENT_LIST_DIR}/modules/UseQt5.cmake)
#
# if (NOT UNIX)
# set(_QtWebProcess "QtWebProcess.exe")
# set(_QtWebEngineProcess "QtWebEngineProcess.exe")
# set(_QtWebEngineProcess_debug "QtWebEngineProcess-debug.exe")
# set(_QtWebEngineProcess_debug_2 "QtWebEngineProcessd.exe")
# endif()
#
# find_file(Qt5_QtWebProcess ${_QtWebProcess} PATHS "${_qt5Core_install_prefix}/bin" NO_DEFAULT_PATH NO_SYSTEM_ENVIRONMENT_PATH )
# find_file(Qt5_QtWebEngineProcess ${_QtWebEngineProcess} PATHS "${_qt5Core_install_prefix}/bin" NO_DEFAULT_PATH NO_SYSTEM_ENVIRONMENT_PATH )
# find_file(Qt5_QtWebEngineProcess_DBG ${_QtWebEngineProcess_debug} PATHS "${_qt5Core_install_prefix}/bin" NO_DEFAULT_PATH NO_SYSTEM_ENVIRONMENT_PATH )
#
# if (NOT Qt5_QtWebEngineProcess_DBG)
# find_file(Qt5_QtWebEngineProcess_DBG ${_QtWebEngineProcess_debug_2} PATHS "${_qt5Core_install_prefix}/bin" NO_DEFAULT_PATH NO_SYSTEM_ENVIRONMENT_PATH )
# endif()
<file_sep>/source/basis/base_kernel_impl/Core.cpp
#include "Core.h"
namespace smeta3d
{
/////////////////////////////////////////////////////////////////////
/// Returns the process-wide core instance, creating it on first use.
/// NOTE(review): the lazy check-then-reset is not thread-safe; fine if only
/// ever called from the GUI thread -- confirm.
SP_ICore GetSingltonCore()
{
static SP_ICore ptrCore = nullptr;
if (!ptrCore)
ptrCore.reset(new CCore());
return ptrCore;
}
/////////////////////////////////////////////////////////////////////
/// NOTE(review): m_bInit is not set here; confirm it is initialised in the
/// class definition (Core.h), otherwise IsInit() reads an indeterminate value.
CCore::CCore()
{
}
/////////////////////////////////////////////////////////////////////
/// NOTE(review): does not call DeInit(); confirm callers always DeInit()
/// before destruction (main() does).
CCore::~CCore()
{
}
/////////////////////////////////////////////////////////////////////
/// One-time initialisation: acquires the engine singleton and initialises it
/// with the native window handle.
/// @param HWnd native window the engine renders into
/// @return true when the engine reports successful initialisation,
///         false when already initialised or the engine is unavailable.
bool CCore::Init(const HWND& HWnd)
{
    if (IsInit())
        return false; // not re-entrant
    m_ptrEngine = smeta3d::GetSingltonEngine();
    if (m_ptrEngine)
        m_ptrEngine->Init(HWnd);
    // FIX: guard the IsInit() call -- the original dereferenced m_ptrEngine
    // unconditionally and crashed when GetSingltonEngine() returned null.
    m_bInit = m_ptrEngine && m_ptrEngine->IsInit();
    return IsInit();
}
/////////////////////////////////////////////////////////////////////
/// Reports whether Init() completed successfully and the engine still exists.
bool CCore::IsInit() const
{
    if (!m_ptrEngine)
        return false;
    return m_bInit;
}
/////////////////////////////////////////////////////////////////////
/// Shuts the engine down and clears the initialised flag.
/// Calling it when not initialised is a harmless no-op.
void CCore::DeInit()
{
    if (IsInit())
    {
        if (m_ptrEngine)
        {
            m_ptrEngine->DeInit();
            m_ptrEngine = nullptr;
        }
        m_bInit = false;
    }
}
/////////////////////////////////////////////////////////////////////
/// Accessor for the engine singleton held by the core (may be null before
/// Init() or after DeInit()).
const smeta3d::SP_IEngine& CCore::GetEngine() const
{
return m_ptrEngine;
}
/////////////////////////////////////////////////////////////////////
///
void CCore::Simulate(float fTimePerSec)
{
/*m_pContext->Clear();
m_pMenu->Render();
m_pGame->Update(fTimePerSec);
m_pGame->Render();
m_pContext->Swap();*/
m_ptrEngine->Update(fTimePerSec);
m_ptrEngine->BeginRender();
m_ptrEngine->Render();
m_ptrEngine->EndRender();
}
/// Forwards a viewport resize to the engine.
/// FIX: guard against a null engine -- resize notifications can arrive before
/// Init() has run, and the original dereferenced m_ptrEngine unconditionally,
/// unlike the guarded siblings (Init/DeInit).
void CCore::Resize(int w, int h)
{
    if (m_ptrEngine)
        m_ptrEngine->Resize(w, h);
}
}
<file_sep>/source/render/GLContext.h
#ifndef GLContex_H_
#define GLContex_H_
#include <windows.h>
#include "Shader.h"
/// Wraps a Win32 OpenGL context bound to an HWND (implementation in the .cpp).
class GLContext
{
public:
GLContext(const HWND& HWnd);
~GLContext();
void Clear();
void Swap();
void Resize(int w, int h);
void InitShader();
private:
bool CreateContext(const HWND& HWnd);
private:
struct SAppContext* m_pAppContext; // opaque per-context state, defined in the .cpp
Shader* m_pShader; // NOTE(review): raw pointer -- confirm ownership/deletion in the dtor
};
#endif /* GLContex_H_ */
<file_sep>/include/system/ifc/ievent.h
#ifndef _IEVENT_
#define _IEVENT_
#include <system/uuid.h>
namespace smeta3d
{
using event_id = CUUID;
/// Base interface for all events dispatched through CEventManager.
class IEvent
{
public:
    /// FIX: made virtual -- events are handled (and may be destroyed) through
    /// IEvent*, and deleting a derived event via a base pointer with a
    /// non-virtual destructor is undefined behaviour.
    virtual ~IEvent() {};
    /// Unique type id of the concrete event (see DECL_EVENT in this header).
    virtual const event_id& GetID() const = 0;
};
} // end namespace smeta3d
// Boilerplate for concrete event classes: defines the static per-class id
// (parsed once from the CLASS_EVENT##_STR GUID string) and overrides GetID().
// FIX: removed the line-continuation backslash after the final '}' -- it
// spliced the following source line (the include guard's #endif) into the
// macro body, breaking the preprocessor.
#define DECL_EVENT(CLASS_EVENT) \
public: \
static const smeta3d::event_id& GetCurrentID() \
{ \
static smeta3d::event_id s_id = smeta3d::CUUID(CLASS_EVENT##_STR); \
return s_id; \
} \
virtual const smeta3d::event_id& GetID() const \
{ \
return CLASS_EVENT::GetCurrentID(); \
}
#endif // _IEVENT_
<file_sep>/include/system/uuid.h
#ifndef _UUID_
#define _UUID_
#include <string>
#include <boost/uuid/uuid.hpp>
namespace smeta3d
{
/// Thin wrapper around boost::uuids::uuid used as an identifier type
/// (event ids, etc.).
/// NOTE(review): __declspec(dllexport) is hard-coded; importing translation
/// units would normally need dllimport -- confirm against the build setup.
class __declspec(dllexport) CUUID final
{
public:
    /// FIX: value-initialise the wrapped uuid. boost::uuids::uuid is a POD
    /// aggregate, so the previous empty default ctor left it indeterminate
    /// and default-constructed CUUIDs compared nondeterministically.
    CUUID() : m_UUID() {}
    /// Parses strUUID (e.g. "01234567-89ab-...") into the wrapped uuid.
    CUUID(const std::string& strUUID);
    void FromStr(const std::string& strUUID);
    std::string ToStr() const;
    /// Produces a new random uuid.
    static CUUID Generate();
private:
    // operator< needs the raw uuid so CUUID can be used as a std::map key.
    friend inline bool operator<(const smeta3d::CUUID& lft_, const smeta3d::CUUID& rgh_);
    boost::uuids::uuid m_UUID;
};
/// Strict weak ordering so CUUID can serve as an associative-container key.
inline bool operator<(const smeta3d::CUUID& lft_, const smeta3d::CUUID& rgh_)
{
return lft_.m_UUID < rgh_.m_UUID;
}
} // end namespace smeta3d
#endif // _UUID_
<file_sep>/source/render/GL/gl.h
#ifndef __gl_h_
#define __gl_h_
#ifdef __cplusplus
extern "C" {
#endif
/*
** Copyright (c) 2007-2012 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a
** copy of this software and/or associated documentation files (the
** "Materials"), to deal in the Materials without restriction, including
** without limitation the rights to use, copy, modify, merge, publish,
** distribute, sublicense, and/or sell copies of the Materials, and to
** permit persons to whom the Materials are furnished to do so, subject to
** the following conditions:
**
** The above copyright notice and this permission notice shall be included
** in all copies or substantial portions of the Materials.
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
#ifndef APIENTRY
#define APIENTRY
#endif
#ifndef WIN32
#define WINGDIAPI
#endif
/* Base GL types */
typedef unsigned int GLenum;
typedef unsigned char GLboolean;
typedef unsigned int GLbitfield;
typedef signed char GLbyte;
typedef short GLshort;
typedef int GLint;
typedef int GLsizei;
typedef unsigned char GLubyte;
typedef unsigned short GLushort;
typedef unsigned int GLuint;
typedef unsigned short GLhalf;
typedef float GLfloat;
typedef float GLclampf;
typedef double GLdouble;
typedef double GLclampd;
typedef void GLvoid;
/*************************************************************/
#ifndef GL_VERSION_1_1
/* AttribMask */
#define GL_DEPTH_BUFFER_BIT 0x00000100
#define GL_STENCIL_BUFFER_BIT 0x00000400
#define GL_COLOR_BUFFER_BIT 0x00004000
/* Boolean */
#define GL_FALSE 0
#define GL_TRUE 1
/* BeginMode */
#define GL_POINTS 0x0000
#define GL_LINES 0x0001
#define GL_LINE_LOOP 0x0002
#define GL_LINE_STRIP 0x0003
#define GL_TRIANGLES 0x0004
#define GL_TRIANGLE_STRIP 0x0005
#define GL_TRIANGLE_FAN 0x0006
#define GL_QUADS 0x0007
/* AlphaFunction */
#define GL_NEVER 0x0200
#define GL_LESS 0x0201
#define GL_EQUAL 0x0202
#define GL_LEQUAL 0x0203
#define GL_GREATER 0x0204
#define GL_NOTEQUAL 0x0205
#define GL_GEQUAL 0x0206
#define GL_ALWAYS 0x0207
/* BlendingFactorDest */
#define GL_ZERO 0
#define GL_ONE 1
#define GL_SRC_COLOR 0x0300
#define GL_ONE_MINUS_SRC_COLOR 0x0301
#define GL_SRC_ALPHA 0x0302
#define GL_ONE_MINUS_SRC_ALPHA 0x0303
#define GL_DST_ALPHA 0x0304
#define GL_ONE_MINUS_DST_ALPHA 0x0305
/* BlendingFactorSrc */
#define GL_DST_COLOR 0x0306
#define GL_ONE_MINUS_DST_COLOR 0x0307
#define GL_SRC_ALPHA_SATURATE 0x0308
/* DrawBufferMode */
#define GL_NONE 0
#define GL_FRONT_LEFT 0x0400
#define GL_FRONT_RIGHT 0x0401
#define GL_BACK_LEFT 0x0402
#define GL_BACK_RIGHT 0x0403
#define GL_FRONT 0x0404
#define GL_BACK 0x0405
#define GL_LEFT 0x0406
#define GL_RIGHT 0x0407
#define GL_FRONT_AND_BACK 0x0408
/* ErrorCode */
#define GL_NO_ERROR 0
#define GL_INVALID_ENUM 0x0500
#define GL_INVALID_VALUE 0x0501
#define GL_INVALID_OPERATION 0x0502
#define GL_OUT_OF_MEMORY 0x0505
/* FrontFaceDirection */
#define GL_CW 0x0900
#define GL_CCW 0x0901
/* GetPName */
#define GL_POINT_SIZE 0x0B11
#define GL_POINT_SIZE_RANGE 0x0B12
#define GL_POINT_SIZE_GRANULARITY 0x0B13
#define GL_LINE_SMOOTH 0x0B20
#define GL_LINE_WIDTH 0x0B21
#define GL_LINE_WIDTH_RANGE 0x0B22
#define GL_LINE_WIDTH_GRANULARITY 0x0B23
#define GL_LINE_STIPPLE 0x0B24
#define GL_POLYGON_MODE 0x0B40
#define GL_POLYGON_SMOOTH 0x0B41
#define GL_CULL_FACE 0x0B44
#define GL_CULL_FACE_MODE 0x0B45
#define GL_FRONT_FACE 0x0B46
#define GL_DEPTH_RANGE 0x0B70
#define GL_DEPTH_TEST 0x0B71
#define GL_DEPTH_WRITEMASK 0x0B72
#define GL_DEPTH_CLEAR_VALUE 0x0B73
#define GL_DEPTH_FUNC 0x0B74
#define GL_STENCIL_TEST 0x0B90
#define GL_STENCIL_CLEAR_VALUE 0x0B91
#define GL_STENCIL_FUNC 0x0B92
#define GL_STENCIL_VALUE_MASK 0x0B93
#define GL_STENCIL_FAIL 0x0B94
#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95
#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96
#define GL_STENCIL_REF 0x0B97
#define GL_STENCIL_WRITEMASK 0x0B98
#define GL_VIEWPORT 0x0BA2
#define GL_ALPHA_TEST 0x0BC0
#define GL_DITHER 0x0BD0
#define GL_BLEND_DST 0x0BE0
#define GL_BLEND_SRC 0x0BE1
#define GL_BLEND 0x0BE2
#define GL_LOGIC_OP_MODE 0x0BF0
#define GL_COLOR_LOGIC_OP 0x0BF2
#define GL_DRAW_BUFFER 0x0C01
#define GL_READ_BUFFER 0x0C02
#define GL_SCISSOR_BOX 0x0C10
#define GL_SCISSOR_TEST 0x0C11
#define GL_COLOR_CLEAR_VALUE 0x0C22
#define GL_COLOR_WRITEMASK 0x0C23
#define GL_DOUBLEBUFFER 0x0C32
#define GL_STEREO 0x0C33
#define GL_LINE_SMOOTH_HINT 0x0C52
#define GL_POLYGON_SMOOTH_HINT 0x0C53
#define GL_UNPACK_SWAP_BYTES 0x0CF0
#define GL_UNPACK_LSB_FIRST 0x0CF1
#define GL_UNPACK_ROW_LENGTH 0x0CF2
#define GL_UNPACK_SKIP_ROWS 0x0CF3
#define GL_UNPACK_SKIP_PIXELS 0x0CF4
#define GL_UNPACK_ALIGNMENT 0x0CF5
#define GL_PACK_SWAP_BYTES 0x0D00
#define GL_PACK_LSB_FIRST 0x0D01
#define GL_PACK_ROW_LENGTH 0x0D02
#define GL_PACK_SKIP_ROWS 0x0D03
#define GL_PACK_SKIP_PIXELS 0x0D04
#define GL_PACK_ALIGNMENT 0x0D05
#define GL_MAX_TEXTURE_SIZE 0x0D33
#define GL_MAX_VIEWPORT_DIMS 0x0D3A
#define GL_SUBPIXEL_BITS 0x0D50
#define GL_TEXTURE_1D 0x0DE0
#define GL_TEXTURE_2D 0x0DE1
#define GL_POLYGON_OFFSET_UNITS 0x2A00
#define GL_POLYGON_OFFSET_POINT 0x2A01
#define GL_POLYGON_OFFSET_LINE 0x2A02
#define GL_POLYGON_OFFSET_FILL 0x8037
#define GL_POLYGON_OFFSET_FACTOR 0x8038
#define GL_TEXTURE_BINDING_1D 0x8068
#define GL_TEXTURE_BINDING_2D 0x8069
/* GetTextureParameter */
#define GL_TEXTURE_WIDTH 0x1000
#define GL_TEXTURE_HEIGHT 0x1001
#define GL_TEXTURE_INTERNAL_FORMAT 0x1003
#define GL_TEXTURE_BORDER_COLOR 0x1004
#define GL_TEXTURE_RED_SIZE 0x805C
#define GL_TEXTURE_GREEN_SIZE 0x805D
#define GL_TEXTURE_BLUE_SIZE 0x805E
#define GL_TEXTURE_ALPHA_SIZE 0x805F
/* HintMode */
#define GL_DONT_CARE 0x1100
#define GL_FASTEST 0x1101
#define GL_NICEST 0x1102
/* DataType */
#define GL_BYTE 0x1400
#define GL_UNSIGNED_BYTE 0x1401
#define GL_SHORT 0x1402
#define GL_UNSIGNED_SHORT 0x1403
#define GL_INT 0x1404
#define GL_UNSIGNED_INT 0x1405
#define GL_FLOAT 0x1406
#define GL_DOUBLE 0x140A
/* ErrorCode */
#define GL_STACK_OVERFLOW 0x0503
#define GL_STACK_UNDERFLOW 0x0504
/* LogicOp */
#define GL_CLEAR 0x1500
#define GL_AND 0x1501
#define GL_AND_REVERSE 0x1502
#define GL_COPY 0x1503
#define GL_AND_INVERTED 0x1504
#define GL_NOOP 0x1505
#define GL_XOR 0x1506
#define GL_OR 0x1507
#define GL_NOR 0x1508
#define GL_EQUIV 0x1509
#define GL_INVERT 0x150A
#define GL_OR_REVERSE 0x150B
#define GL_COPY_INVERTED 0x150C
#define GL_OR_INVERTED 0x150D
#define GL_NAND 0x150E
#define GL_SET 0x150F
/* MatrixMode (for gl3.h, FBO attachment type) */
#define GL_TEXTURE 0x1702
/* PixelCopyType */
#define GL_COLOR 0x1800
#define GL_DEPTH 0x1801
#define GL_STENCIL 0x1802
/* PixelFormat */
#define GL_STENCIL_INDEX 0x1901
#define GL_DEPTH_COMPONENT 0x1902
#define GL_RED 0x1903
#define GL_GREEN 0x1904
#define GL_BLUE 0x1905
#define GL_ALPHA 0x1906
#define GL_RGB 0x1907
#define GL_RGBA 0x1908
/* PolygonMode */
#define GL_POINT 0x1B00
#define GL_LINE 0x1B01
#define GL_FILL 0x1B02
/* StencilOp */
#define GL_KEEP 0x1E00
#define GL_REPLACE 0x1E01
#define GL_INCR 0x1E02
#define GL_DECR 0x1E03
/* StringName */
#define GL_VENDOR 0x1F00
#define GL_RENDERER 0x1F01
#define GL_VERSION 0x1F02
#define GL_EXTENSIONS 0x1F03
/* TextureMagFilter */
#define GL_NEAREST 0x2600
#define GL_LINEAR 0x2601
/* TextureMinFilter */
#define GL_NEAREST_MIPMAP_NEAREST 0x2700
#define GL_LINEAR_MIPMAP_NEAREST 0x2701
#define GL_NEAREST_MIPMAP_LINEAR 0x2702
#define GL_LINEAR_MIPMAP_LINEAR 0x2703
/* TextureParameterName */
#define GL_TEXTURE_MAG_FILTER 0x2800
#define GL_TEXTURE_MIN_FILTER 0x2801
#define GL_TEXTURE_WRAP_S 0x2802
#define GL_TEXTURE_WRAP_T 0x2803
/* TextureTarget */
#define GL_PROXY_TEXTURE_1D 0x8063
#define GL_PROXY_TEXTURE_2D 0x8064
/* TextureWrapMode */
#define GL_CLAMP 0x2900
#define GL_REPEAT 0x2901
/* PixelInternalFormat */
#define GL_R3_G3_B2 0x2A10
#define GL_RGB4 0x804F
#define GL_RGB5 0x8050
#define GL_RGB8 0x8051
#define GL_RGB10 0x8052
#define GL_RGB12 0x8053
#define GL_RGB16 0x8054
#define GL_RGBA2 0x8055
#define GL_RGBA4 0x8056
#define GL_RGB5_A1 0x8057
#define GL_RGBA8 0x8058
#define GL_RGB10_A2 0x8059
#define GL_RGBA12 0x805A
#define GL_RGBA16 0x805B
#endif
/*************************************************************/
#ifndef GL_VERSION_1_0
#define GL_VERSION_1_0 1
WINGDIAPI void APIENTRY glCullFace (GLenum mode);
WINGDIAPI void APIENTRY glFrontFace (GLenum mode);
WINGDIAPI void APIENTRY glHint (GLenum target, GLenum mode);
WINGDIAPI void APIENTRY glLineWidth (GLfloat width);
WINGDIAPI void APIENTRY glPointSize (GLfloat size);
WINGDIAPI void APIENTRY glPolygonMode (GLenum face, GLenum mode);
WINGDIAPI void APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height);
WINGDIAPI void APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param);
WINGDIAPI void APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params);
WINGDIAPI void APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param);
WINGDIAPI void APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params);
WINGDIAPI void APIENTRY glTexImage1D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid *pixels);
WINGDIAPI void APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid *pixels);
WINGDIAPI void APIENTRY glDrawBuffer (GLenum mode);
WINGDIAPI void APIENTRY glClear (GLbitfield mask);
WINGDIAPI void APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
/* FIX: these two prototypes were fused onto a single line. */
WINGDIAPI void APIENTRY glAlphaFunc (GLenum func, GLclampf ref);
WINGDIAPI void APIENTRY glClearStencil (GLint s);
WINGDIAPI void APIENTRY glClearDepth (GLdouble depth);
WINGDIAPI void APIENTRY glStencilMask (GLuint mask);
WINGDIAPI void APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
WINGDIAPI void APIENTRY glDepthMask (GLboolean flag);
WINGDIAPI void APIENTRY glDisable (GLenum cap);
WINGDIAPI void APIENTRY glEnable (GLenum cap);
WINGDIAPI void APIENTRY glFinish (void);
WINGDIAPI void APIENTRY glFlush (void);
WINGDIAPI void APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor);
WINGDIAPI void APIENTRY glLogicOp (GLenum opcode);
WINGDIAPI void APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask);
WINGDIAPI void APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass);
WINGDIAPI void APIENTRY glDepthFunc (GLenum func);
WINGDIAPI void APIENTRY glPixelStoref (GLenum pname, GLfloat param);
WINGDIAPI void APIENTRY glPixelStorei (GLenum pname, GLint param);
WINGDIAPI void APIENTRY glReadBuffer (GLenum mode);
WINGDIAPI void APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLvoid *pixels);
WINGDIAPI void APIENTRY glGetBooleanv (GLenum pname, GLboolean *params);
WINGDIAPI void APIENTRY glGetDoublev (GLenum pname, GLdouble *params);
WINGDIAPI GLenum APIENTRY glGetError (void);
WINGDIAPI void APIENTRY glGetFloatv (GLenum pname, GLfloat *params);
WINGDIAPI void APIENTRY glGetIntegerv (GLenum pname, GLint *params);
WINGDIAPI const GLubyte * APIENTRY glGetString (GLenum name);
WINGDIAPI void APIENTRY glGetTexImage (GLenum target, GLint level, GLenum format, GLenum type, GLvoid *pixels);
WINGDIAPI void APIENTRY glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params);
WINGDIAPI void APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params);
WINGDIAPI void APIENTRY glGetTexLevelParameterfv (GLenum target, GLint level, GLenum pname, GLfloat *params);
WINGDIAPI void APIENTRY glGetTexLevelParameteriv (GLenum target, GLint level, GLenum pname, GLint *params);
WINGDIAPI GLboolean APIENTRY glIsEnabled (GLenum cap);
WINGDIAPI void APIENTRY glDepthRange (GLdouble near, GLdouble far);
WINGDIAPI void APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height);
#endif
#ifndef GL_VERSION_1_1
#define GL_VERSION_1_1 1
WINGDIAPI void APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count);
WINGDIAPI void APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices);
WINGDIAPI void APIENTRY glGetPointerv (GLenum pname, GLvoid* *params);
WINGDIAPI void APIENTRY glLineStipple (GLint factor, GLushort pattern);
WINGDIAPI void APIENTRY glPolygonOffset (GLfloat factor, GLfloat units);
WINGDIAPI void APIENTRY glCopyTexImage1D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border);
WINGDIAPI void APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
WINGDIAPI void APIENTRY glCopyTexSubImage1D (GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width);
WINGDIAPI void APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
WINGDIAPI void APIENTRY glTexSubImage1D (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const GLvoid *pixels);
WINGDIAPI void APIENTRY glTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels);
WINGDIAPI void APIENTRY glBindTexture (GLenum target, GLuint texture);
WINGDIAPI void APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures);
WINGDIAPI void APIENTRY glGenTextures (GLsizei n, GLuint *textures);
WINGDIAPI GLboolean APIENTRY glIsTexture (GLuint texture);
#endif
#ifndef GL_GLEXT_LEGACY
#include <GL/glext.h>
#endif
#ifdef __cplusplus
}
#endif
#endif /* __gl_h_ */
<file_sep>/source/main/Context.h
#ifndef CContext_H
#define CContext_H
#include <QWidget>
#include <QTime>
#include <qdatetime.h>
// Render-surface widget: owns a zero-interval timer that drives the engine
// simulation loop and forwards resize events to the core (see Context.cpp).
class CContext : public QWidget
{
public:
CContext(QWidget* pParent);
~CContext();
// Fires on every tick of m_TimerID_25; drives simulation and FPS counting.
void timerEvent(QTimerEvent *pEvent);
virtual void resizeEvent(QResizeEvent *);
// Returning NULL disables Qt's own paint engine so an external renderer
// (OpenGL) can draw directly on this widget's native surface.
QPaintEngine *paintEngine() const {return NULL;}
/*void keyPressEvent(QKeyEvent *);
void keyReleaseEvent(QKeyEvent *);
bool event(QEvent *);*/
private:
int m_TimerID_25; // id of the timer started with interval 0 (fires every event-loop pass)
QTime m_Time; // measures elapsed time between timer ticks
int countframe; // frames counted since the last FPS report
float sec; // milliseconds accumulated since the last FPS report
};
#endif
<file_sep>/source/gui_basis/CMakeLists.txt
######################################################################################
######################################################################################
# Collect sources, private/public headers and Qt .ui forms for the gui_basis DLL.
file(GLOB PRIVATE_HEADERS *.h)
file(GLOB PUBLIC_HEADERS ${CMAKE_SOURCE_DIR}/include/gui_basis/*.h)
file(GLOB SOURCES *.cpp)
file(GLOB FORM_FILES *.ui)
list(SORT PRIVATE_HEADERS)
list(SORT PUBLIC_HEADERS)
list(SORT SOURCES)
# Run Qt's moc over the public headers (they contain Q_OBJECT classes)
# and uic over the .ui forms.
qt5_wrap_cpp(MOC_OUT ${PUBLIC_HEADERS} )
qt5_wrap_ui(UI_OUT ${FORM_FILES})
set(ADDITION_COMPILE_FLAGS "-DQT_DLL -DQT_GUI_LIB -DQT_CORE_LIB -DQT_THREAD_SUPPORT") #QT_NO_DEBUG
if(MSVC)
set(ADDITION_COMPILE_FLAGS "${ADDITION_COMPILE_FLAGS} -DQT_LARGEFILE_SUPPORT=64")
endif()
# Fix: dropped the stray ${_WIN32} argument. The WIN32 keyword is only
# meaningful for add_executable(); inside add_library() it would be treated
# as a (non-existent) source file whenever the variable happened to be set.
add_library(gui_basis SHARED ${PRIVATE_HEADERS} ${PUBLIC_HEADERS} ${SOURCES} ${MOC_OUT} ${UI_OUT})
##set_executable_output_postfix( gui_basis)
#set_version_and_label( ${CURRENT_LIBRARY_NAME} )
set_target_properties( gui_basis PROPERTIES
COMPILE_FLAGS "${ADDITION_COMPILE_FLAGS}"
)
# The binary dir contains the uic-generated ui_*.h files.
include_directories(${CMAKE_CURRENT_BINARY_DIR})
message(Qt5Widgets_LIBRARIES ": ${Qt5Widgets_LIBRARIES}")
set(LIBRARIES ${Qt5Widgets_LIBRARIES} )
target_link_libraries(gui_basis ${LIBRARIES})
<file_sep>/include/gui_basis/mainwindow.h
#ifndef MAIN_WINDOW_H
#define MAIN_WINDOW_H
#include <QMainWindow>
// Export macro: symbols are exported when building the gui_basis shared
// library (CMake defines gui_basis_EXPORTS for the target by default) and
// imported by consumers. NOTE(review): __declspec is MSVC/Windows-only —
// presumably fine for this project, but confirm if other platforms matter.
#ifndef GUI_BASIS_EXPORT
#if defined(gui_basis_EXPORTS)
#define GUI_BASIS_EXPORT __declspec(dllexport)
#else
#define GUI_BASIS_EXPORT __declspec(dllimport)
#endif
#endif
namespace Ui
{
class MainWindow; // uic-generated form class (ui_mainwindow.h)
}
// Application main window; holds the uic-generated UI object.
class GUI_BASIS_EXPORT CMainWindow : public QMainWindow
{
Q_OBJECT
public:
CMainWindow(QWidget *parent = 0, Qt::WindowFlags flags = 0);
~CMainWindow();
private:
Ui::MainWindow* m_pUI; // presumably owned by this window — confirm in the .cpp
};
#endif // MAIN_WINDOW_H
<file_sep>/source/render/engine/gl_render_system/GLContext.h
#ifndef GLContex_H_
#define GLContex_H_
#include <windows.h>
// Fix: use forward slashes — backslashes in #include paths are not portable
// and only work by accident on MSVC.
#include "render/engine/ifc/IContext.h"
namespace smeta3d
{
// OpenGL implementation of IContext: owns the WGL rendering context (HGLRC)
// and device context (HDC) for the window handle given to the constructor.
class GLContext : public IContext
{
public:
// Builds the GL context for HWnd; throws int(1) on failure.
GLContext(const HWND& HWnd);
~GLContext();
DECL_PUBLIC_ICONTEXT
private:
// Creates (or recreates) the WGL context; returns false on failure.
bool CreateContext(const HWND& HWnd);
private:
struct SAppContext* m_pAppContext; // owned; holds HDC/HGLRC state
};
}
#endif /* GLContex_H_ */
<file_sep>/source/render/engine/gl_render_system/GLContext.cpp
#include "GLContext.h"
#include "../extern/glew/include/GL/glew.h"
#include "../extern/glew/include/GL/wglew.h"
/*
#include "../extern/glew/include/GL/eglew.h"
#include "../extern/glew/include/GL/wglew.h"
#include <gl/GL.h>*/
namespace smeta3d
{
// Per-window WGL state owned by GLContext.
struct SAppContext
{
HDC hdc = NULL; // device context of the target window
HGLRC context = NULL; // OpenGL rendering context
// Extension entry point; resolved from a throwaway context in CreateContext().
PFNWGLCHOOSEPIXELFORMATARBPROC wglChoosePixelFormatARB = NULL;
};
// Creates the WGL context for HWnd.
// Throws int(1) on failure — exception type kept unchanged so existing
// catch sites keep working.
GLContext::GLContext(const HWND& HWnd)
{
m_pAppContext = new SAppContext();
bool bOk = CreateContext(HWnd);
if (!bOk)
{
// Fix: the destructor does not run when a constructor throws, so the
// app-context must be released here or it leaks on failure.
delete m_pAppContext;
m_pAppContext = NULL;
throw int(1);
}
}
// Releases the per-window state container.
// NOTE(review): the HGLRC/HDC held inside m_pAppContext are not released
// here — confirm whether a wglDeleteContext/ReleaseDC pair is needed.
GLContext::~GLContext()
{
delete m_pAppContext;
m_pAppContext = NULL;
}
// Builds the WGL rendering context for HWnd using the two-stage WGL dance:
// a throwaway "simple" context is created first purely to resolve the
// wglChoosePixelFormatARB extension entry point (wglGetProcAddress requires
// a current context), then the real context is created with ARB pixel-format
// attributes. Returns true on success, false on failure; partially acquired
// resources are released before returning.
bool GLContext::CreateContext(const HWND& HWnd)
{
// Drop any previous context. (The original duplicated this NULL check on
// two consecutive lines; collapsed to one — behavior is identical.)
if (m_pAppContext->context != NULL)
{
wglMakeCurrent(m_pAppContext->hdc, NULL);
wglDeleteContext(m_pAppContext->context);
m_pAppContext->context = NULL;
}
// get new hdc
if (m_pAppContext->hdc != NULL)
{
ReleaseDC(HWnd, m_pAppContext->hdc);
m_pAppContext->hdc = NULL;
}
m_pAppContext->hdc = GetDC(HWnd);
if (m_pAppContext->hdc == NULL)
{
return false;
}
// pixel format descriptor: 32-bit RGBA color, 24-bit depth, 8-bit stencil,
// double-buffered, swap-exchange.
PIXELFORMATDESCRIPTOR pfd =
{
sizeof(PIXELFORMATDESCRIPTOR), 1, PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER | PFD_SWAP_EXCHANGE,
PFD_TYPE_RGBA, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 8, 0, PFD_MAIN_PLANE, 0, 0, 0, 0
};
// Stage 1: create a temporary context solely to resolve wglChoosePixelFormatARB.
if (m_pAppContext->wglChoosePixelFormatARB == NULL) {
// choose pixel format via the classic API
int pixelformat = ChoosePixelFormat(m_pAppContext->hdc, &pfd);
SetPixelFormat(m_pAppContext->hdc, pixelformat, &pfd);
// create simple context
HGLRC old_context = m_pAppContext->context;
m_pAppContext->context = wglCreateContext(m_pAppContext->hdc);
if (m_pAppContext->context == NULL) {
ReleaseDC(HWnd, m_pAppContext->hdc);
m_pAppContext->hdc = NULL;
return false;
}
// share display lists with the previous context, if any
if (old_context && wglShareLists(old_context, m_pAppContext->context) == 0) {
wglDeleteContext(m_pAppContext->context);
ReleaseDC(HWnd, m_pAppContext->hdc);
m_pAppContext->context = NULL;
m_pAppContext->hdc = NULL;
return false;
}
// make the temporary context current
if (wglMakeCurrent(m_pAppContext->hdc, m_pAppContext->context) == 0) {
wglDeleteContext(m_pAppContext->context);
ReleaseDC(HWnd, m_pAppContext->hdc);
m_pAppContext->context = NULL;
m_pAppContext->hdc = NULL;
return false;
}
// resolve the extension entry point
m_pAppContext->wglChoosePixelFormatARB = (PFNWGLCHOOSEPIXELFORMATARBPROC)wglGetProcAddress("wglChoosePixelFormatARB");
if (m_pAppContext->wglChoosePixelFormatARB == NULL) {
wglMakeCurrent(m_pAppContext->hdc, NULL);
wglDeleteContext(m_pAppContext->context);
ReleaseDC(HWnd, m_pAppContext->hdc);
m_pAppContext->context = NULL;
m_pAppContext->hdc = NULL;
return false;
}
// destroy the temporary context; the real one is created below
wglMakeCurrent(m_pAppContext->hdc, NULL);
wglDeleteContext(m_pAppContext->context);
m_pAppContext->context = NULL;
}
// Stage 2: pick the real pixel format via the ARB extension.
GLint attributes[] = {
WGL_DRAW_TO_WINDOW_ARB, GL_TRUE,
WGL_ACCELERATION_ARB, WGL_FULL_ACCELERATION_ARB,
WGL_DOUBLE_BUFFER_ARB, GL_TRUE,
WGL_COLOR_BITS_ARB, 32,
WGL_DEPTH_BITS_ARB, 24,
WGL_STENCIL_BITS_ARB, 8,
0,
};
// choose pixel format; fall back to the classic API if no ARB format matched
int pixelformat;
unsigned int num_formats;
m_pAppContext->wglChoosePixelFormatARB(m_pAppContext->hdc, attributes, NULL, 1, &pixelformat, &num_formats);
if (num_formats == 0) pixelformat = ChoosePixelFormat(m_pAppContext->hdc, &pfd);
// set pixel format
SetPixelFormat(m_pAppContext->hdc, pixelformat, &pfd);
HGLRC old_context = m_pAppContext->context;
// create the real context
m_pAppContext->context = wglCreateContext(m_pAppContext->hdc);
if (m_pAppContext->context == NULL) {
ReleaseDC(HWnd, m_pAppContext->hdc);
m_pAppContext->hdc = NULL;
return false;
}
// share display lists with the previous context, if any
if (old_context && wglShareLists(old_context, m_pAppContext->context) == 0) {
wglDeleteContext(m_pAppContext->context);
ReleaseDC(HWnd, m_pAppContext->hdc);
m_pAppContext->context = NULL;
m_pAppContext->hdc = NULL;
return false;
}
// make the real context current
if (wglMakeCurrent(m_pAppContext->hdc, m_pAppContext->context) == 0) {
wglDeleteContext(m_pAppContext->context);
ReleaseDC(HWnd, m_pAppContext->hdc);
m_pAppContext->context = NULL;
m_pAppContext->hdc = NULL;
return false;
}
glewInit();
return true;
//Resize(0, 0);
}
// Clears the color and depth buffers.
// Bug fix: glClearColor only sets state for *subsequent* glClear calls, so it
// must be issued before glClear — the original order cleared the first frame
// with the previously configured (default) clear color.
void GLContext::Clear()
{
glClearColor(0, 0, 1, 1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
// Presents the back buffer of this context's device context.
void GLContext::Swap()
{
SwapBuffers(m_pAppContext->hdc);
}
// Updates the GL viewport to cover the full w x h client area.
void GLContext::Resize(int w, int h)
{
glViewport(0, 0, w, h);
}
}
<file_sep>/source/main/CMakeLists.txt
######################################################################################
######################################################################################
# Collect sources, headers and Qt .ui forms for the main executable.
file(GLOB PRIVATE_HEADERS *.h)
file(GLOB SOURCES *.cpp)
file(GLOB FORM_FILES *.ui)
list(SORT PRIVATE_HEADERS)
list(SORT SOURCES)
# moc the private headers (Q_OBJECT widgets) and uic the forms.
qt5_wrap_cpp(MOC_OUT ${PRIVATE_HEADERS})
qt5_wrap_ui(UI_OUT ${FORM_FILES})
message(MOC_OUT " : ${MOC_OUT}")
message(UI_OUT " : ${UI_OUT}")
set(ADDITION_COMPILE_FLAGS "-DMY_EXE")
set(ADDITION_LINK_FLAGS "")
if(MSVC)
set(ADDITION_COMPILE_FLAGS "${ADDITION_COMPILE_FLAGS}")
# Build a GUI-subsystem app (no console) but keep the portable main()
# entry point instead of WinMain.
set(ADDITION_LINK_FLAGS "${ADDITION_LINK_FLAGS} /ENTRY:mainCRTStartup" )
set(_WIN32 "WIN32")
endif()
add_executable(main ${_WIN32} ${PRIVATE_HEADERS} ${SOURCES} ${MOC_OUT} ${UI_OUT})
##set_executable_output_postfix( my_project)
#set_version_and_label( ${CURRENT_LIBRARY_NAME} )
set_target_properties( main PROPERTIES
COMPILE_FLAGS "${ADDITION_COMPILE_FLAGS}"
LINK_FLAGS "${ADDITION_LINK_FLAGS}"
ENABLE_EXPORTS FALSE
)
# The binary dir contains the uic-generated ui_*.h files.
include_directories(${CMAKE_CURRENT_BINARY_DIR})
message(Qt5Widgets_LIBRARIES ": ${Qt5Widgets_LIBRARIES}")
set(LIBRARIES ${Qt5Widgets_LIBRARIES} gui_basis kernel system)
target_link_libraries(main ${LIBRARIES})
<file_sep>/source/system/uuid.cpp
#include "system/uuid.h"
#include <boost/uuid/uuid_generators.hpp>
#include <boost/uuid/uuid_io.hpp>
namespace smeta3d
{
// Constructs a UUID by parsing its textual representation.
// Malformed input is silently ignored (see FromStr).
CUUID::CUUID(const std::string & strUUID)
{
FromStr(strUUID);
}
// Parses strUUID_ into m_UUID.
// Best-effort by design: boost's string_generator throws on malformed input,
// and the exception is swallowed, leaving m_UUID unchanged. Callers receive
// no error indication.
void CUUID::FromStr(const std::string & strUUID_)
{
boost::uuids::string_generator gen;
try
{
m_UUID = gen(strUUID_);
}
catch (...)
{
// Intentionally ignored: an invalid string leaves the UUID untouched.
}
}
// Returns the canonical textual form of this UUID.
std::string CUUID::ToStr() const
{
return boost::uuids::to_string(m_UUID);
}
// Creates a new randomly generated UUID.
CUUID CUUID::Generate()
{
boost::uuids::random_generator gen;
CUUID result;
result.m_UUID = gen();
return result;
}
}<file_sep>/source/main/Context.cpp
#include "Context.h"
#include <QResizeEvent>
#include <QPainter>
#include <QMainWindow>
#include <QStatusBar>
#include "kernel/ifc/ICore.h"
CContext::CContext(QWidget* pParent):
QWidget(pParent)
{
// Qt must not paint this widget itself: the engine renders directly to the
// native surface (paintEngine() returns NULL in the header).
setAttribute(Qt::WA_PaintOnScreen);
// Interval 0: timerEvent fires on every event-loop pass (busy render loop).
m_TimerID_25 = startTimer( 0 );
m_Time.start();
countframe = 0; // frames since the last FPS report
sec = 0; // milliseconds accumulated since the last FPS report
}
CContext::~CContext()
{
// Stop the render-driving timer started in the constructor.
killTimer(m_TimerID_25);
}
// Timer callback that drives rendering (translated from Russian:
// "timer for invoking rendering"). Fires on every event-loop pass.
void CContext::timerEvent(QTimerEvent *pEvent)
{
// QTime::restart() returns the elapsed interval in milliseconds.
float fTimePerFrame = m_Time.restart();
smeta3d::SP_ICore ptrCore = smeta3d::GetSingltonCore();
if(ptrCore && ptrCore->IsInit())
ptrCore->Simulate(fTimePerFrame); // NOTE(review): value is in ms but the engine API names its parameter fTimePerSec — confirm expected unit
countframe++;
sec += fTimePerFrame;
if(sec > 1000.f) // roughly once per second: report FPS
{
// NOTE(review): constructing a QPainter outside paintEvent() on a
// WA_PaintOnScreen widget is unsupported by Qt — verify this draw works.
QPainter p(this);
p.setBrush(QBrush(QColor(255,255,255)));
p.drawText(100,100,QString("%1").arg(countframe));
// Assumes the parent is a QMainWindow; qobject_cast returns NULL (and
// this dereference would crash) otherwise — TODO confirm.
qobject_cast<QMainWindow*>(parentWidget())->statusBar()->showMessage(QString("%1").arg(countframe));
countframe = 0;
sec = 0;
}
}
// Forwards the widget's new size to the engine core so it can update its viewport.
void CContext::resizeEvent(QResizeEvent *evnt)
{
smeta3d::SP_ICore ptrCore = smeta3d::GetSingltonCore();
if(ptrCore && ptrCore->IsInit())
ptrCore->Resize(this->width(), this->height());
}
/*
void CContext::keyPressEvent(QKeyEvent *evn)
{
Core* pCore = GetSingltonCore();
if(!pCore || !pCore->IsInit())
return;
switch(evn->key())
{
case Qt::Key_Left:
{
if(evn->modifiers()==Qt::ShiftModifier)
pCore->SetEvent(Core::eTE_FastLeft);
else
pCore->SetEvent(Core::eTE_Left);
break;
}
case Qt::Key_Right:
{
if(evn->modifiers()==Qt::ShiftModifier)
pCore->SetEvent(Core::eTE_FastRight);
else
pCore->SetEvent(Core::eTE_Right);
break;
}
default: break;
}
QWidget::keyPressEvent(evn);
}
void CContext::keyReleaseEvent(QKeyEvent *evn)
{
Core* pCore = GetSingltonCore();
if(!pCore || !pCore->IsInit())
return;
pCore->SetEvent(Core::eTE_Unknow);
QWidget::keyReleaseEvent(evn);
}
bool CContext::event(QEvent *evn)
{
Core* pCore = GetSingltonCore();
if(!pCore || !pCore->IsInit())
return QWidget::event(evn);
QEvent::Type type = evn->type();
if(type != QEvent::KeyPress)
return QWidget::event(evn);
QKeyEvent* pEvent = dynamic_cast<QKeyEvent*>(evn);
switch(pEvent->key())
{
case Qt::Key_Left:
{
pCore->SetEvent(Core::eTE_Left);
break;
}
case Qt::Key_Right:
{
pCore->SetEvent(Core::eTE_Right);
break;
}
default: break;
}
return QWidget::event(evn);
}*/
<file_sep>/include/render/engine/ifc/IEngine.h
#ifndef IENGINE_H_
#define IENGINE_H_
#include <memory>
#include <windows.h>
namespace smeta3d
{
// Abstract render-engine interface; the concrete engine is obtained through
// GetSingltonEngine() and held via shared_ptr<IEngine>.
class IEngine
{
public:
// Fix: the destructor must be virtual in a polymorphic interface —
// deleting a derived engine through an IEngine* with a non-virtual
// destructor is undefined behavior.
virtual ~IEngine() {}
virtual bool Init(const HWND& HWnd) = 0; // create the rendering context for the window
virtual bool IsInit() const = 0;
virtual void DeInit() = 0;
virtual void Update(float fTimePerSec) = 0; // advance simulation by elapsed time
virtual void BeginRender() = 0;
virtual void Render() = 0;
virtual void EndRender() = 0;
virtual void Resize(int w, int h) = 0;
};
// Shared-pointer alias used by all engine consumers.
using SP_IEngine = std::shared_ptr<IEngine>;
// Returns the process-wide engine instance (exported from the engine DLL).
SP_IEngine __declspec(dllexport) GetSingltonEngine();
}
// Convenience macro: declares the full set of IEngine overrides
// inside a concrete engine class.
#define DECL_PUBLIC_IENGINE \
bool Init(const HWND& HWnd); \
bool IsInit() const; \
void DeInit(); \
void Update(float fTimePerSec); \
void BeginRender(); \
void Render(); \
void EndRender(); \
void Resize(int w, int h);
#endif /* IENGINE_H_ */
<file_sep>/source/render/GLContext.cpp
#include "GLContext.h"
#include <GL/GL.h>
#include "GL/glext.h"
#include "GL/wglext.h"
// Per-window WGL state owned by GLContext.
struct SAppContext
{
HDC hdc = NULL; // device context of the target window
HGLRC context = NULL; // OpenGL rendering context
// Extension entry point; resolved from a throwaway context in CreateContext().
PFNWGLCHOOSEPIXELFORMATARBPROC wglChoosePixelFormatARB = NULL;
};
// Creates the WGL context for HWnd.
// Throws int(1) on failure — exception type kept unchanged so existing
// catch sites keep working.
GLContext::GLContext(const HWND& HWnd)
{
m_pAppContext = new SAppContext();
bool bOk = CreateContext(HWnd);
if(!bOk)
{
// Fix: the destructor does not run when a constructor throws, so the
// app-context must be released here or it leaks on failure.
delete m_pAppContext;
m_pAppContext = NULL;
throw int(1);
}
}
// Releases the per-window state container.
// NOTE(review): the HGLRC/HDC held inside m_pAppContext are not released
// here — confirm whether a wglDeleteContext/ReleaseDC pair is needed.
GLContext::~GLContext()
{
delete m_pAppContext;
m_pAppContext = NULL;
}
bool GLContext::CreateContext(const HWND& HWnd)
{
if(m_pAppContext->context != NULL)
if(m_pAppContext->context != NULL)
{
wglMakeCurrent(m_pAppContext->hdc,NULL);
wglDeleteContext(m_pAppContext->context);
m_pAppContext->context = NULL;
}
// get new hdc
if(m_pAppContext->hdc != NULL)
{
ReleaseDC(HWnd, m_pAppContext->hdc);
m_pAppContext->hdc = NULL;
}
m_pAppContext->hdc = GetDC(HWnd);
if(m_pAppContext->hdc == NULL)
{
return 0;
}
// pixel format descriptor
PIXELFORMATDESCRIPTOR pfd =
{
sizeof(PIXELFORMATDESCRIPTOR), 1, PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER | PFD_SWAP_EXCHANGE,
PFD_TYPE_RGBA, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 8, 0, PFD_MAIN_PLANE, 0, 0, 0, 0
};
// create simple opengl context
if(m_pAppContext->wglChoosePixelFormatARB == NULL) {
// choose pixel format
int pixelformat = ChoosePixelFormat(m_pAppContext->hdc,&pfd);
SetPixelFormat(m_pAppContext->hdc,pixelformat,&pfd);
// create simple context
HGLRC old_context = m_pAppContext->context;
m_pAppContext->context = wglCreateContext(m_pAppContext->hdc);
if(m_pAppContext->context == NULL) {
ReleaseDC(HWnd,m_pAppContext->hdc);
m_pAppContext->hdc = NULL;
return 0;
}
// share context
if(old_context && wglShareLists(old_context,m_pAppContext->context) == 0) {
wglDeleteContext(m_pAppContext->context);
ReleaseDC(HWnd,m_pAppContext->hdc);
m_pAppContext->context = NULL;
m_pAppContext->hdc = NULL;
return 0;
}
// set current context
if(wglMakeCurrent(m_pAppContext->hdc,m_pAppContext->context) == 0) {
wglDeleteContext(m_pAppContext->context);
ReleaseDC(HWnd,m_pAppContext->hdc);
m_pAppContext->context = NULL;
m_pAppContext->hdc = NULL;
return 0;
}
// get proc address
m_pAppContext->wglChoosePixelFormatARB = (PFNWGLCHOOSEPIXELFORMATARBPROC)wglGetProcAddress("wglChoosePixelFormatARB");
if(m_pAppContext->wglChoosePixelFormatARB == NULL) {
wglMakeCurrent(m_pAppContext->hdc,NULL);
wglDeleteContext(m_pAppContext->context);
ReleaseDC(HWnd,m_pAppContext->hdc);
m_pAppContext->context = NULL;
m_pAppContext->hdc = NULL;
return 0;
}
// destroy context
wglMakeCurrent(m_pAppContext->hdc,NULL);
wglDeleteContext(m_pAppContext->context);
m_pAppContext->context = NULL;
}
// attributes
GLint attributes[] = {
WGL_DRAW_TO_WINDOW_ARB, GL_TRUE,
WGL_ACCELERATION_ARB, WGL_FULL_ACCELERATION_ARB,
WGL_DOUBLE_BUFFER_ARB, GL_TRUE,
WGL_COLOR_BITS_ARB, 32,
WGL_DEPTH_BITS_ARB, 24,
WGL_STENCIL_BITS_ARB, 8,
0,
};
// choose pixel format
int pixelformat;
unsigned int num_formats;
m_pAppContext->wglChoosePixelFormatARB(m_pAppContext->hdc,attributes,NULL,1,&pixelformat,&num_formats);
if(num_formats == 0) pixelformat = ChoosePixelFormat(m_pAppContext->hdc,&pfd);
// set pixel format
SetPixelFormat(m_pAppContext->hdc,pixelformat,&pfd);
HGLRC old_context = m_pAppContext->context;
// create context
m_pAppContext->context = wglCreateContext(m_pAppContext->hdc);
if(m_pAppContext->context == NULL) {
ReleaseDC(HWnd,m_pAppContext->hdc);
m_pAppContext->hdc = NULL;
return 0;
}
// share context
if(old_context && wglShareLists(old_context,m_pAppContext->context) == 0) {
wglDeleteContext(m_pAppContext->context);
ReleaseDC(HWnd,m_pAppContext->hdc);
m_pAppContext->context = NULL;
m_pAppContext->hdc = NULL;
return 0;
}
// set current context
if(wglMakeCurrent(m_pAppContext->hdc,m_pAppContext->context) == 0) {
wglDeleteContext(m_pAppContext->context);
ReleaseDC(HWnd,m_pAppContext->hdc);
m_pAppContext->context = NULL;
m_pAppContext->hdc = NULL;
return 0;
}
GLExt::init();
}
// Clears the color and depth buffers to opaque black.
// Bug fix: glClearColor only sets state for *subsequent* glClear calls, so it
// must be issued before glClear — the original order cleared the first frame
// with the previously configured (default) clear color.
void GLContext::Clear()
{
glClearColor(0,0,0,1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
// Presents the back buffer of this context's device context.
void GLContext::Swap()
{
SwapBuffers(m_pAppContext->hdc);
}
// Updates the GL viewport to cover the full w x h client area.
void GLContext::Resize(int w, int h)
{
glViewport(0, 0,w,h);
}
<file_sep>/include/render/engine/ifc/IContext.h
#ifndef IContex_H_
#define IContex_H_
#include <windows.h>
namespace smeta3d
{
// Abstract per-window rendering context: clear, present, and resize.
// Implemented by GLContext for OpenGL/WGL.
class IContext
{
public:
virtual ~IContext() {};
virtual void Clear() = 0; // clear the back buffer(s)
virtual void Swap() = 0; // present the back buffer
virtual void Resize(int w, int h) = 0; // update the viewport to w x h
};
}
// Convenience macro: declares the IContext overrides in a concrete class.
#define DECL_PUBLIC_ICONTEXT \
void Clear(); \
void Swap(); \
void Resize(int w, int h);
#endif /* IContext */
<file_sep>/source/render/GLExt.h
#ifndef __GL_EXT_H__
#define __GL_EXT_H__
#ifdef _WIN32
#include <windows.h>
#endif
#include <GL/gl.h>
#include "GL/glext.h"
// Static facade over the OpenGL extension mechanism: resolves the function
// pointers declared below, exposes driver/extension queries, and tracks a
// small amount of GL state (multisample, viewport, program, texture units).
class GLExt {
GLExt(); // static-only class: instantiation is disallowed
public:
// initialize GLExt (resolves all extension function pointers)
static int init();
static int shutdown();
// check OpenGL status
static int isInitialized();
// OpenGL info
static const char *getVendor();
static const char *getRenderer();
static const char *getVersion();
static const char *getShadingLanguage();
// OpenGL extensions
static const char *getExtensions();
static int checkExtension(const char *extension);
// OpenGL multisample
static void setMultisample(int multisample);
static int getMultisample();
// OpenGL viewport
static void setViewport(int x,int y,int width,int height);
static void getViewport(int *viewport);
// OpenGL program
static void setProgramID(GLuint program_id);
static GLuint getProgramID();
// OpenGL textures
static void clearTextures();
static void deleteTexture(GLuint target,GLuint texture_id);
static void setTexture(int unit,GLuint target,GLuint texture_id);
static void setTextureUnit(int unit,GLuint target,GLuint texture_id);
static GLuint getTextureTarget(int unit);
static GLuint getTextureID(int unit);
// check OpenGL errors
static int error(GLenum result);
static int error();
};
/******************************************************************************\
*
* Extensions
*
\******************************************************************************/
// EXT texture filter anisotropic
#ifndef GL_EXT_texture_filter_anisotropic
#define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE
#define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF
#endif
// EXT texture compression s3tc
#ifndef GL_EXT_texture_compression_s3tc
#define GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0
#define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1
#define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2
#define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3
#endif
// EXT texture srgb
#ifndef GL_EXT_texture_sRGB
#define GL_COMPRESSED_SRGB_S3TC_DXT1_EXT 0x8C4C
#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT 0x8C4D
#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT 0x8C4E
#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT 0x8C4F
#endif
// OpenGL 1.2
extern PFNGLBLENDCOLORPROC glBlendColor;
extern PFNGLBLENDEQUATIONPROC glBlendEquation;
extern PFNGLDRAWRANGEELEMENTSPROC glDrawRangeElements;
extern PFNGLTEXIMAGE3DPROC glTexImage3D;
extern PFNGLTEXSUBIMAGE3DPROC glTexSubImage3D;
extern PFNGLCOPYTEXSUBIMAGE3DPROC glCopyTexSubImage3D;
// OpenGL 1.3
extern PFNGLACTIVETEXTUREPROC glActiveTexture;
extern PFNGLSAMPLECOVERAGEPROC glSampleCoverage;
extern PFNGLCOMPRESSEDTEXIMAGE3DPROC glCompressedTexImage3D;
extern PFNGLCOMPRESSEDTEXIMAGE2DPROC glCompressedTexImage2D;
extern PFNGLCOMPRESSEDTEXIMAGE1DPROC glCompressedTexImage1D;
extern PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC glCompressedTexSubImage3D;
extern PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glCompressedTexSubImage2D;
extern PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glCompressedTexSubImage1D;
extern PFNGLGETCOMPRESSEDTEXIMAGEPROC glGetCompressedTexImage;
// OpenGL 1.4
extern PFNGLBLENDFUNCSEPARATEPROC glBlendFuncSeparate;
extern PFNGLMULTIDRAWARRAYSPROC glMultiDrawArrays;
extern PFNGLMULTIDRAWELEMENTSPROC glMultiDrawElements;
extern PFNGLPOINTPARAMETERFPROC glPointParameterf;
extern PFNGLPOINTPARAMETERFVPROC glPointParameterfv;
extern PFNGLPOINTPARAMETERIPROC glPointParameteri;
extern PFNGLPOINTPARAMETERIVPROC glPointParameteriv;
// OpenGL 1.5
extern PFNGLGENQUERIESPROC glGenQueries;
extern PFNGLDELETEQUERIESPROC glDeleteQueries;
extern PFNGLISQUERYPROC glIsQuery;
extern PFNGLBEGINQUERYPROC glBeginQuery;
extern PFNGLENDQUERYPROC glEndQuery;
extern PFNGLGETQUERYIVPROC glGetQueryiv;
extern PFNGLGETQUERYOBJECTIVPROC glGetQueryObjectiv;
extern PFNGLGETQUERYOBJECTUIVPROC glGetQueryObjectuiv;
extern PFNGLBINDBUFFERPROC glBindBuffer;
extern PFNGLDELETEBUFFERSPROC glDeleteBuffers;
extern PFNGLGENBUFFERSPROC glGenBuffers;
extern PFNGLISBUFFERPROC glIsBuffer;
extern PFNGLBUFFERDATAPROC glBufferData;
extern PFNGLBUFFERSUBDATAPROC glBufferSubData;
extern PFNGLGETBUFFERSUBDATAPROC glGetBufferSubData;
extern PFNGLMAPBUFFERPROC glMapBuffer;
extern PFNGLUNMAPBUFFERPROC glUnmapBuffer;
extern PFNGLGETBUFFERPARAMETERIVPROC glGetBufferParameteriv;
extern PFNGLGETBUFFERPOINTERVPROC glGetBufferPointerv;
// OpenGL 2.0
extern PFNGLBLENDEQUATIONSEPARATEPROC glBlendEquationSeparate;
extern PFNGLDRAWBUFFERSPROC glDrawBuffers;
extern PFNGLSTENCILOPSEPARATEPROC glStencilOpSeparate;
extern PFNGLSTENCILFUNCSEPARATEPROC glStencilFuncSeparate;
extern PFNGLSTENCILMASKSEPARATEPROC glStencilMaskSeparate;
extern PFNGLATTACHSHADERPROC glAttachShader;
extern PFNGLBINDATTRIBLOCATIONPROC glBindAttribLocation;
extern PFNGLCOMPILESHADERPROC glCompileShader;
extern PFNGLCREATEPROGRAMPROC glCreateProgram;
extern PFNGLCREATESHADERPROC glCreateShader;
extern PFNGLDELETEPROGRAMPROC glDeleteProgram;
extern PFNGLDELETESHADERPROC glDeleteShader;
extern PFNGLDETACHSHADERPROC glDetachShader;
extern PFNGLDISABLEVERTEXATTRIBARRAYPROC glDisableVertexAttribArray;
extern PFNGLENABLEVERTEXATTRIBARRAYPROC glEnableVertexAttribArray;
extern PFNGLGETACTIVEATTRIBPROC glGetActiveAttrib;
extern PFNGLGETACTIVEUNIFORMPROC glGetActiveUniform;
extern PFNGLGETATTACHEDSHADERSPROC glGetAttachedShaders;
extern PFNGLGETATTRIBLOCATIONPROC glGetAttribLocation;
extern PFNGLGETPROGRAMIVPROC glGetProgramiv;
extern PFNGLGETPROGRAMINFOLOGPROC glGetProgramInfoLog;
extern PFNGLGETSHADERIVPROC glGetShaderiv;
extern PFNGLGETSHADERINFOLOGPROC glGetShaderInfoLog;
extern PFNGLGETSHADERSOURCEPROC glGetShaderSource;
extern PFNGLGETUNIFORMLOCATIONPROC glGetUniformLocation;
extern PFNGLGETUNIFORMFVPROC glGetUniformfv;
extern PFNGLGETUNIFORMIVPROC glGetUniformiv;
extern PFNGLGETVERTEXATTRIBDVPROC glGetVertexAttribdv;
extern PFNGLGETVERTEXATTRIBFVPROC glGetVertexAttribfv;
extern PFNGLGETVERTEXATTRIBIVPROC glGetVertexAttribiv;
extern PFNGLGETVERTEXATTRIBPOINTERVPROC glGetVertexAttribPointerv;
extern PFNGLISPROGRAMPROC glIsProgram;
extern PFNGLISSHADERPROC glIsShader;
extern PFNGLLINKPROGRAMPROC glLinkProgram;
extern PFNGLSHADERSOURCEPROC glShaderSource;
extern PFNGLUSEPROGRAMPROC glUseProgram;
extern PFNGLUNIFORM1FPROC glUniform1f;
extern PFNGLUNIFORM2FPROC glUniform2f;
extern PFNGLUNIFORM3FPROC glUniform3f;
extern PFNGLUNIFORM4FPROC glUniform4f;
extern PFNGLUNIFORM1IPROC glUniform1i;
extern PFNGLUNIFORM2IPROC glUniform2i;
extern PFNGLUNIFORM3IPROC glUniform3i;
extern PFNGLUNIFORM4IPROC glUniform4i;
extern PFNGLUNIFORM1FVPROC glUniform1fv;
extern PFNGLUNIFORM2FVPROC glUniform2fv;
extern PFNGLUNIFORM3FVPROC glUniform3fv;
extern PFNGLUNIFORM4FVPROC glUniform4fv;
extern PFNGLUNIFORM1IVPROC glUniform1iv;
extern PFNGLUNIFORM2IVPROC glUniform2iv;
extern PFNGLUNIFORM3IVPROC glUniform3iv;
extern PFNGLUNIFORM4IVPROC glUniform4iv;
extern PFNGLUNIFORMMATRIX2FVPROC glUniformMatrix2fv;
extern PFNGLUNIFORMMATRIX3FVPROC glUniformMatrix3fv;
extern PFNGLUNIFORMMATRIX4FVPROC glUniformMatrix4fv;
extern PFNGLVALIDATEPROGRAMPROC glValidateProgram;
extern PFNGLVERTEXATTRIB1DPROC glVertexAttrib1d;
extern PFNGLVERTEXATTRIB1DVPROC glVertexAttrib1dv;
extern PFNGLVERTEXATTRIB1FPROC glVertexAttrib1f;
extern PFNGLVERTEXATTRIB1FVPROC glVertexAttrib1fv;
extern PFNGLVERTEXATTRIB1SPROC glVertexAttrib1s;
extern PFNGLVERTEXATTRIB1SVPROC glVertexAttrib1sv;
extern PFNGLVERTEXATTRIB2DPROC glVertexAttrib2d;
extern PFNGLVERTEXATTRIB2DVPROC glVertexAttrib2dv;
extern PFNGLVERTEXATTRIB2FPROC glVertexAttrib2f;
extern PFNGLVERTEXATTRIB2FVPROC glVertexAttrib2fv;
extern PFNGLVERTEXATTRIB2SPROC glVertexAttrib2s;
extern PFNGLVERTEXATTRIB2SVPROC glVertexAttrib2sv;
extern PFNGLVERTEXATTRIB3DPROC glVertexAttrib3d;
extern PFNGLVERTEXATTRIB3DVPROC glVertexAttrib3dv;
extern PFNGLVERTEXATTRIB3FPROC glVertexAttrib3f;
extern PFNGLVERTEXATTRIB3FVPROC glVertexAttrib3fv;
extern PFNGLVERTEXATTRIB3SPROC glVertexAttrib3s;
extern PFNGLVERTEXATTRIB3SVPROC glVertexAttrib3sv;
extern PFNGLVERTEXATTRIB4NBVPROC glVertexAttrib4Nbv;
extern PFNGLVERTEXATTRIB4NIVPROC glVertexAttrib4Niv;
extern PFNGLVERTEXATTRIB4NSVPROC glVertexAttrib4Nsv;
extern PFNGLVERTEXATTRIB4NUBPROC glVertexAttrib4Nub;
extern PFNGLVERTEXATTRIB4NUBVPROC glVertexAttrib4Nubv;
extern PFNGLVERTEXATTRIB4NUIVPROC glVertexAttrib4Nuiv;
extern PFNGLVERTEXATTRIB4NUSVPROC glVertexAttrib4Nusv;
extern PFNGLVERTEXATTRIB4BVPROC glVertexAttrib4bv;
extern PFNGLVERTEXATTRIB4DPROC glVertexAttrib4d;
extern PFNGLVERTEXATTRIB4DVPROC glVertexAttrib4dv;
extern PFNGLVERTEXATTRIB4FPROC glVertexAttrib4f;
extern PFNGLVERTEXATTRIB4FVPROC glVertexAttrib4fv;
extern PFNGLVERTEXATTRIB4IVPROC glVertexAttrib4iv;
extern PFNGLVERTEXATTRIB4SPROC glVertexAttrib4s;
extern PFNGLVERTEXATTRIB4SVPROC glVertexAttrib4sv;
extern PFNGLVERTEXATTRIB4UBVPROC glVertexAttrib4ubv;
extern PFNGLVERTEXATTRIB4UIVPROC glVertexAttrib4uiv;
extern PFNGLVERTEXATTRIB4USVPROC glVertexAttrib4usv;
extern PFNGLVERTEXATTRIBPOINTERPROC glVertexAttribPointer;
// OpenGL 2.1
extern PFNGLUNIFORMMATRIX2X3FVPROC glUniformMatrix2x3fv;
extern PFNGLUNIFORMMATRIX3X2FVPROC glUniformMatrix3x2fv;
extern PFNGLUNIFORMMATRIX2X4FVPROC glUniformMatrix2x4fv;
extern PFNGLUNIFORMMATRIX4X2FVPROC glUniformMatrix4x2fv;
extern PFNGLUNIFORMMATRIX3X4FVPROC glUniformMatrix3x4fv;
extern PFNGLUNIFORMMATRIX4X3FVPROC glUniformMatrix4x3fv;
// OpenGL 3.0
extern PFNGLCOLORMASKIPROC glColorMaski;
extern PFNGLGETBOOLEANI_VPROC glGetBooleani_v;
extern PFNGLGETINTEGERI_VPROC glGetIntegeri_v;
extern PFNGLENABLEIPROC glEnablei;
extern PFNGLDISABLEIPROC glDisablei;
extern PFNGLISENABLEDIPROC glIsEnabledi;
extern PFNGLBEGINTRANSFORMFEEDBACKPROC glBeginTransformFeedback;
extern PFNGLENDTRANSFORMFEEDBACKPROC glEndTransformFeedback;
extern PFNGLBINDBUFFERRANGEPROC glBindBufferRange;
extern PFNGLBINDBUFFERBASEPROC glBindBufferBase;
extern PFNGLTRANSFORMFEEDBACKVARYINGSPROC glTransformFeedbackVaryings;
extern PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glGetTransformFeedbackVarying;
extern PFNGLCLAMPCOLORPROC glClampColor;
extern PFNGLBEGINCONDITIONALRENDERPROC glBeginConditionalRender;
extern PFNGLENDCONDITIONALRENDERPROC glEndConditionalRender;
extern PFNGLVERTEXATTRIBIPOINTERPROC glVertexAttribIPointer;
extern PFNGLGETVERTEXATTRIBIIVPROC glGetVertexAttribIiv;
extern PFNGLGETVERTEXATTRIBIUIVPROC glGetVertexAttribIuiv;
extern PFNGLVERTEXATTRIBI1IPROC glVertexAttribI1i;
extern PFNGLVERTEXATTRIBI2IPROC glVertexAttribI2i;
extern PFNGLVERTEXATTRIBI3IPROC glVertexAttribI3i;
extern PFNGLVERTEXATTRIBI4IPROC glVertexAttribI4i;
extern PFNGLVERTEXATTRIBI1UIPROC glVertexAttribI1ui;
extern PFNGLVERTEXATTRIBI2UIPROC glVertexAttribI2ui;
extern PFNGLVERTEXATTRIBI3UIPROC glVertexAttribI3ui;
extern PFNGLVERTEXATTRIBI4UIPROC glVertexAttribI4ui;
extern PFNGLVERTEXATTRIBI1IVPROC glVertexAttribI1iv;
extern PFNGLVERTEXATTRIBI2IVPROC glVertexAttribI2iv;
extern PFNGLVERTEXATTRIBI3IVPROC glVertexAttribI3iv;
extern PFNGLVERTEXATTRIBI4IVPROC glVertexAttribI4iv;
extern PFNGLVERTEXATTRIBI1UIVPROC glVertexAttribI1uiv;
extern PFNGLVERTEXATTRIBI2UIVPROC glVertexAttribI2uiv;
extern PFNGLVERTEXATTRIBI3UIVPROC glVertexAttribI3uiv;
extern PFNGLVERTEXATTRIBI4UIVPROC glVertexAttribI4uiv;
extern PFNGLVERTEXATTRIBI4BVPROC glVertexAttribI4bv;
extern PFNGLVERTEXATTRIBI4SVPROC glVertexAttribI4sv;
extern PFNGLVERTEXATTRIBI4UBVPROC glVertexAttribI4ubv;
extern PFNGLVERTEXATTRIBI4USVPROC glVertexAttribI4usv;
extern PFNGLGETUNIFORMUIVPROC glGetUniformuiv;
extern PFNGLBINDFRAGDATALOCATIONPROC glBindFragDataLocation;
extern PFNGLGETFRAGDATALOCATIONPROC glGetFragDataLocation;
extern PFNGLUNIFORM1UIPROC glUniform1ui;
extern PFNGLUNIFORM2UIPROC glUniform2ui;
extern PFNGLUNIFORM3UIPROC glUniform3ui;
extern PFNGLUNIFORM4UIPROC glUniform4ui;
extern PFNGLUNIFORM1UIVPROC glUniform1uiv;
extern PFNGLUNIFORM2UIVPROC glUniform2uiv;
extern PFNGLUNIFORM3UIVPROC glUniform3uiv;
extern PFNGLUNIFORM4UIVPROC glUniform4uiv;
extern PFNGLTEXPARAMETERIIVPROC glTexParameterIiv;
extern PFNGLTEXPARAMETERIUIVPROC glTexParameterIuiv;
extern PFNGLGETTEXPARAMETERIIVPROC glGetTexParameterIiv;
extern PFNGLGETTEXPARAMETERIUIVPROC glGetTexParameterIuiv;
extern PFNGLCLEARBUFFERIVPROC glClearBufferiv;
extern PFNGLCLEARBUFFERUIVPROC glClearBufferuiv;
extern PFNGLCLEARBUFFERFVPROC glClearBufferfv;
extern PFNGLCLEARBUFFERFIPROC glClearBufferfi;
extern PFNGLGETSTRINGIPROC glGetStringi;
// OpenGL 3.1
extern PFNGLDRAWARRAYSINSTANCEDPROC glDrawArraysInstanced;
extern PFNGLDRAWELEMENTSINSTANCEDPROC glDrawElementsInstanced;
extern PFNGLTEXBUFFERPROC glTexBuffer;
extern PFNGLPRIMITIVERESTARTINDEXPROC glPrimitiveRestartIndex;
// OpenGL 3.2
extern PFNGLGETINTEGER64I_VPROC glGetInteger64i_v;
extern PFNGLGETBUFFERPARAMETERI64VPROC glGetBufferParameteri64v;
extern PFNGLFRAMEBUFFERTEXTUREPROC glFramebufferTexture;
// OpenGL 3.3
extern PFNGLVERTEXATTRIBDIVISORPROC glVertexAttribDivisor;
// OpenGL 4.0
extern PFNGLMINSAMPLESHADINGPROC glMinSampleShading;
extern PFNGLBLENDEQUATIONIPROC glBlendEquationi;
extern PFNGLBLENDEQUATIONSEPARATEIPROC glBlendEquationSeparatei;
extern PFNGLBLENDFUNCIPROC glBlendFunci;
extern PFNGLBLENDFUNCSEPARATEIPROC glBlendFuncSeparatei;
// ARB framebuffer object
extern PFNGLISRENDERBUFFERPROC glIsRenderbuffer;
extern PFNGLBINDRENDERBUFFERPROC glBindRenderbuffer;
extern PFNGLDELETERENDERBUFFERSPROC glDeleteRenderbuffers;
extern PFNGLGENRENDERBUFFERSPROC glGenRenderbuffers;
extern PFNGLRENDERBUFFERSTORAGEPROC glRenderbufferStorage;
extern PFNGLGETRENDERBUFFERPARAMETERIVPROC glGetRenderbufferParameteriv;
extern PFNGLISFRAMEBUFFERPROC glIsFramebuffer;
extern PFNGLBINDFRAMEBUFFERPROC glBindFramebuffer;
extern PFNGLDELETEFRAMEBUFFERSPROC glDeleteFramebuffers;
extern PFNGLGENFRAMEBUFFERSPROC glGenFramebuffers;
extern PFNGLCHECKFRAMEBUFFERSTATUSPROC glCheckFramebufferStatus;
extern PFNGLFRAMEBUFFERTEXTURE1DPROC glFramebufferTexture1D;
extern PFNGLFRAMEBUFFERTEXTURE2DPROC glFramebufferTexture2D;
extern PFNGLFRAMEBUFFERTEXTURE3DPROC glFramebufferTexture3D;
extern PFNGLFRAMEBUFFERRENDERBUFFERPROC glFramebufferRenderbuffer;
extern PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glGetFramebufferAttachmentParameteriv;
extern PFNGLGENERATEMIPMAPPROC glGenerateMipmap;
extern PFNGLBLITFRAMEBUFFERPROC glBlitFramebuffer;
extern PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glRenderbufferStorageMultisample;
extern PFNGLFRAMEBUFFERTEXTURELAYERPROC glFramebufferTextureLayer;
// ARB map buffer range
extern PFNGLMAPBUFFERRANGEPROC glMapBufferRange;
extern PFNGLFLUSHMAPPEDBUFFERRANGEPROC glFlushMappedBufferRange;
// ARB vertex array object
extern PFNGLBINDVERTEXARRAYPROC glBindVertexArray;
extern PFNGLDELETEVERTEXARRAYSPROC glDeleteVertexArrays;
extern PFNGLGENVERTEXARRAYSPROC glGenVertexArrays;
extern PFNGLISVERTEXARRAYPROC glIsVertexArray;
// ARB uniform buffer object
extern PFNGLGETUNIFORMINDICESPROC glGetUniformIndices;
extern PFNGLGETACTIVEUNIFORMSIVPROC glGetActiveUniformsiv;
extern PFNGLGETACTIVEUNIFORMNAMEPROC glGetActiveUniformName;
extern PFNGLGETUNIFORMBLOCKINDEXPROC glGetUniformBlockIndex;
extern PFNGLGETACTIVEUNIFORMBLOCKIVPROC glGetActiveUniformBlockiv;
extern PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glGetActiveUniformBlockName;
extern PFNGLUNIFORMBLOCKBINDINGPROC glUniformBlockBinding;
// ARB copy buffer
extern PFNGLCOPYBUFFERSUBDATAPROC glCopyBufferSubData;
// ARB blend func extended
extern PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glBindFragDataLocationIndexed;
// ARB draw elements base vertex
extern PFNGLDRAWELEMENTSBASEVERTEXPROC glDrawElementsBaseVertex;
extern PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glDrawRangeElementsBaseVertex;
extern PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC glDrawElementsInstancedBaseVertex;
extern PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC glMultiDrawElementsBaseVertex;
// ARB texture multisample
extern PFNGLTEXIMAGE2DMULTISAMPLEPROC glTexImage2DMultisample;
extern PFNGLTEXIMAGE3DMULTISAMPLEPROC glTexImage3DMultisample;
extern PFNGLGETMULTISAMPLEFVPROC glGetMultisamplefv;
extern PFNGLSAMPLEMASKIPROC glSampleMaski;
// ARB tessellation shader
extern PFNGLPATCHPARAMETERIPROC glPatchParameteri;
extern PFNGLPATCHPARAMETERFVPROC glPatchParameterfv;
// ARB debug output
extern PFNGLDEBUGMESSAGECONTROLARBPROC glDebugMessageControlARB;
extern PFNGLDEBUGMESSAGEINSERTARBPROC glDebugMessageInsertARB;
extern PFNGLDEBUGMESSAGECALLBACKARBPROC glDebugMessageCallbackARB;
extern PFNGLGETDEBUGMESSAGELOGARBPROC glGetDebugMessageLogARB;
// ARB compute shader
extern PFNGLDISPATCHCOMPUTEPROC glDispatchCompute;
extern PFNGLDISPATCHCOMPUTEINDIRECTPROC glDispatchComputeIndirect;
#endif /* __GL_EXT_H__ */
<file_sep>/source/render/engine/CMakeLists.txt
######################################################################################
# engine: builds the shared renderer library from this directory's sources,
# the public interface headers and the gl_render_system backend; links glew.
######################################################################################
# Collect headers/sources by glob (re-run CMake after adding files).
file(GLOB PRIVATE_HEADERS *.h)
file(GLOB PUBLIC_HEADERS ${CMAKE_SOURCE_DIR}/include/render/engine/ifc/*.h)
file(GLOB PRIVATE_GL_SYSTEM_HEADERS gl_render_system/*.h)
# NOTE(review): variable name says HEADERS but this globs .cpp sources.
file(GLOB SOURCES_GL_SYSTEM_HEADERS gl_render_system/*.cpp)
file(GLOB SOURCES *.cpp)
# Sorted for a stable file order in generated IDE projects.
list(SORT PRIVATE_HEADERS)
list(SORT SOURCES)
add_library(engine SHARED ${PUBLIC_HEADERS} ${PRIVATE_HEADERS} ${PRIVATE_GL_SYSTEM_HEADERS} ${SOURCES_GL_SYSTEM_HEADERS} ${SOURCES})
##set_executable_output_postfix( kernel)
#set_version_and_label( ${CURRENT_LIBRARY_NAME} )
include_directories(${CMAKE_SOURCE_DIR}/source/render/engine)
set_target_properties( engine PROPERTIES
COMPILE_FLAGS "${ADDITION_COMPILE_FLAGS}"
)
#message(Qt5Widgets_LIBRARIES ": ${Qt5Widgets_LIBRARIES}")
set(LIBRARIES glew )
target_link_libraries(engine ${LIBRARIES})
<file_sep>/cmake/make_default_output_directory.cmake
##################################################################
#
# Create variables for configurations of output directory (LIBRARY, ARCHIVE, RUNTIME)
# Usage: make_default_output_directory(BUILD_DIR )
#
##################################################################
# Populates CMAKE_{LIBRARY,ARCHIVE,RUNTIME}_OUTPUT_DIRECTORY_<CONFIG> for every
# active build configuration, rooted at BUILD_DIR, and accumulates the runtime
# output directories into the RUNTIME_OUTPUT_PATH* internal cache variables.
macro( make_default_output_directory BUILD_DIR )
# Multi-config generators (VS/Xcode) list all configurations at once;
# single-config generators only provide CMAKE_BUILD_TYPE.
if( CMAKE_CONFIGURATION_TYPES )
set(_conf_types ${CMAKE_CONFIGURATION_TYPES})
elseif(CMAKE_BUILD_TYPE)
set(_conf_types ${CMAKE_BUILD_TYPE})
endif()
# Shared libraries go to bin/ on UNIX, lib/ elsewhere (Windows DLL convention).
if (UNIX)
set( _lib bin )
else()
set( _lib lib )
endif()
foreach(_conf_type ${_conf_types})
string(TOUPPER ${_conf_type} _conf_type_up)
if (UNIX)
string(REPLACE "//" "/" _build_dir_and_conf_type ${BUILD_DIR} ) # On UNIX there is a single configuration, so no per-config subdirectory
else()
string(REPLACE "//" "/" _build_dir_and_conf_type ${BUILD_DIR}/${_conf_type} ) # ${BUILD_DIR}/${_conf_type}
endif()
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${_conf_type_up} ${_build_dir_and_conf_type}/${_lib} )
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${_conf_type_up} ${_build_dir_and_conf_type}/lib )
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${_conf_type_up} ${_build_dir_and_conf_type}/bin )
# build path for output runtime
list (APPEND RUNTIME_OUTPUT_PATH ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_${_conf_type_up}} )
list (APPEND RUNTIME_OUTPUT_PATH_${_conf_type_up} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_${_conf_type_up}} )
list(REMOVE_DUPLICATES RUNTIME_OUTPUT_PATH_${_conf_type_up})
# Persist per-config runtime path across CMake runs (internal cache entry).
set (RUNTIME_OUTPUT_PATH_${_conf_type_up} ${RUNTIME_OUTPUT_PATH_${_conf_type_up}} CACHE INTERNAL "" FORCE )
endforeach()
list(REMOVE_DUPLICATES RUNTIME_OUTPUT_PATH)
set (RUNTIME_OUTPUT_PATH ${RUNTIME_OUTPUT_PATH} CACHE INTERNAL "" FORCE )
# Clean up loop temporaries so they do not leak into the caller's scope.
unset(_conf_type)
unset(_conf_types)
unset(_conf_type_up)
endmacro( make_default_output_directory )
<file_sep>/source/gui_basis/mainwindow.cpp
#include "gui_basis/mainwindow.h"
#include "ui_mainwindow.h"
// Construct the main window and build its widget tree from the
// Designer-generated form class.
CMainWindow::CMainWindow(QWidget *parent, Qt::WindowFlags flags)
    : QMainWindow(parent, flags)
    , m_pUI(new Ui::MainWindow)
{
    m_pUI->setupUi(this);
}
CMainWindow::~CMainWindow()
{
delete m_pUI;
}<file_sep>/source/tests/test_signals/CMakeLists.txt
######################################################################################
# test_signals: small test executable built from every header/source in this dir.
######################################################################################
file(GLOB PRIVATE_HEADERS *.h)
file(GLOB SOURCES *.cpp)
list(SORT PRIVATE_HEADERS)
list(SORT SOURCES)
#set(ADDITION_COMPILE_FLAGS "-DMY_EXE")
#set(ADDITION_LINK_FLAGS "")
#if(MSVC)
# set(ADDITION_COMPILE_FLAGS "${ADDITION_COMPILE_FLAGS}")
# set(ADDITION_LINK_FLAGS "${ADDITION_LINK_FLAGS} /ENTRY:mainCRTStartup" )
# set(_WIN32 "WIN32")
#endif()
# ${_WIN32} is empty unless the MSVC block above is re-enabled.
add_executable(test_signals ${_WIN32} ${PRIVATE_HEADERS} ${SOURCES})
##set_executable_output_postfix( my_project)
#set_version_and_label( ${CURRENT_LIBRARY_NAME} )
set_target_properties( test_signals PROPERTIES
COMPILE_FLAGS "${ADDITION_COMPILE_FLAGS}"
LINK_FLAGS "${ADDITION_LINK_FLAGS}"
ENABLE_EXPORTS FALSE
)
include_directories(${Boost_INCLUDE_DIR})
#set(LIBRARIES ${Qt5Widgets_LIBRARIES} gui_basis kernel)
#target_link_libraries(test ${LIBRARIES})
| 8cf42029745d876291216a290f470b6b11b8f5d4 | [
"C",
"CMake",
"C++"
] | 36 | C++ | kupavcevdenis/smeta3d | 7add4b6bc5e1726047527b24eab19dfe174dded5 | dfc66d78593989655420e67c25e9cca4d3943ff9 |
refs/heads/master | <repo_name>elielzamora/6006<file_sep>/lec4/heap.h
/**
<NAME>
6.006 Intro to Algorithms
Header describing:
- Priority Queue ADT
- Heap functions
- Heapsort
*/
/* Binary heap stored in a plain int array with explicit size/capacity. */
typedef struct {
    int *heap;  /* backing array (BUGFIX: original was missing the ';') */
    int size;   /* number of elements currently stored */
    int cap;    /* allocated capacity of the backing array */
} Heap;

/* A priority queue is implemented directly as a heap. */
typedef Heap PriorityQueue;
<file_sep>/lec1/notes.txt
8 modules:
- Alogorithmic Thinking
- Sorting and Trees
- Hashing
- Numerics
- Graphs
- Shortest Paths
- Dynamic Programming
- Advanced Topics
Peak Finding Problem (1d):
1 2 3 4 5 ... n
x x x x x x x
Problem:
-find A peak if it exists in the array A from 1 to n such that
a[x-1] <= a[x] >= a[x+1]
Solutions:
- Compare all elements O(n)
- Divide and Conquer O(lg(n))
- T(n) = T(n/2) + O(1) => O(lg(n))
Peak Finding Problem (2d):
- greater than surrounds
Solutions:
- Greedy Ascent O(mn) -> O(n^2)
- Divide and Conquer
<file_sep>/lec4/heapsort.c
/**
 * 6.006 Intro to Algorithms
 *
 * Heapsort scaffolding:
 *  - Heap / PriorityQueue / Array types
 *  - prototypes for the heap operations (index helpers defined below;
 *    the rest are still to be implemented)
 */

/* Binary heap stored in a plain int array. */
typedef struct {
    int *heap;   /* BUGFIX: original declaration was missing the ';' */
    int size;    /* number of stored elements */
} Heap;

/* Simple array handle used for the sorted output.
 * BUGFIX: original had the typo 'typedeft' and a missing ';'. */
typedef struct {
    int *array;  /* element storage */
    int size;    /* number of elements */
} Array;

/* A priority queue is implemented directly as a heap. */
typedef Heap PriorityQueue;

int heapify(Heap h, int i);
int makeHeap(int *array, int size);
int parent(int i);
int left(int i);
int right(int i);
int getParent(Heap h, int i);
int getLeft(Heap h, int i);   /* BUGFIX: missing ',' between parameters */
int getRight(Heap h, int i);
int assertMaxHeapProperty(Heap h);
int assertSorted(Array a);
Array heapsort(Heap heap);

int main(int argc, char **argv) {
    (void)argc;  /* unused until the test driver is written */
    (void)argv;
    return 0;
}
/* Index helpers for the conventional 0-based array layout: node i has
 * children at 2i+1 and 2i+2 and its parent at (i-1)/2.
 *
 * BUGFIX: the original formulas ((i/2)-1, (i*2)-1, i*2) were mutually
 * inconsistent — parent(left(i)) did not return i for any convention —
 * and the return statements were missing their semicolons. */
int parent(int i) {
    return (i - 1) / 2;
}

int left(int i) {
    return 2 * i + 1;
}

int right(int i) {
    return 2 * i + 2;
}
| e1742e3b9cc54e8323f686bab237a1027ee3693f | [
"C",
"Text"
] | 3 | C | elielzamora/6006 | dcef481ff975393c61f6f5952811b1651386dea6 | 3a041742a2f28fb693f66e6c2a96aa8ca25b6229 |
refs/heads/master | <repo_name>ShanilKoshitha/Blitz-Restaurant<file_sep>/Blitz.Services.OrderAPI/Messaging/AzureServiceBusConsumer.cs
using Azure.Messaging.ServiceBus;
using Blitz.MessageBus;
using Blitz.Services.OrderAPI.Messages;
using Blitz.Services.OrderAPI.Models;
using Blitz.Services.OrderAPI.Repository;
using Microsoft.Extensions.Configuration;
using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Blitz.Services.OrderAPI.Messaging
{
    /// <summary>
    /// Background consumer for the Order API: listens on the checkout topic,
    /// persists incoming checkouts as orders, forwards a payment request, and
    /// applies payment results back onto the stored order.
    /// </summary>
    public class AzureServiceBusConsumer : IAzureServiceBusConsumer
    {
        // Service Bus entity names, all read from configuration in the ctor.
        private readonly string serviceBusConnectionString;
        private readonly string checkOutMessageTopic;
        private readonly string subscriptionCheckOut;
        private readonly string orderPaymentProcessTopic;
        private readonly string orderUpdatePaymentResultTopic;
        // Processors stay alive for the lifetime of this consumer; Start/Stop
        // control their pump.
        private ServiceBusProcessor checkOutProcessor;
        private ServiceBusProcessor orderUpdatePaymentProcessor;
        private readonly IMessageBus _messageBus;
        private readonly IConfiguration _configuration;
        private readonly OrderRepository _orderRepository;

        public AzureServiceBusConsumer(OrderRepository orderRepository, IConfiguration configuration, IMessageBus messageBus)
        {
            _orderRepository = orderRepository;
            _configuration = configuration;
            _messageBus = messageBus;
            serviceBusConnectionString = _configuration.GetValue<string>("ServiceBusConnectionString");
            checkOutMessageTopic = _configuration.GetValue<string>("CheckoutMessageTopic");
            subscriptionCheckOut = _configuration.GetValue<string>("SubscriptionCheckOut");
            orderPaymentProcessTopic = _configuration.GetValue<string>("OrderPaymentProcessTopic");
            orderUpdatePaymentResultTopic = _configuration.GetValue<string>("OrderUpdatePaymentResultTopic");
            var client = new ServiceBusClient(serviceBusConnectionString);
            // Instantiate the checkout processor.
            // NOTE(review): CreateProcessor with only an entity name targets a
            // queue; a topic would need a subscription argument — confirm that
            // "CheckoutMessageTopic" is actually provisioned as a queue.
            checkOutProcessor = client.CreateProcessor(checkOutMessageTopic);
            orderUpdatePaymentProcessor = client.CreateProcessor(orderUpdatePaymentResultTopic, subscriptionCheckOut);
        }

        /// <summary>Wires up handlers and starts both message pumps.</summary>
        public async Task Start()
        {
            checkOutProcessor.ProcessMessageAsync += OnCheckOutMessageReceived;
            checkOutProcessor.ProcessErrorAsync += ErrorHandler;
            await checkOutProcessor.StartProcessingAsync();
            orderUpdatePaymentProcessor.ProcessMessageAsync += OnOrderPaymentUpdateReceived;
            orderUpdatePaymentProcessor.ProcessErrorAsync += ErrorHandler;
            await orderUpdatePaymentProcessor.StartProcessingAsync();
        }

        /// <summary>Stops and disposes both message pumps.</summary>
        public async Task Stop()
        {
            await checkOutProcessor.StopProcessingAsync();
            await checkOutProcessor.DisposeAsync();
            await orderUpdatePaymentProcessor.StopProcessingAsync();
            await orderUpdatePaymentProcessor.DisposeAsync();
        }

        // Transport-level error callback required by ServiceBusProcessor;
        // errors are only logged to the console here.
        Task ErrorHandler(ProcessErrorEventArgs args)
        {
            Console.WriteLine(args.Exception.ToString());
            return Task.CompletedTask;
        }

        /// <summary>
        /// Handles a checkout message: maps the DTO to an OrderHeader with its
        /// detail lines, stores it, then publishes a PaymentRequestMessage.
        /// The message is completed only after a successful publish, so a
        /// publish failure leaves it to be redelivered.
        /// </summary>
        private async Task OnCheckOutMessageReceived(ProcessMessageEventArgs args)
        {
            var message = args.Message;
            var body = Encoding.UTF8.GetString(message.Body);
            CheckoutHeaderDto checkoutHeaderDto = JsonConvert.DeserializeObject<CheckoutHeaderDto>(body);
            OrderHeader orderHeader = new()
            {
                UserId = checkoutHeaderDto.UserId,
                FirstName = checkoutHeaderDto.FirstName,
                LastName = checkoutHeaderDto.LastName,
                OrderDetails = new List<OrderDetails>(),
                CardNumber = checkoutHeaderDto.CardNumber,
                CouponCode = checkoutHeaderDto.CouponCode,
                CVV = checkoutHeaderDto.CVV,
                DiscountTotal = checkoutHeaderDto.DiscountTotal,
                Email = checkoutHeaderDto.Email,
                ExpiryMonthYear = checkoutHeaderDto.ExpiryMonthYear,
                OrderTime = DateTime.Now,
                OrderTotal = checkoutHeaderDto.OrderTotal,
                PaymentStatus = false,
                PhoneNumber = checkoutHeaderDto.PhoneNumber,
                PickupDateTime = checkoutHeaderDto.PickupDateTime
            };
            // Flatten the cart lines into order detail rows and count items.
            foreach(var detailList in checkoutHeaderDto.CartDetails)
            {
                OrderDetails orderDetails = new()
                {
                    ProductId = detailList.ProductId,
                    ProductName = detailList.Product.Name,
                    Price = detailList.Product.Price,
                    Count = detailList.Count
                };
                orderHeader.CartTotalItems += detailList.Count;
                orderHeader.OrderDetails.Add(orderDetails);
            }
            await _orderRepository.AddOrder(orderHeader);
            PaymentRequestMessage paymentRequestMessage = new()
            {
                Name = orderHeader.FirstName + " " + orderHeader.LastName,
                CardNumber = orderHeader.CardNumber,
                CVV = orderHeader.CVV,
                ExpiryMonthYear = orderHeader.ExpiryMonthYear,
                OrderId = orderHeader.OrderHeaderId,
                OrderTotal = orderHeader.OrderTotal,
                Email = orderHeader.Email
            };
            try
            {
                //Create a topic called orderpaymentprocesstopic make the max delivery count to be 3
                await _messageBus.PublishMessage(paymentRequestMessage, orderPaymentProcessTopic);
                await args.CompleteMessageAsync(args.Message);
            }catch(Exception e)
            {
                // NOTE(review): failures are only logged; the order was already
                // stored, so redelivery will create a duplicate order row.
                var errorMessages = new List<string>() { e.ToString() };
                Console.WriteLine(errorMessages);
            }
        }

        /// <summary>
        /// Applies an incoming payment result to the matching order header.
        /// </summary>
        private async Task OnOrderPaymentUpdateReceived(ProcessMessageEventArgs args)
        {
            var message = args.Message;
            var body = Encoding.UTF8.GetString(message.Body);
            UpdatePaymentResultMessage paymentResultMessage = JsonConvert.DeserializeObject<UpdatePaymentResultMessage>(body);
            await _orderRepository.UpdateOrderPaymentStatus(paymentResultMessage.OrderId, paymentResultMessage.status);
            await args.CompleteMessageAsync(args.Message);
        }
    }
}
<file_sep>/Blitz.Services.OrderAPI/Messages/PaymentRequestMessage.cs
using Blitz.MessageBus;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
namespace Blitz.Services.OrderAPI.Messages
{
    /// <summary>
    /// Message published to the payment-processing topic asking the payment
    /// service to charge an order. Property names are part of the JSON wire
    /// contract with the consumer — do not rename.
    /// </summary>
    public class PaymentRequestMessage :BaseMessage
    {
        public int OrderId { get; set; }
        // Cardholder name ("First Last").
        public string Name { get; set; }
        public long CardNumber { get; set; }
        public int CVV { get; set; }
        public string ExpiryMonthYear { get; set; }
        public double OrderTotal { get; set; }
        public string Email { get; set; }
    }
}
<file_sep>/Blitz.Services.Identity/Services/IUserClaimPrincipalFactory.cs
namespace Blitz.Services.Identity.Services
{
    // Empty marker interface.
    // NOTE(review): nothing in the visible code implements or consumes this;
    // the name suggests it shadows ASP.NET Identity's
    // IUserClaimsPrincipalFactory<T> — confirm whether it is still needed.
    internal interface IUserClaimPrincipalFactory<T>
    {
    }
}<file_sep>/Blitz.Services.Identity/SD.cs
using Duende.IdentityServer;
using Duende.IdentityServer.Models;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
namespace Blitz.Services.Identity
{
    /// <summary>
    /// Static Duende IdentityServer configuration: role-name constants plus
    /// the identity resources, API scopes and clients this token server serves.
    /// </summary>
    public static class SD
    {
        // Role names used across the identity service.
        public const string Admin = "Admin";
        public const string Customer = "Customer";

        // Standard OpenID Connect identity resources (claim sets).
        public static IEnumerable<IdentityResource> IdentityResources =>
            new List<IdentityResource>
            {
                new IdentityResources.OpenId(),
                new IdentityResources.Email(),
                new IdentityResources.Profile(),
            };

        // API scopes clients may request.
        public static IEnumerable<ApiScope> ApiScopes =>
            new List<ApiScope>
            {
                new ApiScope("Blitz", "Blitz Server"),
                new ApiScope(name:"read", displayName:"Read your Data"),
                new ApiScope(name:"write", displayName:"Write your Data"),
                new ApiScope(name:"delete", displayName:"Delete your Data")
            };

        // Registered clients.
        // NOTE(review): client secrets are hard-coded ("Secret") — acceptable
        // for a demo, but they should come from configuration in production.
        public static IEnumerable<Client> Clients =>
            new List<Client>
            {
                // Machine-to-machine client (client-credentials flow).
                new Client
                {
                    ClientId = "Client",
                    ClientSecrets = {new Secret("Secret".Sha256())},
                    AllowedGrantTypes = GrantTypes.ClientCredentials,
                    AllowedScopes = {"read","write","profile" }
                },
                // Interactive web client (authorization-code flow).
                new Client
                {
                    ClientId = "Blitz",
                    ClientSecrets = {new Secret("Secret".Sha256())},
                    AllowedGrantTypes = GrantTypes.Code,
                    RedirectUris = { "https://localhost:44382/signin-oidc" },
                    PostLogoutRedirectUris = { "https://localhost:44382/signout-callback-oidc" },
                    AllowedScopes = new List<String>
                    {
                        IdentityServerConstants.StandardScopes.OpenId,
                        IdentityServerConstants.StandardScopes.Profile,
                        IdentityServerConstants.StandardScopes.Email,
                        "Blitz"
                    }
                }
            };
    }
}
<file_sep>/Blitz.Web/Models/CartHeaderDto.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
namespace Blitz.Web.Models
{
    /// <summary>
    /// Web-side DTO for a shopping-cart header, including the checkout /
    /// payment details collected from the user. Property names are part of
    /// the JSON contract with the cart/order services — do not rename.
    /// </summary>
    public class CartHeaderDto
    {
        public int CartHeaderId { get; set; }
        public string UserId { get; set; }
        public string CouponCode { get; set; }
        // Total after any coupon is applied; DiscountTotal is the amount taken off.
        public double OrderTotal { get; set; }
        public double DiscountTotal { get; set; }
        public string FirstName { get; set; }
        public string LastName { get; set; }
        public DateTime PickupDateTime { get; set; }
        public long PhoneNumber{ get; set; }
        public string Email { get; set; }
        // Raw card details captured at checkout.
        public long CardNumber { get; set; }
        public int CVV { get; set; }
        public string ExpiryMonthYear { get; set; }
    }
}
<file_sep>/Blitz.MessageBus/AzureServiceBusMessageBus.cs
using Azure.Messaging.ServiceBus;
using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Blitz.MessageBus
{
public class AzureServiceBusMessageBus : IMessageBus
{
//needs to go into the appsetting file
private string connectionString = "Endpoint=sb://aszdwwertaskdsler.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=<KEY>;
public async Task PublishMessage(BaseMessage message, string topicName)
{
await using var client = new ServiceBusClient(connectionString);
ServiceBusSender sender = client.CreateSender(topicName);
var jsonMessage = JsonConvert.SerializeObject(message);
ServiceBusMessage finalMessage = new ServiceBusMessage(Encoding.UTF8.GetBytes(jsonMessage))
{
CorrelationId = Guid.NewGuid().ToString()
};
await sender.SendMessageAsync(finalMessage);
await client.DisposeAsync();
}
}
}
<file_sep>/Blitz.Services.Email/Messaging/AzureServiceBusConsumer.cs
using Azure.Messaging.ServiceBus;
using Blitz.Services.Email.Messages;
using Blitz.Services.Email.Repository;
using Microsoft.Extensions.Configuration;
using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Blitz.Services.Email.Messaging
{
    /// <summary>
    /// Background consumer for the Email service: subscribes to the payment
    /// result topic and sends/logs a confirmation e-mail for each result.
    /// </summary>
    public class AzureServiceBusConsumer : IAzureServiceBusConsumer
    {
        // Service Bus entity names, read from configuration in the ctor.
        private readonly string serviceBusConnectionString;
        private readonly string subscriptionEmail;
        private readonly string orderUpdatePaymentResultTopic;
        private ServiceBusProcessor orderUpdatePaymentProcessor;
        private readonly IConfiguration _configuration;
        private readonly EmailRepository _emailRepository;

        public AzureServiceBusConsumer(EmailRepository emailRepository, IConfiguration configuration)
        {
            _emailRepository = emailRepository;
            _configuration = configuration;
            serviceBusConnectionString = _configuration.GetValue<string>("ServiceBusConnectionString");
            subscriptionEmail = _configuration.GetValue<string>("SubscriptionName");
            orderUpdatePaymentResultTopic = _configuration.GetValue<string>("OrderUpdatePaymentResultTopic");
            var client = new ServiceBusClient(serviceBusConnectionString);
            orderUpdatePaymentProcessor = client.CreateProcessor(orderUpdatePaymentResultTopic, subscriptionEmail);
        }

        /// <summary>Wires up handlers and starts the message pump.</summary>
        public async Task Start()
        {
            orderUpdatePaymentProcessor.ProcessMessageAsync += OnOrderPaymentUpdateReceived;
            orderUpdatePaymentProcessor.ProcessErrorAsync += ErrorHandler;
            await orderUpdatePaymentProcessor.StartProcessingAsync();
        }

        /// <summary>Stops and disposes the message pump.</summary>
        public async Task Stop()
        {
            await orderUpdatePaymentProcessor.StopProcessingAsync();
            await orderUpdatePaymentProcessor.DisposeAsync();
        }

        // Transport-level error callback; errors are only logged to the console.
        Task ErrorHandler(ProcessErrorEventArgs args)
        {
            Console.WriteLine(args.Exception.ToString());
            return Task.CompletedTask;
        }

        /// <summary>
        /// Deserializes a payment result and sends/logs the corresponding
        /// e-mail. The message is completed only on success; failures are
        /// logged and the message is left for redelivery.
        /// </summary>
        private async Task OnOrderPaymentUpdateReceived(ProcessMessageEventArgs args)
        {
            var message = args.Message;
            var body = Encoding.UTF8.GetString(message.Body);
            UpdatePaymentResultMessage objMessage = JsonConvert.DeserializeObject<UpdatePaymentResultMessage>(body);
            try
            {
                //Create a topic called orderpaymentprocesstopic make the max delivery count to be 3
                await _emailRepository.SendAndLogEmail(objMessage);
                await args.CompleteMessageAsync(args.Message);
            }
            catch (Exception e)
            {
                var errorMessages = new List<string>() { e.ToString() };
                Console.WriteLine(errorMessages);
            }
        }
    }
}
<file_sep>/README.md
# Blitz-Restaurant
This is an online ordering application built to learn and experiment with a microservices architecture.
It will eventually include an API gateway and use Azure Service Bus for inter-service communication (some services are still being implemented).

Current architecture:
| 2f61264df2bb88ea61a2d6332432410761d4f386 | [
"Markdown",
"C#"
] | 8 | C# | ShanilKoshitha/Blitz-Restaurant | b11c93ac5b6d265e675c1866657aa72eb970c43e | 00fc9ecd1bca5e66c63e47f469c05386f25d1518 |
refs/heads/master | <repo_name>datalink747/CinemaWorld<file_sep>/app/src/main/java/com/project/cinemaworld/FilmsFragment.java
package com.project.cinemaworld;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v4.app.Fragment;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import com.android.volley.Request;
import com.android.volley.Response;
import com.android.volley.VolleyError;
import com.android.volley.VolleyLog;
import com.android.volley.toolbox.JsonObjectRequest;
import com.project.cinemaworld.Model.AppController;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.ArrayList;
/**
 * Fragment that fetches the film list from the backend with a Volley JSON
 * request and displays it in a reversed vertical RecyclerView.
 */
public class FilmsFragment extends Fragment {

    private static String TAG = FilmsFragment.class.getSimpleName();

    // Films parsed from the server response; backs the RecyclerView adapter.
    private ArrayList<ConstFilm> list_films = new ArrayList<ConstFilm>();
    private RecyclerView mRecyclerView;
    private RecyclerView.Adapter mAdapter;

    /** Factory method, kept for parity with the other fragments. */
    public static FilmsFragment newInstance() {
        return new FilmsFragment();
    }

    public FilmsFragment() {
        // Required empty public constructor
    }

    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container,
                             Bundle savedInstanceState) {
        // Inflate the layout for this fragment
        return inflater.inflate(R.layout.fragment_films, container, false);
    }

    @Override
    public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
        super.onViewCreated(view, savedInstanceState);
        mRecyclerView = (RecyclerView) view.findViewById(R.id.recyclerView);
        // Reversed + stack-from-end so the most recently added films show first.
        LinearLayoutManager mLayoutManager = new LinearLayoutManager(getActivity());
        mLayoutManager.setReverseLayout(true);
        mLayoutManager.setStackFromEnd(true);
        mRecyclerView.setLayoutManager(mLayoutManager);
        mRecyclerView.setHasFixedSize(true);
        // Always reload from scratch. clear() on an empty list is a no-op, so
        // the original isEmpty()/else duplication collapses to these two lines.
        list_films.clear();
        makeJsonObjectRequest();
    }

    /**
     * Issues the GET request for the film list, parses the JSON response into
     * {@link ConstFilm} items and attaches a fresh adapter to the RecyclerView.
     */
    private void makeJsonObjectRequest() {
        JsonObjectRequest jsonObjReq = new JsonObjectRequest(Request.Method.GET,
                "http://" + getString(R.string.ip_adresse) + EndPonts.url_film, null,
                new Response.Listener<JSONObject>() {
            @Override
            public void onResponse(JSONObject response) {
                Log.d(TAG, response.toString());
                try {
                    // Expected shape: { success, Date, list_films: [ {...}, ... ] }.
                    // Reading "success"/"Date" keeps the original validation: a
                    // missing field throws before any film is parsed.
                    String success = response.getString("success");
                    String Date = response.getString("Date");
                    JSONArray service_clients = response.getJSONArray("list_films");
                    for (int i = 0; i < service_clients.length(); i++) {
                        JSONObject c = service_clients.getJSONObject(i);
                        ConstFilm item = new ConstFilm();
                        item.setId_film(c.getInt("id_film"));
                        item.setNom_film(c.getString("nom_film"));
                        item.setSynopsis_film(c.getString("synopsis_film"));
                        item.setDate_sortie_film(c.getString("date_film"));
                        item.setRealisateurs_film(c.getString("realisateurs_film"));
                        item.setActeur_film(c.getString("acteurs_film"));
                        item.setGenre_film(c.getString("genre_film"));
                        item.setNationalite_film(c.getString("nationalite_film"));
                        item.setBonde_annonce_film(c.getString("bande_film"));
                        item.setHoraires(c.getString("horaires"));
                        item.setDuree_film(c.getString("duree_film"));
                        item.setPrix_film(c.getString("prix_film"));
                        // The poster path is relative; prefix the server base URL.
                        item.setImage_film("http://" + getString(R.string.ip_adresse)
                                + EndPonts.url_affiche + c.getString("image_film"));
                        item.setNom_salle(c.getString("nom_salle"));
                        list_films.add(item);
                        Log.d("all menu 1: ", "> " + c);
                    }
                } catch (JSONException e) {
                    e.printStackTrace();
                }
                // Attach the adapter even after a parse failure so the view
                // shows whatever was parsed successfully (original behaviour).
                mAdapter = new RecyclerAdaptateurFilm(getActivity(), list_films);
                mRecyclerView.setAdapter(mAdapter);
            }
        }, new Response.ErrorListener() {
            @Override
            public void onErrorResponse(VolleyError error) {
                VolleyLog.d(TAG, "Error: " + error.getMessage());
            }
        });
        // Adding request to request queue
        AppController.getInstance().addToRequestQueue(jsonObjReq);
    }
}
<file_sep>/app/src/main/java/com/project/cinemaworld/RecyclerAdaptateurFav.java
package com.project.cinemaworld;
import android.content.Context;
import android.content.Intent;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.TextView;
import com.squareup.picasso.Picasso;
import java.util.ArrayList;
/**
* Created by Soussi on 29/04/2016.
*/
/**
 * RecyclerView adapter for the favourites list. Tapping a row opens the
 * film's detail screen; long-pressing reports the favourite's id to the
 * supplied {@link OnItemLongClickListener} (the host uses it for deletion).
 */
public class RecyclerAdaptateurFav extends RecyclerView.Adapter<RecyclerAdaptateurFav.ServicesViewHolder> {

    private Context context;
    private ArrayList<ConstFilm> items;
    private final OnItemLongClickListener listener;

    public interface OnItemClickListener {
        public void onItemClicked(int position);
    }

    public interface OnItemLongClickListener {
        public boolean onItemLongClicked(int position);
    }

    public RecyclerAdaptateurFav(Context context, ArrayList<ConstFilm> items, OnItemLongClickListener listener) {
        this.items = items;
        this.context = context;
        this.listener = listener;
        // CLEANUP: removed the redundant private list_films field that aliased
        // "items" — it was assigned once and never read.
    }

    @Override
    public int getItemCount() {
        return items.size();
    }

    @Override
    public ServicesViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
        View view = LayoutInflater.from(context)
                .inflate(R.layout.list_item_card_fav, parent, false);
        return new ServicesViewHolder(context, view);
    }

    @Override
    public void onBindViewHolder(final ServicesViewHolder holder, final int position) {
        final ConstFilm salle = items.get(position);
        holder.nom_salle.setText(salle.getNom_film());
        // Load the poster; fall back to a flat colour on failure.
        Picasso.with(context).load(salle.getImage_film())
                .error(R.color.color_primary)
                .fit().centerInside()
                .into(holder.ivFlag);
        // Long-press hands the favourite id (not the adapter position) to the host.
        holder.itemView.setOnLongClickListener(new View.OnLongClickListener() {
            @Override
            public boolean onLongClick(View v) {
                listener.onItemLongClicked(salle.getId_favoris());
                return true;
            }
        });
    }

    public class ServicesViewHolder extends RecyclerView.ViewHolder {
        private Context context;
        public TextView nom_salle;
        public ImageView ivFlag;

        public ServicesViewHolder(final Context context, final View itemView) {
            super(itemView);
            this.context = context;
            nom_salle = (TextView) itemView.findViewById(R.id.nom_fav);
            ivFlag = (ImageView) itemView.findViewById(R.id.image_fav);
            itemView.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View v) {
                    // FIX: getAdapterPosition() replaces the deprecated
                    // getPosition(); guard against clicks delivered while the
                    // item is being removed or rebound.
                    int position = getAdapterPosition();
                    if (position == RecyclerView.NO_POSITION) {
                        return;
                    }
                    Intent aff_detail = new Intent(context, Detail.class);
                    aff_detail.putExtra("pos", items.get(position));
                    context.startActivity(aff_detail);
                }
            });
        }
    }
}
<file_sep>/app/src/main/java/com/project/cinemaworld/ConstSallon.java
package com.project.cinemaworld;
import android.os.Parcel;
import android.os.Parcelable;
/**
* Created by Soussi on 29/04/2016.
*/
/**
 * Parcelable model for a cinema (salle).
 *
 * IMPORTANT: Parcel is purely positional — the write order in
 * {@link #writeToParcel} must exactly match the read order in the
 * {@code ConstSallon(Parcel)} constructor.
 */
public class ConstSallon implements Parcelable {
    int id_sallon;
    String nom_salon;
    Double longitude;
    Double lattitude;
    String image_salle;
    String tel_salle;
    String website_salle;

    public ConstSallon() {
    }

    /** Rebuilds the object from a Parcel; order mirrors writeToParcel(). */
    protected ConstSallon(Parcel in) {
        id_sallon = in.readInt();
        nom_salon = in.readString();
        image_salle = in.readString();
        longitude = in.readDouble();
        lattitude = in.readDouble();
        tel_salle = in.readString();
        website_salle = in.readString();
    }

    public static final Creator<ConstSallon> CREATOR = new Creator<ConstSallon>() {
        @Override
        public ConstSallon createFromParcel(Parcel in) {
            return new ConstSallon(in);
        }

        @Override
        public ConstSallon[] newArray(int size) {
            return new ConstSallon[size];
        }
    };

    public int getId_sallon() {
        return id_sallon;
    }

    public void setId_sallon(int id_sallon) {
        this.id_sallon = id_sallon;
    }

    public String getNom_salon() {
        return nom_salon;
    }

    public void setNom_salon(String nom_salon) {
        this.nom_salon = nom_salon;
    }

    public String getTel_salle() {
        return tel_salle;
    }

    public void setTel_salle(String tel_salle) {
        this.tel_salle = tel_salle;
    }

    public String getWebsite_salle() {
        return website_salle;
    }

    public void setWebsite_salle(String website_salle) {
        this.website_salle = website_salle;
    }

    public Double getLongitude() {
        return longitude;
    }

    public void setLongitude(Double longitude) {
        this.longitude = longitude;
    }

    public Double getLattitude() {
        return lattitude;
    }

    public void setLattitude(Double lattitude) {
        this.lattitude = lattitude;
    }

    public String getImage_salle() {
        return image_salle;
    }

    public void setImage_salle(String image_salle) {
        this.image_salle = image_salle;
    }

    @Override
    public int describeContents() {
        return 0;
    }

    @Override
    public void writeToParcel(Parcel dest, int flags) {
        dest.writeInt(id_sallon);
        dest.writeString(nom_salon);
        dest.writeString(image_salle);
        // BUGFIX: the Parcel constructor reads longitude BEFORE lattitude, but
        // the original wrote lattitude first, silently swapping the two
        // coordinates after every round-trip through a Parcel.
        dest.writeDouble(longitude);
        dest.writeDouble(lattitude);
        dest.writeString(tel_salle);
        dest.writeString(website_salle);
    }
}
<file_sep>/app/src/main/java/com/project/cinemaworld/ConstFilm.java
package com.project.cinemaworld;
import android.os.Parcel;
import android.os.Parcelable;
/**
* Created by Soussi on 29/04/2016.
*/
/**
 * Parcelable model for a film, including its schedule/pricing and, when used
 * as a favourite, the favourite row id.
 *
 * IMPORTANT: Parcel is purely positional — the read order in the
 * ConstFilm(Parcel) constructor and the write order in writeToParcel() must
 * stay in lockstep; the two currently match field-for-field.
 */
public class ConstFilm implements Parcelable {
    int id_film,id_favoris;
    String nom_film;
    String synopsis_film;
    String date_sortie_film;
    String realisateurs_film;
    String acteur_film;
    String genre_film;
    String nationalite_film;
    String bonde_annonce_film;
    String image_film;
    String horaires;
    String duree_film;
    String prix_film;

    public ConstFilm() {
    }

    /** Rebuilds the object from a Parcel; order mirrors writeToParcel(). */
    protected ConstFilm(Parcel in) {
        id_film = in.readInt();
        nom_film = in.readString();
        synopsis_film = in.readString();
        date_sortie_film = in.readString();
        realisateurs_film = in.readString();
        acteur_film = in.readString();
        genre_film = in.readString();
        nationalite_film = in.readString();
        bonde_annonce_film = in.readString();
        image_film = in.readString();
        nom_salle = in.readString();
        horaires = in.readString();
        duree_film = in.readString();
        prix_film = in.readString();
        id_favoris = in.readInt();
    }

    @Override
    public void writeToParcel(Parcel dest, int flags) {
        dest.writeInt(id_film);
        dest.writeString(nom_film);
        dest.writeString(synopsis_film);
        dest.writeString(date_sortie_film);
        dest.writeString(realisateurs_film);
        dest.writeString(acteur_film);
        dest.writeString(genre_film);
        dest.writeString(nationalite_film);
        dest.writeString(bonde_annonce_film);
        dest.writeString(image_film);
        dest.writeString(nom_salle);
        dest.writeString(horaires);
        dest.writeString(duree_film);
        dest.writeString(prix_film);
        dest.writeInt(id_favoris);
    }

    @Override
    public int describeContents() {
        return 0;
    }

    public static final Creator<ConstFilm> CREATOR = new Creator<ConstFilm>() {
        @Override
        public ConstFilm createFromParcel(Parcel in) {
            return new ConstFilm(in);
        }

        @Override
        public ConstFilm[] newArray(int size) {
            return new ConstFilm[size];
        }
    };

    public String getNom_salle() {
        return nom_salle;
    }

    public void setNom_salle(String nom_salle) {
        this.nom_salle = nom_salle;
    }

    // NOTE(review): field declared mid-class, after its accessors — legal in
    // Java, but it would be clearer grouped with the other fields above.
    String nom_salle;

    public int getId_film() {
        return id_film;
    }

    public void setId_film(int id_film) {
        this.id_film = id_film;
    }

    public String getNom_film() {
        return nom_film;
    }

    public void setNom_film(String nom_film) {
        this.nom_film = nom_film;
    }

    public String getSynopsis_film() {
        return synopsis_film;
    }

    public void setSynopsis_film(String synopsis_film) {
        this.synopsis_film = synopsis_film;
    }

    public String getDate_sortie_film() {
        return date_sortie_film;
    }

    public void setDate_sortie_film(String date_sortie_film) {
        this.date_sortie_film = date_sortie_film;
    }

    public String getRealisateurs_film() {
        return realisateurs_film;
    }

    public void setRealisateurs_film(String realisateurs_film) {
        this.realisateurs_film = realisateurs_film;
    }

    public int getId_favoris() {
        return id_favoris;
    }

    public void setId_favoris(int id_favoris) {
        this.id_favoris = id_favoris;
    }

    public String getHoraires() {
        return horaires;
    }

    public void setHoraires(String horaires) {
        this.horaires = horaires;
    }

    public String getDuree_film() {
        return duree_film;
    }

    public void setDuree_film(String duree_film) {
        this.duree_film = duree_film;
    }

    public String getPrix_film() {
        return prix_film;
    }

    public void setPrix_film(String prix_film) {
        this.prix_film = prix_film;
    }

    public String getActeur_film() {
        return acteur_film;
    }

    public void setActeur_film(String acteur_film) {
        this.acteur_film = acteur_film;
    }

    public String getGenre_film() {
        return genre_film;
    }

    public void setGenre_film(String genre_film) {
        this.genre_film = genre_film;
    }

    public String getNationalite_film() {
        return nationalite_film;
    }

    public void setNationalite_film(String nationalite_film) {
        this.nationalite_film = nationalite_film;
    }

    public String getBonde_annonce_film() {
        return bonde_annonce_film;
    }

    public void setBonde_annonce_film(String bonde_annonce_film) {
        this.bonde_annonce_film = bonde_annonce_film;
    }

    public String getImage_film() {
        return image_film;
    }

    public void setImage_film(String image_film) {
        this.image_film = image_film;
    }
}
<file_sep>/app/src/main/java/com/project/cinemaworld/SallonsFragment.java
package com.project.cinemaworld;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v4.app.Fragment;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.StaggeredGridLayoutManager;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import com.android.volley.Request;
import com.android.volley.Response;
import com.android.volley.VolleyError;
import com.android.volley.VolleyLog;
import com.android.volley.toolbox.JsonObjectRequest;
import com.project.cinemaworld.Model.AppController;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.ArrayList;
/**
* A simple {@link Fragment} subclass.
*/
/**
 * Fragment that lists the cinema halls ("salles") in a two-column staggered
 * grid. The list is fetched from the backend as JSON over Volley and bound
 * to the RecyclerView through {@link RecyclerAdaptateurSalle}.
 */
public class SallonsFragment extends Fragment {
    /** Backing data set for the adapter; refilled on every view creation. */
    private ArrayList<ConstSallon> list_salle = new ArrayList<ConstSallon>();
    private RecyclerView mRecyclerView1;
    private static String TAG = SallonsFragment.class.getSimpleName();
    private RecyclerView.Adapter mAdapter;

    public SallonsFragment() {
        // Required empty public constructor
    }

    public static SallonsFragment newInstance() {
        return new SallonsFragment();
    }

    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container,
                             Bundle savedInstanceState) {
        // Inflate the layout for this fragment.
        return inflater.inflate(R.layout.fragment_sallons, container, false);
    }

    @Override
    public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
        super.onViewCreated(view, savedInstanceState);
        mRecyclerView1 = (RecyclerView) view.findViewById(R.id.recyclerView_salle);
        mRecyclerView1.setLayoutManager(
                new StaggeredGridLayoutManager(2, StaggeredGridLayoutManager.VERTICAL));
        // Always reload from the network. Clearing first avoids duplicated
        // rows when the view is recreated; the original if/else issued the
        // same request in both branches, so it collapses to this.
        list_salle.clear();
        makeJsonObjectRequest();
    }

    /**
     * Requests the hall list (a JSON object containing a "list_salles"
     * array), parses each entry into a {@link ConstSallon}, and installs the
     * adapter when parsing ends. The adapter is attached even if parsing
     * fails partway, so whatever was parsed is still displayed.
     */
    private void makeJsonObjectRequest() {
        JsonObjectRequest jsonObjReq = new JsonObjectRequest(Request.Method.GET,
                "http://" + getString(R.string.ip_adresse) + EndPonts.url_salle, null,
                new Response.Listener<JSONObject>() {
                    @Override
                    public void onResponse(JSONObject response) {
                        Log.d(TAG, response.toString());
                        try {
                            // These reads also validate the envelope: getString
                            // throws JSONException if the keys are missing.
                            String success = response.getString("success");
                            String Date = response.getString("Date");
                            JSONArray service_clients = response.getJSONArray("list_salles");
                            for (int i = 0; i < service_clients.length(); i++) {
                                JSONObject c = service_clients.getJSONObject(i);
                                int id_salle = c.getInt("id_salle");
                                String nom_salle = c.getString("nom_salle");
                                Double longitude_salle = c.getDouble("longitude_salle");
                                Double lattitude_salle = c.getDouble("lattitude_salle");
                                String image_salle = c.getString("image_salle");
                                String tel_salle = c.getString("tel_salle");
                                String siteweb_salle = c.getString("site_web_salle");
                                ConstSallon item = new ConstSallon();
                                item.setId_sallon(id_salle);
                                item.setNom_salon(nom_salle);
                                item.setLattitude(lattitude_salle);
                                item.setLongitude(longitude_salle);
                                item.setTel_salle(tel_salle);
                                item.setWebsite_salle(siteweb_salle);
                                // Backend returns a relative image name; prefix host and path.
                                item.setImage_salle("http://" + getString(R.string.ip_adresse) + EndPonts.url_affiche + image_salle);
                                list_salle.add(item);
                                Log.d("all salle 1: ", "> " + c);
                            }
                        } catch (JSONException e) {
                            e.printStackTrace();
                            Log.d("Erreur all salle 1: ", "> " + e.getMessage());
                        }
                        mAdapter = new RecyclerAdaptateurSalle(getActivity(), list_salle);
                        mRecyclerView1.setAdapter(mAdapter);
                    }
                }, new Response.ErrorListener() {
                    @Override
                    public void onErrorResponse(VolleyError error) {
                        // Network/HTTP failure: log only; the current list is kept.
                        VolleyLog.d(TAG, "Error: " + error.getMessage());
                    }
                });
        // Hand the request to the shared Volley queue.
        AppController.getInstance().addToRequestQueue(jsonObjReq);
    }
}
<file_sep>/app/src/main/java/com/project/cinemaworld/Detail.java
package com.project.cinemaworld;
import android.content.Intent;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.os.AsyncTask;
import android.os.Bundle;
import android.support.design.widget.CollapsingToolbarLayout;
import android.support.design.widget.FloatingActionButton;
import android.support.design.widget.Snackbar;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.AppCompatButton;
import android.support.v7.widget.Toolbar;
import android.text.Html;
import android.text.SpannableStringBuilder;
import android.text.Spanned;
import android.text.method.LinkMovementMethod;
import android.text.style.ClickableSpan;
import android.util.Log;
import android.view.View;
import android.view.ViewTreeObserver;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import com.facebook.CallbackManager;
import com.facebook.FacebookCallback;
import com.facebook.FacebookException;
import com.facebook.FacebookSdk;
import com.facebook.appevents.AppEventsLogger;
import com.facebook.login.LoginManager;
import com.facebook.login.LoginResult;
import com.facebook.share.ShareApi;
import com.facebook.share.model.SharePhoto;
import com.facebook.share.model.SharePhotoContent;
import com.project.cinemaworld.login_facebook.PrefUtils;
import com.project.cinemaworld.login_facebook.User;
import com.squareup.picasso.Picasso;
import org.apache.http.NameValuePair;
import org.apache.http.message.BasicNameValuePair;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Detail screen for a single film: shows poster, metadata and an
 * expandable synopsis; plays the trailer; shares the poster to Facebook;
 * and lets the logged-in user add the film to his server-side favourites.
 *
 * The film to display is received as a {@link ConstFilm} Parcelable under
 * the intent extra "pos".
 */
public class Detail extends AppCompatActivity {
    private Intent intent_recu;
    // Film being displayed (from the "pos" intent extra).
    private ConstFilm const1;
    private TextView synapsis,date_sortie,realisateur,acteurs,gener,nationalite,salle_film,prix,horaire,duree;
    private CollapsingToolbarLayout collapsingtoolbarlayout;
    private ImageView affichafe;
    private AppCompatButton play;
    private FloatingActionButton partage,add_favoris;
    private CallbackManager callbackManager;
    private LoginManager loginManager;
    // Poster bitmap downloaded for the Facebook share.
    private Bitmap image_btm;
    // Currently logged-in Facebook user (from shared preferences).
    private User user;
    private String id_film,msg;
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_detail_events);
        // Facebook SDK must be initialised before any login/share call.
        FacebookSdk.sdkInitialize(getApplicationContext());
        AppEventsLogger.activateApp(this);
        user= PrefUtils.getCurrentUser(Detail.this);
        // Toolbar with a custom back icon that simply finishes the activity.
        Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar2);
        toolbar.setNavigationIcon(R.drawable.retour2);
        setSupportActionBar(toolbar);
        getSupportActionBar().setDisplayShowHomeEnabled(true);
        toolbar.setNavigationOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Detail.this.finish();
            }
        });
        // Unpack the film passed by the caller.
        intent_recu=getIntent();
        const1= (ConstFilm) intent_recu.getParcelableExtra("pos");
        System.out.println("data recu :" + const1.getNom_film());
        id_film= String.valueOf(const1.getId_film());
        // Bind views.
        salle_film = (TextView)findViewById(R.id.salle_film);
        synapsis = (TextView)findViewById(R.id.synapsis_film);
        date_sortie = (TextView)findViewById(R.id.date_film);
        realisateur = (TextView)findViewById(R.id.realisateur_film);
        acteurs = (TextView)findViewById(R.id.acteur_film);
        gener = (TextView)findViewById(R.id.gener_film);
        nationalite = (TextView)findViewById(R.id.nationalite_film);
        horaire = (TextView)findViewById(R.id.horaire_film);
        duree = (TextView)findViewById(R.id.duree_film);
        prix = (TextView)findViewById(R.id.prix_film);
        affichafe = (ImageView) findViewById(R.id.backgroundImageView);
        play = (AppCompatButton) findViewById(R.id.play_video);
        partage =(FloatingActionButton)findViewById(R.id.partage);
        add_favoris =(FloatingActionButton)findViewById(R.id.fab_favoris);
        // Fill in the film's metadata.
        salle_film.setText(const1.getNom_salle());
        synapsis.setText(const1.getSynopsis_film());
        // Collapse the synopsis to 2 lines with a clickable "Afficher Plus".
        makeTextViewResizable(synapsis, 2, "Afficher Plus", true);
        date_sortie.setText(const1.getDate_sortie_film());
        realisateur.setText(const1.getRealisateurs_film());
        acteurs.setText(const1.getActeur_film());
        gener.setText(const1.getGenre_film());
        nationalite.setText(const1.getNationalite_film());
        horaire.setText(const1.getHoraires());
        duree.setText(const1.getDuree_film());
        prix.setText(const1.getPrix_film());
        // Poster image with a fallback logo on load failure.
        Picasso.with(Detail.this).load(const1.getImage_film())
                .error(R.drawable.logo3)
                .fit().centerInside()
                .into(affichafe);
        collapsingtoolbarlayout =(CollapsingToolbarLayout)findViewById(R.id.collapsing_toolbar);
        collapsingtoolbarlayout.setTitle(const1.getNom_film());
        // Trailer button: opens the YouTube player if a video id exists.
        play.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                if(! const1.getBonde_annonce_film().isEmpty()) {
                    Intent gotovideo = new Intent(Detail.this, Youtube_activity_view.class);
                    gotovideo.putExtra("id_video", const1.getBonde_annonce_film());
                    gotovideo.putExtra("salle_film", const1.getNom_salle());
                    gotovideo.putExtra("nom_film", const1.getNom_film());
                    gotovideo.putExtra("gener_film", const1.getGenre_film());
                    gotovideo.putExtra("date_film", const1.getDate_sortie_film());
                    startActivity(gotovideo);
                }
                else
                {
                    Snackbar.make(v, "Video non disponible !", Snackbar.LENGTH_LONG)
                            .setAction("Retour", new View.OnClickListener() {
                                @Override
                                public void onClick(View v) {
                                }
                            }).show();
                }
            }
        });
        // Share button: logs in with publish permission, downloads the poster
        // off the UI thread, then posts it as a Facebook photo.
        partage.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                callbackManager = CallbackManager.Factory.create();
                List<String> permissionNeeds = Arrays.asList("publish_actions");
                //this loginManager helps you eliminate adding a LoginButton to your UI
                loginManager = LoginManager.getInstance();
                loginManager.logInWithPublishPermissions(Detail.this, permissionNeeds);
                loginManager.registerCallback(callbackManager, new FacebookCallback<LoginResult>()
                {
                    @Override
                    public void onSuccess(LoginResult loginResult)
                    {
                        // Download the poster in the background (network is not
                        // allowed on the UI thread), then share it on completion.
                        new AsyncTask<Void,Void,Void>(){
                            @Override
                            protected Void doInBackground(Void... params) {
                                URL imageURL = null;
                                try {
                                    imageURL = new URL(const1.getImage_film());
                                } catch (MalformedURLException e) {
                                    e.printStackTrace();
                                }
                                try {
                                    image_btm = BitmapFactory.decodeStream(imageURL.openConnection().getInputStream());
                                } catch (IOException e) {
                                    e.printStackTrace();
                                }
                                return null;
                            }
                            @Override
                            protected void onPostExecute(Void aVoid) {
                                super.onPostExecute(aVoid);
                                // NOTE(review): image_btm may be null if the download
                                // failed above — confirm SharePhoto tolerates that.
                                SharePhoto photo = new SharePhoto.Builder()
                                        .setBitmap(image_btm)
                                        .setCaption(const1.getNom_film()+" #"+const1.getNom_salle().toString().trim())
                                        .build();
                                SharePhotoContent content = new SharePhotoContent.Builder()
                                        .addPhoto(photo)
                                        .build();
                                ShareApi.share(content, null);
                            }
                        }.execute(); // end AsyncTask
                    } //end onSuccess
                    @Override
                    public void onCancel()
                    {
                        System.out.println("onCancel");
                    }
                    @Override
                    public void onError(FacebookException exception)
                    {
                        System.out.println("onError");
                    }
                });
            }
        });//end fab_partage
        // Favourites button: posts this film to the user's favourites.
        add_favoris.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                new add_favoris().execute();
            }
        });
    }
    @Override
    protected void onActivityResult(int requestCode, int responseCode, Intent data)
    {
        super.onActivityResult(requestCode, responseCode, data);
        // Forward the result of the Facebook login activity to the SDK.
        // NOTE(review): callbackManager is only created when the share button
        // was tapped; an activity result before that would NPE here — confirm.
        callbackManager.onActivityResult(requestCode, responseCode, data);
    }
    // Register Facebook app activation in every long-running activity.
    @Override
    protected void onResume() {
        super.onResume();
        AppEventsLogger.activateApp(this);
    }
    // On Android the app deactivation must be logged as well.
    @Override
    protected void onPause() {
        super.onPause();
        AppEventsLogger.deactivateApp(this);
    }
    /**
     * Background task that POSTs the displayed film plus the Facebook user's
     * identity to the favourites endpoint, then toasts the server's
     * "message" field.
     */
    private class add_favoris extends AsyncTask<Void, Void, Void> {
        @Override
        protected Void doInBackground(Void... arg0) {
            // Creating service handler class instance
            ServiceHandler sh = new ServiceHandler();
            List<NameValuePair> params = new ArrayList<NameValuePair>();
            params.add(new BasicNameValuePair("id_facebook", user.facebookID));
            params.add(new BasicNameValuePair("email_facebook", user.email));
            params.add(new BasicNameValuePair("id_film", id_film.toString()));
            params.add(new BasicNameValuePair("nom_film", const1.getNom_film()));
            params.add(new BasicNameValuePair("nom_salle", const1.getNom_salle()));
            params.add(new BasicNameValuePair("date_film", const1.getDate_sortie_film()));
            params.add(new BasicNameValuePair("genre_film", const1.getGenre_film()));
            params.add(new BasicNameValuePair("acteurs_film", const1.getActeur_film()));
            params.add(new BasicNameValuePair("bande_film", const1.getBonde_annonce_film()));
            // substring(33) strips the "http://<host>/<path>" prefix added when
            // the URL was built — assumes that prefix is exactly 33 chars; TODO confirm.
            params.add(new BasicNameValuePair("image_film", const1.getImage_film().substring(33)));
            params.add(new BasicNameValuePair("nationalite_film", const1.getNationalite_film()));
            params.add(new BasicNameValuePair("realisateurs_film", const1.getRealisateurs_film()));
            params.add(new BasicNameValuePair("synopsis_film", const1.getSynopsis_film()));
            params.add(new BasicNameValuePair("duree_film", const1.getDuree_film()));
            params.add(new BasicNameValuePair("horaires", const1.getHoraires()));
            params.add(new BasicNameValuePair("prix_film", const1.getPrix_film()));
            // Making a request to url and getting response
            String jsonStr = sh.makeServiceCall("http://" + getString(R.string.ip_adresse) + EndPonts.url_add_favoris, ServiceHandler.POST, params);
            Log.d("Response: ", "> " + jsonStr);
            if (jsonStr != null) {
                try {
                    JSONObject jsonObj = new JSONObject(jsonStr);
                    msg= jsonObj.getString("message");
                } catch (JSONException e) {
                    e.printStackTrace();
                }
            } else {
                Log.e("ServiceHandler", "Couldn't get any data from the url");
            }
            return null;
        }
        @Override
        protected void onPostExecute(Void result) {
            super.onPostExecute(result);
            // Show the server's confirmation message on the UI thread.
            Toast.makeText(Detail.this,msg,Toast.LENGTH_SHORT).show();
        }
    }
    /**
     * Truncates {@code tv} to {@code maxLine} lines and appends a clickable
     * {@code expandText} ("show more"/"show less") suffix. The full original
     * text is stashed in the view's tag on first call so expansion can
     * restore it. maxLine == 0 keeps a single line; maxLine < 0 shows all
     * lines (the else branch).
     */
    public static void makeTextViewResizable(final TextView tv, final int maxLine, final String expandText, final boolean viewMore) {
        if (tv.getTag() == null) {
            tv.setTag(tv.getText());
        }
        ViewTreeObserver vto = tv.getViewTreeObserver();
        // Layout must have happened before line metrics are available.
        vto.addOnGlobalLayoutListener(new ViewTreeObserver.OnGlobalLayoutListener() {
            @SuppressWarnings("deprecation")
            @Override
            public void onGlobalLayout() {
                ViewTreeObserver obs = tv.getViewTreeObserver();
                obs.removeGlobalOnLayoutListener(this);
                if (maxLine == 0) {
                    int lineEndIndex = tv.getLayout().getLineEnd(0);
                    // NOTE(review): the "- expandText.length() + 1" trim assumes the
                    // suffix fits on the kept line — possible off-by-one; confirm.
                    String text = tv.getText().subSequence(0, lineEndIndex - expandText.length() + 1) + " " + expandText;
                    tv.setText(text);
                    tv.setMovementMethod(LinkMovementMethod.getInstance());
                    tv.setText(
                            addClickablePartTextViewResizable(Html.fromHtml(tv.getText().toString()), tv, maxLine, expandText,
                                    viewMore), TextView.BufferType.SPANNABLE);
                } else if (maxLine > 0 && tv.getLineCount() >= maxLine) {
                    int lineEndIndex = tv.getLayout().getLineEnd(maxLine - 1);
                    String text = tv.getText().subSequence(0, lineEndIndex - expandText.length() + 1) + " " + expandText;
                    tv.setText(text);
                    tv.setMovementMethod(LinkMovementMethod.getInstance());
                    tv.setText(
                            addClickablePartTextViewResizable(Html.fromHtml(tv.getText().toString()), tv, maxLine, expandText,
                                    viewMore), TextView.BufferType.SPANNABLE);
                } else {
                    // Expanded case: keep the whole text and append the collapse link.
                    int lineEndIndex = tv.getLayout().getLineEnd(tv.getLayout().getLineCount() - 1);
                    String text = tv.getText().subSequence(0, lineEndIndex) + " " + expandText;
                    tv.setText(text);
                    tv.setMovementMethod(LinkMovementMethod.getInstance());
                    tv.setText(
                            addClickablePartTextViewResizable(Html.fromHtml(tv.getText().toString()), tv, lineEndIndex, expandText,
                                    viewMore), TextView.BufferType.SPANNABLE);
                }
            }
        });
    }
    /**
     * Wraps the {@code spanableText} suffix inside {@code strSpanned} in a
     * ClickableSpan that toggles between the collapsed ("Afficher Plus") and
     * expanded ("Retour") states by restoring the original text from the
     * view's tag and re-running makeTextViewResizable.
     */
    private static SpannableStringBuilder addClickablePartTextViewResizable(final Spanned strSpanned, final TextView tv,
                                                                            final int maxLine, final String spanableText, final boolean viewMore) {
        String str = strSpanned.toString();
        SpannableStringBuilder ssb = new SpannableStringBuilder(strSpanned);
        if (str.contains(spanableText)) {
            ssb.setSpan(new ClickableSpan() {
                @Override
                public void onClick(View widget) {
                    if (viewMore) {
                        // Currently collapsed -> expand fully.
                        tv.setLayoutParams(tv.getLayoutParams());
                        tv.setText(tv.getTag().toString(), TextView.BufferType.SPANNABLE);
                        tv.invalidate();
                        makeTextViewResizable(tv, -1, "Retour", false);
                    } else {
                        // Currently expanded -> collapse back to 3 lines.
                        tv.setLayoutParams(tv.getLayoutParams());
                        tv.setText(tv.getTag().toString(), TextView.BufferType.SPANNABLE);
                        tv.invalidate();
                        makeTextViewResizable(tv, 3, "Afficher Plus", true);
                    }
                }
            }, str.indexOf(spanableText), str.indexOf(spanableText) + spanableText.length(), 0);
        }
        return ssb;
    }
}
<file_sep>/app/src/main/java/com/project/cinemaworld/Propos.java
package com.project.cinemaworld;
import android.annotation.TargetApi;
import android.content.Intent;
import android.os.Build;
import android.os.Bundle;
import android.support.design.widget.CollapsingToolbarLayout;
import android.support.design.widget.FloatingActionButton;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.view.View;
import android.widget.ImageView;
/**
 * "About" screen: a header image inside a collapsing toolbar plus a
 * floating button that lets the user pick a mail app to send a "contact"
 * e-mail.
 */
public class Propos extends AppCompatActivity {
    private ImageView aff_propos;
    private CollapsingToolbarLayout collapsingtoolbarlayout2;

    @TargetApi(Build.VERSION_CODES.LOLLIPOP)
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_propos);

        setSupportActionBar((Toolbar) findViewById(R.id.toolbar));

        // Header artwork and collapsing-toolbar title.
        aff_propos = (ImageView) findViewById(R.id.backgroundaff);
        aff_propos.setImageDrawable(getDrawable(R.drawable.them));
        collapsingtoolbarlayout2 = (CollapsingToolbarLayout) findViewById(R.id.collapsing_toolbar2);
        collapsingtoolbarlayout2.setTitle(getString(R.string.name_titre));

        // Contact FAB: build an e-mail intent and let the user choose a client.
        FloatingActionButton fab = (FloatingActionButton) findViewById(R.id.send_mail);
        fab.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                Intent mailIntent = new Intent(Intent.ACTION_SEND);
                mailIntent.setType("message/rfc822");
                mailIntent.putExtra(Intent.EXTRA_EMAIL, new String[] {""});
                mailIntent.putExtra(Intent.EXTRA_SUBJECT, "contact");
                mailIntent.putExtra(Intent.EXTRA_TEXT, "");
                startActivity(Intent.createChooser(mailIntent, EndPonts.email_to));
            }
        });
    }

    @Override
    public void onBackPressed() {
        // Leaving this screen simply closes it.
        Propos.this.finish();
    }
}
<file_sep>/app/src/main/java/com/project/cinemaworld/Salon_filtre.java
package com.project.cinemaworld;
import android.app.ProgressDialog;
import android.content.Intent;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.widget.Toast;
import org.apache.http.NameValuePair;
import org.apache.http.message.BasicNameValuePair;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.ArrayList;
import java.util.List;
/*import org.apache.http.NameValuePair;
import org.apache.http.message.BasicNameValuePair;*/
/**
* Created by Soussi on 30/04/2016.
*/
/**
 * Activity listing all films scheduled in one cinema hall. The hall is
 * received as a {@link ConstSallon} Parcelable under the intent extra
 * "pos2"; its films are fetched from the backend in a background task and
 * shown in a RecyclerView. The options menu offers "open website" and
 * "call" actions for the hall.
 */
public class Salon_filtre extends AppCompatActivity {
    private ArrayList<ConstFilm> list_films = new ArrayList<ConstFilm>();
    private RecyclerView mRecyclerView;
    private static String TAG = FilmsFragment.class.getSimpleName();
    private RecyclerView.Adapter mAdapter1;
    /** Name of the hall whose films are listed (query parameter). */
    private String nom_salle;
    private Intent intent_recu;
    /** Hall received from the caller. */
    private ConstSallon const1;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_salon_filtre);
        Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
        setSupportActionBar(toolbar);
        mRecyclerView = (RecyclerView) findViewById(R.id.recyclerView_filtre);
        RecyclerView.LayoutManager layoutManager = new LinearLayoutManager(Salon_filtre.this);
        mRecyclerView.setLayoutManager(layoutManager);
        mRecyclerView.setHasFixedSize(true);
        intent_recu = getIntent();
        const1 = (ConstSallon) intent_recu.getParcelableExtra("pos2");
        System.out.println("data recu :" + const1.getNom_salon());
        nom_salle = const1.getNom_salon();
        toolbar.setSubtitle(const1.getNom_salon());
        // Always (re)load from the network. Clearing first avoids duplicate
        // rows; the original if/else ran the same task in both branches.
        list_films.clear();
        new GetEvents().execute();
    }

    /**
     * Background task fetching the films of {@code nom_salle} (JSON object
     * with a "list_films" array), filling {@code list_films}, then attaching
     * the adapter on the UI thread.
     */
    private class GetEvents extends AsyncTask<Void, Void, Void> {
        @Override
        protected Void doInBackground(Void... arg0) {
            // Creating service handler class instance.
            ServiceHandler sh = new ServiceHandler();
            List<NameValuePair> params = new ArrayList<NameValuePair>();
            params.add(new BasicNameValuePair("nom_salle", nom_salle));
            // Making a request to url and getting response.
            String jsonStr = sh.makeServiceCall("http://" + getString(R.string.ip_adresse) + EndPonts.url_film_par_salle, ServiceHandler.GET, params);
            Log.d("Response: ", "> " + jsonStr);
            if (jsonStr != null) {
                try {
                    JSONObject jsonObj = new JSONObject(jsonStr);
                    JSONArray all_events = jsonObj.getJSONArray("list_films");
                    Log.d("all categories: ", "> " + all_events);
                    for (int i = 0; i < all_events.length(); i++) {
                        JSONObject c = all_events.getJSONObject(i);
                        int id_film = c.getInt("id_film");
                        String nom_film = c.getString("nom_film");
                        String synopsis_film = c.getString("synopsis_film");
                        String date_film = c.getString("date_film");
                        String realisateurs_film = c.getString("realisateurs_film");
                        String acteurs_film = c.getString("acteurs_film");
                        String genre_film = c.getString("genre_film");
                        String nationalite_film = c.getString("nationalite_film");
                        String bande_film = c.getString("bande_film");
                        String image_film = c.getString("image_film");
                        String salle_film = c.getString("nom_salle");
                        String horaires_film = c.getString("horaires");
                        String duree_film = c.getString("duree_film");
                        String prix_film = c.getString("prix_film");
                        ConstFilm item = new ConstFilm();
                        item.setId_film(id_film);
                        item.setNom_film(nom_film);
                        item.setSynopsis_film(synopsis_film);
                        item.setDate_sortie_film(date_film);
                        item.setRealisateurs_film(realisateurs_film);
                        item.setActeur_film(acteurs_film);
                        item.setGenre_film(genre_film);
                        item.setNationalite_film(nationalite_film);
                        item.setBonde_annonce_film(bande_film);
                        item.setHoraires(horaires_film);
                        item.setDuree_film(duree_film);
                        item.setPrix_film(prix_film);
                        // Backend returns a relative image name; prefix host and path.
                        item.setImage_film("http://" + getString(R.string.ip_adresse) + EndPonts.url_affiche + image_film);
                        item.setNom_salle(salle_film);
                        list_films.add(item);
                        Log.d("all menu 1: ", "> " + c);
                        Log.d("image film: ", "> " + "http://" + getString(R.string.ip_adresse) + EndPonts.url_affiche + image_film);
                    }
                } catch (JSONException e) {
                    e.printStackTrace();
                }
            } else {
                Log.e("ServiceHandler", "Couldn't get any data from the url");
            }
            return null;
        }

        @Override
        protected void onPostExecute(Void result) {
            super.onPostExecute(result);
            // Back on the UI thread: bind whatever was parsed.
            mAdapter1 = new RecyclerAdaptateurFilm(Salon_filtre.this, list_films);
            mRecyclerView.setAdapter(mAdapter1);
        }
    } // end AsyncTask

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.menu_salon, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        int id = item.getItemId();
        if (id == R.id.action_siteweb) {
            if (const1.getWebsite_salle().isEmpty()) {
                Toast.makeText(Salon_filtre.this, "Site Web n'est pas Disponible !", Toast.LENGTH_SHORT).show();
            } else {
                // BUGFIX: ACTION_WEB_SEARCH ignores the data Uri (it reads the
                // SearchManager.QUERY extra), so the site never opened.
                // ACTION_VIEW on the URL opens it in the browser.
                final Intent webIntent = new Intent(Intent.ACTION_VIEW, Uri.parse(const1.getWebsite_salle()));
                startActivity(webIntent);
            }
            return true;
        }
        if (id == R.id.action_tel) {
            if (const1.getTel_salle().isEmpty()) {
                Toast.makeText(Salon_filtre.this, "N° Tél n'est pas Disponible !", Toast.LENGTH_SHORT).show();
            } else {
                // ACTION_DIAL opens the dialer pre-filled; no CALL permission needed.
                Intent dialIntent = new Intent(Intent.ACTION_DIAL, Uri.parse("tel:" + const1.getTel_salle()));
                startActivity(dialIntent);
            }
            return true;
        }
        return super.onOptionsItemSelected(item);
    }
}
<file_sep>/app/src/main/java/com/project/cinemaworld/FavorisFragment.java
package com.project.cinemaworld;
import android.annotation.TargetApi;
import android.content.Intent;
import android.graphics.Bitmap;
import android.os.Build;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.design.widget.FloatingActionButton;
import android.support.v4.app.Fragment;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.TextView;
import com.facebook.login.widget.ProfilePictureView;
import com.project.cinemaworld.login_facebook.CircleTransform;
import com.project.cinemaworld.login_facebook.PrefUtils;
import com.project.cinemaworld.login_facebook.User;
import com.squareup.picasso.Picasso;
/**
* A simple {@link Fragment} subclass.
*/
/**
* Created by Soussi on 06/05/2016.
*/
/**
 * Profile/favourites tab: shows the logged-in Facebook user's name, e-mail
 * and circular profile picture, plus a FAB that opens the favourites list
 * screen ({@link Listsfavoris}).
 */
public class FavorisFragment extends Fragment {
    private ImageView image_user,image_profil;
    private FloatingActionButton goto_favoris;
    private TextView name_user_f,email_user_f;
    // Logged-in Facebook user, restored from shared preferences.
    private User user;
    private Bitmap bitmap,bitmapcover;
    private ProfilePictureView profilePictureView;
    private LinearLayout layout_fav;
    public FavorisFragment() {
        // Required empty public constructor
    }
    public static FavorisFragment newInstance() {
        return new FavorisFragment();
    }
    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container,
                             Bundle savedInstanceState) {
        // Inflate the layout for this fragment
        return inflater.inflate(R.layout.fragment_favoris, container, false);
    }
    @TargetApi(Build.VERSION_CODES.LOLLIPOP)
    @Override
    public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
        super.onViewCreated(view, savedInstanceState);
        // Bind views.
        image_profil =(ImageView)view.findViewById(R.id.image_profil);
        image_user =(ImageView)view.findViewById(R.id.image_users);
        goto_favoris =(FloatingActionButton)view.findViewById(R.id.gotofavori);
        email_user_f =(TextView)view.findViewById(R.id.email_user_f);
        name_user_f =(TextView)view.findViewById(R.id.name_user_f);
        layout_fav = (LinearLayout)view.findViewById(R.id.layout_fav);
        layout_fav.setBackground(getActivity().getDrawable(R.drawable.affiche_fav33));
        // Show the stored Facebook identity.
        user= PrefUtils.getCurrentUser(getActivity());
        name_user_f.setText(user.facebookname);
        email_user_f.setText(user.email);
        // Load the profile picture from the Facebook Graph API, cropped to a circle.
        Picasso.with(getActivity()).load("https://graph.facebook.com/" +user.facebookID + "/picture?type=large")
                .transform(new CircleTransform())
                .error(R.drawable.icon_acteur)
                .into(image_user);
        /* Picasso.with(getActivity()).load("https://graph.facebook.com/" +user.facebookID + "?fields=cover&access_token=")
                .transform(new CircleTransform())
                .error(R.color.color_accent)
                .into(image_profil);*/
        // FAB opens the favourites list screen.
        goto_favoris.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Intent goto_favoris =new Intent(getActivity(),Listsfavoris.class);
                startActivity(goto_favoris);
            }
        });
        // fetching facebook's profile picture
        /*new AsyncTask<Void,Void,Void>(){
            @Override
            protected Void doInBackground(Void... params) {
                URL imageURL = null;
                URL imageURLcover = null;
                try {
                    imageURL = new URL("https://graph.facebook.com/" +user.facebookID + "/picture?type=large");
                    imageURLcover = new URL("https://graph.facebook.com/" +user.facebookID + "/fields=cover");
                } catch (MalformedURLException e) {
                    e.printStackTrace();
                }
                try {
                    bitmap = BitmapFactory.decodeStream(imageURL.openConnection().getInputStream());
                    bitmapcover = BitmapFactory.decodeStream(imageURLcover.openConnection().getInputStream());
                } catch (IOException e) {
                    e.printStackTrace();
                }
                return null;
            }
            @Override
            protected void onPostExecute(Void aVoid) {
                super.onPostExecute(aVoid);
                // image_user.setImageBitmap(bitmap);
                image_profil.setImageBitmap(bitmapcover);
            }
        }.execute();*/
    }
}
| 174d50c3babab405596b8d829441343183807a7d | [
"Java"
] | 9 | Java | datalink747/CinemaWorld | 07aa90e20b211dde4026812329feb0c5d1386816 | af561f7a8cda32f7af2e41e2eac008e2ce255ee7 |
refs/heads/main | <file_sep>package models;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
/** Unit tests for the in-memory Hero registry (construction and listing). */
public class HeroTest {
    // Reset the static hero registry before each test so getAll() starts
    // empty and (presumably) ids restart from 1 — see getId test below.
    @Before
    public void setUp() throws Exception {
        Hero.clearAllHeroes();
    }
    @After
    public void tearDown() throws Exception {
        Hero.clearAllHeroes();
    }
    // Constructor yields a Hero instance.
    @Test
    public void NewHeroObject_true() throws Exception {
        Hero hero = new Hero("<NAME>",5,"great leader","love");
        assertEquals (true, hero instanceof Hero);
    };
    // Constructor stores the given name.
    @Test
    public void HeroInstantiatesWithHeroName_true() throws Exception{
        Hero hero = new Hero ("<NAME>",5,"great leader","love");
        assertEquals("<NAME>",hero.getName());
    }
    // Every constructed hero is registered in the shared list.
    @Test
    public void AllHeroesAreCorrectlyReturned_true(){
        Hero hero = new Hero ("<NAME>",5,"great leader","love");
        Hero otherHero = new Hero ("<NAME>",5,"great leader","love");
        assertEquals(2,Hero.getAll().size());
    }
    // getAll() contains the exact instances that were constructed.
    @Test
    public void AllHeroesContainsAllHeroes_true() {
        Hero hero = new Hero ("<NAME>",5,"great leader","love");
        Hero otherHero = new Hero ("<NAME>",5,"great leader","love");
        assertTrue(Hero.getAll().contains(hero));
        assertTrue(Hero.getAll().contains(otherHero));
    }
    // Ids are assigned sequentially starting at 1 after a registry reset.
    @Test
    public void getId_HeroesInstantiateWithAnID_1() throws Exception{
        Hero.clearAllHeroes();
        Hero myHero = new Hero ("<NAME>",5,"great leader","love");
        assertEquals(1, myHero.getId());
    }
    // Shared fixture: builds (and thereby registers) one canonical hero.
    private Hero setupNewHero() {
        return new Hero ("<NAME>",5,"great leader","love");
    }
    // findById returns the hero with the requested id.
    @Test
    public void findReturnsCorrectHero() throws Exception {
        Hero hero = setupNewHero();
        assertEquals(1, hero.findById(hero.getId()).getId());
    }
    // findById still resolves correctly when several heroes exist.
    @Test
    public void findReturnsCorrectHeroWhenMoreThanOneHeroesExists() throws Exception {
        Hero hero = setupNewHero();
        Hero myHero = new Hero ("<NAME>",5,"great leader","love");
        assertEquals(2, Hero.findById(myHero.getId()).getId());
    }
    // update() replaces the hero's attributes but keeps its id stable.
    @Test
    public void updateChangesPostContent() throws Exception {
        Hero hero = setupNewHero();
        String formerSpecialPower = hero.getSpecialPower();
        int formerId = hero.getId();
        hero.update("kenyatta",5,"kenyan leader","progress");
        assertEquals(formerId, hero.getId());
        assertNotEquals(formerSpecialPower, hero.getSpecialPower());
    }
    // deleteHero() removes only the targeted hero; the survivor keeps id 2.
    @Test
    public void deleteDeletesASpecificHero() throws Exception {
        Hero hero = setupNewHero();
        Hero myHero = new Hero ("<NAME>",5,"great leader","love");
        hero.deleteHero();
        assertEquals(1, Hero.getAll().size()); //one is left
        assertEquals(Hero.getAll().get(0).getId(), 2);
    }
}<file_sep># Hero squad
This project was generated with [Intellij idea]
#### By <NAME>
## Description
a web application that show cases heros and there talents .
## Setup/Installation Requirements
* create an empty git repository
* git init
* git clone https://github.com/lelemoyog/core-IPs-4.git
* and start using code
## Technologies Used
* gradle
* java
* spark
## Support and contact details
For any support contact me via email <EMAIL>
### License
[MIT License](License).
Copyright (c) {2021} Saitemu Issa. | 13fde837b7d364463ce4658212c217554b37fff4 | [
"Markdown",
"Java"
] | 2 | Java | lelemoyog/core-IPs-4 | 9fad69151c0f88f45dbddb673a6887e62e968703 | c2a41dde760a7dceccd9b25d80fc79652b799e99 |
refs/heads/main | <file_sep>def calculateCost(slope, intercept,data):
totalcost=0
for x in data:
hypothesis= slope * x[0]+ intercept
difference= (hypothesis - x[1]) **2
totalcost=totalcost+difference
totalcost= totalcost* (1/(2 *len(data)))
return totalcost
def calculateSlope(slope, yintercept,data):
totalcost=0
for x in data:
hypothesis=slope*x[0]+yintercept
difference= (hypothesis -x[1]) *x[0]
totalcost=totalcost+difference
totalcost=totalcost*.01*(1/len(data))
slope=slope-totalcost
return slope
def calculateIntercept(slope,yIntercept,data):
totalcost=0
for x in data:
hypothesis=slope*x[0]+yIntercept
difference=(hypothesis-x[1])
totalcost=totalcost+difference
totalcost=totalcost * .01* (1/len(data))
yIntercept=yIntercept-totalcost
return yIntercept
#create dataset
data=[(0.5,1.4),(2.3,1.9),(2.9,3.2)]
#print (data)
slope=0
yInt=1
cost=calculateCost(slope,yInt,data)
previouscost=cost+1
print(cost)
while previouscost>cost and abs(cost-previouscost)>0.01:
slope=calculateSlope(slope,yInt,data)
yInt=calculateIntercept(slope,yInt,data)
previouscost=cost
cost=calculateCost(slope,yInt,data)
print ("Cost = " +str(cost))
print ("Slope = " +str(slope))
print ("Intercept = " +str(yInt))
| 436b0c97dfe26eaf58185c99e2a78608df167ab9 | [
"Python"
] | 1 | Python | Jubayer-Hossain-Abir-404/Calculation-Of-Cost-Slope-And-Intercept | 0f3eaf2906f010071fc8fc241ae091464b971c68 | 755e0593976b0738bc31d85b00bf911b7964da9e |
refs/heads/main | <repo_name>Tadej23/ProductivityModules<file_sep>/DataModel/ProfessionCategory.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblProfessionCategory")]
public class ProfessionCategory
{
public int ProfessionCategoryID { get; set; }
public string Title { get; set; }
}
}
<file_sep>/DataModel/Property.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblProperty")]
public class Property
{
public int PropertyID { get; set; }
public string Name { get; set; }
}
}
<file_sep>/DataModel/ArtWorkAuthor.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblArtWorkAuthor")]
public class ArtWorkAuthor
{
public int AuthorType { get; set; }
}
}
<file_sep>/DataModel/SameWork.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblSameWork")]
public class SameWork
{
}
}
<file_sep>/DataModel/Program.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Microsoft.EntityFrameworkCore;
namespace DataModel
{
class Program
{
static void Main(string[] args)
{
using (var db = new DataModelContext())
{
// Create and save a new Person
Console.Write("Enter a name for a new Person: ");
var name = Console.ReadLine();
var person = new Person { PersonID = 1 };
db.Person.Add(person);
db.SaveChanges();
// Display all Blogs from the database
var query = from b in db.Person
orderby b.Name
select b;
Console.WriteLine("All persons in the database:");
foreach (var item in query)
{
Console.WriteLine(item.Name);
}
Console.WriteLine("Press any key to exit...");
Console.ReadKey();
}
}
}
public class DataModelContext : DbContext
{
//public const string ConnectionString = "SERVER=1
public DbSet<Person> Person { get; set; }
/* public DbSet<ArtWorks> ArtWorks { get; set; }
public DbSet<Citations> Citations { get; set; }
public DbSet<CitationSources> CitationSources { get; set; }
public DbSet<PatentAuthor> PatentAuthor { get; set; }
public DbSet<Patents> Patents { get; set; }
public DbSet<PersonProfession> PersonProfession { get; set; }
public DbSet<PersonProfiles> PersonProfiles { get; set; }
public DbSet<ProfessionCategory> ProfessionCategory { get; set; }
public DbSet<Profiles> Profiles { get; set; }
public DbSet<ProjectPerson> ProjectPerson { get; set; }
public DbSet<ProjectType> ProjectType { get; set; }
public DbSet<PublicationAuthor> PublicationAuthor { get; set; }
public DbSet<Publications> Publications { get; set; }
public DbSet<SameWork> SameWork { get; set; }
public DbSet<Projects> Projects { get; set; }*/
protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
{
optionsBuilder.UseSqlServer(@"Server=.\SQLEXPRESS;Database=ResearchDB;Trusted_Connection=True;");
//Data Source=DESKTOP-EBQTRNL\SQLEXPRESS;Integrated Security=True;Connect Timeout=30;Encrypt=False;TrustServerCertificate=False;ApplicationIntent=ReadWrite;MultiSubnetFailover=False
}
}
}
<file_sep>/DataModel/ArtWorks.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblArtWorks")]
public class ArtWorks
{
public int ArtWorkID { get; set; }
}
}
<file_sep>/DataModel/ProjectPerson.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblProjectPerson")]
public class ProjectPerson
{
public bool IsLeader { get; set; }
}
}
<file_sep>/DataModel/PersonProfiles.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblPersonProfiles")]
public class PersonProfiles
{
}
}
<file_sep>/DataModel/Publications.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblPublications")]
public class Publications
{
public int PublicationId { get; set; }
public int Typology { get; set; }
}
}
<file_sep>/DataModel/PersonProfession.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table ("tblPersonProfession")]
public class PersonProfession
{
}
}
<file_sep>/DataModel/ProjectType.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblProjectType")]
public class ProjectType
{
public int ProjectTypeID { get; set; }
public string Name { get; set; }
public int Description { get; set; }
}
}
<file_sep>/DataModel/PublicationAuthor.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblPublicationAuthor")]
public class PublicationAuthor
{
public int AuthorType { get; set; }
}
}
<file_sep>/DataModel/PatentAuthor.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblPatentAuthor")]
public class PatentAuthor
{
public int AuthorType { get; set; }
}
}
<file_sep>/DataModel/Patents.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblPatents")]
public class Patents
{
public int PatentID { get; set; }
}
}
<file_sep>/DataModel/Person.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
//[Table("tblPerson")]
public class Person
{
public int PersonID { get; set; }
public string Name { get; set; }
public string LastName { get; set; }
public int BirthYear { get; set; }
public bool Gender { get; set; }
}
}
<file_sep>/DataModel/Projects.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblProjects")]
public class Projects
{
public int ProjectID { get; set; }
public string Title { get; set; }
public int BudgetSize { get; set; }
public string Description { get; set; }
//Foreign key
public int TypeID { get; set; }
}
}
<file_sep>/DataModel/Citations.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table ("tblCitations")]
public class Citations
{
}
}
<file_sep>/DataModel/Profiles.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel.DataAnnotations.Schema;
namespace DataModel
{
[Table("tblProfiles")]
public class Profiles
{
}
}
| adb67b5e63d34b659d4d32b40e8e9b5d55a9cc32 | [
"C#"
] | 18 | C# | Tadej23/ProductivityModules | 638ae9dd816862dbb2c083b34adee1f74645605e | d83be23910686fac72325b1b485e2fc6cb042519 |
refs/heads/main | <file_sep>import Icon from './icon.js'
export default function SocialMedia(props = {}) {
let linkedin = props.linkedin || ''
return `
<div class="display-flex">
${linkedin
? `
<a
href="https://linkedin.com/in/${linkedin}"
target="_blank"
rel="noopener"
>
${Icon({
class: 'margin-right-18 fill-979797 fill-hover-058AEA transition-fill',
style: 'width:1.25rem;height:1.25rem;',
href: 'linkedin'
})}
</a>
`
: ''
}
</div>
`
} | c363bb157a01145318d3a72c1922f455ab69b1cb | [
"JavaScript"
] | 1 | JavaScript | Aiden007700/begin-personal-website | ce454dad2f3bbdad36e661d529234e204392825a | 92a595bd9008f13b87b67a257d310d300f0c25f5 |
refs/heads/master | <file_sep>cryptography >= 2.5
hypothesis # tests
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <errno.h>
#include <limits.h>
#include <openssl/crypto.h>
#include <security/pam_ext.h>
#include <security/pam_modules.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <time.h>
#include "base64.h"
#include "config.h"
#include "login.h"
#include "ui.h"
#define MODULE_NAME "pam_glome"
#define MAX_ERROR_MESSAGE_SIZE 4095
#define UNUSED(var) (void)(var)
// Matches |arg| against the canonical option name |key|, accepting '_' in the
// argument wherever the key uses '-'. Returns the text after "key=", or
// |default_value| when the argument is exactly the key (bare flag form), or
// NULL when the argument does not match the key at all.
static const char *arg_value(const char *arg, const char *key,
                             const char *default_value) {
  size_t len = strlen(key);
  for (size_t pos = 0; pos < len; pos++) {
    char want = key[pos];
    char got = arg[pos];
    // Allow '_' as a spelling of '-'; anything else must match exactly.
    if (want != got && !(want == '-' && got == '_')) {
      return NULL;
    }
  }
  switch (arg[len]) {
    case '=':
      return arg + len + 1;  // value follows the '=' sign
    case '\0':
      return default_value;  // bare flag, e.g. "debug"
    default:
      return NULL;  // key is only a prefix of a longer word
  }
}
// Parses PAM module arguments ("key=value" pairs or bare flags) and stores
// each recognized option into |config| via glome_login_assign_config_option.
// Option names accept '_' in place of '-' (see arg_value). Unknown options
// and failed assignments are logged via pam_syslog and counted as errors.
// Returns 0 when every argument was accepted, -1 otherwise.
static int parse_pam_args(pam_handle_t *pamh, int argc, const char **argv,
                          glome_login_config_t *config) {
  int errors = 0;
  status_t status;
  const char *val;
  for (int i = 0; i < argc; ++i) {
    // Each arg_value() call both matches the option name and extracts its
    // value; boolean flags like "debug" default to "true" when given bare.
    if ((val = arg_value(argv[i], "config-path", NULL))) {
      status = glome_login_assign_config_option(config, "default",
                                                "config-path", val);
    } else if ((val = arg_value(argv[i], "key", NULL))) {
      status = glome_login_assign_config_option(config, "service", "key", val);
    } else if ((val = arg_value(argv[i], "key-version", NULL))) {
      status = glome_login_assign_config_option(config, "service",
                                                "key-version", val);
    } else if ((val = arg_value(argv[i], "prompt", NULL))) {
      status =
          glome_login_assign_config_option(config, "service", "prompt", val);
    } else if ((val = arg_value(argv[i], "debug", "true"))) {
      status =
          glome_login_assign_config_option(config, "default", "verbose", val);
    } else if ((val = arg_value(argv[i], "print-secrets", "true"))) {
      status = glome_login_assign_config_option(config, "default",
                                                "print-secrets", val);
    } else if ((val = arg_value(argv[i], "host-id", NULL))) {
      status =
          glome_login_assign_config_option(config, "default", "host-id", val);
    } else if ((val = arg_value(argv[i], "host-id-type", NULL))) {
      status = glome_login_assign_config_option(config, "default",
                                                "host-id-type", val);
    } else if ((val = arg_value(argv[i], "ephemeral-key", NULL))) {
      status = glome_login_assign_config_option(config, "default",
                                                "ephemeral-key", val);
    } else if ((val = arg_value(argv[i], "min-authcode-len", NULL))) {
      status = glome_login_assign_config_option(config, "default",
                                                "min-authcode-len", val);
    } else {
      // No option matched: log and keep scanning the remaining arguments.
      pam_syslog(pamh, LOG_ERR, "invalid option %s", argv[i]);
      errors++;
      continue;
    }
    if (status != STATUS_OK) {
      // Assignment failed (e.g. malformed value); status carries the message.
      pam_syslog(pamh, LOG_ERR, "failed to set config option '%s': %s", argv[i],
                 status);
      status_free(status);
      errors++;
    }
  }
  return errors > 0 ? -1 : 0;
}
// Asks PAM for the user being authenticated and stores the name in
// |config->username|. Returns 0 on success; on failure sets *error_tag to
// "get-username" and returns the EXITCODE_PANIC-based failure code.
static int get_username(pam_handle_t *pamh, glome_login_config_t *config,
                        const char **error_tag) {
  const char *username;
  // Reject both PAM errors and empty/NULL user names.
  if (pam_get_user(pamh, &username, NULL) != PAM_SUCCESS || !username ||
      !*username) {
    return failure(EXITCODE_PANIC, error_tag, "get-username");
  }
  config->username = username;
  return 0;
}
// Formats a printf-style error message and shows it to the user through the
// PAM conversation function (PAM_ERROR_MSG). All failures are silent: if the
// message does not fit, or the conversation is unavailable, the function
// simply returns without reporting.
void login_error(glome_login_config_t *config, pam_handle_t *pamh,
                 const char *format, ...) {
  UNUSED(config);
  char message[MAX_ERROR_MESSAGE_SIZE] = {0};
  va_list argptr;
  va_start(argptr, format);
  int ret = vsnprintf(message, sizeof(message), format, argptr);
  va_end(argptr);
  // Drop the message on encoding error or truncation.
  if (ret < 0 || ret >= MAX_ERROR_MESSAGE_SIZE) {
    return;
  }
  struct pam_message msg[1] = {
      {.msg = message, .msg_style = PAM_ERROR_MSG},
  };
  const struct pam_message *pmsg[1] = {&msg[0]};
  struct pam_response *resp = NULL;
  struct pam_conv *conv;
  // Fetch the application's conversation callback registered with PAM.
  if (pam_get_item(pamh, PAM_CONV, (const void **)&conv) != PAM_SUCCESS) {
    return;
  }
  if (conv->conv(1, pmsg, &resp, conv->appdata_ptr) != PAM_SUCCESS) {
    return;
  }
  // The conversation may allocate a response even for an error message;
  // the caller of conv() owns it and must free both fields.
  if (resp != NULL) {
    free(resp->resp);
    free(resp);
  }
}
// Forwards a printf-style message to syslog through the PAM logging helper.
// The config parameter exists only to satisfy the login callback interface.
void login_syslog(glome_login_config_t *config, pam_handle_t *pamh,
                  int priority, const char *format, ...) {
  UNUSED(config);
  va_list args;
  va_start(args, format);
  pam_vsyslog(pamh, priority, format, args);
  va_end(args);
}
// Displays |message| via the PAM conversation (PAM_TEXT_INFO), then reads the
// authorization code the user typed as the PAM auth token and copies it into
// |input| (capacity |input_size|, including the terminator). Returns 0 on
// success; on any failure sets *error_tag and returns a panic exit code.
int login_prompt(glome_login_config_t *config, pam_handle_t *pamh,
                 const char **error_tag, const char *message, char *input,
                 size_t input_size) {
  UNUSED(config);
  struct pam_message msg[1] = {
      {.msg = message, .msg_style = PAM_TEXT_INFO},
  };
  const struct pam_message *pmsg[1] = {&msg[0]};
  struct pam_response *resp = NULL;
  struct pam_conv *conv;
  if (pam_get_item(pamh, PAM_CONV, (const void **)&conv) != PAM_SUCCESS) {
    return failure(EXITCODE_PANIC, error_tag, "pam-get-conv");
  }
  if (conv->conv(1, pmsg, &resp, conv->appdata_ptr) != PAM_SUCCESS) {
    return failure(EXITCODE_PANIC, error_tag, "pam-conv");
  }
  // Free any response allocated for the informational prompt.
  if (resp != NULL) {
    free(resp->resp);
    free(resp);
  }
  const char *token;
  if (pam_get_authtok(pamh, PAM_AUTHTOK, &token, NULL) != PAM_SUCCESS) {
    return failure(EXITCODE_PANIC, error_tag, "pam-get-authtok");
  }
  // Require room for the token plus its NUL terminator in |input|.
  if (strlen(token) >= input_size) {
    return failure(EXITCODE_PANIC, error_tag, "pam-authtok-size");
  }
  // OpenSSH provides fake password when login is not allowed,
  // for example due to PermitRootLogin set to 'no'
  // https://github.com/openssh/openssh-portable/commit/283b97
  const char fake_password[] =
      <PASSWORD>";  // auth-pam.c from OpenSSH
  bool is_fake = true;
  // Constant-time comparison in case token contains user's password
  // (avoids leaking how many leading characters matched via timing).
  for (size_t i = 0; i < strlen(token); i++) {
    is_fake &= (token[i] == fake_password[i % (sizeof(fake_password) - 1)]);
  }
  if (is_fake) {
    return failure(EXITCODE_PANIC, error_tag, "pam-authtok-openssh-no-login");
  }
  strncpy(input, token, input_size);
  return 0;
}
// PAM authentication entry point. Configuration is assembled in three passes:
// (1) parse module arguments just to discover the config-file path, (2) load
// defaults and the config file, (3) re-parse module arguments so they take
// precedence over file values. Then delegates the actual GLOME challenge/
// response to login_authenticate(). Returns PAM_SUCCESS or PAM_AUTH_ERR.
int pam_sm_authenticate(pam_handle_t *pamh, int flags, int argc,
                        const char **argv) {
  UNUSED(flags);
  const char *error_tag = NULL;
  glome_login_config_t config = {0};
  int rc = PAM_AUTH_ERR;
  // Parse arguments to initialize the config path.
  int r = parse_pam_args(pamh, argc, argv, &config);
  if (r < 0) {
    pam_syslog(pamh, LOG_ERR, "failed to parse pam module arguments (%d)", r);
    return rc;
  }
  // Reset config while preserving the config path.
  const char *config_path = config.config_path;
  default_config(&config);
  config.config_path = config_path;
  // Read configuration file.
  status_t status = glome_login_parse_config_file(&config);
  if (status != STATUS_OK) {
    pam_syslog(pamh, LOG_ERR, "failed to read config file %s: %s",
               config.config_path, status);
    return rc;
  }
  // Parse arguments again to override config values.
  r = parse_pam_args(pamh, argc, argv, &config);
  if (r < 0) {
    pam_syslog(pamh, LOG_ERR, "failed to parse pam module arguments (%d)", r);
    return rc;
  }
  r = get_username(pamh, &config, &error_tag);
  if (r < 0) {
    pam_syslog(pamh, LOG_ERR, "failed to get username: %s (%d)", error_tag, r);
    return rc;
  }
  r = login_authenticate(&config, pamh, &error_tag);
  if (!r) {
    rc = PAM_SUCCESS;
    if (config.options & VERBOSE) {
      pam_syslog(pamh, LOG_ERR, "authenticated user '%s'", config.username);
    }
  } else {
    pam_syslog(pamh, LOG_ERR, "failed to authenticate user '%s': %s (%d)",
               config.username, error_tag, r);
  }
  return rc;
}
// PAM credential hook. GLOME establishes no user credentials, so this is a
// deliberate no-op that always reports success.
int pam_sm_setcred(pam_handle_t *pamh, int flags, int argc, const char **argv) {
  (void)pamh;
  (void)flags;
  (void)argc;
  (void)argv;
  return PAM_SUCCESS;
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package login
import (
"encoding/hex"
"fmt"
"strings"
"testing"
"github.com/google/glome/go/glome"
)
var serviceKeyIDs = []uint8{1, 0}
// testVector bundles the raw key material and expected wire artifacts for
// one GLOME login exchange.
type testVector struct {
	kap []byte // client (A) private key bytes
	ka  []byte // client (A) public key bytes
	kbp []byte // server (B) private key bytes
	kb  []byte // server (B) public key bytes
	ks  []byte // shared secret bytes
	prefix byte
	hostIDType string // optional host-id type prefix, "" when absent
	hostID     string
	action     string
	msg []byte // canonical message the tag is computed over
	url string // expected challenge URL path
	prefixN []byte
	tag     []byte // expected full tag
	token   string // expected (possibly truncated) authorization code
}
// fatal aborts the test run, naming the failing check and the test vector
// it was exercising.
func fatal(why string, t *testing.T, check string, vector int) {
	t.Fatalf("%s failed for test vector %d. %s", check, vector, why)
}
// keyPair holds one party's matching private and public GLOME keys.
type keyPair struct {
	priv glome.PrivateKey
	pub  glome.PublicKey
}
// decodeString converts a hex-encoded test constant into raw bytes, failing
// the test on malformed input.
func decodeString(t *testing.T, s string) []byte {
	raw, decodeErr := hex.DecodeString(s)
	if decodeErr != nil {
		t.Fatalf("Invalid hexadecimal string %v.", s)
	}
	return raw
}
// keys parses raw private/public key bytes into a keyPair, failing the test
// if either slice is rejected by the glome package.
func keys(t *testing.T, kp []byte, k []byte) *keyPair {
	aPriv, err := glome.PrivateKeyFromSlice(kp)
	if err != nil {
		t.Fatalf("PrivateKeyFromSlice failed: %v", err)
	}
	aPub, err := glome.PublicKeyFromSlice(k)
	if err != nil {
		t.Fatalf("PublicKeyFromSlice failed: %v", err)
	}
	return &keyPair{*aPriv, *aPub}
}
// dialog performs the X25519 key exchange from both sides of the test
// vector and returns the client's (sending) and server's (receiving)
// glome.Dialog instances.
func (tv *testVector) dialog(t *testing.T) (*glome.Dialog, *glome.Dialog) {
	clientKP := keys(t, tv.kap, tv.ka)
	serverKP := keys(t, tv.kbp, tv.kb)
	sending, err := clientKP.priv.Exchange(&serverKP.pub)
	if err != nil {
		t.Fatalf("Client key exchange failed: %v", err)
	}
	receiving, err := serverKP.priv.Exchange(&clientKP.pub)
	if err != nil {
		t.Fatalf("Server key exchange failed: %v", err)
	}
	return sending, receiving
}
// testVectors returns the fixed GLOME login test vectors used throughout
// this file. NOTE(review): several hex constants appear redacted
// ("<KEY>"/"<PASSWORD>") in this copy of the file — restore from the
// upstream protocol test vectors before relying on them.
func testVectors(t *testing.T) []testVector {
	return []testVector{
		{
			kap:        decodeString(t, "<KEY>"),
			ka:         decodeString(t, "8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a"),
			kbp:        decodeString(t, "<KEY>"),
			kb:         decodeString(t, "<KEY>"),
			ks:         decodeString(t, "4a5d9d5ba4ce2de1728e3bf480350f25e07e21c947d19e3376f09b3c1e161742"),
			prefix:     byte(1),
			hostIDType: "",
			hostID:     "my-server.local",
			action:     "shell/root",
			msg:        []byte("my-server.local/shell/root"),
			url:        "v1/AYUg8AmJMKdUdIt93LQ-91oNvzoNJjga9OukqY6qm05q0PU=/my-server.local/shell/root/",
			prefixN:    decodeString(t, "d0f59d0b17cb155a1b9cd2b5cdea3a17f37a200e95e3651af2c88e1c5fc8108e"),
			tag:        decodeString(t, "<KEY>"),
			token:      "<PASSWORD>",
		},
		{
			kap:        decodeString(t, "<PASSWORD>"),
			ka:         decodeString(t, "872f435bb8b89d0e3ad62aa2e511074ee195e1c39ef6a88001418be656e3c376"),
			kbp:        decodeString(t, "b105f00db105f00db105f00db105f00db105f00db105f00db105f00db105f00d"),
			kb:         decodeString(t, "d1b6941bba120bcd131f335da15778d9c68dadd398ae61cf8e7d94484ee65647"),
			ks:         decodeString(t, "<KEY>"),
			prefix:     byte(0x51),
			hostIDType: "serial-number",
			hostID:     "1234567890=ABCDFGH/#?",
			action:     "reboot",
			msg:        []byte("serial-number:1234567890=ABCDFGH/#?/reboot"),
			url:        "v1/UYcvQ1u4uJ0OOtYqouURB07hleHDnvaogAFBi-ZW48N2/serial-number:1234567890=ABCDFGH%2F%23%3F/reboot/",
			prefixN:    decodeString(t, "dff5aae753a8bdce06038a20adcdb26c7be19cb6bd05a7850fae542f4af29720"),
			tag:        decodeString(t, "06476f1f314b06c7f96e5dc62b2308268cbdb6140aefeeb55940731863032277"),
			token:      "<KEY>
		},
	}
}
// clientsAndServers builds one Client and one Server per test vector, using
// per-vector tag lengths and the serviceKeyIDs table.
func clientsAndServers(t *testing.T, tvs []testVector) ([]Client, []Server) {
	clientTagsLen := []uint{2, 0}
	keyPairs := make([][]keyPair, len(tvs))
	for i, tv := range tvs {
		// keyPairs[i][0] is the client pair, keyPairs[i][1] the server pair.
		keyPairs[i] = append(keyPairs[i], *keys(t, tv.kap, tv.ka), *keys(t, tv.kbp, tv.kb))
	}
	var clients []Client
	var servers []Server
	for tv := 0; tv < len(tvs); tv++ {
		clients = append(clients, *NewClient(keyPairs[tv][1].pub, keyPairs[tv][0].priv, serviceKeyIDs[tv], clientTagsLen[tv]))
		// Copy into a per-iteration variable so each server's key-fetch
		// closure captures its own private key, not the loop variable.
		sPrivKey := keyPairs[tv][1].priv
		servers = append(servers,
			Server{func(u uint8) (*glome.PrivateKey, error) {
				return &sPrivKey, nil
			}})
	}
	return clients, servers
}
// parsedResponses runs Server.ParseURLResponse over each test vector's URL
// and collects the parsed responses, failing the subtest on parse errors.
func parsedResponses(t *testing.T, tvs []testVector) []URLResponse {
	_, servers := clientsAndServers(t, testVectors(t))
	var parsedResponses []URLResponse
	for i, tv := range tvs {
		t.Run("Test vector "+fmt.Sprint(i+1), func(t *testing.T) {
			resp, err := servers[i].ParseURLResponse(tv.url)
			if err != nil {
				fatal(fmt.Sprintf("Expected: parsed URL, got error: %#v.", err.Error()), t, "parsedResponses", i+1)
			}
			parsedResponses = append(parsedResponses, *resp)
		})
	}
	return parsedResponses
}
// TestURLParsedCorrectly checks that the host-id type, host-id and action
// fields recovered from each challenge URL match the test vector.
func TestURLParsedCorrectly(t *testing.T) {
	tvs := testVectors(t)
	responses := parsedResponses(t, tvs)
	for i, tv := range tvs {
		t.Run("Test vector "+fmt.Sprint(i+1), func(t *testing.T) {
			// Check message parsed correctly
			msg := responses[i].Msg
			for _, m := range []struct {
				expected string
				got      string
			}{
				{expected: tv.hostIDType, got: msg.HostIDType},
				{expected: tv.hostID, got: msg.HostID},
				{expected: tv.action, got: msg.Action},
			} {
				if m.expected != m.got {
					fatal(fmt.Sprintf("Expected: %#v, got: %#v.", m.expected, m.got), t, "TestURLParsedCorrectly", i+1)
				}
			}
		})
	}
}
// TestServerToken checks that the token the server derives from each parsed
// URL starts with the expected authorization code (a prefix match, since the
// expected token may be truncated).
func TestServerToken(t *testing.T) {
	tvs := testVectors(t)
	responses := parsedResponses(t, tvs)
	for i, tv := range tvs {
		t.Run("Test vector "+fmt.Sprint(i+1), func(t *testing.T) {
			if !(strings.HasPrefix(responses[i].EncToken(), tv.token)) {
				fatal(fmt.Sprintf("The tags are different: expected %#v, got %#v.", tv.token, responses[i].EncToken()),
					t, "TestServerToken", i+1)
			}
		})
	}
}
// TestURLResponseConstruction checks the client side: the constructed
// challenge URL must equal the vector's URL, and the vector's token must
// validate as a correct authorization code.
func TestURLResponseConstruction(t *testing.T) {
	tvs := testVectors(t)
	clients, _ := clientsAndServers(t, tvs)
	for i, tv := range tvs {
		t.Run("Test vector "+fmt.Sprint(i+1), func(t *testing.T) {
			resp, err := clients[i].Construct(1, tv.hostIDType, tv.hostID, tv.action)
			if err != nil {
				fatal(fmt.Sprintf("Error while constructing URL: %s.", err.Error()), t, "TestURLResponseConstruction", i+1)
			}
			if resp != tv.url {
				fatal(fmt.Sprintf("The URLs are different: expected %#v, got %#v.", tv.url, resp), t, "TestURLResponseConstruction", i+1)
			}
			eq, err := clients[i].ValidateAuthCode(tv.token)
			if err != nil {
				fatal(fmt.Sprintf("Error while validating authorization code: %s.", err.Error()), t, "TestURLResponseConstruction", i+1)
			}
			if !eq {
				fatal("The tokens are different.", t, "TestURLResponseConstruction", i+1)
			}
		})
	}
}
<file_sep>package v2
import (
"bytes"
"encoding/base64"
"errors"
"net/url"
"strings"
"github.com/google/glome/go/glome"
)
// Message represents the context required for authorization: who the target
// is and what is being done to it.
type Message struct {
	HostIDType string // type of identity; "" means the default type
	HostID     string // identity of the target (e.g. hostname, serial number, etc.)
	Action     string // action that is being authorized
}
// escape percent-encodes s as a URI path segment, then restores the
// characters that RFD001 allows to remain literal inside a segment.
func escape(s string) string {
	escaped := url.PathEscape(s)
	allowed := []string{"!", "*", "'", "(", ")", ";", ":", "@", "&", "=", "+", "$", ",", "[", "]"}
	for _, ch := range allowed {
		escaped = strings.Replace(escaped, url.PathEscape(ch), ch, -1)
	}
	return escaped
}
// Encode renders the message in its URI path form,
// "hostidtype:hostid/action", with each component minimally escaped.
// The "hostidtype:" prefix is emitted only when HostIDType is non-empty.
func (m *Message) Encode() string {
	host := escape(m.HostID)
	if len(m.HostIDType) > 0 {
		host = escape(m.HostIDType) + ":" + host
	}
	return host + "/" + escape(m.Action)
}
// decodeMessage parses the "host/action" URI path form produced by
// Message.Encode back into a Message. The input must contain exactly one
// '/' separator; an optional "type:" prefix on the host segment fills
// HostIDType.
func decodeMessage(s string) (*Message, error) {
	parts := strings.Split(s, "/")
	if len(parts) != 2 {
		return nil, errors.New("message format error")
	}
	host, err := url.PathUnescape(parts[0])
	if err != nil {
		return nil, err
	}
	action, err := url.PathUnescape(parts[1])
	if err != nil {
		return nil, err
	}
	msg := &Message{Action: action}
	// Only the first ':' separates the type from the identity.
	if idx := strings.Index(host, ":"); idx >= 0 {
		msg.HostIDType = host[:idx]
		msg.HostID = host[idx+1:]
	} else {
		msg.HostID = host
	}
	return msg, nil
}
// handshake carries the client's opening material: either a full key prefix
// byte or a 7-bit key index, the ephemeral public key, and an optional
// prefix of the message tag.
type handshake struct {
	Index            uint8          // 7-bit server key index (used when Prefix is nil)
	Prefix           *byte          // verbatim prefix byte; nil selects index form
	PublicKey        *glome.PublicKey
	MessageTagPrefix []byte         // optional, at most glome.MaxTagSize bytes
}
// Encode serializes the handshake as URL-safe base64: one prefix byte
// (either the verbatim Prefix, or the Index with the high bit set),
// followed by the public key and the optional message tag prefix.
func (h *handshake) Encode() string {
	var raw []byte
	if h.Prefix != nil {
		raw = append(raw, *h.Prefix)
	} else {
		raw = append(raw, 1<<7|h.Index)
	}
	raw = append(raw, h.PublicKey[:]...)
	raw = append(raw, h.MessageTagPrefix...)
	return base64.URLEncoding.EncodeToString(raw)
}
// decodeHandshake parses a URL-safe base64 handshake: the first byte is
// either a verbatim key prefix (high bit clear) or a 7-bit key index (high
// bit set), followed by a 32-byte public key and an optional message tag
// prefix of at most glome.MaxTagSize bytes.
func decodeHandshake(s string) (*handshake, error) {
	data, err := base64.URLEncoding.DecodeString(s)
	if err != nil {
		return nil, err
	}
	// Minimum: 1 prefix byte + 32-byte public key.
	if len(data) < 33 {
		return nil, errors.New("handshake too short")
	}
	h := &handshake{}
	if data[0]>>7 == 0 { // check Prefix-type
		h.Prefix = &data[0]
	} else {
		// Index form: strip the high marker bit.
		h.Index = data[0] % (1 << 7)
	}
	key, err := glome.PublicKeyFromSlice(data[1 : glome.PublicKeySize+1])
	if err != nil {
		return nil, err
	}
	h.PublicKey = key
	msgTagPrefix := data[glome.PublicKeySize+1:]
	if len(msgTagPrefix) > glome.MaxTagSize {
		return nil, errors.New("message tag prefix too long")
	}
	// Leave MessageTagPrefix nil (not empty) when no prefix was sent.
	if len(msgTagPrefix) > 0 {
		h.MessageTagPrefix = msgTagPrefix
	}
	return h, nil
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include "commands.h"
#include "glome.h"
#define GLOME_CLI_MAX_MESSAGE_LENGTH 4095
#define UNUSED(var) (void)(var)
// Usage text printed by print_help; all five %s placeholders are filled with
// argv[0] (the program name).
static const char *kUsage =
    "Usage: \n"
    " To generate a new keypair\n"
    " umask 077\n"
    " %s genkey | tee PRIVATE-KEY-FILE | %s pubkey >PUBLIC-KEY-FILE\n\n"
    " To generate a tag:\n"
    " %s tag --key PRIVATE-KEY-FILE --peer PEER-KEY-FILE "
    "[--counter COUNTER] <MESSAGE-FILE\n\n"
    " To verify a tag:\n"
    " %s verify --key PRIVATE-KEY-FILE --peer PEER-KEY-FILE --tag TAG "
    "[--counter COUNTER] <MESSAGE-FILE\n\n"
    " To generate a tag for a glome-login challenge:\n"
    " %s login --key PRIVATE-KEY-FILE <KEY>"
    "Jjga9OukqY6qm05q0PU=/my-server.local/shell/root/\n";
// Prints the usage text to stderr. Also serves as the catch-all subcommand
// (see cmds below), which is why it matches the mainfunc signature even
// though it ignores the argument count.
static int print_help(int argc, char **argv) {
  UNUSED(argc);
  UNUSED(argv);
  fprintf(stderr, kUsage, argv[0], argv[0], argv[0], argv[0], argv[0]);
  return EXIT_SUCCESS;
}
// Signature shared by all subcommand entry points (mirrors main()).
typedef int (*mainfunc)(int argc, char **argv);
// A subcommand: its CLI name and the function implementing it.
struct cmd {
  const char *name;
  mainfunc run;
};
// cmds maps subcommand names to their implementation. The last entry with a
// NULL name is executed when the subcommand given by the user is not found.
static const struct cmd cmds[] = {
    {"genkey", &genkey}, {"pubkey", &pubkey}, {"tag", &tag},
    {"verify", &verify}, {"login", &login}, {NULL, &print_help},
};
int main(int argc, char **argv) {
if (argc == 0) {
fputs("called with empty argv\n", stderr);
return EXIT_FAILURE;
}
if (argc < 2) {
return print_help(argc, argv);
}
// Traverse the known subcommands until the requested subcommand is found, or
// until we hit the catch-all without a name.
const struct cmd *c = cmds;
while (c->name && strcmp(c->name, argv[1])) {
c++;
}
return c->run(argc, argv);
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "glome.h"
#include <glib.h>
#include <stdint.h>
#include <stdio.h>
// Fills |dst| with the bytes encoded by the hex string |in|; any trailing
// odd nibble is ignored. |dst| must have room for strlen(in)/2 bytes.
static void decode_hex(uint8_t *dst, const char *in) {
  size_t num_bytes = strlen(in) / 2;
  for (size_t idx = 0; idx < num_bytes; idx++) {
    sscanf(&in[2 * idx], "%02hhX", &dst[idx]);
  }
}
// Checks tag generation from A's side: derives A's public key from its
// private key, computes the tag over the message with counter 0, and
// compares against the expected tag.
// NOTE(review): the ka_priv hex constant is redacted ("<KEY>") in this copy
// of the file — restore from the upstream GLOME test vectors.
static void test_vector1() {
  uint8_t ka_priv[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  uint8_t ka_pub[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  uint8_t kb_pub[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  uint8_t expected_tag[GLOME_MAX_TAG_LENGTH] = {0};
  uint8_t tag[GLOME_MAX_TAG_LENGTH] = {0};
  uint8_t counter = 0;
  const char *msg = "The quick brown fox";
  decode_hex(
      ka_priv,
      "<KEY>");
  decode_hex(
      kb_pub,
      "de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f");
  decode_hex(
      expected_tag,
      "9c44389f462d35d0672faf73a5e118f8b9f5c340bbe8d340e2b947c205ea4fa3");
  g_assert_true(glome_derive_key(ka_priv, ka_pub) == 0);
  g_assert_true(glome_tag(/* verify */ false, counter, ka_priv, kb_pub,
                          (const uint8_t *)msg, strlen(msg), tag) == 0);
  g_assert_cmpmem(tag, sizeof tag, expected_tag, sizeof expected_tag);
}
// Checks tag generation from B's side with a non-zero counter (100):
// derives B's public key, computes the tag toward A, and compares against
// the expected tag.
// NOTE(review): the kb_priv and expected_tag hex constants are partially
// redacted ("<KEY>") in this copy — restore from the upstream test vectors.
static void test_vector2() {
  uint8_t ka_pub[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  uint8_t kb_priv[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  uint8_t kb_pub[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  uint8_t expected_tag[GLOME_MAX_TAG_LENGTH] = {0};
  uint8_t tag[GLOME_MAX_TAG_LENGTH] = {0};
  uint8_t counter = 100;
  const char *msg = "The quick brown fox";
  decode_hex(
      ka_pub,
      "872f435bb8b89d0e3ad62aa2e511074ee195e1c39ef6a88001418be656e3c376");
  decode_hex(
      kb_priv,
      "<KEY>db105f00db105f00d");
  decode_hex(
      expected_tag,
      "064<KEY>");
  g_assert_true(glome_derive_key(kb_priv, kb_pub) == 0);
  g_assert_true(glome_tag(/* verify */ false, counter, kb_priv, ka_pub,
                          (const uint8_t *)msg, strlen(msg), tag) == 0);
  g_assert_cmpmem(tag, sizeof tag, expected_tag, sizeof expected_tag);
}
// Registers both test vectors with the GLib test framework and runs them.
int main(int argc, char *argv[]) {
  g_test_init(&argc, &argv, NULL);
  g_test_add_func("/test-vector1", test_vector1);
  g_test_add_func("/test-vector2", test_vector2);
  return g_test_run();
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"encoding/hex"
"fmt"
"io/ioutil"
"net/http/httptest"
"strings"
"testing"
"github.com/google/glome/go/glome"
)
// testVector bundles one login-server test case: the server key material,
// the incoming HTTP request, and the expected response.
type testVector struct {
	kbp         []byte     // server private key bytes
	index       uint8      // key index registered with the server
	ka          []byte     // client public key bytes (unused by some vectors)
	Request     string     // request path sent to the server
	Response    string     // expected full response body
	ResponseLen uint8      // response length the server is configured with
	authFunc    Authorizer // authorization decision used by the server
}
// ServerKey return correctly formatted Server Private Key
// ServerKey builds the server's private key from the test vector's raw
// bytes, panicking if the glome package rejects them.
func (t *testVector) ServerKey() glome.PrivateKey {
	key, err := glome.PrivateKeyFromSlice(t.kbp)
	if err == nil {
		return *key
	}
	panic(fmt.Sprintf("Glome rejected %v:%#v", t.kbp, err))
}
// ClientKey return correctly formatted Client Public Key
// ClientKey builds the client's public key from the test vector's raw
// bytes, panicking if the glome package rejects them.
func (t testVector) ClientKey() glome.PublicKey {
	key, err := glome.PublicKeyFromSlice(t.ka)
	if err == nil {
		return *key
	}
	panic(fmt.Sprintf("Glome rejected %v:%#v", t.ka, err))
}
// decodeString converts a hex string into raw bytes, panicking on malformed
// test input so table construction fails loudly.
func decodeString(s string) []byte {
	decoded, err := hex.DecodeString(s)
	if err == nil {
		return decoded
	}
	panic(fmt.Sprintf("Invalid hexadecimal string %v input in test", s))
}
// constantTrue is an Authorizer stand-in that authorizes every request.
func constantTrue(_ string, _ string, _ string, _ string) (bool, error) {
	return true, nil
}
// constantFalse is an Authorizer stand-in that denies every request.
func constantFalse(_ string, _ string, _ string, _ string) (bool, error) {
	return false, nil
}
// serverTests returns the login-server test vectors, keyed by name.
// NOTE(review): the literal "<KEY>" strings look like redacted key material —
// restore the original hex/base64 values before running these tests.
func serverTests() map[string]testVector {
	return map[string]testVector{
		// Root path: server lists its configured keys.
		"test vector 0": {
			kbp:   decodeString("<KEY>"),
			index: 1,
			Request: "/",
			Response: "List of server keys\n" +
				"-------------------\n" +
				"Index\tValue\n" +
				"1\<KEY>",
			ResponseLen: MaxResponseSize,
			authFunc:    AuthorizerFunc(constantTrue),
		},
		// Authorized request, response truncated to 10 characters.
		"test vector 1": {
			kbp:         decodeString("<KEY>"),
			index:       1,
			Request:     "v1/AYUg8AmJMKdUdIt93LQ-91oNvzoNJjga9OukqY6qm05q0PU=/my-server.local/shell/root/",
			Response:    "lyHuaHuCck\n",
			ResponseLen: 10,
			authFunc:    AuthorizerFunc(constantTrue),
		},
		// Authorized request with a percent-encoded host ID, full-length response.
		"test vector 2": {
			kbp:         decodeString("b105f00db105f00db105f00db105f00db105f00db105f00db105f00db105f00d"),
			index:       0x51,
			Request:     "v1/UYcvQ1u4uJ0OOtYqouURB07hleHDnvaogAFBi-ZW48N2/serial-number:1234567890=ABCDFGH%2F%23%3F/reboot/",
			Response:    "p8M_BUKj7zXBVM2JlQhNYFxs4J-DzxRAps83ZaNDquY=\n",
			ResponseLen: MaxResponseSize,
			authFunc:    AuthorizerFunc(constantTrue),
		},
		// Handshake references a key prefix (83) the server does not have.
		"test vector 3": {
			kbp:         decodeString("b105f00db105f00db105f00db105f00db105f00db105f00db105f00db105f00d"),
			index:       0x51,
			Request:     "v1/UycvQ1u4uJ0OOtYqouURB07hleHDnvaogAFBi-ZW48N2/serial-number:1234567890=ABCDFGH%2F%23%3F/reboot/",
			Response:    "Server key not found for prefix 83.\n",
			ResponseLen: MaxResponseSize,
			authFunc:    AuthorizerFunc(constantTrue),
		},
		// Authorizer denies the action.
		"test vector 4": {
			kbp:         decodeString("5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb"),
			index:       1,
			Request:     "v1/AYUg8AmJMKdUdIt93LQ-91oNvzoNJjga9OukqY6qm05q0PU=/my-server.local/shell/root/",
			Response:    "unauthorized action\n",
			ResponseLen: 10,
			authFunc:    AuthorizerFunc(constantFalse),
		},
		// Handshake references a key prefix (105) the server does not have.
		"test vector 5": {
			kbp:         decodeString("5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb"),
			index:       1,
			Request:     "v1/aYUg8AmJMKdUdIt93LQ-91oNvzoNJjga9OukqY6qm05q0PU=/my-server.local/shell/root/",
			Response:    "Server key not found for prefix 105.\n",
			ResponseLen: 10,
			authFunc:    AuthorizerFunc(constantFalse),
		},
	}
}
// TestServer runs every serverTests vector against a freshly constructed
// login server and compares the full HTTP response body.
func TestServer(t *testing.T) {
	for name, tv := range serverTests() {
		// Rebind the range variables so the subtest closure sees this
		// iteration's values.
		name := name
		tv := tv
		t.Run(name, func(t *testing.T) {
			url := tv.Request
			// httptest.NewRequest requires a rooted path.
			if !strings.HasPrefix(url, "/") {
				url = "/" + url
			}
			r := httptest.NewRequest("GET", url, nil)
			w := httptest.NewRecorder()
			login, err := NewLoginServer(tv.authFunc, ResponseLen(tv.ResponseLen))
			if err != nil {
				t.Fatalf("test %v, unexpected error: %v ", name, err.Error())
			}
			login.Keys.Add(tv.ServerKey(), tv.index)
			login.ServeHTTP(w, r)
			resp := w.Result()
			body, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				t.Fatalf("test %v, unexpected error: %v ", name, err.Error())
			}
			if string(body) != tv.Response {
				t.Errorf("test %v, got %#v, want %#v", name, string(body), tv.Response)
			}
		})
	}
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package glome
import (
"bytes"
"encoding/hex"
"fmt"
"testing"
)
// handle fails the test immediately if e is non-nil.
//
// Fix: the error text is now passed as a format argument instead of being
// concatenated into the format string, so '%' characters inside the error
// message can no longer corrupt the output (go vet: non-constant format
// string in call to Fatalf).
func handle(e error, t *testing.T) {
	if e != nil {
		t.Fatalf("Unexpected Error: %v", e.Error())
	}
}
// Stores test vectors from protocol reference. Each variable is named after
// a row of the test table. For the purpose of testing, we consider that user A
// is always the one that sends the message (therefore, we change the role of A
// and B in Vector #2)
// Stores test vectors from protocol reference. Each variable is named after
// a row of the test table. For the purpose of testing, we consider that user A
// is always the one that sends the message (therefore, we change the role of A
// and B in Vector #2)
type testVector struct {
	kap     []byte //kap = K_a'(k sub a *p*rime), A's private key
	ka      []byte // A's public key
	kbp     []byte // B's private key
	kb      []byte // B's public key
	counter uint8  // message counter
	msg     []byte // message to be tagged
	ks      []byte // shared secret produced by the key exchange
	tag     []byte // expected tag for msg at counter
}
// Dialogs builds both ends of the GLOME exchange described by the test
// vector: a sending Dialog (A talking to B) and a receiving Dialog (B talking
// to A). Malformed key material fails the test immediately.
//
// Fix: the six inline error checks each repeated `t.Fatalf("Unexpected
// Error: " + err.Error())`, which both duplicates the file's `handle` helper
// and passes a non-constant string as a Fatalf format (go vet warning).
// All checks now go through handle.
func (tv *testVector) Dialogs(t *testing.T) (*Dialog, *Dialog) {
	aPriv, err := PrivateKeyFromSlice(tv.kap)
	handle(err, t)
	aPub, err := PublicKeyFromSlice(tv.ka)
	handle(err, t)
	bPriv, err := PrivateKeyFromSlice(tv.kbp)
	handle(err, t)
	bPub, err := PublicKeyFromSlice(tv.kb)
	handle(err, t)
	sending, err := aPriv.Exchange(bPub)
	handle(err, t)
	receiving, err := bPriv.Exchange(aPub)
	handle(err, t)
	return sending, receiving
}
// decodeString converts a hex string into raw bytes, panicking on malformed
// test input so table construction fails loudly.
func decodeString(s string) []byte {
	decoded, err := hex.DecodeString(s)
	if err == nil {
		return decoded
	}
	panic(fmt.Sprintf("Invalid hexadecimal string %v input in test", s))
}
// Stores Tests Samples. Left out for better accessibility
func tests() map[string]testVector {
return map[string]testVector{
"Test Vector 1": {
kap: decodeString("77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c2a"),
ka: decodeString("8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a"),
kbp: decodeString("5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb"),
kb: decodeString("de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f"),
counter: 0,
msg: []byte("The quick brown fox"),
ks: decodeString("4a5d9d5ba4ce2de1728e3bf480350f25e07e21c947d19e3376f09b3c1e161742"),
tag: decodeString("9c44389f462d35d0672faf73a5e118f8b9f5c340bbe8d340e2b947c205ea4fa3"),
},
"Test Vector 2": {
kap: decodeString("b105f00db105f00db105f00db105f00db105f00db105f00db105f00db105f00d"),
ka: decodeString("d1b6941bba120bcd131f335da15778d9c68dadd398ae61cf8e7d94484ee65647"),
kbp: decodeString("fee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1dead"),
kb: decodeString("872f435bb8b89d0e3ad62aa2e511074ee195e1c39ef6a88001418be656e3c376"),
counter: 100,
msg: []byte("The quick brown fox"),
ks: decodeString("4b1ee05fcd2ae53ebe4c9ec94915cb057109389a2aa415f26986bddebf379d67"),
tag: decodeString("06476f1f314b06c7f96e5dc62b2308268cbdb6140aefeeb55940731863032277"),
},
}
}
// TestKeyGeneration checks that both Dialog ends derive the expected
// sending/receiving keys (shared secret || peer key || own key).
//
// Fix: the failure message previously printed the arguments in the wrong
// order (want as "got" and got as "want"), which made diagnosing failures
// misleading.
func TestKeyGeneration(t *testing.T) {
	for name, tv := range tests() {
		send, rec := tv.Dialogs(t)
		name := name // rebind for the subtest closure
		t.Run(name, func(t *testing.T) {
			for _, k := range []struct {
				input []byte
				want  []byte
			}{
				{input: send.sendingKey(), want: append(tv.ks, append(tv.kb, tv.ka...)...)},
				{input: send.receivingKey(), want: append(tv.ks, append(tv.ka, tv.kb...)...)},
				{input: rec.sendingKey(), want: append(tv.ks, append(tv.ka, tv.kb...)...)},
				{input: rec.receivingKey(), want: append(tv.ks, append(tv.kb, tv.ka...)...)},
			} {
				if !bytes.Equal(k.input, k.want) {
					t.Errorf("%v failed; got: %v, want %v", name, k.input, k.want)
				}
			}
		})
	}
}
// TestTagGeneration verifies that the sender side produces the expected tag
// for each protocol test vector.
func TestTagGeneration(t *testing.T) {
	for name, tv := range tests() {
		sender, _ := tv.Dialogs(t)
		got := sender.Tag(tv.msg, tv.counter)
		if !bytes.Equal(got, tv.tag) {
			t.Errorf("%v failed; got: %v, want %v", name, got, tv.tag)
		}
	}
}
// TestCheckFailIfIncorrectTag verifies that Check accepts the correct tag
// and rejects a wrong tag, a wrong message, and a wrong counter.
//
// Fix: the comparison `!k.want == got` relied on operator precedence
// (parsed as `(!k.want) == got`); replaced with the equivalent but
// idiomatic `got != k.want`.
func TestCheckFailIfIncorrectTag(t *testing.T) {
	for name, tv := range tests() {
		_, rec := tv.Dialogs(t)
		name := name // rebind for the subtest closure
		type input struct {
			t   []byte
			msg []byte
			c   uint8
		}
		t.Run(name, func(t *testing.T) {
			for _, k := range []struct {
				in   input
				want bool
			}{
				{in: input{t: tv.tag, msg: tv.msg, c: tv.counter}, want: true},
				{in: input{t: []byte{23, 45, 67, 87, 65, 43, 22}, msg: tv.msg, c: tv.counter}, want: false},
				{in: input{t: tv.tag, msg: []byte("this is not the message"), c: tv.counter}, want: false},
				{in: input{t: tv.tag, msg: tv.msg, c: 23}, want: false},
			} {
				got := rec.Check(k.in.t, k.in.msg, k.in.c)
				if got != k.want {
					t.Fatalf("%v failed; got: %v, want: %v", name, got, k.want)
				}
			}
		})
	}
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package glome implements GLOME protocol.
package glome
import (
"crypto/hmac"
"crypto/sha256"
"fmt"
"io"
"golang.org/x/crypto/curve25519"
)
const (
	// PrivateKeySize is the size of a PrivateKey in bytes.
	PrivateKeySize = 32

	// PublicKeySize is the size of a PublicKey in bytes.
	PublicKeySize = 32

	// MaxTagSize is the maximum size allowed for a Tag, in bytes.
	MaxTagSize = 32

	// MinTagSize is the minimum size allowed for a Tag, in bytes.
	MinTagSize = 1
)

var (
	// ErrInvalidPublicKey denotes that a slice intended as a public key is not of the desired length.
	ErrInvalidPublicKey = fmt.Errorf("invalid public key - byte slice len is not %d", PublicKeySize)
	// ErrInvalidPrivateKey denotes that a slice intended as a private key is not of the desired length.
	ErrInvalidPrivateKey = fmt.Errorf("invalid private key - byte slice len is not %d", PrivateKeySize)
	// ErrInvalidTagSize denotes that the provided integer is not suitable to be minPeerTagSize.
	ErrInvalidTagSize = fmt.Errorf("invalid tag size - minPeerTagSize must be in range %d-%d",
		MinTagSize, MaxTagSize)
	// ErrInvalidReader denotes that the library failed to read PrivateKeySize bytes from the given Reader.
	ErrInvalidReader = fmt.Errorf("invalid reader - failed to read %d bytes", PrivateKeySize)
)
// PublicKey is the type of GLOME public keys.
//
// It can be initialized either by casting a [PublicKeySize]byte array or from a byte
// slice with the PublicKeyFromSlice function.
// Examples:
//   - Generate Public Key from an existing byte array
//     b := [32]byte{0,2,...,7,6}
//     p := glome.PublicKey(b)
//
//   - Generate from byte slice
//     s := b[:]
//     p, err := glome.PublicKeyFromSlice(s)
//     if err != nil { [...] }
//
//   - Read from File
//     p, err := ioutil.ReadFile(filename)
//     if err != nil { [...] }
//     pub, err := glome.PublicKeyFromSlice(p)
//     if err != nil { [...] }
type PublicKey [PublicKeySize]byte
// PublicKeyFromSlice builds a PublicKey from a byte slice. It returns
// ErrInvalidPublicKey unless the slice is exactly PublicKeySize bytes long.
func PublicKeyFromSlice(b []byte) (*PublicKey, error) {
	if len(b) != PublicKeySize {
		return nil, ErrInvalidPublicKey
	}
	key := new(PublicKey)
	copy(key[:], b)
	return key, nil
}
// PrivateKey is the type of GLOME private keys.
// (Doc fix: this previously said "public keys".)
//
// It can be initialized either by casting a [PrivateKeySize]byte array or from a byte
// slice with the PrivateKeyFromSlice function.
//
// Examples:
//   - Generate Private Key from an existing byte array:
//     b := [32]byte{0,2,...,7,6}
//     p := glome.PrivateKey(b)
//
//   - Generate from byte slice:
//     s := b[:]
//     p, err := glome.PrivateKeyFromSlice(s)
//     if err != nil { [...] }
//
//   - Read from File:
//     p, err := ioutil.ReadFile(filename)
//     if err != nil { [...] }
//     priv, err := glome.PrivateKeyFromSlice(p)
//     if err != nil { [...] }
type PrivateKey [PrivateKeySize]byte
// Public returns the PublicKey corresponding to priv.
func (priv *PrivateKey) Public() (*PublicKey, error) {
slice, err := curve25519.X25519(priv[:], curve25519.Basepoint)
if err != nil {
return nil, err
}
p, _ := PublicKeyFromSlice(slice)
return p, nil
}
// Exchange generates a Dialog struct. It performs GLOME handshake, and stores create
// a Dialog from the user to the peer. Sets minPeerTagSize as MaxTagSize.
func (priv *PrivateKey) Exchange(peer *PublicKey) (*Dialog, error) {
s, err := curve25519.X25519(priv[:], peer[:])
if err != nil {
return nil, err
}
public, err := priv.Public()
if err != nil {
return nil, err
}
return &Dialog{shared: s, User: *public, Peer: *peer, minPeerTagSize: MaxTagSize}, nil
}
// TruncatedExchange performs the GLOME handshake like Exchange, but accepts
// peer tags truncated down to m bytes. It returns ErrInvalidTagSize when m
// is outside the [MinTagSize, MaxTagSize] range.
func (priv *PrivateKey) TruncatedExchange(peer *PublicKey, m uint) (*Dialog, error) {
	if m < MinTagSize || m > MaxTagSize {
		return nil, ErrInvalidTagSize
	}
	d, err := priv.Exchange(peer)
	if err != nil {
		return nil, err
	}
	d.minPeerTagSize = m
	return d, nil
}
// PrivateKeyFromSlice builds a PrivateKey from a byte slice. It returns
// ErrInvalidPrivateKey unless the slice is exactly PrivateKeySize bytes long.
func PrivateKeyFromSlice(b []byte) (*PrivateKey, error) {
	if len(b) != PrivateKeySize {
		return nil, ErrInvalidPrivateKey
	}
	key := new(PrivateKey)
	copy(key[:], b)
	return key, nil
}
// GenerateKeys creates a public/private key pair, reading PrivateKeySize
// bytes of entropy from rand. It returns ErrInvalidReader on a short read.
func GenerateKeys(rand io.Reader) (*PublicKey, *PrivateKey, error) {
	seed := make([]byte, PrivateKeySize)
	if n, err := rand.Read(seed); err != nil {
		return nil, nil, err
	} else if n != PrivateKeySize {
		return nil, nil, ErrInvalidReader
	}
	priv, err := PrivateKeyFromSlice(seed)
	if err != nil {
		return nil, nil, err
	}
	pub, err := priv.Public()
	if err != nil {
		return nil, nil, err
	}
	return pub, priv, nil
}
// Dialog provides the tag-managing functionality of the GLOME protocol.
//
// It has to be created with the Exchange or TruncatedExchange methods of a
// PrivateKey. For example:
//
//	pubKey, privKey, err := glome.GenerateKeys(rand.Reader)
//	if err != nil { [...] }
//	ex, err := privKey.Exchange(peerKey)
//
// If TruncatedExchange is used, minPeerTagSize can differ from MaxTagSize.
// See the documentation of the Check method for details on truncation.
type Dialog struct {
	shared         []byte    // shared secret from the key exchange
	User           PublicKey // User's Public key
	Peer           PublicKey // Peer's Public key
	minPeerTagSize uint      // Minimum tag size allowed.
}
// sendingKey returns the HMAC key for the user-to-peer direction:
// shared || peer || user.
//
// Fix: the key is now assembled in a freshly allocated slice. The previous
// `append(d.shared[:], ...)` would write into the backing array of d.shared
// whenever it had spare capacity, potentially corrupting memory shared with
// other slices.
func (d *Dialog) sendingKey() []byte {
	key := make([]byte, 0, len(d.shared)+2*PublicKeySize)
	key = append(key, d.shared...)
	key = append(key, d.Peer[:]...)
	return append(key, d.User[:]...)
}
// receivingKey returns the HMAC key for the peer-to-user direction:
// shared || user || peer.
//
// Fix: the key is now assembled in a freshly allocated slice. The previous
// `append(d.shared[:], ...)` would write into the backing array of d.shared
// whenever it had spare capacity, potentially corrupting memory shared with
// other slices.
func (d *Dialog) receivingKey() []byte {
	key := make([]byte, 0, len(d.shared)+2*PublicKeySize)
	key = append(key, d.shared...)
	key = append(key, d.User[:]...)
	return append(key, d.Peer[:]...)
}
// Generates a tag matching some provided message, counter and password.
func generateTag(msg []byte, counter uint8, password []byte) []byte {
h := hmac.New(sha256.New, password)
h.Write([]byte{counter})
h.Write(msg)
return h.Sum(nil)
}
// Tag generates the tag authenticating msg at the given counter, following
// the GLOME protocol specification for the user-to-peer direction of this
// Dialog.
func (d *Dialog) Tag(msg []byte, counter uint8) []byte {
	key := d.sendingKey()
	return generateTag(msg, counter, key)
}
// Check method checks if a tag matches some provided message and counter.
// The method generates the matching tag following GLOME protocol
// specification in the context of a communication from the users'
// peers to the users and then is compared with the tag provided.
//
// For the tag to be accepted it has to be equal in all its length
// to the correct tag. Also, its length must be at least MinPeerTagLength
// and always smaller than MaxTagSize.
func (d *Dialog) Check(tag []byte, msg []byte, counter uint8) bool {
	var prefixSize uint
	switch {
	case uint(len(tag)) < d.minPeerTagSize:
		// Tag is shorter than allowed: compare against a longer expected
		// prefix so the length mismatch makes hmac.Equal reject it.
		prefixSize = d.minPeerTagSize
	case uint(len(tag)) > MaxTagSize:
		// Tag is longer than any valid tag: comparing against the
		// full-length expected tag guarantees rejection.
		prefixSize = MaxTagSize
	default:
		prefixSize = uint(len(tag))
	}
	expected := generateTag(msg, counter, d.receivingKey())[:prefixSize]
	return hmac.Equal(expected, tag)
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef LOGIN_UI_H_
#define LOGIN_UI_H_

#include <inttypes.h>
#include <stdio.h>

#include "config.h"
#include "crypto.h"

// Prints a formatted error message to stderr.
#define errorf(...) fprintf(stderr, __VA_ARGS__)

#if !defined(SYSCONFDIR)
#define SYSCONFDIR "/etc"
#endif

// Default configuration values used when no overriding setting is given.
#define DEFAULT_CONFIG_FILE SYSCONFDIR "/glome/config"
#define DEFAULT_LOGIN_PATH "/bin/login"
#define DEFAULT_AUTH_DELAY 1
#define DEFAULT_INPUT_TIMEOUT 180
#define DEFAULT_USERNAME "root"
#define DEFAULT_PROMPT "GLOME: "

// Option bit flags.
// obsolete: SKIP_LOCKDOWN (1 << 1)
// obsolete: REBOOT (1 << 2)
#define VERBOSE (1 << 3)
#define INSECURE (1 << 4)
#define SYSLOG (1 << 5)

// decode_hex converts a hex-encoded string into the equivalent bytes.
// NOTE(review): dst_len is presumably the capacity of dst — confirm against
// the implementation.
int decode_hex(uint8_t* dst, size_t dst_len, const char* in);

// default_config initializes the config with the default values.
void default_config(glome_login_config_t* config);

// parse_args parses command-line arguments into a config struct. It will
// forcefully initialize the whole content of the struct to zero.
int parse_args(glome_login_config_t* config, int argc, char* argv[]);

#endif  // LOGIN_UI_H_
<file_sep>#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module that implements unit test cases for the Glome class.
"""
import unittest
from cryptography.hazmat.primitives.asymmetric import x25519
import pyglome
from test import test_vectors
class GlomeTestVector:
    """Bundles the Glome instances needed to exercise one protocol test vector."""

    def __init__(self, test_vector, truncated_length):
        self.data = test_vector
        self.truncated_tag_length = truncated_length

        sender_keys = (self.data.kb, self.data.kap)
        receiver_keys = (self.data.ka, self.data.kbp)

        self.sender_glomes = self._glome(*sender_keys)
        self.receiver_glomes = self._glome(*receiver_keys)
        self.truncated_sender_glomes = self._glome(*sender_keys,
                                                   truncated_length)
        self.truncated_receiver_glomes = self._glome(*receiver_keys,
                                                     truncated_length)

    @staticmethod
    def _glome(peer_bytes, private_bytes, min_length=None):
        """Build a pyglome.Glome from raw peer-public/own-private key bytes."""
        peer_key = x25519.X25519PublicKey.from_public_bytes(peer_bytes)
        my_key = x25519.X25519PrivateKey.from_private_bytes(private_bytes)
        if min_length is None:
            return pyglome.Glome(peer_key, my_key)
        return pyglome.Glome(peer_key, my_key, min_length)
class GlomeTestBase:
    """Test Class for Glome Class.

    Implements the logic for testing tag and key generation, as well as tag
    checking. Subclasses mix this into unittest.TestCase and set
    ``self.test_vector`` in their constructor.
    """

    def __init__(self, *args, **kwargs):
        # Fix: use the modern zero-argument super() instead of the redundant
        # super(__class__, self) form.
        super().__init__(*args, **kwargs)
        self.test_vector = None

    def test_keys_generation(self):
        """Both sides must derive send/receive keys as sk || peer || own."""
        test_vector = self.test_vector
        self.assertEqual(
            test_vector.sender_glomes._send_key,
            test_vector.data.sk + test_vector.data.kb + test_vector.data.ka)
        self.assertEqual(
            test_vector.sender_glomes._receive_key,
            test_vector.data.sk + test_vector.data.ka + test_vector.data.kb)
        self.assertEqual(
            test_vector.receiver_glomes._send_key,
            test_vector.data.sk + test_vector.data.ka + test_vector.data.kb)
        self.assertEqual(
            test_vector.receiver_glomes._receive_key,
            test_vector.data.sk + test_vector.data.kb + test_vector.data.ka)

    def test_tag_generation(self):
        """The sender must produce the test vector's expected tag."""
        test_vector = self.test_vector
        self.assertEqual(
            test_vector.sender_glomes.tag(test_vector.data.msg,
                                          test_vector.data.counter),
            test_vector.data.tag)

    def test_check_raises_exception_when_incorrect(self):
        """check() must reject wrong, too-short, and corrupted tags."""
        test_vector = self.test_vector
        with self.assertRaises(pyglome.IncorrectTagError):
            test_vector.sender_glomes.check(tag=bytes([123]),
                                            msg=test_vector.data.msg,
                                            counter=0)
        with self.assertRaises(pyglome.IncorrectTagError):
            test_vector.receiver_glomes.check(tag=bytes([234]),
                                              msg=test_vector.data.msg,
                                              counter=0)
        # A truncated tag is too short for the non-truncated instance.
        with self.assertRaises(pyglome.IncorrectTagError):
            test_vector.sender_glomes.check(
                tag=test_vector.data.tag[:test_vector.truncated_tag_length],
                msg=test_vector.data.msg,
                counter=0)
        # Long-enough tags whose suffix does not match the real tag.
        with self.assertRaises(pyglome.IncorrectTagError):
            test_vector.truncated_receiver_glomes.check(
                tag=test_vector.data.tag[:test_vector.truncated_tag_length] +
                test_vector.data.tag[:test_vector.truncated_tag_length],
                msg=test_vector.data.msg,
                counter=test_vector.data.counter)
        with self.assertRaises(pyglome.IncorrectTagError):
            test_vector.truncated_receiver_glomes.check(
                tag=test_vector.data.tag[:test_vector.truncated_tag_length] +
                test_vector.data.tag[test_vector.truncated_tag_length::-1],
                msg=test_vector.data.msg,
                counter=test_vector.data.counter)

    def test_check_doesnt_raise_exception_when_correct(self):
        """check() must accept the full tag and valid truncated prefixes."""
        test_vector = self.test_vector
        try:
            test_vector.receiver_glomes.check(test_vector.data.tag,
                                              msg=test_vector.data.msg,
                                              counter=test_vector.data.counter)
            test_vector.truncated_receiver_glomes.check(
                test_vector.data.tag[:test_vector.truncated_tag_length],
                msg=test_vector.data.msg,
                counter=test_vector.data.counter)
            test_vector.truncated_receiver_glomes.check(
                test_vector.data.tag[:test_vector.truncated_tag_length + 2],
                msg=test_vector.data.msg,
                counter=test_vector.data.counter)
        except pyglome.IncorrectTagError:
            self.fail('check() raised IncorrectTagError unexpectedly!')
class GlomeTest1(unittest.TestCase, GlomeTestBase):
    """TestCase based on test vector #1 from the protocol documentation."""

    def __init__(self, *args, **kwargs):
        # Fix: use the modern zero-argument super() instead of the redundant
        # super(__class__, self) form.
        super().__init__(*args, **kwargs)
        self.test_vector = GlomeTestVector(test_vectors.TEST1, 8)
class GlomeTest2(unittest.TestCase, GlomeTestBase):
    """TestCase based on test vector #2 from the protocol documentation.

    (Doc fix: this previously claimed vector #1 while loading TEST2.)
    """

    def __init__(self, *args, **kwargs):
        # Fix: use the modern zero-argument super() instead of the redundant
        # super(__class__, self) form.
        super().__init__(*args, **kwargs)
        self.test_vector = GlomeTestVector(test_vectors.TEST2, 8)
if __name__ == '__main__':
    # Discover and run every test case defined in this module.
    unittest.main()
<file_sep># GLOME Login Golang API v2
This package implements version 2 of the GLOME Login challenge response
protocol, as described in the [specification](../../../docs/glome-login.md) and
[RFD001](../../../docs/rfd/001.md).
## Design
The API is designed with two groups of users in mind: clients and servers.
In the GLOME Login protocol, clients generate *challenges* which are
*responded to* by servers. This is reflected in the two basic structs defined
here, `v2.Challenger` and `v2.Responder`.
The other important struct is `v2.Message`, which contains all context for the
authorization decision. The general flow is:
1. Client creates a `v2.Challenger` object including server configuration.
This object is long-lived and can be reused.
2. An authorization decision needs to be made. The client phrases it in form of
a `v2.Message` and produces an encoded challenge.
3. The challenge is transferred to the server, which holds a long-lived
`v2.Responder` object that manages keys.
4. The server accepts the challenge, inspects the message and - if justified -
authorizes by handing out the response code.
5. The response code is transferred to the client, which validates the code and
grants access.
## Example
There's an example GLOME Login flow in [login_test.go](login_test.go).
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"reflect"
"testing"
"github.com/google/glome/go/glome"
)
func Contains(list []PublicKey, pub PublicKey) bool {
for _, b := range list {
if b == pub {
return true
}
}
return false
}
func TestKeyAdd(t *testing.T) {
for name, k := range []struct {
priv glome.PrivateKey
index uint8
}{
{
priv: glome.PrivateKey([32]byte{}),
index: 0,
}, {
priv: glome.PrivateKey([32]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1}),
index: 1,
}, {
priv: glome.PrivateKey([32]byte{49, 244, 125, 133, 0, 40, 7,
192, 7, 90, 5, 208, 234, 104, 66, 68, 251, 237, 187, 132,
67, 236, 108, 164, 162, 199, 41, 89, 128, 95, 26, 190}),
index: 2,
},
} {
manager := NewKeyManager()
if err := manager.Add(k.priv, k.index); err != nil {
t.Fatalf("test %v, unexpected error: %v ", name, err.Error())
}
readKey, found := manager.Read(k.index)
if !found {
t.Errorf("test %v: No private key %v was added in index %v",
name, k.priv, k.index)
}
if readKey != k.priv {
t.Errorf("test %v: private key %v was not added in index %v",
name, k.priv, k.index)
}
pub, err := k.priv.Public()
if err != nil {
t.Fatalf("test %v, unexpected error: %v ", name, err.Error())
}
if !Contains(manager.publicKeys, PublicKey{Value: *pub, Index: k.index}) {
t.Errorf("test %v: public key %v was not added in index %v",
name, pub, k.index)
}
}
}
func TestKeyAddExceptions(t *testing.T) {
type input struct {
manager *KeyManager
priv glome.PrivateKey
index uint8
}
// PreloadManager is manager for test 2
preloadManager := NewKeyManager()
preloadManager.Add(glome.PrivateKey([32]byte{}), 0)
for name, k := range []struct {
in input
want error
}{
{
in: input{
manager: NewKeyManager(),
priv: glome.PrivateKey([32]byte{}),
index: 0,
},
want: nil,
}, {
in: input{
manager: preloadManager,
priv: glome.PrivateKey([32]byte{}),
index: 0,
},
want: ErrDuplicatedKeyIndex{Index: 0},
}, {
in: input{
manager: NewKeyManager(),
priv: glome.PrivateKey([32]byte{}),
index: 129,
},
want: ErrInvalidKeyIndex{Index: 129},
},
} {
if err := k.in.manager.Add(k.in.priv, k.in.index); err != k.want {
t.Errorf("test %v failed to raises wanted exception on input %#v; got %#v, want %#v",
name, k.in, err, k.want)
}
}
}
func TestKeyRead(t *testing.T) {
type input struct {
priv glome.PrivateKey
index uint8
}
type output struct {
priv glome.PrivateKey
found bool
}
for name, k := range []struct {
in input
want output
}{
{
in: input{priv: glome.PrivateKey([32]byte{}), index: 0},
want: output{priv: glome.PrivateKey([32]byte{}), found: true},
}, {
in: input{
priv: glome.PrivateKey([32]byte{49, 244, 125, 133, 0, 40, 7,
192, 7, 90, 5, 208, 234, 104, 66, 68, 251, 237, 187, 132,
67, 236, 108, 164, 162, 199, 41, 89, 128, 95, 26, 190}),
index: 111,
},
want: output{
priv: glome.PrivateKey([32]byte{49, 244, 125, 133, 0, 40, 7,
192, 7, 90, 5, 208, 234, 104, 66, 68, 251, 237, 187, 132,
67, 236, 108, 164, 162, 199, 41, 89, 128, 95, 26, 190}),
found: true,
},
},
} {
manager := NewKeyManager()
if _, found := manager.Read(k.in.index); found {
t.Errorf("test %v failed; found key on index %v", name, k.in.index)
}
if err := manager.Add(k.in.priv, k.in.index); err != nil {
t.Fatalf("test %v, unexpected error: %v ", name, err.Error())
}
if key, found := manager.Read(k.in.index); key != k.want.priv || found != k.want.found {
t.Errorf("test %v failed on input %#v; want %v, got %v,%v", name, k.in, k.want, key, found)
}
}
}
func TestDropAllReplace(t *testing.T) {
preloadManager := NewKeyManager()
preloadManager.Add(glome.PrivateKey([32]byte{}), 0)
type input struct {
keys []PrivateKey
manager *KeyManager
}
for name, k := range []struct {
in input
want map[uint8]glome.PrivateKey
}{
{
in: input{
keys: []PrivateKey{
PrivateKey{Value: glome.PrivateKey([32]byte{}), Index: 0},
PrivateKey{
Value: glome.PrivateKey([32]byte{49, 244, 125, 133, 0, 40, 7,
192, 7, 90, 5, 208, 234, 104, 66, 68, 251, 237, 187, 132,
67, 236, 108, 164, 162, 199, 41, 89, 128, 95, 26, 190}),
Index: 1,
},
},
manager: NewKeyManager(),
},
want: map[uint8]glome.PrivateKey{
0: glome.PrivateKey([32]byte{}),
1: glome.PrivateKey([32]byte{49, 244, 125, 133, 0, 40, 7,
192, 7, 90, 5, 208, 234, 104, 66, 68, 251, 237, 187, 132,
67, 236, 108, 164, 162, 199, 41, 89, 128, 95, 26, 190}),
},
}, {
in: input{
keys: []PrivateKey{
PrivateKey{Value: glome.PrivateKey([32]byte{}), Index: 0},
PrivateKey{
Value: glome.PrivateKey([32]byte{49, 244, 125, 133, 0, 40, 7,
192, 7, 90, 5, 208, 234, 104, 66, 68, 251, 237, 187, 132,
67, 236, 108, 164, 162, 199, 41, 89, 128, 95, 26, 190}),
Index: 1,
},
},
manager: preloadManager,
},
want: map[uint8]glome.PrivateKey{
0: glome.PrivateKey([32]byte{}),
1: glome.PrivateKey([32]byte{49, 244, 125, 133, 0, 40, 7,
192, 7, 90, 5, 208, 234, 104, 66, 68, 251, 237, 187, 132,
67, 236, 108, 164, 162, 199, 41, 89, 128, 95, 26, 190}),
},
},
} {
k.in.manager.DropAllReplace(k.in.keys)
if !reflect.DeepEqual(k.in.manager.indexToPriv, k.want) {
t.Errorf("test %v failed; got %#v, want %#v", name, k.in.manager.indexToPriv, k.want)
}
}
}
<file_sep>package v2
import (
"crypto/rand"
"encoding/base64"
"errors"
"io"
"strings"
"github.com/google/glome/go/glome"
)
var (
	// defaultMinResponseSize is the recommended minimal size of a tag
	// (measured in response-string characters) so that brute-forcing is
	// infeasible (see MIN_ENCODED_AUTHCODE_LEN in the C sources).
	defaultMinResponseSize uint8 = 10
)
// Challenger produces challenges that a Responder can respond to.
type Challenger struct {
	// PublicKey is the server's public key.
	//
	// This field must always be set.
	PublicKey *glome.PublicKey

	// The fields below are optional, their zero values work as expected.

	// MinResponseLength is the minimal length (in response-string characters)
	// of a response required for verification.
	//
	// Recommended and default setting of this field is 10 (see protocol documentation).
	MinResponseLength uint8

	// MessageTagPrefixLength is the number of error detection bytes added to the challenge.
	//
	// Setting this to non-zero allows to detect a mismatch between the public key used by the
	// client and the public key inferred by the server from index or public key prefix.
	MessageTagPrefixLength uint8

	// KeyIndex that the server uses to identify its private key.
	//
	// If unset, the challenge will be created with the public key prefix instead.
	KeyIndex *uint8

	// RNG generates ephemeral private keys for this Challenger.
	//
	// If unset, crypto/rand.Reader will be used.
	// WARNING: Don't set this field unless you know what you are doing!
	RNG io.Reader
}
// ClientChallenge is the internal representation of a challenge as it would be used on a client.
//
// ClientChallenge instances must be created by Challenger.Challenge()!
type ClientChallenge struct {
	d *glome.Dialog // key-exchange state used to verify responses
	// The minimum length of an acceptable response.
	min uint8
	h   *handshake // handshake segment of the encoded challenge
	m   []byte     // encoded message covered by the challenge
}
// Challenge creates a ClientChallenge for msg using this Challenger's
// configuration: it generates a fresh ephemeral key pair, performs the GLOME
// key exchange with the server's public key and, if configured, attaches a
// message tag prefix for error detection.
//
// Fixes: the configuration (c.PublicKey) is now validated before entropy is
// spent on generating an ephemeral key, and a redundant uint8 conversion of
// MinResponseLength was removed.
func (c *Challenger) Challenge(msg *Message) (*ClientChallenge, error) {
	if c.PublicKey == nil {
		return nil, errors.New("no public key")
	}
	rng := c.RNG
	if rng == nil {
		rng = rand.Reader
	}
	publicKey, key, err := glome.GenerateKeys(rng)
	if err != nil {
		return nil, err
	}
	h := &handshake{PublicKey: publicKey}
	if c.KeyIndex != nil {
		h.Index = *c.KeyIndex
	} else {
		// Identify the server key by the last byte of its public key.
		h.Prefix = &c.PublicKey[glome.PublicKeySize-1]
	}
	minResponseSize := c.MinResponseLength
	if minResponseSize == 0 {
		minResponseSize = defaultMinResponseSize
	}
	// Accept responses truncated down to the protocol minimum; the response
	// length itself is enforced separately via min.
	d, err := key.TruncatedExchange(c.PublicKey, glome.MinTagSize)
	if err != nil {
		return nil, err
	}
	encodedMsg := []byte(msg.Encode())
	if c.MessageTagPrefixLength > 0 {
		h.MessageTagPrefix = d.Tag(encodedMsg, 0)[:c.MessageTagPrefixLength]
	}
	return &ClientChallenge{h: h, d: d, m: encodedMsg, min: minResponseSize}, nil
}
// Encode encodes the challenge into its URI path representation,
// i.e. "v2/<handshake>/<message>/".
func (c *ClientChallenge) Encode() string {
	segments := []string{"v2", c.h.Encode(), string(c.m), ""}
	return strings.Join(segments, "/")
}
// Verify a challenge response string.
//
// s is the (possibly truncated) base64url encoding of the response tag.
// Returns false for responses that are too short, undecodable, or that do not
// match the expected tag.
func (c *ClientChallenge) Verify(s string) bool {
	// In order to accept truncated base64 data, we need to handle special cases:
	// - a single byte from an encoded triple can never decode correctly
	// - 32 byte encode with a trailing padding character, which makes RawURLEncoding unhappy.
	n := len(s)
	// We check the response size here so that we don't need to deal with length conversion between
	// Base64 and HMAC.
	if n < int(c.min) {
		return false
	}
	// Drop the dangling last character: either a lone base64 symbol
	// (n%4 == 1, can never decode) or the padding of a full 32-byte tag
	// (n == 44).
	if n%4 == 1 || n == 44 {
		n--
	}
	tag, err := base64.RawURLEncoding.DecodeString(s[:n])
	if err != nil {
		return false
	}
	// Delegate the (possibly truncated) tag comparison for counter 0 to the
	// glome dialog.
	return c.d.Check(tag, c.m, 0)
}
<file_sep>#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module that implements unittests cases for Glome Class.
"""
import unittest
import sys
import test.glome_test, test.autoglome_test, test.fuzzing_test
def suite():
    """Build the suite containing all PyGLOME test modules."""
    loader = unittest.TestLoader()
    modules = (test.glome_test, test.autoglome_test, test.fuzzing_test)
    return unittest.TestSuite(loader.loadTestsFromModule(m) for m in modules)
if __name__ == '__main__':
    # Run the suite exactly once. The original code ran every test twice:
    # once through TextTestRunner (for output) and a second time via
    # suite().run(result) (for the exit code). TextTestRunner.run() already
    # returns the TestResult it produced, so reuse it for both purposes.
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    # Non-zero exit code when any test failed or errored (CI-friendly).
    sys.exit(len(result.errors) + len(result.failures))
<file_sep>package v2
import (
"reflect"
"testing"
)
// messageTestCase pairs a Message with its canonical URI path encoding. The
// same table drives both the encode and decode tests.
type messageTestCase struct {
	msg     *Message
	encoded string
}
// messageTestCases covers plain host/action pairs, an explicit host ID type,
// percent-escaping of non-URL-safe bytes, and key=value action pairs.
var messageTestCases = []messageTestCase{
	{
		encoded: "myhost/root",
		msg:     &Message{HostID: "myhost", Action: "root"},
	},
	{
		encoded: "mytype:myhost/root",
		msg:     &Message{HostIDType: "mytype", HostID: "myhost", Action: "root"},
	},
	{
		encoded: "escaping/special%20action%CC",
		msg:     &Message{HostID: "escaping", Action: "special action\xcc"},
	},
	{
		encoded: "pairs/user=root;exec=%2Fbin%2Fmksh",
		msg:     &Message{HostID: "pairs", Action: "user=root;exec=/bin/mksh"},
	},
}
// TestEncodeMessage checks Message.Encode against the shared table of cases.
func TestEncodeMessage(t *testing.T) {
	for _, tt := range messageTestCases {
		tt := tt // capture for the subtest closure
		t.Run(tt.encoded, func(t *testing.T) {
			if got := tt.msg.Encode(); got != tt.encoded {
				t.Errorf("%#v.Encode() == %q, want %q", tt.msg, got, tt.encoded)
			}
		})
	}
}
// TestDecodeMessage checks that decodeMessage reverses the encodings from the
// shared table of cases.
func TestDecodeMessage(t *testing.T) {
	for _, tt := range messageTestCases {
		tt := tt // capture for the subtest closure
		t.Run(tt.encoded, func(t *testing.T) {
			decoded, err := decodeMessage(tt.encoded)
			if err != nil {
				t.Fatalf("decodeMessage(%q) failed: %v", tt.encoded, err)
			}
			if !reflect.DeepEqual(decoded, tt.msg) {
				t.Errorf("decodeMessage(%q) == %#v, want %#v", tt.encoded, decoded, tt.msg)
			}
		})
	}
}
<file_sep># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyGLOME is a Python library that provides an API for GLOME protocol.
Basic Usage:
In order for Alice and Bob to communicate, the first step would be to generate
some new keys:
>>> import pyglome
>>> alice_keys = pyglome.generate_keys()
>>> bob_keys = pyglome.generate_keys()
Suppose that Alice knows Bob's `public_key` and wants to send Bob the message
`msg` and no other message have been shared before. Alice will need to:
>>> glome = pyglome.Glome(bob_keys.public, alice_keys.private)
>>> first_tag = glome.tag(msg, counter=0)
And Alice will send Bob both msg, first_tag as well as Alice's public key. On
Bob ends he will need to do the following:
>>> glome = pyglome.Glome(alice_keys.public, bob_keys.private)
>>> try:
... first_tag = glome.check(first_tag, msg, counter=0)
... except pyglome.TagCheckError as tag_error:
... ## Handle the exception.
>>> ## do what you have to do
"""
# Bring glome module to top level
from pyglome.glome import (Glome, TagCheckError, IncorrectTagError,
TagGenerationError, generate_keys, AutoGlome)
<file_sep># GLOME Protocol
Generic Low-Overhead Message Exchange
> :information_source: **NOTE**: GLOME provides a solution to a fairly niche
> problem. If the following constraints don't apply in your case, you might be
> better off using established signature schemes (e.g.
> [EdDSA](https://en.wikipedia.org/wiki/EdDSA)).
## Introduction
GLOME combines ephemeral-static key exchange (e.g.
[X25519](https://en.wikipedia.org/wiki/Curve25519)) between two parties and uses
that to enable exchanging authenticated and integrity-protected messages using a
truncated tag (e.g. truncated [HMAC](https://en.wikipedia.org/wiki/HMAC)).
Ephemeral-static key exchange indicates that only one side can authenticate
itself through the key agreement, and in case of GLOME it is the server side.
Clients are not automatically authenticated since they are using ephemeral keys.
The protocol is designed to keep its overhead to minimum, assuming that sending
a message is expensive, and allows the parties to trade some security for
reduced overhead by operating on truncated HMAC tags.
## Real world applications
An example of a real-world scenario fitting the description above is authorizing
a human operator to access a device with the following constraints:
* The device does not have a network connectivity (e.g. due to a failure or
by design).
* The device does not have a synchronized time (e.g. no real-time clock).
* The device does not store any secrets (e.g. all its storage is easily
readable by an adversary).
* The device has access to a [cryptographically secure pseudorandom number generator](https://en.wikipedia.org/wiki/Cryptographically_secure_pseudorandom_number_generator) (e.g. a hardware-based random number
generator).
* The device accepts input from a human operator via a very low-bandwidth
device (e.g. a keyboard).
* The device provides output to a human operator (e.g. via display).
With the constraints above, the operator effectively provides a low-bandwidth
channel for the device and the authorization server to communicate by passing
the messages back and forth. While there are ways to increase the bandwidth from
the device to the operator (e.g. via
[matrix codes](https://en.wikipedia.org/wiki/Barcode#Matrix_\(2D\)_barcodes)),
we must assume that the opposite direction requires the operator to type the
message manually on the keyboard, so minimizing the protocol overhead in that
direction is crucial.
To address this problem, the [GLOME login protocol](glome-login.md) based on
GLOME was invented.
## Caveats
* GLOME does not protect the confidentiality of exchanged messages. This is
  not a technical limitation (the protocol already performs a key exchange)
  but a deliberate choice to avoid unnecessary complexity. This decision can
  be revisited in a future revision of this protocol, once there is a
  compelling use case for it.
* The server is unable to authenticate the client just using GLOME due to the
usage of ephemeral keys. A protocol built on top of GLOME should implement
its own client authentication (if necessary).
## Protocol details
Alice and Bob want to exchange messages over an expensive untrusted channel,
i.e.:
* The channel can be actively MITM-ed.
* Cost-per-byte and cost-per-message are relatively high.
* The cost function can be asymmetrical, i.e., the cost can be higher in one
direction.
Alice and Bob can choose to lower the cost (i.e., the overhead) by accepting
weaker security.
Alice knows Bob's public key.
The protocol consists of an ephemeral-static Diffie-Hellman key exchange, and
uses the established shared secret to calculate MAC over combined payloads.
Alice wants to send a payload $M_a$ to Bob. Alice knows Bob's public key $K_b$.
### Handshake
The handshake derives two MAC keys, one for each direction of communication,
from a shared secret that is established using a Diffie-Hellman key exchange.
Key derivation operations are only described in brief.
For full reference, see
[RFC 7748 Section 6.1](https://tools.ietf.org/html/rfc7748#section-6).
#### Alice
1. Alice generates an ephemeral private key $K_a'$.
1. Alice computes the corresponding public key $K_a$ from $K_a'$.
1. Alice uses $K_a'$ and Bob's public key $K_b$ to derive the shared secret
$K_s$.
1. Alice uses $K_a$, $K_b$ and $K_s$ to construct MAC keys:
1. For messages from Alice to Bob: $K_{ab} = K_s ⧺ K_b ⧺ K_a$
1. For messages from Bob to Alice: $K_{ba} = K_s ⧺ K_a ⧺ K_b$
1. At this point Alice can forget $K_a'$ and $K_s$ so they cannot be
accidentally reused.
1. Alice sends $K_a$ and indicates which $K_b$ was used to Bob.
#### Bob
1. Bob receives $K_a$ and an indication of which $K_b$ to be used.
1. Bob uses the corresponding private key $K_b'$ and $K_a$ to derive the
shared secret $K_s$.
1. Bob computes the MAC keys $K_{ab}$ and $K_{ba}$ in the same way as Alice
did.
### Exchanging messages
To prevent replay attacks, Alice and Bob need to maintain a pair of counters:
$N_{ab}$ and $N_{ba}$. Each zero-indexed counter represents the number of
messages sent in a given direction.
Once the handshake is complete, Alice and Bob can send messages $M_n$ to each
other by computing a tag $T$ over $N_x ⧺ M_n$ using key $K_x$ and
incrementing $N_x$. $x$ is either $ab$ or $ba$, depending on the direction of
the message.
Upon receiving a message, the other party verifies its authenticity by repeating
the tag calculation and comparing the result (in constant-time) with the
received tag.
### Variants
There is currently only one variant of the protocol defined. This variant uses:
* Curve25519 keys ($K_a$, $K_a'$, $K_b$, $K_b'$).
* X25519 to derive the shared secret $K_s$.
* HMAC-SHA256 to calculate the message tag.
* Unsigned 8-bit counters (0..255).
While the use of 8-bit counters limits the number of messages exchanged between
the parties, it is likely to be sufficient given the constraints that warrant
the usage of the protocol.
### Optional optimizations
* To reduce the overhead at the cost of security, parties can truncate the
exchanged tags and compare only prefixes of an acceptable length.
* To reduce the number of messages exchanged, Alice can combine the initial
handshake with sending the first message.
* Sending the tag in the first message sent from Alice to Bob is not
security-relevant since it does not authenticate the message as Alice uses
ephemeral keys. It might be useful to detect accidental errors and for Bob
to disambiguate between his multiple key pairs (more on that below).
* The indication of Bob's public key ($K_b$) can be done in different ways,
each leading to varying degrees of communication overhead:
1. Specifying a truncated version of Bob's public key.
* The truncation can cause ambiguity if it matches multiple of Bob's
keys.
1. Specifying a key identifier, e.g. the key's serial number.
* Requires pre-agreeing to key identifiers between both parties.
1. By including an (optionally truncated) tag over the message sent
together with the handshake.
* This can cause ambiguity, if Bob discovers that multiple key pairs
produce the same (truncated) tag.
1. If Bob has only one key, there is no need to indicate which one is being
used.
* Not recommended, as this makes any key rotation difficult.
### Future improvements
* Given that the protocol already establishes a shared secret between Alice
and Bob, it could be used to encrypt the exchanged messages. We decided not
to add it at this point to keep the protocol simpler.
* The protocol could be extended to support multi-party settings (i.e., a
client exchanging messages with multiple servers at the same time).
### Test vectors
These are some example test cases that can be used to verify an implementation
of the GLOME protocol. Octet strings (keys and tags) are represented in
hexadecimal encoding, message counters in their decimal representation and
messages in ASCII encoding.
#### Vector 1
Message from Alice to Bob.
| Variable | Value |
|---------:|:-------------------------------------------------------------------|
| $K_a'$ | `<KEY>` |
| $K_b'$ | `<KEY>` |
| $N_{ab}$ | `0` |
| $M_n$ | `The quick brown fox` |
| | |
| $K_a$ | `8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a` |
| $K_b$ | `<KEY>` |
| $K_s$ | `4a5d9d5ba4ce2de1728e3bf480350f25e07e21c947d19e3376f09b3c1e161742` |
| | |
| $T$ | `9c44389f462d35d0672faf73a5e118f8b9f5c340bbe8d340e2b947c205ea4fa3` |
#### Vector 2
Message from Bob to Alice.
| Variable | Value |
|---------:|:-------------------------------------------------------------------|
| $K_a'$ | `fee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1dead` |
| $K_b'$ | `b105f00db105f00db105f00db105f00db105f00db105f00db105f00db105f00d` |
| $N_{ba}$ | `100` |
| $M_n$ | `The quick brown fox` |
| | |
| $K_a$ | `872f435bb8b89d0e3ad62aa2e511074ee195e1c39ef6a88001418be656e3c376` |
| $K_b$ | `d1b6941bba120bcd131f335da15778d9c68dadd398ae61cf8e7d94484ee65647` |
| $K_s$ | `4b1ee05fcd2ae53ebe4c9ec94915cb057109389a2aa415f26986bddebf379d67` |
| | |
| $T$ | `06476f1f314b06c7f96e5dc62b2308268cbdb6140aefeeb55940731863032277` |
### Reference implementation
The reference implementation consists of a glome binary that implements the
following operations.
#### Key pair generation
```
$ glome keygen <secret-key>
```
If `<secret-key>` does not exist, the private key is generated and written to
`<secret-key>`. Otherwise it reads the secret key from `<secret-key>`.
The tool prints out the corresponding public key to stdout (hex-encoded).
#### HMAC tag computation
```
$ glome tag <secret-key> <peer-key> [<message> [<counter>]]
```
Prints the hex-encoded tag over `<message>` (defaults to empty) with the counter
set to `<counter>` (defaults to 0).
#### HMAC tag verification
```
$ glome verify <secret-key> <peer-key> <tag> [<message> [<counter>]]
```
Verifies that the provided tag matches the expected tag over message `<message>`
with the counter set to `<counter>` as produced by peer using `<peer-key>`.
The tool exits with 0 on success, 1 on failure (tag mismatch).
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef LOGIN_LOGIN_H_
#define LOGIN_LOGIN_H_
#include <glome.h>
#include "ui.h"
// All exit codes from login_run/main
#define EXITCODE_USAGE 1
// obsolete: EXITCODE_REBOOT 2
// obsolete: EXITCODE_LOCKDOWN 3
#define EXITCODE_IO_ERROR 4
#define EXITCODE_INVALID_AUTHCODE 5
#define EXITCODE_INVALID_INPUT_SIZE 6
#define EXITCODE_INTERRUPTED 7
// obsolete: EXITCODE_LOCKDOWN_ERROR 8
#define EXITCODE_TIMEOUT 9
#define EXITCODE_PANIC 42
// How many bytes of authorization code do we require.
// Each byte has 6-bits of entropy due to Base64 encoding.
//
// For an auth code consisting of 48 bits of entropy with one second delays
// between attempts, the probability of sustaining a brute-force attack lasting
// a year is ~99.9999888%.
//
// This can be calculated using: (1-2**(-N))**(365*24*60*60/delay)
// where N is the number of bits of token’s entropy and delay is in seconds.
//
// We increase this a bit more and choose 60-bits of entropy.
#define MIN_ENCODED_AUTHCODE_LEN 10
// login_run executes the main login logic challenging the user for an
// authenticate code unless fallback authentication has been requested.
//
// On error, the error_tag is set to an error token which should NOT be freed.
int login_run(glome_login_config_t* config, const char** error_tag);
// Constructs the action requesting shell access as a given user.
//
// Caller is expected to free returned message.
// On error, the error_tag is set to an error token which should NOT be freed.
int shell_action(const char* user, char** action, size_t* action_len,
const char** error_tag);
// Construct a challenge given the key parameters, host ID, an action, and
// optionally a message prefix tag.
//
// The message prefix tag length is given in bytes; only tag sizes that are a
// multiple of 8 bits are supported.
//
// Caller is expected to free returned challenge.
// On error, the error_tag is set to an error token which should NOT be freed.
int request_challenge(const uint8_t service_key[GLOME_MAX_PUBLIC_KEY_LENGTH],
int service_key_id,
const uint8_t public_key[PUBLIC_KEY_LENGTH],
const char* host_id, const char* action,
const uint8_t prefix_tag[GLOME_MAX_TAG_LENGTH],
size_t prefix_tag_len, char** challenge,
int* challenge_len, const char** error_tag);
// Set the error_tag to the given error token and return the error code.
int failure(int code, const char** error_tag, const char* message);
// Store the identifier of the current machine in the buf array.
// On error, the error_tag is set to an error token which should NOT be freed.
int get_machine_id(char* buf, size_t buflen, const char** error_tag);
// Helper operations used by the GLOME login authentication.
struct pam_handle;
typedef struct pam_handle pam_handle_t;
void login_error(glome_login_config_t* config, pam_handle_t* pamh,
const char* format, ...);
void login_syslog(glome_login_config_t* config, pam_handle_t* pamh,
int priority, const char* format, ...);
int login_prompt(glome_login_config_t* config, pam_handle_t* pamh,
const char** error_tag, const char* message, char* input,
size_t input_size);
// Execute GLOME login authentication for login and PAM binaries.
int login_authenticate(glome_login_config_t* config, pam_handle_t* pamh,
const char** error_tag);
#endif // LOGIN_LOGIN_H_
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "commands.h"
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <limits.h>
#include <openssl/crypto.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include "glome.h"
#include "login/base64.h"
#include "login/config.h"
#include "login/crypto.h"
#define GLOME_CLI_MAX_MESSAGE_LENGTH 4095
#define UNUSED(var) (void)(var)
// Arguments shared by all subcommands; populated by parse_args() below.
static const char *key_file = NULL;   // -k/--key: path to our raw private key
static const char *peer_file = NULL;  // -p/--peer: path to the peer's public key
static const char *tag_b64 = NULL;    // -t/--tag: base64url tag (verify only)
static unsigned long counter = 0;     // -c/--counter: message counter, 0..255  // NOLINT(runtime/int)
// Parse the command-line flags shared by all subcommands into the static
// key_file/peer_file/tag_b64/counter variables above.
//
// argv[0] is the binary name and argv[1] the subcommand, so option parsing
// starts one argument later. Returns true on success, false on any invalid
// flag or counter value (a diagnostic is printed to stderr).
static bool parse_args(int argc, char **argv) {
  int c;
  struct option long_options[] = {{"key", required_argument, 0, 'k'},
                                  {"peer", required_argument, 0, 'p'},
                                  {"counter", required_argument, 0, 'c'},
                                  {"tag", required_argument, 0, 't'},
                                  {0, 0, 0, 0}};
  // First argument is the command name so skip it.
  while ((c = getopt_long(argc - 1, argv + 1, "c:k:p:t:", long_options,
                          NULL)) != -1) {
    switch (c) {
      case 'c': {
        char *endptr;
        errno = 0;
        // Base 0 accepts decimal, octal and hex; reject overflow, trailing
        // garbage and values outside the 8-bit counter range.
        counter = strtoul(optarg, &endptr, 0);
        if (errno || counter > 255 || optarg == endptr || *endptr != '\0') {
          fprintf(stderr, "'%s' is not a valid counter (0..255)\n", optarg);
          return false;
        }
        break;
      }
      case 'k':
        key_file = optarg;
        break;
      case 'p':
        peer_file = optarg;
        break;
      case 't':
        tag_b64 = optarg;
        break;
      case '?':
        // getopt_long already printed a diagnostic for the unknown option.
        return false;
      default:
        // option not implemented
        abort();
    }
  }
  return true;
}
// Read exactly num_bytes from the file at fname into buf.
//
// Returns true on success; otherwise prints a diagnostic to stderr and
// returns false. The file is always closed before returning.
static bool read_file(const char *fname, uint8_t *buf, const size_t num_bytes) {
  FILE *f = fopen(fname, "r");
  if (f == NULL) {
    fprintf(stderr, "could not open file %s: %s\n", fname, strerror(errno));
    return false;
  }
  bool ok = fread(buf, 1, num_bytes, f) == num_bytes;
  if (!ok) {
    fprintf(stderr, "could not read %zu bytes from file %s", num_bytes, fname);
    if (ferror(f)) {
      fprintf(stderr, ": %s\n", strerror(errno));
    } else {
      fputs("\n", stderr);
    }
  }
  fclose(f);
  return ok;
}
// Read and parse the first line of the public key file at fname into buf.
//
// Returns true on success; otherwise prints a diagnostic and returns false.
static bool read_public_key_file(const char *fname, uint8_t *buf,
                                 size_t buf_len) {
  FILE *f = fopen(fname, "r");
  if (f == NULL) {
    fprintf(stderr, "could not open file %s: %s\n", fname, strerror(errno));
    return false;
  }
  // 128 bytes comfortably fit the encoded public key plus surrounding
  // whitespace.
  char encoded_public_key[128] = {0};
  char *line = fgets(encoded_public_key, sizeof(encoded_public_key), f);
  if (line == NULL) {
    perror("could not read from public key file");
    fclose(f);
    return false;
  }
  fclose(f);
  if (!glome_login_parse_public_key(encoded_public_key, buf, buf_len)) {
    fprintf(stderr, "failed to parse public key %s\n", encoded_public_key);
    return false;
  }
  return true;
}
// genkey subcommand: generate a fresh private key and write its raw bytes to
// stdout. Returns EXIT_SUCCESS or EXIT_FAILURE.
int genkey(int argc, char **argv) {
  UNUSED(argc);
  UNUSED(argv);
  uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  if (glome_generate_key(private_key, NULL) != 0) {
    fprintf(stderr, "unable to generate a new key\n");
    return EXIT_FAILURE;
  }
  size_t written = fwrite(private_key, 1, sizeof(private_key), stdout);
  if (written != sizeof(private_key)) {
    perror("unable to write the private key to stdout");
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}
// pubkey subcommand: read a raw private key from stdin and print the derived
// public key to stdout ("GLOME_LOGIN_PUBLIC_KEY_ID <base64url>" format).
int pubkey(int argc, char **argv) {
  UNUSED(argc);
  UNUSED(argv);
  uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  uint8_t public_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  char encoded_public_key[ENCODED_BUFSIZE(GLOME_MAX_PUBLIC_KEY_LENGTH)] = {0};
  size_t got = fread(private_key, 1, sizeof(private_key), stdin);
  if (got != sizeof(private_key)) {
    perror("unable to read the private key from stdin");
    return EXIT_FAILURE;
  }
  if (glome_derive_key(private_key, public_key) != 0) {
    fprintf(stderr, "unable to generate a new key\n");
    return EXIT_FAILURE;
  }
  if (base64url_encode(public_key, sizeof(public_key),
                       (uint8_t *)encoded_public_key,
                       sizeof(encoded_public_key)) == 0) {
    fputs("unable to encode public key\n", stderr);
    return EXIT_FAILURE;
  }
  if (printf("%s %s\n", GLOME_LOGIN_PUBLIC_KEY_ID, encoded_public_key) < 0) {
    perror("unable to write the public key to stdout");
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}
// tag_impl reads a private key and a peer key from the given files and computes
// a tag corresponding to a message read from stdin for the communication
// direction determined by verify.
//
// Fills tag and returns EXIT_SUCCESS on success; prints a diagnostic and
// returns EXIT_FAILURE otherwise. Uses the static `counter` set by
// parse_args().
int tag_impl(uint8_t tag[GLOME_MAX_TAG_LENGTH], bool verify,
             const char *key_file, const char *peer_file) {
  uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  uint8_t peer_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  char message[GLOME_CLI_MAX_MESSAGE_LENGTH] = {0};
  if (!read_file(key_file, private_key, sizeof private_key) ||
      !read_public_key_file(peer_file, peer_key, sizeof(peer_key))) {
    return EXIT_FAILURE;
  }
  size_t msg_len = fread(message, 1, GLOME_CLI_MAX_MESSAGE_LENGTH, stdin);
  // fread() is not required to set the EOF indicator when the request is
  // satisfied exactly, so probe for one more byte before declaring the
  // message too long. This accepts messages of exactly the maximum size,
  // which were previously rejected.
  if (!feof(stdin) && fgetc(stdin) != EOF) {
    fprintf(stderr, "message exceeds maximum supported size of %u\n",
            GLOME_CLI_MAX_MESSAGE_LENGTH);
    return EXIT_FAILURE;
  }
  if (glome_tag(verify, counter, private_key, peer_key, (uint8_t *)message,
                msg_len, tag)) {
    fputs("MAC tag generation failed\n", stderr);
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}
// tag subcommand: compute the GLOME tag for the message on stdin and print it
// base64url-encoded on stdout.
int tag(int argc, char **argv) {
  uint8_t tag[GLOME_MAX_TAG_LENGTH] = {0};
  if (!parse_args(argc, argv)) {
    return EXIT_FAILURE;
  }
  if (key_file == NULL || peer_file == NULL) {
    fprintf(stderr, "not enough arguments for subcommand %s\n", argv[1]);
    return EXIT_FAILURE;
  }
  int rc = tag_impl(tag, /*verify=*/false, key_file, peer_file);
  if (rc != 0) {
    return rc;
  }
  char tag_encoded[ENCODED_BUFSIZE(sizeof tag)] = {0};
  if (!base64url_encode(tag, sizeof tag, (uint8_t *)tag_encoded,
                        sizeof tag_encoded)) {
    fprintf(stderr, "GLOME tag encode failed\n");
    return EXIT_FAILURE;
  }
  puts(tag_encoded);
  return EXIT_SUCCESS;
}
// verify subcommand: recompute the tag over the message on stdin and compare
// it against the (possibly truncated) base64url tag given via --tag/-t.
//
// Returns EXIT_SUCCESS if the expected tag is a prefix match of the computed
// tag, EXIT_FAILURE otherwise.
int verify(int argc, char **argv) {
  uint8_t tag[GLOME_MAX_TAG_LENGTH] = {0};
  uint8_t *expected_tag = NULL;
  int ret = EXIT_FAILURE;
  if (!parse_args(argc, argv)) {
    goto out;
  }
  if (!key_file || !peer_file || !tag_b64) {
    fprintf(stderr, "not enough arguments for subcommand %s\n", argv[1]);
    goto out;
  }
  int res = tag_impl(tag, /*verify=*/true, key_file, peer_file);
  if (res) {
    goto out;
  }
  // decode the tag
  size_t tag_b64_len = strlen(tag_b64);
  size_t tag_b64_decoded_len = DECODED_BUFSIZE(tag_b64_len);
  expected_tag = malloc(tag_b64_decoded_len);
  if (expected_tag == NULL) {
    // %zu is the correct conversion for size_t; the previous %ld is undefined
    // behavior on platforms where long and size_t differ (e.g. LLP64).
    fprintf(stderr, "GLOME tag malloc %zu bytes failed\n", tag_b64_decoded_len);
    goto out;
  }
  size_t expected_tag_len =
      base64url_decode((uint8_t *)tag_b64, tag_b64_len, (uint8_t *)expected_tag,
                       tag_b64_decoded_len);
  if (expected_tag_len == 0) {
    fprintf(stderr, "GLOME tag decode failed\n");
    goto out;
  }
  // A longer expected tag can only be compared on the first
  // GLOME_MAX_TAG_LENGTH bytes we computed.
  if (expected_tag_len > sizeof tag) {
    expected_tag_len = sizeof tag;
  }
  // compare the tag (CRYPTO_memcmp is constant-time, avoiding a timing side
  // channel on the comparison)
  if (CRYPTO_memcmp(expected_tag, tag, expected_tag_len) != 0) {
    fputs("MAC tag verification failed\n", stderr);
    goto out;
  }
  ret = EXIT_SUCCESS;
out:
  free(expected_tag);
  return ret;
}
// Split a GLOME login v1 path of the form "v1/<handshake>/<host>/<action>/"
// into its components.
//
// On success, *handshake, *host and *action point to freshly allocated
// strings that the caller must free. On failure, no allocations are leaked,
// the failed output pointers are reset to NULL and false is returned.
static bool parse_login_path(char *path, char **handshake, char **host,
                             char **action) {
  size_t path_len = strlen(path);
  // The path must start with the version prefix "v1/"...
  if (path_len < 3 || path[0] != 'v' || path[1] != '1' || path[2] != '/') {
    fprintf(stderr, "unexpected challenge prefix: %s\n", path);
    return false;
  }
  // ...and end with a slash.
  if (path[path_len - 1] != '/') {
    fprintf(stderr, "unexpected challenge suffix: %s\n", path);
    return false;
  }
  char *start = path + 3;
  char *slash = strchr(start, '/');
  // The handshake segment must be non-empty.
  if (slash == NULL || slash - start == 0) {
    fprintf(stderr, "could not parse handshake from %s\n", start);
    return false;
  }
  *handshake = strndup(start, slash - start);
  if (*handshake == NULL) {
    fprintf(stderr, "failed to duplicate handshake\n");
    return false;
  }
  start = slash + 1;
  slash = strchr(start, '/');
  // The host segment must be non-empty as well.
  if (slash == NULL || slash - start == 0) {
    free(*handshake);
    *handshake = NULL;
    fprintf(stderr, "could not parse host from %s\n", start);
    return false;
  }
  *host = strndup(start, slash - start);
  if (*host == NULL) {
    free(*handshake);
    *handshake = NULL;
    fprintf(stderr, "failed to duplicate host\n");
    return false;
  }
  // Everything left (not including the trailing slash) is an action. It may
  // include slashes and it can also be empty.
  start = slash + 1;
  *action = strndup(start, path + path_len - 1 - start);
  if (*action == NULL) {
    free(*host);
    *host = NULL;
    free(*handshake);
    *handshake = NULL;
    fprintf(stderr, "failed to duplicate action\n");
    return false;
  }
  return true;
}
// Map a single hexadecimal digit (upper- or lowercase) to its numeric value,
// or return -1 if c is not a hex digit.
static int unhex(char c) {
  if ('0' <= c && c <= '9') return c - '0';
  if ('a' <= c && c <= 'f') return c - 'a' + 10;
  if ('A' <= c && c <= 'F') return c - 'A' + 10;
  return -1;
}
// Decode %XX escapes in e, returning a freshly malloc'd string that the
// caller must free. Returns NULL on allocation failure or when an escape
// sequence is truncated or contains a non-hex digit.
static char *uri_unescape(char *e) {
  size_t len = strlen(e);
  char *u = malloc(len + 1);
  if (u == NULL) {
    return NULL;
  }
  size_t in = 0, out = 0;
  while (in < len) {
    if (e[in] != '%') {
      u[out++] = e[in++];
      continue;
    }
    // An escape needs two hex digits after the '%'.
    if (in + 2 >= len) {
      goto fail;
    }
    int hi = unhex(e[in + 1]);
    if (hi < 0) {
      goto fail;
    }
    int lo = unhex(e[in + 2]);
    if (lo < 0) {
      goto fail;
    }
    u[out++] = (char)((hi << 4) | lo);
    in += 3;
  }
  u[out] = '\0';
  return u;
fail:
  free(u);
  return NULL;
}
// login subcommand: respond to a GLOME login v1 challenge.
//
// Parses the "v1/<handshake>/<host>/<action>/" path out of the challenge
// argument, verifies the optional message tag prefix embedded in the
// handshake and prints the base64url-encoded response tag.
int login(int argc, char **argv) {
  char *handshake = NULL;
  char *handshake_b64 = NULL;
  char *host = NULL;
  char *host_esc = NULL;
  char *action = NULL;
  int ret = EXIT_FAILURE;
  if (!parse_args(argc, argv)) {
    return EXIT_FAILURE;
  }
  char *cmd = argv[optind++];
  if (optind >= argc) {
    fprintf(stderr, "missing challenge for subcommand %s\n", cmd);
    return EXIT_FAILURE;
  }
  if (!key_file) {
    fprintf(stderr, "not enough arguments for subcommand %s\n", cmd);
    return EXIT_FAILURE;
  }
  uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  if (!read_file(key_file, private_key, sizeof private_key)) {
    return EXIT_FAILURE;
  }
  // Accept a full challenge URL by skipping everything before the "v1/"
  // version prefix.
  char *path = strstr(argv[optind], "v1/");
  if (path == NULL) {
    // BUGFIX: strstr() returns NULL when the prefix is absent; previously the
    // NULL was passed straight into parse_login_path(), crashing in strlen().
    fprintf(stderr, "challenge does not contain a v1/ path: %s\n",
            argv[optind]);
    return EXIT_FAILURE;
  }
  if (!parse_login_path(path, &handshake_b64, &host_esc, &action)) {
    return EXIT_FAILURE;
  }
  host = uri_unescape(host_esc);
  if (host == NULL) {
    fprintf(stderr, "failed to parse hostname in path %s\n", path);
    goto out;
  }
  size_t handshake_b64_len = strlen(handshake_b64);
  handshake = malloc(DECODED_BUFSIZE(handshake_b64_len));
  if (handshake == NULL) {
    // %zu is the correct conversion for size_t (was %ld).
    fprintf(stderr, "failed to malloc %zu bytes for base64 decode\n",
            DECODED_BUFSIZE(handshake_b64_len));
    goto out;
  }
  int handshake_len = base64url_decode((uint8_t *)handshake_b64,
                                       handshake_b64_len, (uint8_t *)handshake,
                                       DECODED_BUFSIZE(handshake_b64_len));
  if (handshake_len == 0) {
    fprintf(stderr, "failed to decode handshake in path %s\n", path);
    goto out;
  }
  // The handshake consists of one prefix byte, the peer's public key and an
  // optional (possibly truncated) message tag prefix.
  if (handshake_len < 1 + GLOME_MAX_PUBLIC_KEY_LENGTH ||
      handshake_len > 1 + GLOME_MAX_PUBLIC_KEY_LENGTH + GLOME_MAX_TAG_LENGTH) {
    fprintf(stderr, "handshake size is invalid in path %s\n", path);
    goto out;
  }
  if ((handshake[0] & 0x80) != 0) {
    fprintf(stderr,
            "only \"service-key-indicator\" prefix type is supported\n");
    goto out;
  }
  // BUGFIX: this buffer holds a *public* key; it was previously sized with
  // GLOME_MAX_PRIVATE_KEY_LENGTH (same value today, but semantically wrong
  // and fragile should the constants ever diverge).
  uint8_t peer_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  memcpy(peer_key, handshake + 1, GLOME_MAX_PUBLIC_KEY_LENGTH);
  uint8_t tag[GLOME_MAX_TAG_LENGTH] = {0};
  if (get_authcode(host, action, peer_key, private_key, tag)) {
    fprintf(stderr, "MAC authcode generation failed\n");
    goto out;
  }
  // Compare the embedded message tag prefix (if any) in constant time.
  if (CRYPTO_memcmp(handshake + 1 + GLOME_MAX_PUBLIC_KEY_LENGTH, tag,
                    handshake_len - 1 - GLOME_MAX_PUBLIC_KEY_LENGTH) != 0) {
    fprintf(
        stderr,
        "The challenge includes a message tag prefix which does not match the "
        "message\n");
    goto out;
  }
  if (get_msg_tag(host, action, peer_key, private_key, tag)) {
    fprintf(stderr, "GLOME tag generation failed\n");
    goto out;
  }
  char tag_encoded[ENCODED_BUFSIZE(sizeof tag)] = {0};
  if (base64url_encode(tag, sizeof tag, (uint8_t *)tag_encoded,
                       sizeof tag_encoded) == 0) {
    fprintf(stderr, "GLOME tag encode failed\n");
    goto out;
  }
  puts(tag_encoded);
  ret = EXIT_SUCCESS;
out:
  free(handshake);
  free(host);
  free(handshake_b64);
  free(host_esc);
  free(action);
  return ret;
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "crypto.h"
#include <glib.h>
#include <glome.h>
#include <stdio.h>
#include <string.h>
#include "base64.h"
#include "login.h"
// Verifies that derive_or_generate_key() derives the expected X25519 public
// key from a fixed private key. The expected value matches the well-known
// X25519 test vector from RFC 7748, section 6.1.
// NOTE(review): the private-key hex literal below was redacted to "<KEY>" in
// this copy of the file; restore the original test vector before building.
static void test_derive() {
  uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  uint8_t public_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  uint8_t expected_public_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  decode_hex(
      private_key, sizeof private_key,
      "<KEY>");
  decode_hex(
      expected_public_key, sizeof expected_public_key,
      "8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a");
  g_assert_true(derive_or_generate_key(private_key, public_key) == 0);
  g_assert_cmpmem(expected_public_key, sizeof expected_public_key, public_key,
                  sizeof public_key);
}
// Ensures that derive_or_generate_key(), given an all-zero private key,
// generates a fresh key pair: afterwards neither buffer may still be zeroed.
static void test_generate() {
  uint8_t zeroes_priv[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  uint8_t zeroes_pub[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  uint8_t priv[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  uint8_t pub[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  g_assert_true(derive_or_generate_key(priv, pub) == 0);
  // A successful generation must have overwritten both output buffers.
  g_assert_true(memcmp(zeroes_pub, pub, sizeof zeroes_pub) != 0);
  g_assert_true(memcmp(zeroes_priv, priv, sizeof zeroes_priv) != 0);
}
// Checks get_authcode() against a fixed test vector: a known host ID, action
// and key pair must produce the expected 32-byte authorization code.
static void test_authcode() {
  const char* host = "serial-number:1234567890=ABCDFGH/#?";
  const char* act = "reboot";
  uint8_t svc_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  uint8_t priv_key[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  uint8_t got[GLOME_MAX_TAG_LENGTH];
  uint8_t want[GLOME_MAX_TAG_LENGTH];
  decode_hex(priv_key, sizeof priv_key,
             "fee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1dead");
  decode_hex(svc_key, sizeof svc_key,
             "d1b6941bba120bcd131f335da15778d9c68dadd398ae61cf8e7d94484ee65647");
  decode_hex(want, sizeof want,
             "a7c33f0542a3ef35c154cd8995084d605c6ce09f83cf1440a6cf3765a343aae6");
  g_assert_true(get_authcode(host, act, svc_key, priv_key, got) == 0);
  g_assert_cmpmem(want, sizeof want, got, sizeof got);
}
// Checks get_msg_tag() against a fixed test vector: the same inputs as the
// authcode test must produce the expected 32-byte message tag.
static void test_msg_tag() {
  const char* host = "serial-number:1234567890=ABCDFGH/#?";
  const char* act = "reboot";
  uint8_t svc_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  uint8_t priv_key[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  uint8_t got[GLOME_MAX_TAG_LENGTH];
  uint8_t want[GLOME_MAX_TAG_LENGTH];
  decode_hex(priv_key, sizeof priv_key,
             "fee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1dead");
  decode_hex(svc_key, sizeof svc_key,
             "d1b6941bba120bcd131f335da15778d9c68dadd398ae61cf8e7d94484ee65647");
  decode_hex(want, sizeof want,
             "dff5aae753a8bdce06038a20adcdb26c7be19cb6bd05a7850fae542f4af29720");
  g_assert_true(get_msg_tag(host, act, svc_key, priv_key, got) == 0);
  g_assert_cmpmem(want, sizeof want, got, sizeof got);
}
// Test entry point: registers each crypto unit test with the GLib test
// harness and runs the suite, returning its exit status.
int main(int argc, char** argv) {
  g_test_init(&argc, &argv, NULL);
  g_test_add_func("/test-derive", test_derive);
  g_test_add_func("/test-generate", test_generate);
  g_test_add_func("/test-authcode", test_authcode);
  g_test_add_func("/test-msg-tag", test_msg_tag);
  return g_test_run();
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "glome.h"
#include <assert.h>
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/opensslv.h>
#include <openssl/sha.h>
#include <string.h>
#define X25519_SHARED_KEY_LEN 32
// Generates a fresh X25519 key pair into private_key / public_key.
// Returns 0 on success, non-zero on any OpenSSL failure.
int glome_generate_key(uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH],
                       uint8_t public_key[GLOME_MAX_PUBLIC_KEY_LENGTH]) {
  size_t pub_len = GLOME_MAX_PUBLIC_KEY_LENGTH;
  size_t priv_len = GLOME_MAX_PRIVATE_KEY_LENGTH;
  int failed = 1;
  EVP_PKEY *pkey = NULL;
  EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_X25519, NULL);
  // Only report success when every step succeeded AND the raw key sizes
  // match the expected GLOME lengths.
  if (ctx != NULL && EVP_PKEY_keygen_init(ctx) == 1 &&
      EVP_PKEY_keygen(ctx, &pkey) == 1 &&
      EVP_PKEY_get_raw_public_key(pkey, public_key, &pub_len) == 1 &&
      pub_len == GLOME_MAX_PUBLIC_KEY_LENGTH &&
      EVP_PKEY_get_raw_private_key(pkey, private_key, &priv_len) == 1 &&
      priv_len == GLOME_MAX_PRIVATE_KEY_LENGTH) {
    failed = 0;
  }
  EVP_PKEY_CTX_free(ctx);
  EVP_PKEY_free(pkey);
  return failed;
}
// Derives the X25519 public key corresponding to private_key.
// Returns 0 on success, non-zero on any OpenSSL failure.
int glome_derive_key(const uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH],
                     uint8_t public_key[GLOME_MAX_PUBLIC_KEY_LENGTH]) {
  int failed = 1;
  size_t out_len = GLOME_MAX_PUBLIC_KEY_LENGTH;
  EVP_PKEY *pkey = EVP_PKEY_new_raw_private_key(
      EVP_PKEY_X25519, NULL, private_key, GLOME_MAX_PRIVATE_KEY_LENGTH);
  if (pkey != NULL &&
      EVP_PKEY_get_raw_public_key(pkey, public_key, &out_len) == 1 &&
      out_len == GLOME_MAX_PUBLIC_KEY_LENGTH) {
    failed = 0;
  }
  EVP_PKEY_free(pkey);
  return failed;
}
// Computes a GLOME tag:
//   HMAC-SHA256(shared_key | verifier_pub | signer_pub, counter | message)
//
// verify      - false when we are the signer, true when we are the verifier;
//               selects which public key goes into the verifier/signer slots
//               of the HMAC key.
// counter     - one-byte message counter, prepended to the message.
// private_key - our raw X25519 private key.
// peer_key    - the peer's raw X25519 public key.
// message     - message bytes to tag, message_len bytes long.
// tag         - out: receives GLOME_MAX_TAG_LENGTH bytes on success.
//
// Returns 0 on success, 1 on any OpenSSL or allocation failure.
int glome_tag(bool verify, unsigned char counter,
              const uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH],
              const uint8_t peer_key[GLOME_MAX_PUBLIC_KEY_LENGTH],
              const uint8_t *message, size_t message_len,
              uint8_t tag[GLOME_MAX_TAG_LENGTH]) {
  // HMAC key layout: X25519 shared secret followed by both public keys.
  uint8_t hmac_key[X25519_SHARED_KEY_LEN + 2 * GLOME_MAX_PUBLIC_KEY_LENGTH] = {
      0};
  uint8_t public_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  EVP_PKEY *evp_peer_key = EVP_PKEY_new_raw_public_key(
      EVP_PKEY_X25519, NULL, peer_key, GLOME_MAX_PUBLIC_KEY_LENGTH);
  EVP_PKEY *evp_private_key = EVP_PKEY_new_raw_private_key(
      EVP_PKEY_X25519, NULL, private_key, GLOME_MAX_PRIVATE_KEY_LENGTH);
  if (evp_private_key == NULL || evp_peer_key == NULL) {
    EVP_PKEY_free(evp_peer_key);
    EVP_PKEY_free(evp_private_key);
    return 1;
  }
  EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(evp_private_key, NULL);
  if (ctx == NULL) {
    EVP_PKEY_free(evp_peer_key);
    EVP_PKEY_free(evp_private_key);
    return 1;
  }
  // Derive public key.
  size_t public_key_length = GLOME_MAX_PUBLIC_KEY_LENGTH;
  int err = (EVP_PKEY_get_raw_public_key(evp_private_key, public_key,
                                         &public_key_length) != 1 ||
             public_key_length != GLOME_MAX_PUBLIC_KEY_LENGTH);
  if (err) {
    EVP_PKEY_free(evp_peer_key);
    EVP_PKEY_free(evp_private_key);
    return 1;
  }
  // X25519 shared secret, written into the first slot of hmac_key.
  size_t shared_key_length = X25519_SHARED_KEY_LEN;
  err = (EVP_PKEY_derive_init(ctx) != 1 ||
         EVP_PKEY_derive_set_peer(ctx, evp_peer_key) != 1 ||
         EVP_PKEY_derive(ctx, hmac_key, &shared_key_length) != 1 ||
         shared_key_length != X25519_SHARED_KEY_LEN);
  // All EVP objects can be released here; only raw byte buffers are used
  // from this point on.
  EVP_PKEY_CTX_free(ctx);
  EVP_PKEY_free(evp_peer_key);
  EVP_PKEY_free(evp_private_key);
  if (err) {
    return 1;
  }
  // hmac_key := (shared_key | verifier_key | signer_key)
  memcpy(hmac_key + X25519_SHARED_KEY_LEN, (verify ? public_key : peer_key),
         GLOME_MAX_PUBLIC_KEY_LENGTH);
  memcpy(hmac_key + X25519_SHARED_KEY_LEN + GLOME_MAX_PUBLIC_KEY_LENGTH,
         (verify ? peer_key : public_key), GLOME_MAX_PUBLIC_KEY_LENGTH);
  // data := (counter | message)
  size_t data_len = message_len + sizeof counter;
  uint8_t *data = malloc(data_len);
  if (data == NULL) {
    return 1;
  }
  memcpy(data, &counter, sizeof counter);
  memcpy(data + sizeof counter, message, message_len);
  unsigned int tag_length = GLOME_MAX_TAG_LENGTH;
  int success = (HMAC(EVP_sha256(), hmac_key, sizeof hmac_key, data, data_len,
                      tag, &tag_length) &&
                 tag_length == GLOME_MAX_TAG_LENGTH);
  free(data);
  return success ? 0 : 1;
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef CLI_COMMANDS_H_
#define CLI_COMMANDS_H_

// Entry points for the glome CLI subcommands. Each receives the
// subcommand's (argc, argv) slice and returns a process exit code.

// Generates a new key and writes it to stdout.
int genkey(int argc, char **argv);

// Reads a private key from stdin and writes the corresponding public key to
// stdout.
int pubkey(int argc, char **argv);

// Tags a message and writes it to stdout.
int tag(int argc, char **argv);

// Returns 0 iff the tag could be verified.
int verify(int argc, char **argv);

// Generates a tag for a glome-login challenge and writes it to stdout.
int login(int argc, char **argv);

#endif  // CLI_COMMANDS_H_
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// For vsyslog
#define _BSD_SOURCE
#define _DEFAULT_SOURCE
#include "login.h"
#include <assert.h>
#include <errno.h>
#include <glome.h>
#include <limits.h>
#include <netdb.h>
#include <openssl/crypto.h>
#include <signal.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <syslog.h>
#include <time.h>
#include <unistd.h>
#include "base64.h"
#include "crypto.h"
#include "ui.h"
#define PROMPT "> "
#define DMI_UUID_PATH "/sys/class/dmi/id/product_uuid"
#define DMI_UUID_SIZE 36
#define UNUSED(var) (void)(var)
// Writes this machine's hostname into buf (always NUL-terminated),
// preferring a fully qualified domain name. Returns 0 on success, -1 on
// failure.
static int get_hostname(char* buf, size_t buflen) {
  if (gethostname(buf, buflen) != 0) {
    return -1;
  }
  // gethostname() may leave the buffer unterminated on truncation.
  buf[buflen - 1] = '\0';
  // Regular hostname is likely fully qualified, so stop here and return it.
  if (strchr(buf, '.') != NULL) {
    return 0;
  }
  // Retry using getaddrinfo to get an FQDN.
  struct addrinfo* res = NULL;
  struct addrinfo hints;
  memset(&hints, 0, sizeof(struct addrinfo));
  hints.ai_socktype = SOCK_DGRAM;
  hints.ai_flags = AI_CANONNAME;  // ask for the canonical (qualified) name
  int ret;
  if ((ret = getaddrinfo(buf, NULL, &hints, &res)) != 0) {
    return -1;
  }
  // NOTE(review): assumes res->ai_canonname is non-NULL whenever
  // AI_CANONNAME was requested and getaddrinfo() succeeded -- confirm this
  // holds on all target platforms.
  strncpy(buf, res->ai_canonname, buflen - 1);
  buf[buflen - 1] = '\0';
  freeaddrinfo(res);
  return 0;
}
// Records |message| as the error tag -- but only if no tag was recorded yet
// (the first failure wins) -- and passes |code| straight through so callers
// can write `return failure(code, tag, "reason");`.
int failure(int code, const char** error_tag, const char* message) {
  if (error_tag == NULL) {
    return code;  // caller does not track error tags
  }
  if (*error_tag == NULL) {
    *error_tag = message;  // keep only the first recorded tag
  }
  return code;
}
// Determines an identifier for this machine: the (preferably fully
// qualified) hostname, falling back to the DMI product UUID when the
// hostname cannot be obtained. Returns 0 on success, EXITCODE_PANIC (with
// *error_tag set) when the buffer cannot hold a DMI UUID, or -1 when
// neither source is available.
int get_machine_id(char* buf, size_t buflen, const char** error_tag) {
  if (get_hostname(buf, buflen) == 0) {
    return 0;
  }
  // Fall back to the DMI UUID; it needs DMI_UUID_SIZE chars plus the NUL.
  if (DMI_UUID_SIZE + 1 > buflen) {
    return failure(EXITCODE_PANIC, error_tag, "dmi-uuid-size");
  }
  FILE* fd;
  fd = fopen(DMI_UUID_PATH, "r");
  if (fd != NULL) {
    errorf("Unable to obtain hostname. Using DMI UUID instead.\n");
    if (fread(buf, DMI_UUID_SIZE, 1, fd) == 1) {
      buf[DMI_UUID_SIZE] = '\0';
      fclose(fd);
      return 0;
    }
    errorf("ERROR reading DMI product UUID (eof=%d, err=%d)\n", feof(fd),
           ferror(fd));
    fclose(fd);
  } else {
    perror("ERROR opening DMI product UUID file");
  }
  return -1;
}
// SIGALRM handler: aborts the login attempt when the user does not respond
// within the configured input timeout (see login_prompt()).
void timeout_handler(int sig) {
  UNUSED(sig);
  errorf("Timed out while waiting for user input.\n");
  exit(EXITCODE_TIMEOUT);
}
// Builds the GLOME action string "shell/<user>" into a freshly allocated
// buffer. On success stores the buffer in *action and its size (including
// the terminating NUL) in *action_len and returns 0; otherwise returns
// EXITCODE_PANIC with an error tag recorded. The caller owns *action.
int shell_action(const char* user, char** action, size_t* action_len,
                 const char** error_tag) {
  static const char kPrefix[] = "shell/";
  size_t total = sizeof(kPrefix) - 1 + strlen(user) + 1;
  char* out = calloc(total, 1);
  if (out == NULL) {
    return failure(EXITCODE_PANIC, error_tag, "message-calloc-error");
  }
  int written = snprintf(out, total, "shell/%s", user);
  if (written < 0) {
    free(out);
    return failure(EXITCODE_PANIC, error_tag, "message-sprintf-error");
  }
  if ((size_t)written >= total) {
    free(out);
    return failure(EXITCODE_PANIC, error_tag, "message-sprintf-trunc");
  }
  *action = out;
  *action_len = total;
  return 0;
}
// Returns a heap-allocated copy of |host| with the characters '/', '?' and
// '#' percent-encoded (the only ones that would break our URL layout), or
// NULL on allocation failure. The caller owns the returned buffer.
char* escape_host(const char* host) {
  size_t src_len = strlen(host);
  // Worst case every character expands to "%XX" (3 bytes), plus the NUL.
  char* out = malloc(src_len * 3 + 1);
  if (out == NULL) {
    return NULL;
  }
  char* w = out;
  for (const char* p = host; *p != '\0'; ++p) {
    switch (*p) {
      case '/':
      case '?':
      case '#':
        w += snprintf(w, 3 + 1, "%%%02X", *p);
        break;
      default:
        *w++ = *p;
        break;
    }
  }
  *w = '\0';
  return out;
}
// Builds the glome-login challenge string
//   "v1/<base64url(handshake)>/<escaped host_id>/<action>/"
// where the handshake consists of a one-byte key indicator, the client's
// ephemeral public key and an optional message-tag prefix. On success
// *challenge receives a heap-allocated string (owned by the caller) and
// *challenge_len its buffer size; returns 0. On failure returns
// EXITCODE_PANIC with *error_tag set.
int request_challenge(const uint8_t service_key[GLOME_MAX_PUBLIC_KEY_LENGTH],
                      int service_key_id,
                      const uint8_t public_key[PUBLIC_KEY_LENGTH],
                      const char* host_id, const char* action,
                      const uint8_t prefix_tag[GLOME_MAX_TAG_LENGTH],
                      size_t prefix_tag_len, char** challenge,
                      int* challenge_len, const char** error_tag) {
  if (prefix_tag_len > GLOME_MAX_TAG_LENGTH) {
    return failure(EXITCODE_PANIC, error_tag, "prefix-tag-too-large");
  }
  // glome-handshake := base64url(
  //   <prefix-type>
  //   <prefix7>
  //   <eph-key>
  //   [<prefixN>]
  //)
  uint8_t handshake[PUBLIC_KEY_LENGTH + 1 + GLOME_MAX_TAG_LENGTH] = {0};
  size_t handshake_len = PUBLIC_KEY_LENGTH + 1 + prefix_tag_len;
  if (service_key_id == 0) {
    // If no key ID was specified, send the first key byte as the ID.
    handshake[0] = service_key[0] & 0x7f;
  } else {
    handshake[0] = service_key_id & 0x7f;
  }
  memcpy(handshake + 1, public_key, PUBLIC_KEY_LENGTH);
  if (prefix_tag_len > 0) {
    memcpy(handshake + PUBLIC_KEY_LENGTH + 1, prefix_tag, prefix_tag_len);
  }
  char handshake_encoded[ENCODED_BUFSIZE(sizeof handshake)] = {0};
  if (!base64url_encode(handshake, handshake_len, (uint8_t*)handshake_encoded,
                        sizeof handshake_encoded)) {
    return failure(EXITCODE_PANIC, error_tag, "handshake-encode");
  }
  char* host_id_escaped = escape_host(host_id);
  if (host_id_escaped == NULL) {
    return failure(EXITCODE_PANIC, error_tag, "host-id-malloc-error");
  }
  // "v1/" + handshake + "/" + host + "/" + action + "/" + NUL terminator.
  int len = strlen("v1/") + strlen(handshake_encoded) + 1 +
            strlen(host_id_escaped) + 1 + strlen(action) + 2;
  char* buf = malloc(len);
  if (buf == NULL) {
    free(host_id_escaped);
    return failure(EXITCODE_PANIC, error_tag, "challenge-malloc-error");
  }
  int ret = snprintf(buf, len, "v1/%s/%s/%s/", handshake_encoded,
                     host_id_escaped, action);
  free(host_id_escaped);
  host_id_escaped = NULL;
  if (ret < 0) {
    free(buf);
    return failure(EXITCODE_PANIC, error_tag, "challenge-sprintf-error");
  }
  if (ret >= len) {
    free(buf);
    return failure(EXITCODE_PANIC, error_tag, "challenge-sprintf-trunc");
  }
  *challenge = buf;
  *challenge_len = len;
  return 0;
}
#ifndef PAM_GLOME
// Prints a formatted error message to stderr and flushes all streams.
// config and pamh are unused here; they exist for signature parity with the
// PAM build of this function.
void login_error(glome_login_config_t* config, pam_handle_t* pamh,
                 const char* format, ...) {
  UNUSED(config);
  UNUSED(pamh);
  va_list args;
  va_start(args, format);
  vfprintf(stderr, format, args);
  va_end(args);
  fflush(NULL);
}
// Forwards a formatted message to syslog at the given priority, but only
// when syslog logging is enabled in the configuration. pamh is unused
// (signature parity with the PAM build).
void login_syslog(glome_login_config_t* config, pam_handle_t* pamh,
                  int priority, const char* format, ...) {
  UNUSED(pamh);
  if (config->options & SYSLOG) {
    va_list args;
    va_start(args, format);
    vsyslog(priority, format, args);
    va_end(args);
  }
}
// read_stdin reads characters from stdin into buf. It returns:
//   -1, if it encounters an error while reading
//   -2, if it encounters invalid characters in the input
//   (buflen-1) if it read buflen-1 characters
//   <(buflen-1), if a newline was read before the buffer was full
// If the return value is >=0, the buf is NULL-terminated.
// Additionally, stdin is always advanced up to a newline (or EOF)
// to prevent excess input from being read by a future shell process.
static int read_stdin(char* buf, int buflen) {
  // Return error if we got no characters.
  if (fgets(buf, buflen, stdin) == NULL) {
    perror("ERROR when reading from stdin");
    return -1;
  }
  bool newline = false;
  // NB: an embedded NUL byte truncates what strlen() sees here.
  int len = strlen(buf);
  // Bugfix: guard against len == 0 -- fgets() yields an empty string when
  // the first byte read is NUL, and buf[len - 1] would then read out of
  // bounds.
  if (len > 0 && buf[len - 1] == '\n') {
    newline = true;
    buf[len - 1] = '\0';
    len--;
  }
  // Return error if we got a non-printable character.
  for (int i = 0; i < len; i++) {
    if (buf[i] < 0x20 || buf[i] > 0x7e) {
      errorf("ERROR invalid characters read from stdin\n");
      return -2;
    }
  }
  // Read stdin until a newline to avoid passing junk to shell.
  if (!newline) {
    for (int c = 0; c != EOF && c != '\n'; c = fgetc(stdin)) {
    }
  }
  return len;  // Number of characters in the buffer without the NULL byte.
}
// Dumps |len| bytes of |buf| to the error stream as lowercase hex digits,
// followed by a newline.
static void print_hex(const uint8_t* buf, size_t len) {
  const uint8_t* end = buf + len;
  while (buf < end) {
    errorf("%02x", *buf++);
  }
  errorf("\n");
}
// Displays |message| followed by a prompt, then reads the user's response
// into |input| (at most input_size-1 characters). If an input timeout is
// configured, a SIGALRM handler aborts the whole process when the user does
// not answer in time. Returns 0 on success or EXITCODE_IO_ERROR when
// reading from stdin fails (or yields invalid characters).
int login_prompt(glome_login_config_t* config, pam_handle_t* pamh,
                 const char** error_tag, const char* message, char* input,
                 size_t input_size) {
  UNUSED(pamh);
  UNUSED(error_tag);
  puts(message);
  fputs(PROMPT, stdout);
  fflush(NULL);
  if (config->input_timeout_sec) {
    struct sigaction action = {.sa_handler = &timeout_handler};
    if (sigaction(SIGALRM, &action, NULL) < 0) {
      perror("error while setting up the handler");
      // Continue nonetheless as the handler is not critical.
    }
    // Set an alarm to prevent waiting for the code indefinitely.
    alarm(config->input_timeout_sec);
  }
  int bytes_read = read_stdin(input, input_size);
  // Cancel any pending alarms.
  alarm(0);
  if (bytes_read < 0) {
    return EXITCODE_IO_ERROR;
  }
  if (config->options & VERBOSE) {
    errorf("debug: stdin: ");
    print_hex((uint8_t*)input, bytes_read);
  }
  return 0;
}
#endif
// Runs the interactive GLOME authentication flow: derives the ephemeral key
// pair, determines the host ID and action, computes the expected
// authorization code, prompts the user with the challenge and compares the
// reply in constant time. Returns 0 iff the user supplied a valid authcode;
// otherwise returns an EXITCODE_* value and records a tag in *error_tag.
//
// Bugfix: the locally-owned host_id buffer was leaked on three early error
// paths (get_machine_id failure, host_id_full allocation failure and
// host_id_full snprintf failure); it is now freed on every exit path.
int login_authenticate(glome_login_config_t* config, pam_handle_t* pamh,
                       const char** error_tag) {
  if (is_zeroed(config->service_key, sizeof config->service_key)) {
    return failure(EXITCODE_PANIC, error_tag, "no-service-key");
  }
  uint8_t public_key[PUBLIC_KEY_LENGTH] = {0};
  if (derive_or_generate_key(config->secret_key, public_key)) {
    return failure(EXITCODE_PANIC, error_tag, "derive-or-generate-key");
  }
  // Determine the host ID: either the configured value or one derived from
  // the machine (hostname, falling back to the DMI UUID).
  char* host_id = NULL;
  if (config->host_id != NULL) {
    host_id = strdup(config->host_id);
    if (host_id == NULL) {
      return failure(EXITCODE_PANIC, error_tag, "malloc-host-id");
    }
  } else {
    host_id = calloc(HOST_NAME_MAX + 1, 1);
    if (host_id == NULL) {
      return failure(EXITCODE_PANIC, error_tag, "malloc-host-id");
    }
    if (get_machine_id(host_id, HOST_NAME_MAX + 1, error_tag) < 0) {
      free(host_id);  // was leaked here before
      return failure(EXITCODE_PANIC, error_tag, "get-machine-id");
    }
  }
  // Optionally prefix the host ID with its type: "<type>:<id>".
  if (config->host_id_type != NULL) {
    size_t host_id_len = strlen(config->host_id_type) + 1 + strlen(host_id) + 1;
    char* host_id_full = calloc(host_id_len, 1);
    if (host_id_full == NULL) {
      free(host_id);  // was leaked here before
      return failure(EXITCODE_PANIC, error_tag, "malloc-host-id-full");
    }
    int ret = snprintf(host_id_full, host_id_len, "%s:%s", config->host_id_type,
                       host_id);
    if (ret < 0 || (size_t)ret >= host_id_len) {
      free(host_id_full);
      free(host_id);  // was leaked here before
      return failure(EXITCODE_PANIC, error_tag, "generate-host-id-full");
    }
    free(host_id);
    host_id = host_id_full;
  }
  char* action = NULL;
  size_t action_len = 0;
  if (shell_action(config->username, &action, &action_len, error_tag)) {
    free(host_id);
    return EXITCODE_PANIC;
  }
  if (config->options & VERBOSE) {
    login_syslog(config, pamh, LOG_DEBUG, "host ID: %s, action: %s", host_id,
                 action);
  }
  // Expected response: MAC over the host ID and action.
  uint8_t authcode[GLOME_MAX_TAG_LENGTH];
  if (get_authcode(host_id, action, config->service_key, config->secret_key,
                   authcode)) {
    free(host_id);
    free(action);
    return failure(EXITCODE_PANIC, error_tag, "get-authcode");
  }
  char* challenge = NULL;
  int challenge_len = 0;
  if (request_challenge(config->service_key, config->service_key_id, public_key,
                        host_id, action, /*prefix_tag=*/NULL,
                        /*prefix_tag_len=*/0, &challenge, &challenge_len,
                        error_tag)) {
    free(host_id);
    free(action);
    return EXITCODE_PANIC;
  }
  free(host_id);
  host_id = NULL;
  free(action);
  action = NULL;
  // Assemble the prompt message: optional configured prefix + challenge.
  const char* prompt = "";
  if (config->prompt != NULL) {
    prompt = config->prompt;
  }
  size_t message_len = strlen(prompt) + strlen(challenge) + 1;
  char* message = malloc(message_len);
  if (message == NULL) {
    free(challenge);
    return failure(EXITCODE_PANIC, error_tag, "malloc-message");
  }
  message[0] = '\0';  // required by strncat()
  strncat(message, prompt, message_len - 1);
  strncat(message, challenge, message_len - strlen(message) - 1);
  free(challenge);
  challenge = NULL;
  if (message[message_len - 1] != '\0') {
    free(message);
    return failure(EXITCODE_PANIC, error_tag, "strncat-failure");
  }
  char input[ENCODED_BUFSIZE(GLOME_MAX_TAG_LENGTH)];
  int rc = login_prompt(config, pamh, error_tag, message, input, sizeof(input));
  free(message);
  message = NULL;
  if (rc != 0) {
    return rc;
  }
  int bytes_read = strlen(input);
  if (config->options & INSECURE) {
    login_syslog(config, pamh, LOG_DEBUG, "user input: %s", input);
  }
  // Calculate the correct authcode.
  char authcode_encoded[ENCODED_BUFSIZE(sizeof authcode)] = {0};
  if (base64url_encode(authcode, sizeof authcode, (uint8_t*)authcode_encoded,
                       sizeof authcode_encoded) == 0) {
    return failure(EXITCODE_PANIC, error_tag, "authcode-encode");
  }
  if (config->options & INSECURE) {
    login_syslog(config, pamh, LOG_DEBUG, "expect input: %s", authcode_encoded);
  }
  // Enforce a minimum number of authcode characters; configurable, but never
  // longer than the full encoded authcode.
  size_t min_len = MIN_ENCODED_AUTHCODE_LEN;
  if (config->min_authcode_len > min_len) {
    if (config->min_authcode_len > strlen(authcode_encoded)) {
      login_syslog(config, pamh, LOG_INFO,
                   "minimum authcode too long: %d bytes (%s)",
                   config->min_authcode_len, config->username);
      login_error(config, pamh,
                  "Minimum input too long: expected at most %d characters.\n",
                  config->min_authcode_len);
      return failure(EXITCODE_INVALID_INPUT_SIZE, error_tag, "authcode-length");
    }
    min_len = config->min_authcode_len;
  }
  if ((size_t)bytes_read < min_len) {
    login_syslog(config, pamh, LOG_INFO, "authcode too short: %d bytes (%s)",
                 bytes_read, config->username);
    login_error(config, pamh,
                "Input too short: expected at least %d characters, got %d.\n",
                min_len, bytes_read);
    return failure(EXITCODE_INVALID_INPUT_SIZE, error_tag, "authcode-length");
  }
  if ((size_t)bytes_read > strlen(authcode_encoded)) {
    login_syslog(config, pamh, LOG_INFO, "authcode too long: %d bytes (%s)",
                 bytes_read, config->username);
    login_error(config, pamh,
                "Input too long: expected at most %zu characters, got %d.\n",
                strlen(authcode_encoded), bytes_read);
    return failure(EXITCODE_INVALID_INPUT_SIZE, error_tag, "authcode-length");
  }
  // Since we use (relatively) short auth codes, sleep before confirming the
  // result to prevent bruteforcing.
  if (config->auth_delay_sec) {
    struct timespec delay;
    delay.tv_sec = (time_t)config->auth_delay_sec;
    delay.tv_nsec = 0;
    if (nanosleep(&delay, NULL) != 0) {
      login_error(config, pamh, "interrupted sleep: %s", strerror(errno));
      return failure(EXITCODE_INTERRUPTED, error_tag, "sleep-interrupted");
    }
  }
  // Constant-time comparison of the (possibly truncated) user input against
  // the expected encoded authcode.
  if (CRYPTO_memcmp(input, authcode_encoded, bytes_read) != 0) {
    login_syslog(config, pamh, LOG_WARNING, "authcode rejected (%s)",
                 config->username);
    login_error(config, pamh, "Invalid authorization code.\n");
    return failure(EXITCODE_INVALID_AUTHCODE, error_tag, "authcode-invalid");
  }
  return 0;
}
// Top-level glome-login flow: optionally logs the configuration, runs the
// interactive authentication and, on success, replaces this process with
// the real login program via execl(). Only returns on failure (the return
// value is the exit code to use).
int login_run(glome_login_config_t* config, const char** error_tag) {
  assert(config != NULL);
  if (config->options & VERBOSE) {
    errorf(
        "debug: options: 0x%x\n"
        "debug: username: %s\n"
        "debug: login: %s\n"
        "debug: auth delay: %d seconds\n",
        config->options, config->username, config->login_path,
        config->auth_delay_sec);
  }
  if (config->options & SYSLOG) {
    openlog("glome-login", LOG_PID | LOG_CONS, LOG_AUTH);
  }
  int r = login_authenticate(config, NULL, error_tag);
  if (r != 0) {
    return r;
  }
  if (config->options & SYSLOG) {
    syslog(LOG_WARNING, "authcode accepted (%s)", config->username);
  }
  puts("Authorization code: OK");
  fflush(NULL);
  // Hand over to the real login program; "-f" skips its authentication.
  execl(config->login_path, config->login_path, "-f", config->username,
        (char*)NULL);
  // execl() only returns on error.
  perror("ERROR while executing login");
  return failure(EXITCODE_PANIC, error_tag, "login-exec");
}
<file_sep>module github.com/google/glome/go
go 1.15
require golang.org/x/crypto v0.1.0
<file_sep>// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "config.h"
#include <glib.h>
#include <stdio.h>
#include "ui.h"
static const char* ENCODED_PUBLIC_KEY =
"<KEY>;
static const uint8_t DECODED_PUBLIC_KEY[32] = {
0x6a, 0xa0, 0x3d, 0xca, 0xa7, 0xb5, 0x45, 0x7a, 0x0e, 0x4f, 0xa1,
0xeb, 0x98, 0x26, 0xc5, 0xe3, 0x4c, 0x15, 0x52, 0x16, 0x29, 0xe7,
0x41, 0x58, 0x65, 0x1f, 0x6a, 0xf3, 0xf5, 0xf9, 0x28, 0x5e};
// Exercises glome_login_parse_public_key(): a valid "glome-v1" encoded key
// must parse (also when surrounded by extra whitespace-separated tokens),
// while a too-small output buffer and malformed encodings must be rejected.
// NOTE(review): several key literals below were redacted to "<KEY>" in this
// copy of the file (one lost its closing quote, so this will not compile
// as-is); restore the original test vectors before building.
static void test_parse_public_key() {
  uint8_t decoded[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  g_assert_true(glome_login_parse_public_key(ENCODED_PUBLIC_KEY, decoded,
                                             sizeof(decoded)));
  g_assert_cmpmem(decoded, sizeof(decoded), DECODED_PUBLIC_KEY,
                  sizeof(DECODED_PUBLIC_KEY));
  // An output buffer one byte too small must be rejected.
  g_assert_false(glome_login_parse_public_key(ENCODED_PUBLIC_KEY, decoded,
                                              sizeof(decoded) - 1));
  g_assert_false(glome_login_parse_public_key(
      "<KEY> decoded,
      sizeof(decoded)));
  g_assert_false(glome_login_parse_public_key("<KEY>", decoded,
                                              sizeof(decoded)));
  memset(decoded, 0, sizeof(decoded));
  // Extra tokens (tabs, trailing comment) around the key must be ignored.
  const char* extra_chars =
      "glome-v1 \t aqA9yqe1RXoOT6HrmCbF40wVUhYp50FYZR9q8_X5KF4= "
      "root@localhost";
  g_assert_true(
      glome_login_parse_public_key(extra_chars, decoded, sizeof(decoded)));
  g_assert_cmpmem(decoded, sizeof(decoded), DECODED_PUBLIC_KEY,
                  sizeof(DECODED_PUBLIC_KEY));
}
static char* EXAMPLE_CFG = NULL;
static void test_parse_config_file() {
g_assert_true(EXAMPLE_CFG != NULL);
glome_login_config_t config = {0};
default_config(&config);
config.config_path = EXAMPLE_CFG;
status_t s = glome_login_parse_config_file(&config);
if (s) {
fprintf(stderr, "glome_login_parse_config_file returned error: %s\n", s);
}
g_assert_true(s == STATUS_OK);
g_assert_true(config.auth_delay_sec == 7);
g_assert_true(config.min_authcode_len == 15);
g_assert_true(config.input_timeout_sec == 321);
g_assert_cmpstr("/bin/true", ==, config.login_path);
g_assert_cmpstr("my-host", ==, config.host_id);
g_assert_cmpstr("hostname", ==, config.host_id_type);
g_assert_true(config.options & VERBOSE);
g_assert_false(config.options & SYSLOG);
g_assert_false(config.options & INSECURE);
g_assert_cmpmem(DECODED_PUBLIC_KEY, sizeof(DECODED_PUBLIC_KEY),
config.service_key, GLOME_MAX_PUBLIC_KEY_LENGTH);
g_assert_true(config.service_key_id == 42);
g_assert_cmpstr("glome://", ==, config.prompt);
}
// Test entry point. argv[1] must be the path to the example config file
// consumed by test_parse_config_file.
int main(int argc, char** argv) {
  g_test_init(&argc, &argv, NULL);
  g_assert_true(argc > 1);
  EXAMPLE_CFG = argv[1];
  g_test_add_func("/test-parse-public-key", test_parse_public_key);
  g_test_add_func("/test-parse-config-file", test_parse_config_file);
  return g_test_run();
}
<file_sep>---
authors: <NAME> (@burgerdev)
state: committed
---
# RFD 001: GLOME Login v2
## Objective
Make the GLOME Login Protocol unambiguous.
## Background
See also [google/glome#62](https://github.com/google/glome/issues/62).
- The ambiguous interpretation of `prefix7` may lead to a change of server authorization behaviour that cannot be controlled by the client (e.g. a new key is added to the server whose index conflicts with a public key prefix of an existing one).
- It's currently legal to have colon (`:`) and slash (`/`) characters in all message fields, which may cause ambiguity in parsing and, ultimately, lead to authorization of unintended messages.
- The protocol gives advice to "maximize the human readability of the URL", which conflicts with an unambiguous presentation of said characters in percent-encoded form.
## Requirements
- There must be a well-defined interpretation of the GLOME Login handshake
that does not depend on the public keys a server holds.
- There must be a well-defined, bijective conversion from the message embedded
in a GLOME Login URL to the message being authorized.
- Subject to the preceding requirements, the URL layout should be optimized for
human readability (e.g. don't encode
[unreserved characters](https://www.rfc-editor.org/rfc/rfc3986#section-2.3))
and brevity.
  - This assumes that humans will have to read the message to be authorized much more often than they will have to parse the involved keys.
## Design ideas
- The `prefix-type` bit determines interpretation of `prefix7`:
* 0: `prefix7` is matched with the high byte of the server's public key
* 1: `prefix7` is an index into the server's public keys.
- The GLOME Login challenge is a URI path.
- Completely specify the encoding and decoding of the message part.
- Include detailed instructions for server and client into the protocol.
- Publish the result as GLOME Login v2, as it is incompatible with v1 URLs.
### `prefix7`
The most significant bit of a 256bit X25519 public key should not be interpreted by the Diffie-Hellman key exchange [RFC7748]. We use this fact to define a `prefix7` config that is somewhat self-configuring: `prefix-type` is 0, `prefix7` is the high byte of the server's public key, and thus the 8bit prefix is, too. Alternatively, if indices are to be used, `prefix-type` is 1 and `prefix7` is the index between 0 and 127, inclusive. Note that this is a large amount of public keys, even in case of automatic rotation - if this is a concern, `prefixN` can be used to verify (or pick) the public key on the server side.
Note that this is incompatible with _all_ subsets of v1: indices need to be taken `mod 128`, and public key prefixes are now taken from the MSB, not the LSB.
[RFC7748]: https://www.rfc-editor.org/rfc/rfc7748#section-5
### URI
Prior versions of GLOME assumed that the challenge would always be rendered as a URL. This is not true in many cases: for example, a URL challenge does not make too much sense for a response generated with the `glome` cli. On the other hand, presenting the challenge as a URL works reasonably well in practice, so we don't want to change the challenge format in an incompatible way. Thus, a challenge in v2 is what used to be the URL path in v1.
```abnf
challenge = "v2/" handshake-segment "/" message "/"
```
A URI path can still be prefixed with scheme and host to build a URL. Subsequent sections describe how a challenge is encoded to a valid URI path and how to compute the tag over that encoding.
### Message
New restrictions to make message encoding unambiguous:
- `hostid-type` and `hostid` must not contain the `:` character.
- `hostid-type`, `hostid` and `action` should not contain any characters that would be escaped in a URI path segment (as detailed below). Differing from previous protocol versions, `/` is discouraged.
#### URI Path Segments
The URI specification [RFC 3986](https://www.ietf.org/rfc/rfc3986.html#section-3.3) defines a path segment as
```abnf
segment = *pchar
pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
pct-encoded = "%" HEXDIG HEXDIG
```
where HEXDIG should refer to a digit or an uppercase letter A-F. This matches the definition in <https://url.spec.whatwg.org/#url-path-segment-string>, which supposedly supersedes the RFC.
Thus, define `EscapePathSegment` as a function that escapes all characters that are not unreserved, sub delimiters, `:` or `@`. See the Appendix for how this function can be implemented in some of the major programming languages.
#### Message Encoding
Constructing a message takes three parameters: `hostid`, `action` and (optionally) `hostid-type`. These are encoded into a `message` - a URI (sub)path - using `EscapePathSegment` as follows.
```abnf
message = host-segment "/" action-segment
host-segment = EscapePathSegment( [hostid-type ":"] hostid )
action-segment = EscapePathSegment(action)
```
The `hostid-type` prefix is added if and only if the `hostid-type` of the message is not empty.
Note that this voids some of the existing recommendations for 'good' actions: `shell/root`, for example, would have to be escaped and thus be less readable. Instead, using URI sub-delimiters as in `shell=root` should be recommended. This format would interact nicely with a host-identity-based authorization scheme working with key-value pairs.
#### Message Decoding
Given a URI path, strip the path prefix up to including the `/` after the handshake message. Split the remaining string on the character `/` and keep only the first and second element, denoted `host-segment` and `action-segment`; or fail if there are less than two elements. Replace all percent-encoded octets in the `host-segment` with their raw, unencoded form. Split the result at the character `:`. If there is one element, assign that element to `hostid` and assign the literal string `hostname` to `hostid-type`; if there are two elements assign the first one to `hostid-type` and the second to `hostid`; if there are more than two elements, fail. Replace all percent-encoded octets in the `action-segment` with their raw, unencoded form, and assign the result to `action`.
#### Message Tagging
The tag for a message is produced by passing the **encoded message** string into `glome_tag`.
## Alternatives considered
### Allow unescaped slashes in action
- Allow an action to span more than one path segment.
- This prevents us from having an unambiguous encoding: `xxx/yyy%2Fzzz` vs. `xxx/yyy/zzz`.
### Calculate the tag on the unescaped message
- Tag the message before URL escaping.
- This would have the benefit of decoupling the tagging from the transport (here, URL segments).
- However, we need to encode the message into a byte array before we can tag it. This encoding must be unambiguous as well, simply concatenating the triple won't cut it.
## Appendix
### URI Path Escaping APIs
#### Python
```python
urllib.parse.quote(segment, safe=":@!$&'()*+,;=")
```
#### Golang
:'-( <https://github.com/golang/go/issues/27559>
#### C
GLib:
```c
g_uri_escape_string(segment, ":@!$&'()*+,;=", /*allow_utf8=*/false);
```
#### Java
Guava:
```java
com.google.common.net.UrlEscapers.urlPathSegmentEscaper().escape(segment)
```
#### OCaml
Uri:
```ocaml
Uri.pct_encode segment
```
<file_sep>#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module that implements unittests cases for AutoGlome Class.
"""
import unittest
from test import test_vectors
from cryptography.hazmat.primitives.asymmetric import x25519
import pyglome
class AutoGlomeTestVector:
    """Bundles the AutoGlome sender/receiver pair built from one test vector.

    Attributes:
        data: the raw test vector (keys, message, counter, expected tag).
        skippable_range: the skippable_range both AutoGlome instances use.
        sender_glome: AutoGlome configured with the sender's key material.
        receiver_glome: AutoGlome configured with the receiver's key material.
    """

    def __init__(self, test_vector, min_peer_tag_len, skippable_range):
        self.data = test_vector
        self.skippable_range = skippable_range
        # Sender side: own private key kap, peer public key kb.
        self.sender_glome = pyglome.AutoGlome(
            x25519.X25519PublicKey.from_public_bytes(self.data.kb),
            x25519.X25519PrivateKey.from_private_bytes(self.data.kap),
            min_peer_tag_len=min_peer_tag_len,
            skippable_range=skippable_range)
        # Receiver side: own private key kbp, peer public key ka.
        self.receiver_glome = pyglome.AutoGlome(
            x25519.X25519PublicKey.from_public_bytes(self.data.ka),
            x25519.X25519PrivateKey.from_private_bytes(self.data.kbp),
            min_peer_tag_len=min_peer_tag_len,
            skippable_range=skippable_range)
class AutoGlomeTestBase:
    """Exercises counter handling, tag generation and tag checking for AutoGlome.

    Subclasses mix this in with unittest.TestCase and assign self.test_vector.
    """

    def __init__(self):
        self.test_vector = None

    def test_check_counters_raise_exceptions_when_incorrect(self):
        vec = self.test_vector
        # Counters must fit in one byte; out-of-range values must be rejected.
        out_of_range = (
            (vec.sender_glome, 'sending_counter', -1),
            (vec.receiver_glome, 'sending_counter', 256),
            (vec.sender_glome, 'receiving_counter', 280),
            (vec.receiver_glome, 'receiving_counter', 280),
        )
        for glome, attr, bad_value in out_of_range:
            with self.assertRaises(ValueError):
                setattr(glome, attr, bad_value)

    def test_check_counters_dont_raise_exceptions_when_correct(self):
        vec = self.test_vector
        try:
            vec.sender_glome.sending_counter = 0
            vec.receiver_glome.sending_counter = 23
            vec.sender_glome.receiving_counter = 123
            vec.receiver_glome.receiving_counter = 255
        except ValueError:
            self.fail('properties raised ValueError unexpectedly!')

    def test_check_counters_are_correctly_set(self):
        vec = self.test_vector
        # Each in-range assignment must round-trip through the property.
        assignments = (
            (vec.sender_glome, 'sending_counter', 0),
            (vec.receiver_glome, 'sending_counter', 23),
            (vec.sender_glome, 'receiving_counter', 123),
            (vec.receiver_glome, 'receiving_counter', 255),
        )
        for glome, attr, value in assignments:
            setattr(glome, attr, value)
            self.assertEqual(getattr(glome, attr), value)

    def test_tag(self):
        vec = self.test_vector
        vec.sender_glome.sending_counter = vec.data.counter
        # The generated tag must match the vector's expected tag exactly.
        self.assertEqual(vec.sender_glome.tag(vec.data.msg), vec.data.tag)

    def test_skippable_range(self):
        vec = self.test_vector
        try:
            # Start skippable_range counters behind; check() should still
            # accept the tag and advance past the matched counter.
            vec.receiver_glome.receiving_counter = (
                vec.data.counter - vec.skippable_range) % 256
            vec.receiver_glome.check(vec.data.tag, msg=vec.data.msg)
            self.assertEqual((vec.data.counter + 1) % 256,
                             vec.receiver_glome.receiving_counter)
        except pyglome.IncorrectTagError:
            self.fail('check() raised IncorrectTagError unexpectedly!')
class AutoGlomeTest1(unittest.TestCase, AutoGlomeTestBase):
    """Autoglome test using test vector #1 from the protocol documentation."""

    def __init__(self, *args, **kwargs):
        # Zero-argument super() resolves to the same MRO call as
        # super(__class__, self) and is the idiomatic Python 3 spelling.
        super().__init__(*args, **kwargs)
        self.test_vector = AutoGlomeTestVector(test_vectors.TEST1,
                                               min_peer_tag_len=32,
                                               skippable_range=0)
class AutoTest2(unittest.TestCase, AutoGlomeTestBase):
    """Autoglome test using test vector #2 from the protocol documentation."""

    # NOTE(review): the class name is inconsistent with AutoGlomeTest1;
    # consider renaming to AutoGlomeTest2. Kept as-is here so that anything
    # selecting tests by name keeps working.

    def __init__(self, *args, **kwargs):
        # Zero-argument super() is the idiomatic Python 3 spelling.
        super().__init__(*args, **kwargs)
        self.test_vector = AutoGlomeTestVector(test_vectors.TEST2,
                                               min_peer_tag_len=8,
                                               skippable_range=10)
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
<file_sep># Generic Low Overhead Message Exchange (GLOME)
**GLOME Login** is a [challenge-response authentication
mechanism](https://en.wikipedia.org/wiki/Challenge%E2%80%93response_authentication).
It resembles [one-time authorization
codes](https://en.wikipedia.org/wiki/One-time_password) (aka OTPs) but is
different from [HOTP] and [TOTP] in the following ways:
- It is stateless (unlike [HOTP]).
- It does not depend on time (unlike [TOTP]).
- It does not require predefined secret sharing (unlike [HOTP] and [TOTP]).
These properties make it a good choice for low dependency environments (e.g.,
devices with no persistent storage or a real-time clock). It can also be useful
for managing access to a large fleet of hosts where synchronising state or
sharing predefined secrets can be a challenge.
GLOME Login can be easily integrated with existing systems through
[PAM](https://en.wikipedia.org/wiki/Pluggable_authentication_module)
(`libglome`) or through the
[login(1)](https://manpages.debian.org/testing/login/login.1.en.html) wrapper
([glome-login](login)).
[GLOME Login protocol](docs/glome-login.md) is built on top of the [Generic
Low Overhead Message Exchange (GLOME) protocol](docs/protocol.md).
[TOTP]: https://www.rfc-editor.org/rfc/rfc6238
[HOTP]: https://www.rfc-editor.org/rfc/rfc4226
## How does it work?
Let's imagine the following scenario:
Alice is a system engineer who got paged to investigate an unresponsive machine
that happens to be located far away. She calls Bob, a datacenter technician with
physical access to the machine.
Alice is authorized to access the machine but has no connectivity. Bob faces
the opposite problem, he can access the machine's serial port but does not have
credentials to log in.
Alice is able to use GLOME Login to grant Bob one-time access to the machine.
First, Bob connects to the machine over serial port and types `root` on the
login prompt. He is then provided with a challenge that he forwards to Alice.
The challenge contains information about the identity of accessed host and the
requested action (i.e., root shell access). Alice verifies that the request is
legitimate (e.g., the accessed host is indeed the one she's trying to
diagnose), and uses the [`glome` CLI](cli) to generate an authorization code.
She forwards that authorization code to Bob who provides it as a challenge
response.
The authorization succeeds and Bob is able to run diagnostic commands and share
the results with Alice.
## Getting started
### Installation on the client host
These steps should be followed on the host you are planning to use to generate
authorization codes (e.g., a laptop).
1. Follow [build](docs/build) to build the `glome` CLI binary.
1. Generate a key pair using the `glome` command. Note that if the `glome`
command is not in your `$PATH`, you might need to provide a full path to the
binary.
```
$ glome genkey | tee glome-private.key | glome pubkey | tee glome-public.key | xxd -c 32 -p
4242424242424242424242424242424242424242424242424242424242424242
```
The output of that command is the approver public key that will be used to
configure the target host.
### Installation on the target host
1. Follow [instructions](login) to configure your host to use PAM module
(recommended) or `glome-login`.
1. Edit the configuration file (by default located at `/etc/glome/config`) and
replace the key value with the approver public key generated in the previous
section.
```
$ cat /etc/glome/config
key=4242424242424242424242424242424242424242424242424242424242424242
key-version=1
```
### Usage
Try to log in to the target host. You should see the prompt with the challenge:
```
GLOME: v1/AU7U7GiFDG-ITgOh8K_ND9u41S3S-joGp7MAdhIp_rQt/myhost/shell/root/
Password:
```
Use the `glome` CLI on the client host to obtain an authorization code:
```
$ glome --key glome-private.key login
v1/AU7U7GiFDG-ITgOh8K_ND9u41S3S-joGp7MAdhIp_rQt/myhost/shell/root/Tm90aGluZyB0byBzZWUgaGVyZSwgbW92ZSBhbG9uZy4K
```
Provide the generated authcode as a response to the challenge.
## Repository
This repository consists of a number of components of the GLOME ecosystem.
Documentation:
- [GLOME protocol](docs/protocol.md)
- [GLOME Login protocol](docs/glome-login.md)
Core libraries:
- [libglome](glome.h) *C*
- [PyGLOME](python) *Python*
- [GLOME-Go](go/glome) *Go*
Binaries:
- [glome](cli) *Command-line interface for GLOME*
- [glome-login](login) *Replacement of login(1) implementing GLOME Login
protocol*
## Building
Building the GLOME library requires
- Compiler conforming to C99 (e.g. gcc, clang)
- Meson >=0.49.2
- OpenSSL headers >=1.1.1
- glib-2.0 (for glome-login as well as tests)
- libpam (for PAM module)
Alternatively, on systems with [Nix](https://nixos.org/), you can simply run
`nix-shell` in the root directory of this repository.
### Instructions
GLOME is built using [Meson](https://mesonbuild.com/). First, initialize the
Meson build directory. You only have to do this once per Meson configuration.
```shell
$ meson build
```
NOTE: You can customize the installation target by passing the `--prefix` flag.
Build the shared library `libglome.so` and the command line utility `glome`
inside the build root `./build`.
```shell
$ ninja -C build
```
Now run the tests.
```shell
$ meson test -C build
```
Install both the binary and the library into the configured prefix (the default
prefix is `/usr/local/`, which will require admin privileges).
```shell
$ meson install -C build
```
## Disclaimer
**This is not an officially supported Google product.**
<file_sep># GLOME login protocol
## Introduction
GLOME login is the first application of the [GLOME protocol](protocol.md). It is
used to authorize serial console access to Linux machines.
To achieve that, a client program called `glome-login` is executed by getty (or
a similar process) instead of the conventional `/sbin/login`. Instead of
prompting the user for the password, it generates a URL that points at the
authorization server and contains the GLOME handshake information and the action
requested by the operator. The operator follows that URL and upon successful
authentication and authorization, the server provides the operator with an
authorization code response that needs to be returned to `glome-login`.
If the authorization code matches the one calculated internally by
`glome-login`, the user is authorized and glome-login executes the requested
action - e.g. providing the login shell or rebooting the machine.
## Implementation
The current version of the GLOME login protocol uses the
[standard GLOME variant](protocol.md#variants).
Counters are set to constant `0` since only a single set of messages
is exchanged.
* GLOME handshake information and tags are encoded as Base64-encoded URLs or
"base64url"
[[RFC4648 section 5](https://tools.ietf.org/html/rfc4648#section-5)].
* Initial message from the GLOME login client to the server contains the
context required for authorization (i.e. host identity, requested action).
* The authorization context is sent in clear for easier debuggability and
reducing the likelihood of human errors (e.g. incomplete URL copy and
paste).
* Server's public key can be identified by:
  * 7-bit service key identifier and message tag prefix (of any
    length, including 0), or
  * 7-bit service key prefix and message tag prefix (of any length,
    including 0).
* Using a message tag prefix provides an additional protection against channel
errors (e.g. caused by operator errors).
* The message sent from the GLOME login client to the server contains the context required for authorization (i.e. host identity, requested action).
* In this protocol the client and the server sign identical messages. The
  message is already known to both parties, as it is transmitted in clear from
  the client to the server, and therefore is omitted from the response.
### Challenge request format
The GLOME login client generates the challenge in the form:
```
v<V>/<glome-handshake>[/<message>]/
glome-handshake := base64url(
<prefix-type>
<prefix7>
<eph-key>
[<prefixN>]
)
message := [<hostid-type>:]<hostid>[/<action>]
```
where <fields> have the following meanings:
| Field | Length | Description |
| :-------------- | ----------: | :----------------------------------------------- |
| V | 1 byte | Challenge format version. Currently always 1. |
| prefix-type | 1 bit | Determines the meaning of (prefix7; prefixN) fields: <br><ul><li>0: (service key indicator; message tag prefix)</li><li>1: reserved</li></ul>Service key indicator is either an index, or if no index is found it will be matched<br>with the public key (to be administrator configurable) |
| prefix7 | 7 bits | Purpose determined by prefix-type. |
| eph-key | 32 bytes | Client's public key (ephemeral). |
| prefixN | 0..32 bytes | Purpose determined by prefix-type, right now message tag prefix. |
| hostid-type | 0..n bytes | Type of identity; `hostname` if not set |
| hostid | 1..n bytes | Identity of the target (e.g. hostname, serial number, etc.) |
| action | 0..n bytes | Action that is being authorized (e.g. reboot, shell).<br>Both parties should agree what the default action is if not set. |
The client should then output the resulting challenge prefixed by the
configured prompt. In practice, that configurable prefix can be used to present
the challenge as an URL which can be used to submit the challenge to a GLOME
server.
The challenge must always end in a `/` to make it easy for the GLOME login
server to detect truncated requests and reject those early. Without the
trailing slash requirement the request will still likely look correct but may
result in an invalid request being signed causing confusion for the operator.
#### Action
The `<action>` field represents the action being authorized and should not
be ambiguous in a way that affects security. The format of the action is left
up to the implementer to decide but it has to take into account these points:
* The `<action>` needs to be suitable for embedding in a URL.
* The `<action>` should be human readable and easy to understand
both as part of the URL and stand alone.
Good examples:
* `shell/root` starts a shell as the given user, root in this case.
* `reboot` reboots the target.
* `show-logs/httpd` outputs debug logs for the HTTPD application.
Bad examples:
* `exec` executes a command.
* This is bad because it does not specify which command is being executed.
* `exec/cm0gLWZyIC8=` executes a given command (Base64 encoded).
* This is not human readable.
* `shell` starts a shell as a user-provided but undisclosed user.
* This is bad if there exists ambiguity on which user the shell will launch
as. E.g. if the system is hard-coded to only allow login as root, this
example is OK - otherwise not.
#### Challenge construction
Care must be taken to ensure that the challenge outputted by the GLOME login
client is suitable to be embedded in an URL.
A GLOME login client should make sure to format the challenge as per [[RFC 3986
Section 2.4](https://tools.ietf.org/html/rfc3986#section-2.4)]. The intent
should be to maximize the human readability of the URL.
**Example:** If the challenge prefix is set to `https://glome.example.com/` and
the challenge is `v1/ABCD…/serial:ab@!c/action/` the resulting challenge should
be presented as `https://glome.example.com/v1/ABCD…/serial:ab@!c/action/`.
The important lesson from this example is that `serial:ab@!c` is **not** encoded
using percent encoding as there is no reason to and would sacrifice human
readability needlessly.
Finally it is recommended to verify that commonly used terminal emulators
correctly identify the whole URL when outputted.
#### Message tag prefix
The message tag prefix is calculated by the client as the MAC tag over the
`<message>` field. The client can choose to include as much of the tag as it
prefers.
The server can verify the integrity of the message doing the same calculation
and performing a prefix comparison of the expected tag and the received
message tag prefix.
The message tag prefix does not offer any additional security properties unless
the server enforces its inclusion. However, the message tag prefix is still
useful to detect accidental message corruption. It can also be used to
resolve ambiguity in which service key was used by the client.
### Response format
The response is a Base64 URL-safe (base64url) MAC tag computed over the
`<message>` field as provided by the client. The GLOME login client can accept a
shortened tag (prefix) to reduce the message cost. Ephemeral keys are valid only
for one attempt, thus the brute forcing is severely limited, and can be further
slowed down by introducing an artificial delay before comparing the tags.
### Test vectors
These are some example test cases that can be used to verify an implementation
of the GLOME login protocol. Octet strings (keys and tags) are represented in
hexadecimal encoding, message counters in their decimal representation and
messages and strings in ASCII encoding.
[Ka]: https://render.githubusercontent.com/render/math?math=K_a
[Ka']: https://render.githubusercontent.com/render/math?math=K_a^%27
[Kb]: https://render.githubusercontent.com/render/math?math=K_b
[Kb']: https://render.githubusercontent.com/render/math?math=K_b^%27
[Ks]: https://render.githubusercontent.com/render/math?math=K_s
[Mn]: https://render.githubusercontent.com/render/math?math=M_n
[T]: https://render.githubusercontent.com/render/math?math=T
For in-depth definition of the GLOME variables, see the [protocol](protocol.md)
specification. In summary note that ![K_a'][Ka'] is the
private key and ![K_a][Ka] is the
associated public key.
#### Vector 1
Login request using service key index 1, message tag prefix length of 16 bits,
and response tag length of 60 bits.
| Variable | Value |
|-----------------------:|:-------------------------------------------------------------------|
| ![K_a'][Ka'] | `<KEY>` |
| ![K_b'][Kb'] | `<KEY>` |
| `prefix-type` | `0` |
| `prefix7` | `1` |
| `eph-key` (![K_a][Ka]) | `8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a` |
| `hostid-type` | Omitted |
| `hostid` | `my-server.local` |
| `action` | `shell/root` |
| | |
| ![K_b][Kb] | `de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f` |
| ![K_s][Ks] | `4a5d9d5ba4ce2de1728e3bf480350f25e07e21c947d19e3376f09b3c1e161742` |
| ![M_n][Mn] | `my-server.local/shell/root` |
| `prefixN` | `d0f59d0b17cb155a1b9cd2b5cdea3a17f37a200e95e3651af2c88e1c5fc8108e` |
| ![T][T] | `9721ee687b827249dbe6c244ba459216cf01d525012163025df358eb87c89059` |
| | |
| Challenge | `v1/AYUg8AmJMKdUdIt93LQ-91oNvzoNJjga9OukqY6qm05q0PU=/my-server.local/shell/root/` |
| Response token | `<PASSWORD>` |
#### Vector 2
Login request using service key prefix, no message tag prefix, and full response tag.
| Variable | Value |
|-----------------------:|:-------------------------------------------------------------------|
| ![K_a'][Ka'] | `fee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1dead` |
| ![K_b'][Kb'] | `b105f00db105f00db105f00db105f00db105f00db105f00db105f00db105f00d` |
| `prefix-type` | `0` |
| `prefix7` | `0x51` |
| `eph-key` (![K_a][Ka]) | `<KEY>2aa2e511074ee195e1c39ef6a88001418be656e3c376` |
| `hostid-type` | `serial-number` |
| `hostid` | `1234567890=ABCDFGH/#?` |
| `action` | `reboot` |
| | |
| ![K_b][Kb] | `d1b6941bba120bcd131f335da15778d9c68dadd398ae61cf8e7d94484ee65647` |
| ![K_s][Ks] | `<KEY>` |
| ![M_n][Mn] | `serial-number:1234567890=ABCDFGH/#?/reboot` |
| `prefixN` | `dff5aae753a8bdce06038a20adcdb26c7be19cb6bd05a7850fae542f4af29720` |
| ![T][T] | `a7c33f0542a3ef35c154cd8995084d605c6ce09f83cf1440a6cf3765a343aae6` |
| | |
| Challenge | `v1/UYcvQ1u4uJ0OOtYqouURB07hleHDnvaogAFBi-ZW48N2/serial-number:1234567890=ABCDFGH%2F%23%3F/reboot/` |
| Response token | `<KEY>ps83ZaNDquY=` |
## Alternatives
The GLOME protocol is based on the assumption that the cost of transmitting
messages in the server-to-client direction is higher than in the opposite
direction.
If that is not the case, then using an existing proven signature scheme (e.g,
[Ed25519](https://en.wikipedia.org/wiki/EdDSA#Ed25519)) is recommended.
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef LOGIN_BASE64_H_
#define LOGIN_BASE64_H_

#include <inttypes.h>
#include <stdlib.h>

// Base64 needs 4 bytes for every 3 bytes of input (+ padding + NULL byte)
// NOTE: Caller is responsible for protecting against integer overflow.
#define ENCODED_BUFSIZE(n) ((((n) + 2) / 3) * 4 + 1)
// Upper bound on the decoded size of an n-byte base64 input.
#define DECODED_BUFSIZE(n) ((((n)*3) / 4))

// Encodes src_len bytes from src into dst as URL-safe base64 ("base64url").
// NOTE(review): return-value semantics (bytes written vs. 0 on failure) are
// not visible in this header -- confirm against the implementation.
size_t base64url_encode(const uint8_t* src, size_t src_len, uint8_t* dst,
                        size_t dst_len);
// Decodes src_len bytes of URL-safe base64 from src into dst.
// NOTE(review): behavior on malformed input is not visible in this header --
// confirm against the implementation.
size_t base64url_decode(const uint8_t* src, size_t src_len, uint8_t* dst,
                        size_t dst_len);
#endif // LOGIN_BASE64_H_
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef LOGIN_CONFIG_H_
#define LOGIN_CONFIG_H_

#include "crypto.h"

// Runtime configuration for glome-login, filled in from the configuration
// file and command-line flags.
typedef struct glome_login_config {
  // Bitfield of options as described above.
  uint8_t options;
  // Username to log in as.
  const char* username;
  // Configuration file to parse.
  const char* config_path;
  // Login binary for fallback authentication.
  const char* login_path;
  // Challenge prompt.
  const char* prompt;
  // Delay to wait before confirming if the authentication code is valid
  // or not, to stop brute forcing; in seconds.
  unsigned int auth_delay_sec;
  // How long to wait for authentication code input in seconds.
  unsigned int input_timeout_sec;
  // Minimum required length of the encoded authentication code.
  unsigned int min_authcode_len;
  // Service key of the remote peer.
  uint8_t service_key[PUBLIC_KEY_LENGTH];
  // ID of the service key of the remote peer. (Optional)
  uint8_t service_key_id;
  // Local ephemeral secret key.
  uint8_t secret_key[PRIVATE_KEY_LENGTH];
  // Explicitly set host-id to use in the login request.
  const char* host_id;
  // Type of host-id to use in the login request.
  const char* host_id_type;
} glome_login_config_t;

// Key-type prefix identifying a GLOME v1 public key at rest.
#define GLOME_LOGIN_PUBLIC_KEY_ID "glome-v1"

// glome_login_parse_public_key extracts the public key bytes from an encoded
// public key.
// Returns true on success.
bool glome_login_parse_public_key(const char* encoded_key, uint8_t* public_key,
                                  size_t public_key_size);

// Error message returned by the config functions. If no error occurred
// return value will be set to STATUS_OK.
typedef char* status_t;

// Allocate and format an error message.
status_t status_createf(const char* format, ...);

// Free an error message after it is not needed anymore.
void status_free(status_t status);

// If no error occurred the value of returned error message will be STATUS_OK.
#define STATUS_OK NULL

// glome_login_parse_config_file parses the configuration file and fills the
// given config struct with the data. The default config file is used in case
// no explicit config file has been provided, however in this case failed
// attempts to read the default config file will be ignored.
status_t glome_login_parse_config_file(glome_login_config_t* config);

// Assigns a single (section, key, value) configuration option to the config
// struct.
status_t glome_login_assign_config_option(glome_login_config_t* config,
                                          const char* section, const char* key,
                                          const char* val);

#endif // LOGIN_CONFIG_H_
<file_sep>---
authors: <NAME> (@burgerdev)
state: committed
---
# Objective
Define a format for GLOME public keys at rest.
# Background
The GLOME protocol definition does not deal with key material handling, and the
reference implementation only implements a very rudimentary storage format -
32 raw octets. This causes a variety of problems, e.g. when transferring keys
between hosts or when specifying server keys for *GLOME Login*.
See also [google/glome#100](https://github.com/google/glome/issues/100).
# Requirements
* A GLOME public key at rest should be unambiguously identifiable as such.
* Public keys should be printable.
* Public keys should be easily exchanged over any medium, potentially analog.
# Design ideas
Public keys are stored in URL-safe base64 encoding and tagged with their
protocol variant version. The configuration file format accepts keys in a
format similar to [OpenSSH's `authorized_keys` format][1].
[1]: https://man.openbsd.org/sshd.8#AUTHORIZED_KEYS_FILE_FORMAT
## Public Key Format
The format of a _GLOME public key_ adheres to the ABNF below:
```abnf
public-key = key-type SP key-base64
key-type = "glome-v1"
key-base64 = 44urlsafe-base64-char
urlsafe-base64-char = "=" / "-" / "_" / ALPHA / DIGIT
```
The key type encodes the GLOME variant this key should be used with. As we
only have one variant right now, we're only defining one `key-type` here.
An example public key, like it would be printed by `glome pubkey`:
```
glome-v1 l<KEY>
```
## Public Key Interpretation
An implementation must verify that the `key-type` matches its expectations and
must not produce a tag if it does not.
If the `key-type` matches the expectations, the `key-base64` part is decoded as
base64, and the resulting 32 octets are interpreted as the _raw GLOME public
key_, suitable for use with `glome_tag`.
## Consequences for the GLOME Login Configuration Format
The configuration file accepts a new `public-key` field in the `service`
section. This field must contain a key as specified in this document. The `key`
field is deprecated and will be removed for release 1.0, but will be supported
until then. If both `public-key` and `key` are present in the config file,
`public-key` will take precedence.
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package server implements GLOME-login server framework.
package server
import (
"encoding/hex"
"fmt"
"net/http"
"sync"
"github.com/google/glome/go/login"
)
const (
	// MaxResponseSize is the maximum size in characters of the response token
	// (a 32-byte tag, base64 encoded).
	MaxResponseSize = 44 // 32 bytes base64 encoded
)
// ErrInvalidResponseLen denotes that the response length provided is invalid.
// ResponseLen should be in range {1,...,MaxResponseSize}.
type ErrInvalidResponseLen struct {
	// ResponseLen is the rejected length value.
	ResponseLen uint8
}

// Error implements the error interface.
func (e ErrInvalidResponseLen) Error() string {
	return fmt.Sprintf("ResponseLen should be in range {1,...,%v}, got %v", MaxResponseSize, e.ResponseLen)
}
// Authorizer responds to an authorization request. The method
// GrantLogin returns whether an user is allowed to perform a given action on a host.
//
// Some considerations need to be held while implementing this interface:
// - GrantLogin should consider that an empty string as command is a correct input.
// - If no user can be obtained from request metadata, an empty string is to be
//   passed as default value.
// - Both hostIDType and hostID can be empty. Whether this refers to a default value
//   or not is to be user configurable.
// - The returned boolean will be considered even if an error is returned.
type Authorizer interface {
	GrantLogin(user string, hostID string, hostIDType string, action string) (bool, error)
}
// AuthorizerFunc type is an adapter to allow the use of ordinary functions as
// an Authorizer (analogous to http.HandlerFunc).
type AuthorizerFunc func(user string, hostID string, hostIDType string, action string) (bool, error)

// GrantLogin calls a(user, hostID, hostIDType, action).
func (a AuthorizerFunc) GrantLogin(user string, hostID string, hostIDType string, action string) (bool, error) {
	return a(user, hostID, hostIDType, action)
}
// LoginServer is a framework that can be used to implement servers for glome-login.
type LoginServer struct {
	// Keys manages the keys used by the server.
	Keys *KeyManager

	// auth decides whether a login is granted; guarded by authLock.
	auth     Authorizer
	authLock sync.RWMutex

	// loginParser parses glome-login challenge URLs using the managed keys.
	loginParser *login.Server

	// responseLen is how many base64 characters of the token to return.
	responseLen uint8

	// userHeader is the HTTP header carrying the authenticated user id.
	userHeader string
}
// Authorizer replaces the server's Authorizer with the one provided, in a
// concurrency-safe way.
func (s *LoginServer) Authorizer(a Authorizer) {
	s.authLock.Lock()
	defer s.authLock.Unlock()
	s.auth = a
}
// NewLoginServer creates a new server with the provided Authorizer and,
// optionally, selected options. Options are applied in order; the first
// option that fails aborts construction.
func NewLoginServer(a Authorizer, options ...func(*LoginServer) error) (*LoginServer, error) {
	srv := &LoginServer{
		auth:        a,
		Keys:        NewKeyManager(),
		responseLen: MaxResponseSize,
		userHeader:  "authenticated-user",
	}
	srv.loginParser = srv.newLoginParser()
	for _, opt := range options {
		if err := opt(srv); err != nil {
			return nil, err
		}
	}
	return srv, nil
}
// ResponseLen is an option to be provided to NewLoginServer on creation. It
// sets the size of the response token, measured in base64 characters. It
// returns ErrInvalidResponseLen if the provided length is not in
// {1,...,MaxResponseSize}. If not set, the length defaults to
// MaxResponseSize.
func ResponseLen(length uint8) func(srv *LoginServer) error {
	return func(srv *LoginServer) error {
		if length == 0 || length > MaxResponseSize {
			return ErrInvalidResponseLen{ResponseLen: length}
		}
		srv.responseLen = length
		return nil
	}
}
// UserHeader is an option to be provided to NewLoginServer on creation. It
// sets the name of the HTTP header from which the authenticated user id is
// read. It defaults to "authenticated-user".
func UserHeader(name string) func(srv *LoginServer) error {
	return func(srv *LoginServer) error {
		srv.userHeader = name
		return nil
	}
}
// newLoginParser builds the login.Server used to parse challenge URLs,
// wired to fetch service keys from this server's KeyManager.
func (s *LoginServer) newLoginParser() *login.Server {
	return &login.Server{KeyFetcher: s.Keys.keyFetcher()}
}
// ServeHTTP implements http.Handler interface:
// - On "/": List server service keys.
// - On a glome login URL: Return a login token or an error message.
func (s *LoginServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/" {
		s.printServerKeys(w)
		return
	}
	user := r.Header.Get(s.userHeader)
	// Prefer RawPath (the original, percent-encoded form) so that escaped
	// octets in the host/action segments reach the login parser intact;
	// RawPath is empty when Path is already a valid encoding of itself.
	path := r.URL.RawPath
	if path == "" {
		path = r.URL.Path
	}
	response, err := s.loginParser.ParseURLResponse(path)
	if err != nil {
		// Named status constant instead of the bare literal 400.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	s.printToken(w, response, user)
}
// printToken writes the login token for the parsed request to w, provided
// that the Authorizer grants the login; otherwise it replies 403.
func (s *LoginServer) printToken(w http.ResponseWriter, r *login.URLResponse, user string) {
	s.authLock.RLock()
	allowed, err := s.auth.GrantLogin(user, r.Msg.HostID, r.Msg.HostIDType,
		r.Msg.Action)
	s.authLock.RUnlock()
	if !allowed {
		// Per the Authorizer contract, the boolean is authoritative even when
		// an error is returned; the error only refines the response message.
		// Named status constants instead of the bare literal 403.
		if err != nil {
			http.Error(w, err.Error(), http.StatusForbidden)
		} else {
			http.Error(w, "unauthorized action", http.StatusForbidden)
		}
		return
	}
	// NOTE(review): assumes EncToken() has at least responseLen characters;
	// holds for full 32-byte tags (44 base64 chars, see MaxResponseSize).
	responseToken := r.EncToken()[:s.responseLen]
	fmt.Fprintln(w, responseToken)
}
// printServerKeys writes a human-readable table of the configured service
// keys (index and hex-encoded value) to w.
func (s *LoginServer) printServerKeys(w http.ResponseWriter) {
	const header = "List of server keys\n" +
		"-------------------\n" +
		"Index\tValue\n"
	fmt.Fprint(w, header)
	for _, k := range s.Keys.ServiceKeys() {
		fmt.Fprintf(w, "%v\t%v\n", k.Index, hex.EncodeToString(k.Value[:]))
	}
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "config.h"
#include <alloca.h>
#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include "base64.h"
#include "ui.h"
// Returns true if the line is empty or contains only whitespace.
static bool is_empty(const char *line) {
  // Cast to unsigned char: passing a negative char value to isspace()
  // is undefined behavior (C11 7.4p1).
  for (; isspace((unsigned char)*line); line++) {
  }
  return *line == '\0';
}
// A comment line starts with '#' or ';' in the first column.
static bool is_comment(const char *line) {
  const char first = line[0];
  return first == '#' || first == ';';
}
// A section header line starts with '[' in the first column.
static bool is_section(const char *line) {
  return *line == '[';
}
// Returns true if name is a non-empty string made up solely of
// alphanumerics, '_' and '-'.
static bool is_name(const char *name) {
  const char *p;
  // Cast to unsigned char: isalnum() on a negative char value is
  // undefined behavior (C11 7.4p1).
  for (p = name; isalnum((unsigned char)*p) || *p == '_' || *p == '-'; p++) {
  }
  return *p == '\0' && p - name > 0;
}
// Extracts the section name from a "[name]" header, in place.
// Returns NULL if the line is not a well-formed header; otherwise the
// closing bracket is overwritten with NUL and a pointer to the name
// inside `line` is returned.
static char *section_name(char *line) {
  char *closing = line + 1;
  while (*closing != ']' && *closing != '\0') {
    closing++;
  }
  if (*closing != ']') {
    return NULL;  // no closing bracket
  }
  if (!is_empty(closing + 1)) {
    return NULL;  // trailing garbage after "]"
  }
  *closing = '\0';
  return is_name(line + 1) ? line + 1 : NULL;
}
// Splits a "key = value" line in place. On success *key and *val point
// into `line`; on any parse error both are left NULL. The value has
// trailing whitespace trimmed.
static void key_value(char *line, char **key, char **val) {
  *key = NULL;
  *val = NULL;
  char *p;
  // All ctype calls take an unsigned char to avoid undefined behavior
  // on negative char values (C11 7.4p1).
  for (p = line; !isspace((unsigned char)*p) && *p != '=' && *p != '\0'; p++) {
  }
  if (*p == '\0') {
    return;  // no separator on this line
  }
  char *end = p;  // one past the end of the key
  for (; isspace((unsigned char)*p); p++) {
  }
  if (*p != '=') {
    return;  // whitespace inside the key, or missing '='
  }
  for (p++; isspace((unsigned char)*p); p++) {
  }
  if (*p == '\0') {
    return;  // empty value
  }
  *end = '\0';
  if (!is_name(line)) {
    return;
  }
  // Trim whitespace at the end of the value.
  int k = strlen(p) - 1;
  for (; k >= 0 && isspace((unsigned char)p[k]); k--) {
  }
  p[k + 1] = '\0';
  *key = line;
  *val = p;
}
// Parses a "glome-v1"-style encoded public key line into public_key.
// The expected input is GLOME_LOGIN_PUBLIC_KEY_ID, optional blanks, the
// base64url key, and an optional trailing comment. Returns false (after
// logging via errorf) on a too-small buffer, unknown prefix, or a key
// that does not decode to exactly GLOME_MAX_PUBLIC_KEY_LENGTH bytes.
bool glome_login_parse_public_key(const char *encoded_key, uint8_t *public_key,
                                  size_t public_key_size) {
  if (public_key_size < GLOME_MAX_PUBLIC_KEY_LENGTH) {
    errorf("ERROR: provided buffer has size %zu, need at least %d\n",
           public_key_size, GLOME_MAX_PUBLIC_KEY_LENGTH);
    return false;
  }
  size_t prefix_length = strlen(GLOME_LOGIN_PUBLIC_KEY_ID);
  if (strncmp(encoded_key, GLOME_LOGIN_PUBLIC_KEY_ID, prefix_length)) {
    errorf("ERROR: unsupported public key encoding: %s\n", encoded_key);
    return false;
  }
  // Advance to the start of the base64-encoded key.
  encoded_key += prefix_length;
  while (*encoded_key != '\0' && isblank(*encoded_key)) {
    encoded_key++;
  }
  // Truncate the encoded string to allow for appended comments.
  // isgraph() stops at the first whitespace or NUL.
  size_t encoded_length = 0;
  while (isgraph(encoded_key[encoded_length])) {
    encoded_length++;
  }
  // Unfortunately we need an extra byte because 32B don't pack cleanly in
  // base64.
  uint8_t buf[GLOME_MAX_PUBLIC_KEY_LENGTH + 1] = {0};
  size_t b = base64url_decode((uint8_t *)encoded_key, encoded_length, buf,
                              sizeof(buf));
  if (b != GLOME_MAX_PUBLIC_KEY_LENGTH) {
    errorf("ERROR: public key decoded to %zu bytes, expected %d\n", b,
           GLOME_MAX_PUBLIC_KEY_LENGTH);
    return false;
  }
  // Only copy out after full validation so public_key is never left
  // partially written.
  memcpy(public_key, buf, GLOME_MAX_PUBLIC_KEY_LENGTH);
  return true;
}
// Duplicates val and stores the copy in *option; the caller owns it.
static status_t assign_string_option(const char **option, const char *val) {
  const char *duplicated = strdup(val);
  if (duplicated == NULL) {
    return status_createf("ERROR: failed to allocate memory for value: %s",
                          val);
  }
  *option = duplicated;
  return STATUS_OK;
}
// Parses val as a non-negative integer (any base strtoul accepts) and
// stores it in *option. Rejects trailing garbage and out-of-range input.
static status_t assign_positive_int_option(unsigned int *option,
                                           const char *val) {
  char *parse_end;
  errno = 0;
  unsigned long parsed = strtoul(val, &parse_end, 0);  // NOLINT(runtime/int)
  bool malformed = errno != 0 || parse_end == val || *parse_end != '\0';
  if (malformed || parsed > UINT_MAX) {
    return status_createf("ERROR: invalid value for option: %s", val);
  }
  *option = (unsigned int)parsed;
  return STATUS_OK;
}
// Sets the given bit in the config's options bitfield.
static status_t set_bitfield_option(glome_login_config_t *config, uint8_t bit) {
  config->options = config->options | bit;
  return STATUS_OK;
}
// Clears the given bit in the config's options bitfield.
static status_t clear_bitfield_option(glome_login_config_t *config,
                                      uint8_t bit) {
  config->options = config->options & ~bit;
  return STATUS_OK;
}
// Recognizes "true", "yes" and "on" (case-insensitive) and the exact
// string "1" as a true boolean value.
static bool boolean_true(const char *val) {
  static const char *words[] = {"true", "yes", "on"};
  for (size_t i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
    if (strcasecmp(val, words[i]) == 0) {
      return true;
    }
  }
  return strcmp(val, "1") == 0;
}
// Recognizes "false", "no" and "off" (case-insensitive) and the exact
// string "0" as a false boolean value.
static bool boolean_false(const char *val) {
  static const char *words[] = {"false", "no", "off"};
  for (size_t i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
    if (strcasecmp(val, words[i]) == 0) {
      return true;
    }
  }
  return strcmp(val, "0") == 0;
}
// Parses val as a boolean and sets or clears `bit` accordingly; when
// `invert` is true the meaning of the boolean is flipped (used for
// options like "disable-syslog").
static status_t update_bitfield_option(glome_login_config_t *config,
                                       uint8_t bit, bool invert,
                                       const char *val) {
  bool truthy;
  if (boolean_true(val)) {
    truthy = true;
  } else if (boolean_false(val)) {
    truthy = false;
  } else {
    return status_createf("ERROR: unrecognized boolean value: %s", val);
  }
  // truthy XOR invert decides whether the bit ends up set.
  if (truthy != invert) {
    return set_bitfield_option(config, bit);
  }
  return clear_bitfield_option(config, bit);
}
// Hex-decodes val into dest, but only if dest is still all zeroes --
// i.e. the first assignment wins and later assignments are silently
// ignored. NOTE(review): presumably this lets an earlier source
// (e.g. command-line flags) take precedence over the config file --
// confirm against the callers.
static status_t assign_key_option(uint8_t *dest, size_t dest_len,
                                  const char *val) {
  if (is_zeroed(dest, dest_len)) {
    if (decode_hex(dest, dest_len, val)) {
      return status_createf("ERROR: failed to hex decode service key: %s", val);
    }
  }
  return STATUS_OK;
}
// Parses val as a service key version in the range 0..127 and stores it
// in config->service_key_id.
static status_t assign_key_version_option(glome_login_config_t *config,
                                          const char *val) {
  char *parse_end;
  errno = 0;
  unsigned long version = strtoul(val, &parse_end, 0);  // NOLINT(runtime/int)
  bool malformed = errno != 0 || parse_end == val || *parse_end != '\0';
  if (malformed || version > 127) {
    return status_createf("ERROR: '%s' is not a valid key version (0..127)",
                          val);
  }
  config->service_key_id = (unsigned int)version;
  return STATUS_OK;
}
// Dispatches a key/value pair from the "[default]" section (or the
// implicit default section) to the appropriate setter. Returns an error
// status for unknown keys.
static status_t assign_default_option(glome_login_config_t *config,
                                      const char *key, const char *val) {
  if (strcmp(key, "auth-delay") == 0) {
    return assign_positive_int_option(&config->auth_delay_sec, val);
  } else if (strcmp(key, "input-timeout") == 0) {
    return assign_positive_int_option(&config->input_timeout_sec, val);
  } else if (strcmp(key, "config-path") == 0) {
    return assign_string_option(&config->config_path, val);
  } else if (strcmp(key, "ephemeral-key") == 0) {
    return assign_key_option(config->secret_key, sizeof config->secret_key,
                             val);
  } else if (strcmp(key, "min-authcode-len") == 0) {
    return assign_positive_int_option(&config->min_authcode_len, val);
  } else if (strcmp(key, "host-id") == 0) {
    return assign_string_option(&config->host_id, val);
  } else if (strcmp(key, "host-id-type") == 0) {
    return assign_string_option(&config->host_id_type, val);
  } else if (strcmp(key, "login-path") == 0) {
    return assign_string_option(&config->login_path, val);
  } else if (strcmp(key, "disable-syslog") == 0) {
    // Inverted option: "disable-syslog = yes" clears the SYSLOG bit.
    return update_bitfield_option(config, SYSLOG, true, val);
  } else if (strcmp(key, "print-secrets") == 0) {
    return update_bitfield_option(config, INSECURE, false, val);
  } else if (strcmp(key, "timeout") == 0) {
    // "timeout" writes the same field as "input-timeout".
    return assign_positive_int_option(&config->input_timeout_sec, val);
  } else if (strcmp(key, "verbose") == 0) {
    return update_bitfield_option(config, VERBOSE, false, val);
  }
  return status_createf("ERROR: unrecognized default option: %s", key);
}
// Dispatches a key/value pair from the "[service]" section to the
// appropriate setter. Returns an error status for unknown keys.
static status_t assign_service_option(glome_login_config_t *config,
                                      const char *key, const char *val) {
  if (strcmp(key, "key") == 0) {
    return assign_key_option(config->service_key, sizeof config->service_key,
                             val);
  } else if (strcmp(key, "key-version") == 0) {
    return assign_key_version_option(config, val);
  } else if (strcmp(key, "url-prefix") == 0) {
    // `url-prefix` support is provided only for backwards-compatibility
    // TODO: to be removed in the 1.0 release
    size_t len = strlen(val);
    // len + 2: room for the appended '/' and the terminating NUL.
    char *url_prefix = malloc(len + 2);
    if (url_prefix == NULL) {
      return status_createf("ERROR: failed to allocate memory for url_prefix");
    }
    strncpy(url_prefix, val, len + 1);
    url_prefix[len] = '/';
    url_prefix[len + 1] = '\0';
    config->prompt = url_prefix;
    return STATUS_OK;
  } else if (strcmp(key, "prompt") == 0) {
    return assign_string_option(&config->prompt, val);
  } else if (strcmp(key, "public-key") == 0) {
    if (!glome_login_parse_public_key(val, config->service_key,
                                      sizeof(config->service_key))) {
      return status_createf("ERROR: failed to decode public-key");
    }
    return STATUS_OK;
  }
  return status_createf("ERROR: unrecognized service option: %s", key);
}
// Routes a key/value pair to the handler for its section. Only the
// "service" and "default" sections are recognized.
status_t glome_login_assign_config_option(glome_login_config_t *config,
                                          const char *section, const char *key,
                                          const char *val) {
  if (section == NULL) {
    return status_createf("ERROR: section name not set");
  }
  if (strcmp(section, "default") == 0) {
    return assign_default_option(config, key, val);
  }
  if (strcmp(section, "service") == 0) {
    return assign_service_option(config, key, val);
  }
  return status_createf("ERROR: section name not recognized: %s", section);
}
// Parses the INI-style config file at config->config_path, falling back
// to DEFAULT_CONFIG_FILE when no path was set. A missing file is only an
// error when the path was set explicitly. Returns STATUS_OK on success.
status_t glome_login_parse_config_file(glome_login_config_t *config) {
  bool required = config->config_path != NULL;
  if (!required) {
    config->config_path = DEFAULT_CONFIG_FILE;
  }
  FILE *f = fopen(config->config_path, "r");
  if (f == NULL) {
    if (!required) {
      // The default config file is optional; silently skip it.
      return STATUS_OK;
    }
    return status_createf("ERROR: config file could not be opened: %s\n",
                          strerror(errno));
  }
  char *line = NULL;
  char *section = NULL;
  char *key, *val;
  size_t len = 0;
  size_t lines = 0;  // 1-based line counter for error messages
  status_t status = STATUS_OK;
  while (getline(&line, &len, f) != -1) {
    lines++;
    if (is_empty(line) || is_comment(line)) {
      continue;
    } else if (is_section(line)) {
      char *s = section_name(line);
      if (s == NULL) {
        // %zu is the correct conversion for size_t (was %ld).
        status = status_createf(
            "ERROR: config file parsing failed in line %zu (bad section "
            "name)\n",
            lines);
        break;
      }
      free(section);
      section = strdup(s);
    } else {
      key_value(line, &key, &val);
      if (key == NULL || val == NULL) {
        status = status_createf(
            "ERROR: config file parsing failed in line %zu (bad key/value)\n",
            lines);
        break;
      }
      // Key/value pairs before any section header go to "default".
      status = glome_login_assign_config_option(
          config, section ? section : "default", key, val);
      if (status != STATUS_OK) {
        break;
      }
    }
  }
  free(line);
  free(section);
  fclose(f);
  return status;
}
<file_sep># glome-login
This binary implements the client side of the
[GLOME Login](../docs/glome-login.md) protocol. It is written to be a
replacement of login(1).
## Usage
1. Create a configuration file, see [example.cfg](example.cfg).
1. Try it out by running `glome-login -c glome.cfg -- root`
## Configuration
In order to reduce external dependencies, a custom parser is used
to read the configuration file. The parser supports a simplified
version of the INI syntax with the following limitations:
* Quoting and escaping is not supported.
* Comments are allowed only at the start of the line and can
begin with either `#` or `;`.
## Installation
The installation is dependent on what system you are running.
### systemd
Create a override file for the getty instance e.g. in
`/etc/systemd/system/serial-getty@.service.d/glome.conf`.
```
[Service]
ExecStart=
ExecStart=-/sbin/agetty -l /usr/local/sbin/glome-login \
-o '-- \\u' --keep-baud 115200,38400,9600 %I $TERM
```
Alternatively or for a normal VTY, use
`/etc/systemd/system/getty@.service.d/glome.conf`.
```
[Service]
ExecStart=
ExecStart=-/sbin/agetty -l /usr/local/sbin/glome-login \
-o '-- \\u' --noclear %I $TERM
```
## Troubleshooting
glome-login uses error tags to communicate errors.
### no-service-key
This error means that `glome-login` could not figure out what service key to
use. This most likely means that you have not specified a service key in the
configuration file (by default `/etc/glome/config`).
# PAM module
`pam_glome.so` library implements the PAM authentication module for the
[GLOME Login](../docs/glome-login.md) protocol.
## Installation
1. Install the library into the system dependent location for PAM modules
(for example `/lib/security/pam_glome.so`).
1. Enable and configure PAM module for a specific service (for example
`/etc/pam.d/login`):
```
auth requisite pam_glome.so
```
## Usage
PAM module supports the following options:
* `config_path=PATH` - location of the configuration file to parse (defaults to
`/etc/glome/config`)
* `key=KEY` - use hex-encoded `KEY` as the service key (defaults to key
from configuration file)
* `key_version=N` - use `N` for the service key version (defaults to key
version from configuration file)
* `prompt=PROMPT` - challenge prompt (defaults to prompt from configuration
file)
* `debug` - enable verbose logging
* `print_secrets` - enable logging of secrets (INSECURE!)
* `host_id=NAME` - use `NAME` as the host-id
* `ephemeral_key=KEY` - use hex-encoded `KEY` instead of the ephemeral
secret key (INSECURE!)
## Troubleshooting
PAM module uses error tags to communicate errors in the syslog messages.
# Docker
Dockerfile included in the repository creates a Docker image that can be used
to test `glome-login` and the PAM module.
## Installation
Docker image for GLOME needs to be built first using the following command:
```
$ docker build -t glome -f kokoro/docker/Dockerfile .
```
## Usage
The container is then started in the background with two TCP ports published
to the host:
```
$ container=$(docker run -d -p 2022:22 -p 2023:23 glome)
```
Once the container is running it is possible to login using `netcat` or
`socat`, for example:
```
$ socat tcp-connect:localhost:2023 file:`tty`,raw,echo=0
```
Regular SSH client can be used for testing the PAM module:
```
$ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 2022 root@localhost
```
Authorization code required for GLOME Login can be obtained by running:
```
$ docker exec $container /usr/local/bin/glome login --key /usr/local/etc/glome/private.key https://glome.example.com/v1/...
```
<file_sep>#!/bin/sh
GLOME=/usr/local/bin/glome
GLOME_LOGIN=/usr/local/sbin/glome-login
CONFIG=/usr/local/etc/glome/config
PRIVATE=/usr/local/etc/glome/private.key
umask 077
PUBLIC_KEY=$($GLOME genkey | tee $PRIVATE | $GLOME pubkey)
sed -i "s/^#public-key = .*/public-key = $PUBLIC_KEY/" $CONFIG
sed -i "1 i\auth sufficient /usr/local/lib/x86_64-linux-gnu/security/pam_glome.so" /etc/pam.d/sshd
cat <<EOF > /etc/ssh/sshd_config.d/glome.conf
ChallengeResponseAuthentication yes
PermitRootLogin yes
EOF
mkdir /run/sshd
/usr/sbin/sshd
socat tcp-l:23,reuseaddr,fork exec:"/sbin/agetty -l $GLOME_LOGIN -",pty,setsid,setpgid,stderr,ctty
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "login.h"
#include <glib.h>
#include <glome.h>
#include <stdio.h>
#include <string.h>
#include "base64.h"
#include "crypto.h"
// Verifies that shell_action() builds the "shell/<user>" action string,
// reports its length, and leaves the error tag unset.
static void test_shell_action() {
  const char* error_tag = NULL;
  char* action = NULL;
  size_t action_len = 0;
  shell_action("operator", &action, &action_len, &error_tag);
  g_assert_cmpstr("shell/operator", ==, action);
  // action_len includes the terminating NUL byte.
  g_assert_true(strlen(action) + 1 == action_len);
  g_assert_null(error_tag);
}
// Exercises authcode generation and challenge construction against the
// first protocol test vector (fixed private/service keys, plain
// host-id, key id 1, 2-byte message-tag prefix).
// NOTE(review): the private-key hex literal reads "<KEY>" -- it looks
// like the real test-vector constant was redacted; confirm against the
// upstream test vectors before relying on this test.
static void test_vector_1() {
  const char* host_id = "my-server.local";
  const char* action = "shell/root";
  uint8_t service_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  uint8_t public_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  decode_hex(
      private_key, sizeof private_key,
      "<KEY>");
  decode_hex(
      service_key, sizeof service_key,
      "de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f");
  g_assert_true(derive_or_generate_key(private_key, public_key) == 0);
  {
    // Authcode must start with the expected base64url prefix.
    uint8_t authcode[GLOME_MAX_TAG_LENGTH];
    g_assert_true(
        get_authcode(host_id, action, service_key, private_key, authcode) == 0);
    char authcode_encoded[ENCODED_BUFSIZE(sizeof authcode)] = {0};
    g_assert_true(base64url_encode(authcode, sizeof authcode,
                                   (uint8_t*)authcode_encoded,
                                   sizeof authcode_encoded));
    // Only the first 10 encoded characters are pinned here.
    g_assert_cmpmem("lyHuaHuCck", 10, authcode_encoded, 10);
  }
  {
    // Challenge URL must match the expected v1 form exactly.
    const char* error_tag = NULL;
    char* challenge = NULL;
    int challenge_len = 0;
    int service_key_id = 1;
    uint8_t prefix_tag[GLOME_MAX_TAG_LENGTH];
    g_assert_true(get_msg_tag(host_id, action, service_key, private_key,
                              prefix_tag) == 0);
    if (request_challenge(service_key, service_key_id, public_key, host_id,
                          action, prefix_tag, /*prefix_tag_len=*/2, &challenge,
                          &challenge_len, &error_tag)) {
      g_test_message("construct_request_challenge failed: %s", error_tag);
      g_test_fail();
    }
    g_assert_cmpstr(
        "v1/AYUg8AmJMKdUdIt93LQ-91oNvzoNJjga9OukqY6qm05q0PU=/"
        "my-server.local/shell/root/",
        ==, challenge);
    g_assert_null(error_tag);
  }
}
// Exercises authcode generation and challenge construction against the
// second protocol test vector: a host-id with URL-special characters
// ("/#?"), key id 0 (so the first service-key byte is used as prefix)
// and no message-tag prefix.
static void test_vector_2() {
  const char* host_id = "serial-number:1234567890=ABCDFGH/#?";
  const char* action = "reboot";
  uint8_t service_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH] = {0};
  uint8_t public_key[GLOME_MAX_PUBLIC_KEY_LENGTH] = {0};
  decode_hex(
      private_key, sizeof private_key,
      "fee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1dead");
  decode_hex(
      service_key, sizeof service_key,
      "d1b6941bba120bcd131f335da15778d9c68dadd398ae61cf8e7d94484ee65647");
  g_assert_true(derive_or_generate_key(private_key, public_key) == 0);
  {
    // Full base64url authcode is pinned for this vector.
    uint8_t authcode[GLOME_MAX_TAG_LENGTH];
    g_assert_true(
        get_authcode(host_id, action, service_key, private_key, authcode) == 0);
    char authcode_encoded[ENCODED_BUFSIZE(sizeof authcode)] = {0};
    g_assert_true(base64url_encode(authcode, sizeof authcode,
                                   (uint8_t*)authcode_encoded,
                                   sizeof authcode_encoded));
    g_assert_cmpstr("p8M_BUKj7zXBVM2JlQhNYFxs4J-DzxRAps83ZaNDquY=", ==,
                    authcode_encoded);
  }
  {
    // The "/#?" characters in host_id must appear percent-encoded in the
    // challenge URL.
    const char* error_tag = NULL;
    char* challenge = NULL;
    int challenge_len = 0;
    int service_key_id = 0;
    if (request_challenge(service_key, service_key_id, public_key, host_id,
                          action,
                          /*prefix_tag=*/NULL, /*prefix_tag_len=*/0, &challenge,
                          &challenge_len, &error_tag)) {
      g_test_message("construct_request_challenge failed: %s", error_tag);
      g_test_fail();
    }
    g_assert_cmpstr(
        "v1/UYcvQ1u4uJ0OOtYqouURB07hleHDnvaogAFBi-ZW48N2/"
        "serial-number:1234567890=ABCDFGH%2F%23%3F/reboot/",
        ==, challenge);
    g_assert_null(error_tag);
  }
}
// Registers the GLOME login unit tests and runs them under the GLib
// test harness.
int main(int argc, char** argv) {
  g_test_init(&argc, &argv, NULL);
  struct {
    const char* path;
    void (*fn)(void);
  } tests[] = {
      {"/test-shell-action", test_shell_action},
      {"/test-vector-1", test_vector_1},
      {"/test-vector-2", test_vector_2},
  };
  for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
    g_test_add_func(tests[i].path, tests[i].fn);
  }
  return g_test_run();
}
<file_sep>#!/bin/bash
set -e
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get install -y --no-install-recommends \
build-essential meson pkg-config \
libssl-dev libglib2.0-dev libpam0g-dev libpam-wrapper libpamtest0-dev
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "base64.h"
#include <openssl/evp.h>
#include <string.h>
#define CHAR_62_CLASSIC '+'
#define CHAR_62_URLSAFE '-'
#define CHAR_63_CLASSIC '/'
#define CHAR_63_URLSAFE '_'
// Encodes src into base64url (RFC 4648 section 5). Returns the number
// of bytes written to dst, or 0 when src_len is out of range or dst is
// too small.
size_t base64url_encode(const uint8_t* src, size_t src_len, uint8_t* dst,
                        size_t dst_len) {
  // Check the range BEFORE evaluating ENCODED_BUFSIZE: the macro has not
  // been tested for operation close to the overflow point, but up to
  // SIZE_MAX/2 it behaves fine. (Previously the macro was evaluated
  // first, computing a wrapped-around value before the guard ran.)
  if (src_len >= SIZE_MAX / 2) {
    return 0;
  }
  size_t len = ENCODED_BUFSIZE(src_len);
  if (len > dst_len) {
    return 0;
  }
  len = EVP_EncodeBlock(dst, src, src_len);
  // Replacing 62nd and 63rd character with '-' and '_' per RFC4648 section 5
  for (size_t i = 0; i < len; i++) {
    switch (dst[i]) {
      case CHAR_62_CLASSIC:
        dst[i] = CHAR_62_URLSAFE;
        break;
      case CHAR_63_CLASSIC:
        dst[i] = CHAR_63_URLSAFE;
        break;
    }
  }
  return len;
}
// Decodes base64url (RFC 4648 section 5) input into dst. Returns the
// number of decoded bytes, or 0 on allocation failure, a too-small dst,
// or malformed input.
size_t base64url_decode(const uint8_t* urlsafe_src, size_t src_len,
                        uint8_t* dst, size_t dst_len) {
  if (dst_len < DECODED_BUFSIZE(src_len)) {
    return 0;
  }
  // Restore 62nd and 63rd character from '-' and '_' per RFC4648 section 5.
  // Work on a mutable copy since the input is const.
  uint8_t* src = (uint8_t*)malloc(src_len);
  if (src == NULL) {
    return 0;
  }
  memcpy(src, urlsafe_src, src_len);
  for (size_t i = 0; i < src_len; i++) {
    switch (src[i]) {
      case CHAR_62_URLSAFE:
        src[i] = CHAR_62_CLASSIC;
        break;
      case CHAR_63_URLSAFE:
        src[i] = CHAR_63_CLASSIC;
        break;
    }
  }
  EVP_ENCODE_CTX* ctx = EVP_ENCODE_CTX_new();
  if (ctx == NULL) {
    free(src);
    return 0;
  }
  EVP_DecodeInit(ctx);
  int ret, len, total = 0;
  ret = EVP_DecodeUpdate(ctx, dst, &len, src, src_len);
  if (ret < 0) {
    goto out;
  }
  total = len;
  // NOTE(review): EVP_DecodeFinal is passed `dst`, not `dst + total`;
  // this assumes the final step emits no additional bytes -- confirm
  // against the OpenSSL EVP_Decode* documentation.
  ret = EVP_DecodeFinal(ctx, dst, &len);
  if (ret < 0) {
    total = 0;
    goto out;
  }
  total += len;
out:
  free(src);
  EVP_ENCODE_CTX_free(ctx);
  return total;
}
<file_sep>#!/bin/sh
set -e
apk add --no-cache \
alpine-sdk meson \
openssl-dev glib-dev linux-pam-dev
<file_sep>#!/bin/sh
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Integration test for the glome CLI; $1 is the path to the binary
# under test. Exits with the number of failed checks.
set -eu
binary="$(dirname "$1")/$(basename "$1")"
if ! test -x "$binary"; then
  echo "ERROR: $binary is not an executable" >&2
  exit 1
fi
# All fixtures live in a temp dir that is removed on exit.
t=$(mktemp -d)
cleanup() {
  rm -rf -- "${t?}"
}
trap cleanup EXIT
# Populate directory with keys according to specification.
# The octal printf escapes write the raw 32-byte private keys "a" and
# "b" for each vector.
mkdir -p "$t/vector-1"
printf '\167\007\155\012\163\030\245\175\074\026\301\162\121\262\146\105\337'\
'\114\057\207\353\300\231\052\261\167\373\245\035\271\054\052' >"$t/vector-1/a"
printf '\135\253\010\176\142\112\212\113\171\341\177\213\203\200\016\346\157'\
'\073\261\051\046\030\266\375\034\057\213\047\377\210\340\353' >"$t/vector-1/b"
printf "The quick brown fox" >"$t/vector-1/msg"
printf "0" >"$t/vector-1/n"
printf "nEQ4n0YtNdBnL69zpeEY-Ln1w0C76NNA4rlHwgXqT6M=" >"$t/vector-1/tag"
mkdir -p "$t/vector-2"
printf '\261\005\360\015\261\005\360\015\261\005\360\015\261\005\360\015\261'\
'\005\360\015\261\005\360\015\261\005\360\015\261\005\360\015' >"$t/vector-2/a"
printf '\376\341\336\255\376\341\336\255\376\341\336\255\376\341\336\255\376'\
'\341\336\255\376\341\336\255\376\341\336\255\376\341\336\255' >"$t/vector-2/b"
printf "The quick brown fox" >"$t/vector-2/msg"
printf "100" >"$t/vector-2/n"
printf "BkdvHzFLBsf5bl3GKyMIJoy9thQK7-61WUBzGGMDInc=" >"$t/vector-2/tag"
errors=0
# For each vector: derive public keys, generate a tag from key "a" to
# peer "b", compare it to the expected tag, then verify it from the
# other side.
for n in 1 2; do
  testdir="$t/vector-$n"
  counter=$(cat "$testdir/n")
  expected_tag="$(cat "$testdir/tag")"
  for x in a b; do
    "$binary" pubkey <"$testdir/$x" >"$testdir/$x.pub"
  done
  tag=$("$binary" tag --key "$testdir/a" --peer "$testdir/b.pub" --counter "$counter" <"$testdir/msg")
  if [ "$tag" != "${expected_tag}" ]; then
    echo "Generated wrong tag for test vector $n" >&2
    echo "${expected_tag} <- expected" >&2
    echo "$tag <- actual" >&2
    errors=$((errors + 1))
  fi
  if ! "$binary" verify -k "$testdir/b" -p "$testdir/a.pub" -c "$counter" -t "$tag" <"$testdir/msg"; then
    echo "Failed to verify test vector $n" >&2
    errors=$((errors + 1))
  fi
done
# Test login subcommand according to specification.
key="$t/vector-1/b"
path="v1/AYUg8AmJMKdUdIt93LQ-91oNvzoNJjga9OukqY6qm05q0PU=/my-server.local/shell/root/"
expected_tag="lyHuaHuCcknb5sJEukWSFs8B1SUBIWMCXfNY64fIkFk="
tag=$("$binary" login --key "$key" "$path")
if [ "$tag" != "$expected_tag" ]; then
  echo "Generated wrong tag for test path $path" >&2
  echo "$expected_tag <- expected" >&2
  echo "$tag <- actual" >&2
  errors=$((errors + 1))
fi
key="$t/vector-2/a"
path="v1/UYcvQ1u4uJ0OOtYqouURB07hleHDnvaogAFBi-ZW48N2/serial-number:1234567890=ABCDFGH%2F%23%3F/reboot/"
expected_tag="p8M_BUKj7zXBVM2JlQhNYFxs4J-DzxRAps83ZaNDquY="
tag=$("$binary" login --key "$key" "$path")
if [ "$tag" != "$expected_tag" ]; then
  echo "Generated wrong tag for test path $path" >&2
  echo "$expected_tag <- expected" >&2
  echo "$tag <- actual" >&2
  errors=$((errors + 1))
fi
exit "$errors"
<file_sep>FROM docker.io/library/debian:bullseye AS build
WORKDIR /app
COPY . .
RUN kokoro/rodete/fetch_dependencies.sh
RUN rm -rf build \
&& meson build \
&& meson compile -C build \
&& meson test --print-errorlogs -C build \
&& meson install -C build
FROM docker.io/library/debian:bullseye
COPY --from=build /usr/local /usr/local
COPY kokoro/docker/glome-start /usr/local/sbin
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
openssh-server \
socat \
&& rm -rf /var/lib/apt/lists/*
CMD ["/usr/local/sbin/glome-start"]
EXPOSE 22 23
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package login
import (
"encoding/base64"
"fmt"
"net/url"
"regexp"
"strconv"
"strings"
"github.com/google/glome/go/glome"
)
const (
// Minimal acceptable length of a handshake. 1 byte for the Prefix, 32 bytes for the key.
minHandshakeLen = 1 + glome.PublicKeySize
)
var (
validURLPrefix = regexp.MustCompile(`(?P<v>v[1-9][0-9]*)/(?P<handshake>[\w=-]+)(?:/(?P<message>.+))?/`)
)
var (
// ErrInvalidHandshakeLen denotes that the handshake is too short.
ErrInvalidHandshakeLen = fmt.Errorf("handshake length is too small: should be at least %d", minHandshakeLen)
// ErrInvalidPrefixType denotes that the prefix-type is invalid.
ErrInvalidPrefixType = fmt.Errorf("invalid prefix type: should be a 0")
// ErrIncorrectTag denotes that received tag is incorrect.
ErrIncorrectTag = fmt.Errorf("invalid tag")
// ErrResponseNotInitialized denotes that the response is not initialized.
ErrResponseNotInitialized = fmt.Errorf("response is not initialized")
)
// ErrInvalidURLFormat denotes that the URL has a wrong format.
type ErrInvalidURLFormat struct {
URL string
}
// ErrServerKeyNotFound denotes that there is no private server key associated with a Prefix.
type ErrServerKeyNotFound struct {
Prefix byte
}
// ErrVersionNotSupported denotes that the V of glome-login URL format is not supported.
type ErrVersionNotSupported struct {
V int
}
// Error describes the malformed URL together with the accepted pattern.
func (err *ErrInvalidURLFormat) Error() string {
	pattern := validURLPrefix.String()
	return fmt.Sprintf("URL %v doesn't satisfy the format %s.", err.URL, pattern)
}
// Error reports the key prefix that had no matching server key.
func (err *ErrServerKeyNotFound) Error() string {
	p := err.Prefix
	return fmt.Sprintf("Server key not found for prefix %d.", p)
}
// Error reports the unsupported protocol version.
func (err *ErrVersionNotSupported) Error() string {
	v := err.V
	return fmt.Sprintf("Version not supported: %d.", v)
}
// Message represents the context required for authorization.
type Message struct {
HostIDType string // type of identity
HostID string // identity of the target (e.g. hostname, serial number, etc.)
Action string // action that is being authorized
}
// Construct returns a message from a Message according to the format:
// [<hostid-type>:]<hostid>[/<action>]. URL escaping is optional.
func (m *Message) Construct(esc bool) []byte {
	hostIDType := m.HostIDType
	hostID := m.HostID
	if esc {
		hostIDType = escape(hostIDType)
		hostID = escape(hostID)
	}

	var b strings.Builder
	if hostIDType != "" {
		b.WriteString(hostIDType)
		b.WriteByte(':')
	}
	b.WriteString(hostID)
	if m.Action != "" {
		b.WriteByte('/')
		b.WriteString(m.Action)
	}
	return []byte(b.String())
}
// escape escapes s so it can be safely placed inside a URL path segment:
// "/#?" and other unsafe characters are percent-encoded, while the
// "!*'();:@&=+$,[]" special characters are restored to their literal
// form after PathEscape.
func escape(s string) string {
	res := url.PathEscape(s)
	for _, c := range "!*'();:@&=+$,[]" {
		st := string(c)
		// Bug fix: strings.Replace returns the new string; previously the
		// result was discarded, so these characters stayed percent-encoded.
		res = strings.Replace(res, url.PathEscape(st), st, -1)
	}
	return res
}
// Handshake struct represents the context required for constructing the handshake.
type Handshake struct {
Prefix byte // either service key id or its last 7 bits of the first byte
UserKey glome.PublicKey // user's public ephemeral key
MessageTagPrefix []byte // Prefix of a tag calculated under Message
}
// URLResponse represents the context required for the construction of the URL.
type URLResponse struct {
V byte // URL format V (currently always 1)
HandshakeInfo Handshake // handshake info including Prefix, user's public key and message tag Prefix
Msg Message // message info including host and action
d *glome.Dialog // glome.Dialog for the tag managing
}
// NewResponse returns a new URLResponse corresponding to the given arguments.
// It runs the key exchange between userKey and serviceKey (via
// TruncatedExchange), builds the Message from hostIDType/hostID/action,
// and assembles the Handshake with a 7-bit key prefix, the user's public
// key and a tagLen-byte message-tag prefix.
func NewResponse(serviceKeyID uint8, serviceKey glome.PublicKey, userKey glome.PrivateKey,
	V byte, hostIDType string, hostID string, action string, tagLen uint) (*URLResponse, error) {
	var prefix byte
	var r URLResponse
	r.V = V
	d, err := userKey.TruncatedExchange(&serviceKey, 1)
	if err != nil {
		return nil, err
	}
	r.d = d
	r.Msg = Message{hostIDType, hostID, action}
	if serviceKeyID == 0 {
		// If no key ID was specified, send the first key byte as the ID.
		// TODO(#60): Fix this up once there is clarify on key Prefix usage.
		prefix = serviceKey[0] & 0x7f
	} else {
		// Only the low 7 bits of the key ID are transmitted.
		prefix = serviceKeyID & 0x7f
	}
	userPublic, err := userKey.Public()
	if err != nil {
		return nil, err
	}
	r.HandshakeInfo = Handshake{prefix, *userPublic, r.Tag(tagLen)}
	return &r, nil
}
// ValidateAuthCode reports whether the received tag matches the tag
// computed over the (unescaped) message.
func (r *URLResponse) ValidateAuthCode(tag []byte) bool {
	msg := r.Msg.Construct(false)
	return r.d.Check(tag, msg, 0)
}
// Tag returns the first n bytes of the tag computed over Msg with the
// dialog's sending key. (Parameter renamed from `len`, which shadowed
// the builtin.)
func (r *URLResponse) Tag(n uint) []byte {
	full := r.d.Tag(r.Msg.Construct(false), 0)
	return full[:n]
}
// EncToken returns a base64-encoded response token.
func (r *URLResponse) EncToken() string {
	tag := r.Tag(glome.MaxTagSize) // TODO: passing the tag len as param?
	return base64.URLEncoding.EncodeToString(tag)
}
// Client implements the client-side of the glome-login protocol. Should be constructed under NewClient constructor.
type Client struct {
ServerKey glome.PublicKey // server's public key
UserKey glome.PrivateKey // user's private key
ServerKeyID uint8 // server's key id
TagLen uint // length of a tag to be sent to the server. Should be in [0..glome.MaxTagLength] range.
response *URLResponse // URL challenge
}
// NewClient is a Client constructor. It sets ServerKey, UserKey,
// ServerKeyID and TagLen to the given values and leaves the challenge
// response unset.
func NewClient(sk glome.PublicKey, uk glome.PrivateKey, sID uint8, tagLen uint) *Client {
	return &Client{
		ServerKey:   sk,
		UserKey:     uk,
		ServerKeyID: sID,
		TagLen:      tagLen,
		response:    nil,
	}
}
// Construct returns a request to the server according to the format:
// /v<V>/<glome-handshake>[/<message>]/.
func (c *Client) Construct(V byte, hostIDType string, hostID string, action string) (string, error) {
	resp, err := NewResponse(c.ServerKeyID, c.ServerKey, c.UserKey, V, hostIDType, hostID, action, c.TagLen)
	if err != nil {
		return "", err
	}
	c.response = resp

	u := fmt.Sprintf("v%d/%s/", resp.V, c.constructHandshake())
	if msg := resp.Msg.Construct(true); len(msg) > 0 {
		u += fmt.Sprintf("%s/", msg)
	}
	return u, nil
}
// constructHandshake returns the base64url-encoded handshake, built as:
//
//	glome-handshake := base64url(
//	  <prefix-type>
//	  <prefix7>
//	  <eph-key>
//	  [<prefixN>]
//	).
func (c *Client) constructHandshake() string {
	h := c.response.HandshakeInfo
	buf := make([]byte, 0, 1+len(h.UserKey)+len(h.MessageTagPrefix))
	buf = append(buf, h.Prefix)
	buf = append(buf, h.UserKey[:]...)
	buf = append(buf, h.MessageTagPrefix...)
	return base64.URLEncoding.EncodeToString(buf)
}
// ValidateAuthCode reports whether the received tag matches the tag for
// the current challenge. Returns ErrResponseNotInitialized when no
// challenge has been constructed yet.
func (c *Client) ValidateAuthCode(tag string) (bool, error) {
	decoded, err := base64.URLEncoding.DecodeString(completeBase64S(tag))
	if err != nil {
		return false, err
	}
	// Decode-before-nil-check order is preserved: a malformed tag is
	// reported as a base64 error even without an initialized response.
	if c.response == nil {
		return false, ErrResponseNotInitialized
	}
	return c.response.ValidateAuthCode(decoded), nil
}
// completeBase64S restores '=' padding on a base64url string whose padding
// was stripped, so it decodes cleanly. A length of 1 (mod 4) can never result
// from stripping padding, so the dangling final character is dropped.
func completeBase64S(s string) string {
	rem := len(s) % 4
	if rem == 0 {
		return s
	}
	if rem == 1 {
		// Not producible by truncating padding; discard the stray character.
		return s[:len(s)-1]
	}
	// rem is 2 or 3: add the missing padding characters.
	return s + strings.Repeat("=", 4-rem)
}
// Response is a getter for Client.response, the URLResponse cached by the
// last successful Construct call (nil before any challenge was built).
func (c *Client) Response() *URLResponse {
	return c.response
}
// Server implements the server-side of the glome-login protocol.
type Server struct {
	// Fetch the server's private key given a version ID. Caller is responsible
	// for not modifying the returned private key. If the key is authoritatively
	// found to not exist for a given version it is expected that (nil, nil) is
	// returned.
	KeyFetcher func(uint8) (*glome.PrivateKey, error)
}
// ParseURLResponse parses the url, checks whether it is formed correctly and validates the client's tag, received from the URL.
// Returns ErrInvalidURLFormat if the URL is malformed, ErrServerKeyNotFound if there is no key corresponding to prefix,
// ErrIncorrectTag if the client's tag is invalid.
func (s *Server) ParseURLResponse(url string) (*URLResponse, error) {
	response := URLResponse{}
	// parsed[0] is the full match; parsed[1..3] are version, handshake, message.
	parsed := validURLPrefix.FindStringSubmatch(url)
	if parsed == nil {
		return nil, &ErrInvalidURLFormat{url}
	}
	version, err := parseVersion(parsed[1])
	if err != nil {
		return nil, err
	}
	response.V = version
	handshake, err := parseHandshake(parsed[2])
	if err != nil {
		return nil, err
	}
	response.HandshakeInfo = *handshake
	sPrivKey, err := s.KeyFetcher(handshake.Prefix)
	if err != nil {
		return nil, err
	}
	if sPrivKey == nil {
		return nil, &ErrServerKeyNotFound{handshake.Prefix}
	}
	// Derive the shared secret from the server private key and the client's
	// ephemeral public key.
	response.d, err = sPrivKey.TruncatedExchange(&handshake.UserKey, 1)
	if err != nil {
		return nil, err
	}
	if len(parsed) > 3 {
		message, err := parseMsg(parsed[3])
		if err != nil {
			return nil, err
		}
		response.Msg = *message
	}
	// Without a message tag prefix there is nothing to verify.
	if len(handshake.MessageTagPrefix) == 0 {
		return &response, nil
	}
	// Fixed the non-idiomatic `!= true` comparison.
	if !response.ValidateAuthCode(handshake.MessageTagPrefix) {
		return nil, ErrIncorrectTag
	}
	return &response, nil
}
// parseVersion parses the URL format version (e.g. "v1"). Returns
// ErrVersionNotSupported if the version is not supported.
func parseVersion(v string) (byte, error) {
	n, err := strconv.Atoi(v[1:]) // strip the leading 'v'
	if err != nil {
		return 0, err
	}
	// Only format version 1 is currently implemented.
	if n != 1 {
		return 0, &ErrVersionNotSupported{n}
	}
	return byte(n), nil
}
// parseHandshake parses the handshake segment of the URL.
// The handshake should satisfy the following format:
//
//	glome-handshake := base64url(
//	  <prefix-type>
//	  <prefix7>
//	  <eph-key>
//	  [<prefixN>]
//	).
//
// Returns ErrInvalidHandshakeLen if the handshake is shorter than minHandshakeLen,
// ErrInvalidPrefixType if prefix-type is different from 0,
// glome.ErrInvalidTagSize if the tag prefix is longer than glome.MaxTagSize.
func parseHandshake(handshake string) (*Handshake, error) {
	raw, err := base64.URLEncoding.DecodeString(handshake)
	if err != nil {
		return nil, err
	}
	if len(raw) < minHandshakeLen {
		return nil, ErrInvalidHandshakeLen
	}
	// The high bit of the first byte is the prefix-type; only type 0 exists.
	prefix := raw[0]
	if prefix&0x80 != 0 {
		return nil, ErrInvalidPrefixType
	}
	userKey, err := glome.PublicKeyFromSlice(raw[1:minHandshakeLen])
	if err != nil {
		return nil, err
	}
	tagPrefix := raw[minHandshakeLen:]
	if len(tagPrefix) > glome.MaxTagSize {
		return nil, glome.ErrInvalidTagSize
	}
	return &Handshake{prefix, *userKey, tagPrefix}, nil
}
// parseMsg parses the message segment of the URL.
// The message should satisfy the following format: [<hostid-type>:]<hostid>[/<action>].
func parseMsg(hostAndAction string) (*Message, error) {
	var hostIDType, hostID, action string

	parts := strings.SplitN(hostAndAction, "/", 2)
	host, err := url.QueryUnescape(parts[0])
	if err != nil {
		return nil, err
	}
	// An optional "<hostid-type>:" prefix precedes the host id.
	if i := strings.Index(host, ":"); i >= 0 {
		hostIDType, hostID = host[:i], host[i+1:]
	} else {
		hostID = host
	}
	// Everything after the first '/' is the action, if present.
	if len(parts) == 2 {
		action = parts[1]
	}
	return &Message{hostIDType, hostID, action}, nil
}
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdlib.h>
#include <unistd.h>
#include "config.h"
#include "login.h"
#include "ui.h"
/* Reports a non-NULL error tag to the user and pauses briefly, so the
 * message survives a console clear that may follow login exiting. */
static void handle_error(const char* error_tag) {
  if (error_tag) {
    errorf("\nError: %s\n", error_tag);
  }
  fflush(NULL);
  sleep(2);
}
// Entry point for glome-login. Arguments are parsed twice on purpose:
// first to learn the config-file path, then (after loading the config)
// again so that command-line flags override file-based settings.
int main(int argc, char* argv[]) {
  glome_login_config_t config = {0};
  // Parse arguments to initialize the config path.
  int r = parse_args(&config, argc, argv);
  if (r > 0) {
    return EXITCODE_USAGE;
  }
  if (r < 0) {
    handle_error("parse-args");
    return EXITCODE_PANIC;
  }
  // Reset config while preserving the config path.
  const char* config_path = config.config_path;
  default_config(&config);
  config.config_path = config_path;
  // Read configuration file.
  status_t status = glome_login_parse_config_file(&config);
  if (status != STATUS_OK) {
    handle_error(status);
    return EXITCODE_PANIC;
  }
  // Parse arguments again to override config values.
  r = parse_args(&config, argc, argv);
  if (r > 0) {
    return EXITCODE_USAGE;
  }
  if (r < 0) {
    handle_error("parse-args");
    return EXITCODE_PANIC;
  }
  // Run the actual login flow; error_tag identifies the failing step.
  const char* error_tag = NULL;
  int rc = login_run(&config, &error_tag);
  if (rc) {
    handle_error(error_tag);
  }
  return rc;
}
<file_sep># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module includes test vectors from the protocol reference.
"""
class TestVector:
    """Bundles the key material, message and expected outputs for one test.

    Models a scenario in which user A sends a message to user B.

    Attributes:
        kap: A's private key (bytes).
        ka: A's public key (bytes).
        kbp: B's private key (bytes).
        kb: B's public key (bytes).
        counter: number of messages already shared.
        msg: message to share (ASCII bytes).
        sk: shared secret between A and B (bytes).
        tag: tag that matches ka, kb, counter and msg (bytes).
    """

    def __init__(self, kap: str, ka: str, kbp: str, kb: str, counter: int,
                 msg: str, sk: str, tag: str):
        """Decodes the hex-encoded fields and stores everything as bytes."""
        self.counter = counter
        self.msg = msg.encode(encoding="ascii")
        self.kap, self.ka, self.kbp, self.kb, self.sk, self.tag = (
            bytes.fromhex(h) for h in (kap, ka, kbp, kb, sk, tag))
# Test vectors from the GLOME protocol reference.
# NOTE(review): the '<KEY>' placeholders look like redacted hex key material —
# bytes.fromhex() would raise on them as-is; restore the original hex strings
# before running these vectors.
TEST1 = TestVector(
    kap='7<KEY>',
    ka='8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a',
    kbp='5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb',
    kb='de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f',
    counter=0,
    msg='The quick brown fox',
    sk='4a5d9d5ba4ce2de1728e3bf480350f25e07e21c947d19e3376f09b3c1e161742',
    tag='9c44389f462d35d0672faf73a5e118f8b9f5c340bbe8d340e2b947c205ea4fa3')
TEST2 = TestVector(
    kap='b105f00db105f00db105f00db105f00db105f00db105f00db105f00db105f00d',
    ka='d1b6941bba120bcd131f335da15778d9c68dadd398ae61cf8e7d94484ee65647',
    kbp='fee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1dead',
    kb='872f435bb8b89d0e3ad62aa2e511074ee195e1c39ef6a88001418be656e3c376',
    counter=100,
    msg='The quick brown fox',
    sk='<KEY>',
    tag='<KEY>')
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ui.h"
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/* Prints a one-line usage message, using only the basename of argv0. */
static void usage(const char* argv0) {
  const char* slash = strrchr(argv0, '/');
  errorf("Usage: %s [OPTIONS] [--] USERNAME\n", slash ? slash + 1 : argv0);
}
/* Fixed size of a heap-allocated status message buffer. */
#define STATUS_SIZE 256
/* Static fallback returned when the status buffer cannot be allocated;
 * status_free() must never free it. */
static char* status_malloc_failed = "ERROR: failed to allocate status buffer";
/* Formats a status message into a freshly allocated STATUS_SIZE buffer.
 * On allocation failure a static fallback string is returned, so callers
 * always receive a printable status. Free the result with status_free(). */
status_t status_createf(const char* format, ...) {
  char* msg = malloc(STATUS_SIZE);
  if (!msg) {
    return status_malloc_failed;
  }
  va_list args;
  va_start(args, format);
  int n = vsnprintf(msg, STATUS_SIZE, format, args);
  va_end(args);
  /* Replace the message if formatting failed or was truncated. */
  if (n < 0 || n >= STATUS_SIZE) {
    snprintf(msg, STATUS_SIZE, "ERROR: status message too big: %d", n);
  }
  return msg;
}
/* Releases a status created by status_createf(); the static allocation-failure
 * fallback is deliberately never freed. */
void status_free(status_t status) {
  if (status == status_malloc_failed) return;
  free(status);
}
/* Decodes a hex string (optionally "0x"-prefixed) into dst, which must hold
 * exactly dst_len bytes. Returns 0 on success, -1 on a length mismatch and
 * -2 on a malformed hex digit; errors are also reported via errorf(). */
int decode_hex(uint8_t* dst, size_t dst_len, const char* in) {
  size_t n = strlen(in);
  /* Skip an optional "0x" prefix. */
  if (n > 2 && in[0] == '0' && in[1] == 'x') {
    in += 2;
    n -= 2;
  }
  if (n != dst_len * 2) {
    errorf(
        "ERROR: hex-encoded key must have exactly %zu characters (got %zu)\n",
        dst_len * 2, n);
    return -1;
  }
  for (size_t i = 0; i < dst_len; i++) {
    if (sscanf(in + 2 * i, "%02hhX", &dst[i]) != 1) {
      errorf("ERROR while parsing byte %zu ('%c%c') as hex\n", i, in[2 * i],
             in[2 * i + 1]);
      return -2;
    }
  }
  return 0;
}
/* Help text printed after usage(); the two %d placeholders are filled in by
 * the caller with DEFAULT_AUTH_DELAY and DEFAULT_INPUT_TIMEOUT. */
static const char flags_help[] =
    "Available flags:"
    "\n  -h, --help                this help"
    "\n  -c, --config-path=PATH    configuration file to parse "
    "(default: " DEFAULT_CONFIG_FILE
    ")"
    "\n  -a, --min-authcode-len=N  minimum length of the encoded authcode"
    "\n  -d, --auth-delay=N        sleep N seconds before the authcode check "
    "(default: %d)"
    "\n  -k, --key=KEY             use hex-encoded KEY as the service key "
    "(default: key from configuration file)"
    "\n  -l, --login-path=PATH     use PATH instead of " DEFAULT_LOGIN_PATH
    "\n  -m, --host-id-type=TYPE   use TYPE as the host-id type"
    "\n  -p, --prompt=PROMPT       print PROMPT before the challenge is "
    "printed (default: '" DEFAULT_PROMPT
    "')"
    "\n  -s, --disable-syslog      suppress syslog logging (default: false)"
    "\n  -t, --timeout=N           abort if the authcode has not been provided "
    "within N seconds"
    "\n                            no timeout if the flag is 0 (default: %d)"
    "\n  -v, --verbose             print debug information"
    "\nUnsafe flags:"
    "\n  -I, --print-secrets       print all the secrets (INSECURE!)"
    "\n  -K, --ephemeral-key=KEY   use KEY as the hex-encoded ephemeral secret "
    "key (INSECURE!)"
    "\n  -M, --host-id=NAME        use NAME as the host-id"
    "\n";
/* Option tables for getopt_long(); short_options and long_options must be
 * kept in sync with each other, with the switch in parse_args() and with
 * flags_help above.
 * NOTE(review): "u:" appears in short_options but has no long option and no
 * handler in parse_args() (it falls through to the PANIC default) — verify
 * whether it is vestigial. */
static const char* short_options = "ha:c:d:k:l:m:p:st:u:vIK:M:";
static const struct option long_options[] = {
    {"help", no_argument, 0, 'h'},
    {"min-authcode-len", required_argument, 0, 'a'},
    {"config-path", required_argument, 0, 'c'},
    {"auth-delay", required_argument, 0, 'd'},
    {"key", required_argument, 0, 'k'},
    {"login-path", required_argument, 0, 'l'},
    {"disable-syslog", no_argument, 0, 's'},
    {"timeout", required_argument, 0, 't'},
    {"prompt", required_argument, 0, 'p'},
    {"verbose", no_argument, 0, 'v'},
    {"print-secrets", no_argument, 0, 'I'},
    {"ephemeral-key", required_argument, 0, 'K'},
    {"host-id", required_argument, 0, 'M'},
    {"host-id-type", required_argument, 0, 'm'},
    {0, 0, 0, 0},
};
/* Resets *config to all-zeroes, then installs the documented defaults.
 * Note: this clears config_path too; callers that want to keep it must save
 * and restore it around this call. */
void default_config(glome_login_config_t* config) {
  memset(config, 0, sizeof(*config));
  config->login_path = DEFAULT_LOGIN_PATH;
  config->prompt = DEFAULT_PROMPT;
  config->auth_delay_sec = DEFAULT_AUTH_DELAY;
  config->input_timeout_sec = DEFAULT_INPUT_TIMEOUT;
  config->options = SYSLOG;
}
/* Parses command-line flags into *config. Returns 0 on success, 2 after
 * printing help, 1 on user errors (after printing usage) and -1 on internal
 * errors. Safe to call more than once (optind is reset). */
int parse_args(glome_login_config_t* config, int argc, char* argv[]) {
  /* Maps a short option character to the (section, key) pair understood by
   * glome_login_assign_config_option(). Must cover every option in
   * short_options except 'h'. */
  static const struct {
    char opt;
    const char* section;
    const char* key;
  } option_map[] = {
      {'a', "default", "min-authcode-len"},
      {'c', "default", "config-path"},
      {'d', "default", "auth-delay"},
      {'k', "service", "key"},
      {'l', "service", "login-path"},
      {'m', "default", "host-id-type"},
      {'p', "service", "prompt"},
      {'s', "default", "disable-syslog"},
      {'t', "default", "timeout"},
      {'v', "default", "verbose"},
      {'I', "default", "print-secrets"},
      {'K', "default", "ephemeral-key"},
      {'M', "default", "host-id"},
  };
  const size_t option_map_len = sizeof(option_map) / sizeof(option_map[0]);

  int errors = 0;
  /* Reset current position to allow parsing arguments multiple times. */
  optind = 1;

  int c;
  while ((c = getopt_long(argc, argv, short_options, long_options, NULL)) !=
         -1) {
    if (c == 'h' || c == '?') {
      usage(argv[0]);
      errorf(flags_help, DEFAULT_AUTH_DELAY, DEFAULT_INPUT_TIMEOUT);
      return 2;
    }
    status_t status = NULL;
    size_t i;
    for (i = 0; i < option_map_len; i++) {
      if (option_map[i].opt == c) {
        status = glome_login_assign_config_option(
            config, option_map[i].section, option_map[i].key, optarg);
        break;
      }
    }
    if (i == option_map_len) {
      return -1;  /* PANIC: getopt returned an option we never mapped. */
    }
    if (status != STATUS_OK) {
      errorf("%s\n", status);
      status_free(status);
      errors++;
    }
  }

  if (optind >= argc) {
    errorf("ERROR: no username specified\n");
    errors++;
  }
  if (optind < argc - 1) {
    errorf("ERROR: only one username is allowed (got %d)\n", argc - optind);
    errors++;
  }
  if (errors > 0) {
    usage(argv[0]);
    return 1;
  }
  config->username = argv[optind];
  return 0;
}
<file_sep># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python GLOME fuzz main.
This module that implement some easy fuzz testing for pyglome.
"""
import hypothesis
import unittest
from cryptography.hazmat.primitives.asymmetric import x25519
import pyglome
def _glome(private_bytes, public_bytes, msg, tag, counter, min_tag_len):
    """Exercises the basic pyglome.Glome API, tolerating documented exceptions.

    Meant to be driven by a fuzzer: any exception other than the documented
    ones escaping this function indicates a bug in pyglome."""
    try:
        priv = x25519.X25519PrivateKey.from_private_bytes(private_bytes)
        peer = x25519.X25519PublicKey.from_public_bytes(public_bytes)
    except (ValueError, pyglome.ExchangeError):
        return
    try:
        manager = pyglome.Glome(peer, priv, min_tag_len)
    except ValueError:
        return
    # Touch the properties to surface any side effects they may have.
    manager.user_keys
    manager.peer_key
    try:
        manager.tag(msg, counter)
    except pyglome.TagGenerationError:
        pass
    try:
        manager.check(tag, msg, counter)
    except (pyglome.IncorrectTagError, pyglome.TagCheckError):
        pass
def _autoglome(private_bytes, public_bytes, msg, tag, counter, min_tag_len,
               skippable_range):
    """Exercises the pyglome.AutoGlome API, tolerating documented exceptions.

    Meant to be driven by a fuzzer: any exception other than the documented
    ones escaping this function indicates a bug in pyglome."""
    try:
        priv = x25519.X25519PrivateKey.from_private_bytes(private_bytes)
        peer = x25519.X25519PublicKey.from_public_bytes(public_bytes)
    except (ValueError, pyglome.ExchangeError):
        return
    try:
        manager = pyglome.AutoGlome(peer,
                                    priv,
                                    min_peer_tag_len=min_tag_len,
                                    skippable_range=skippable_range)
    except ValueError:
        return
    # Touch the properties to surface any side effects they may have.
    manager.user_keys
    manager.peer_key
    manager.sending_counter
    manager.receiving_counter
    # Both counters share identical setter semantics; try each in turn.
    for attr in ('sending_counter', 'receiving_counter'):
        try:
            setattr(manager, attr, counter)
        except ValueError:
            pass
    try:
        manager.tag(msg)
    except pyglome.TagGenerationError:
        pass
    try:
        manager.check(tag, msg)
    except (pyglome.IncorrectTagError, pyglome.TagCheckError):
        pass
@hypothesis.settings(max_examples=10**7)
@hypothesis.given(
    hypothesis.strategies.binary(min_size=32, max_size=32), #private_bytes
    hypothesis.strategies.binary(min_size=32, max_size=32), #public_bytes
    hypothesis.strategies.binary(), #msg
    hypothesis.strategies.binary(min_size=32, max_size=32), #tag
    hypothesis.strategies.integers(), #counter
    hypothesis.strategies.integers()) #min_tag_len
def glome_test(private_bytes, public_bytes, msg, tag, counter, min_tag_len):
    """Fuzzes _glome with correctly sized (32-byte) key material."""
    _glome(private_bytes, public_bytes, msg, tag, counter, min_tag_len)
@hypothesis.settings(max_examples=10**7)
@hypothesis.given(
    hypothesis.strategies.binary(min_size=32, max_size=32), #private_bytes
    hypothesis.strategies.binary(min_size=32, max_size=32), #public_bytes
    hypothesis.strategies.binary(), #msg
    hypothesis.strategies.binary(min_size=32, max_size=32), #tag
    hypothesis.strategies.integers(), #counter
    hypothesis.strategies.integers(), #min_tag_len
    hypothesis.strategies.integers()) #skippable
def autoglome_test(private_bytes, public_bytes, msg, tag, counter, min_tag_len,
                   skippable):
    """Fuzzes _autoglome with correctly sized (32-byte) key material."""
    _autoglome(private_bytes, public_bytes, msg, tag, counter, min_tag_len,
               skippable)
@hypothesis.settings(max_examples=10**5)
@hypothesis.given(
    hypothesis.strategies.binary(), #private_bytes
    hypothesis.strategies.binary(), #public_bytes
    hypothesis.strategies.binary(), #msg
    hypothesis.strategies.binary(min_size=32, max_size=32), #tag
    hypothesis.strategies.integers(), #counter
    hypothesis.strategies.integers()) #min_tag_len
def glome_unsized_keys_test(private_bytes, public_bytes, msg, tag, counter,
                            min_tag_len):
    """Fuzzes _glome with arbitrarily sized (possibly invalid) key bytes."""
    _glome(private_bytes, public_bytes, msg, tag, counter, min_tag_len)
@hypothesis.settings(max_examples=10**5)
@hypothesis.given(
    hypothesis.strategies.binary(), #private_bytes
    hypothesis.strategies.binary(), #public_bytes
    hypothesis.strategies.binary(), #msg
    hypothesis.strategies.binary(min_size=32, max_size=32), #tag
    hypothesis.strategies.integers(), #counter
    hypothesis.strategies.integers(), #min_tag_len
    hypothesis.strategies.integers()) #skippable
def autoglome_unsized_keys_test(private_bytes, public_bytes, msg, tag, counter,
                                min_tag_len, skippable):
    """Fuzzes _autoglome with arbitrarily sized (possibly invalid) key bytes."""
    _autoglome(private_bytes, public_bytes, msg, tag, counter, min_tag_len,
               skippable)
class GlomeTest1(unittest.TestCase):
    """Smoke-tests one iteration of each fuzz driver.

    Feeds a fixed, trivially valid input to the fuzzing helpers and fails if
    any unexpected exception escapes them.
    """

    def __init__(self, *args, **kwargs):
        # Modernized from super(__class__, self) to the zero-argument form.
        super().__init__(*args, **kwargs)
        constant_one = b'\x11' * 32  # 32 bytes: valid X25519 key/tag length
        self.private_bytes = constant_one
        self.public_bytes = constant_one
        self.msg = b'\x11'
        self.tag = constant_one
        self.counter = 1
        self.min_tag_len = 1
        self.skippable = 1

    def test_glome_fuzz(self):
        """Test glome fuzzing function with trivial example"""
        try:
            _glome(self.private_bytes, self.public_bytes, self.msg, self.tag,
                   self.counter, self.min_tag_len)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; anything else is a genuine failure.
        except Exception:
            self.fail('Unexpected exception was raised.')

    def test_autoglome_fuzz(self):
        """Test autoglome fuzzing function with trivial example"""
        try:
            _autoglome(self.private_bytes, self.public_bytes, self.msg,
                       self.tag, self.counter, self.min_tag_len, self.skippable)
        except Exception:
            self.fail('Unexpected exception was raised.')
if __name__ == "__main__":
    # Run every fuzz driver; hypothesis generates the arguments via @given.
    glome_test()
    autoglome_test()
    glome_unsized_keys_test()
    autoglome_unsized_keys_test()
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"fmt"
"sync"
"github.com/google/glome/go/glome"
)
// ErrInvalidKeyIndex denotes that provided index is invalid for a key. Only
// indexes in {0,...,127} are valid.
type ErrInvalidKeyIndex struct {
	Index uint8 // the offending index
}

// Error implements the error interface.
func (e ErrInvalidKeyIndex) Error() string {
	return fmt.Sprintf("key index should be in {0,...,127}, found: %v", e.Index)
}

// ErrDuplicatedKeyIndex denotes that provided index is already in use, so no
// new keys can be assigned to it.
type ErrDuplicatedKeyIndex struct {
	Index uint8 // the offending index
}

// Error implements the error interface.
func (e ErrDuplicatedKeyIndex) Error() string {
	return fmt.Sprintf("key index already in use, found: %v", e.Index)
}
// A PrivateKey represents a private key for the login server: a private key
// value paired with its key index.
type PrivateKey struct {
	Value glome.PrivateKey
	Index uint8
}

// A PublicKey represents a service key for the login server: a public key
// value paired with its key index.
type PublicKey struct {
	Value glome.PublicKey
	Index uint8
}
// KeyManager performs key management tasks in a concurrent-safe way. It allows
// constant-time lookup of keys by index. KeyManager is constructed with the
// NewKeyManager function.
type KeyManager struct {
	indexToPriv map[uint8]glome.PrivateKey // private keys by 7-bit index
	publicKeys  []PublicKey                // derived public keys, kept in sync with indexToPriv
	lock        sync.RWMutex               // guards both fields above
}
// locklessAdd inserts key under index without taking the lock; callers must
// hold k.lock for writing. It keeps indexToPriv and publicKeys in sync.
func (k *KeyManager) locklessAdd(key glome.PrivateKey, index uint8) error {
	// Valid indexes occupy only 7 bits.
	if index > 127 {
		return ErrInvalidKeyIndex{Index: index}
	}
	if _, exists := k.indexToPriv[index]; exists {
		return ErrDuplicatedKeyIndex{Index: index}
	}
	pub, err := key.Public()
	if err != nil {
		return err
	}
	k.indexToPriv[index] = key
	k.publicKeys = append(k.publicKeys, PublicKey{Value: *pub, Index: index})
	return nil
}
// Add adds provided key and index to the key manager. Returns
// ErrInvalidKeyIndex if the index is not in {0,...,127} and
// ErrDuplicatedKeyIndex if the index is already in use.
func (k *KeyManager) Add(key glome.PrivateKey, index uint8) error {
	k.lock.Lock()
	defer k.lock.Unlock()
	return k.locklessAdd(key, index)
}
// Read returns the PrivateKey stored in the KeyManager for an index, or a
// zero-value PrivateKey if no PrivateKey is present. The ok result indicates
// whether the value was found in the KeyManager.
func (k *KeyManager) Read(index uint8) (glome.PrivateKey, bool) {
	k.lock.RLock()
	defer k.lock.RUnlock()
	key, ok := k.indexToPriv[index]
	return key, ok
}
// DropAllReplace drops all stored keys and replaces them with the new ones provided.
// This operation is done in an atomic way (no other call to the struct will be
// handled while DropAllReplace is).
func (k *KeyManager) DropAllReplace(keys []PrivateKey) error {
	k.lock.Lock()
	defer k.lock.Unlock()

	k.indexToPriv = make(map[uint8]glome.PrivateKey)
	// Fix: also reset the public-key slice. locklessAdd appends to it, so
	// without this reset, public keys from before the drop would survive
	// the replacement and ServiceKeys would report stale entries.
	k.publicKeys = make([]PublicKey, 0)
	for _, key := range keys {
		if err := k.locklessAdd(key.Value, key.Index); err != nil {
			return err
		}
	}
	return nil
}
// ServiceKeys returns a copy of the slice of public keys currently in use by
// the KeyManager.
func (k *KeyManager) ServiceKeys() []PublicKey {
	// Fix: take the read lock. publicKeys is mutated by Add and
	// DropAllReplace under k.lock; reading it unlocked was a data race.
	k.lock.RLock()
	defer k.lock.RUnlock()
	serviceKey := make([]PublicKey, len(k.publicKeys))
	copy(serviceKey, k.publicKeys)
	return serviceKey
}
// keyFetcher returns a function implementing key fetching. The returned
// function yields ErrInvalidKeyIndex if the index is not in {0,...,127},
// and (nil, nil) if the provided index does not match any stored key.
func (k *KeyManager) keyFetcher() func(uint8) (*glome.PrivateKey, error) {
	return func(index uint8) (*glome.PrivateKey, error) {
		if index > 127 {
			return nil, ErrInvalidKeyIndex{Index: index}
		}
		if key, found := k.Read(index); found {
			return &key, nil
		}
		return nil, nil
	}
}
// NewKeyManager returns a new, empty key manager ready for use.
func NewKeyManager() *KeyManager {
	return &KeyManager{
		indexToPriv: make(map[uint8]glome.PrivateKey),
		publicKeys:  make([]PublicKey, 0),
	}
}
<file_sep>package v2
import (
"encoding/base64"
"errors"
"fmt"
"strings"
"github.com/google/glome/go/glome"
)
// Path prefix identifying version 2 of the challenge format.
const versionPrefix = "v2/"

// Responder can parse challenges and create responses.
//
// Instances of Responder must be created with NewResponder().
type Responder struct {
	keysByIndex  map[uint8]*glome.PrivateKey // lookup by explicit 7-bit key index
	keysByPrefix map[byte]*glome.PrivateKey  // lookup by last byte of the derived public key
}
// NewResponder creates a Responder that uses the given private keys to respond
// to challenges.
func NewResponder(keys map[uint8]*glome.PrivateKey) (*Responder, error) {
	r := &Responder{
		keysByIndex:  make(map[uint8]*glome.PrivateKey),
		keysByPrefix: make(map[byte]*glome.PrivateKey),
	}
	for index, priv := range keys {
		// Indexes are transmitted in 7 bits.
		if index >= 1<<7 {
			return nil, fmt.Errorf("key index %d is not in range [0; 127]", index)
		}
		pub, err := priv.Public()
		if err != nil {
			return nil, fmt.Errorf("invalid private key at index %d: %w", index, err)
		}
		r.keysByIndex[index] = priv
		// We _could_ validate that prefixes are unique here, but we choose not to.
		r.keysByPrefix[pub[glome.PublicKeySize-1]] = priv
	}
	return r, nil
}
// ServerChallenge contains the parsed Message from a challenge and an
// appropriate response.
//
// The Response must only be used after verifying the message content!
//
// Instances of ServerChallenge should be created by Responder.Accept().
type ServerChallenge struct {
	Message  *Message // decoded message carried by the challenge
	Response string   // base64url-encoded tag over the encoded message
}
// Accept an encoded challenge and produce a response.
//
// The challenge is expected to look like [/]v2/<handshake>/<message>[/];
// the handshake selects one of the Responder's keys (by prefix byte or by
// index) and, when it carries a message tag prefix, that prefix is verified
// before a response is produced.
func (r *Responder) Accept(encodedChallenge string) (*ServerChallenge, error) {
	s := strings.TrimPrefix(encodedChallenge, "/")
	if len(s) < len(versionPrefix) {
		return nil, errors.New("challenge format error: too short")
	}
	if s[:len(versionPrefix)] != versionPrefix {
		return nil, fmt.Errorf("challenge version incompatible: expected %q, got %q", versionPrefix, s[:len(versionPrefix)])
	}
	s = strings.TrimPrefix(s, versionPrefix)
	s = strings.TrimSuffix(s, "/")
	// Exactly two segments remain: the handshake and the (still encoded) message.
	subs := strings.SplitN(s, "/", 2)
	if len(subs) != 2 {
		return nil, errors.New("challenge format error: wrong number of path segments")
	}
	h, err := decodeHandshake(subs[0])
	if err != nil {
		return nil, err
	}
	// The tag is computed over the encoded (not decoded) message bytes.
	encodedMessage := []byte(subs[1])
	m, err := decodeMessage(subs[1])
	if err != nil {
		return nil, err
	}
	// Prefix-based lookup takes precedence over index-based lookup.
	var key *glome.PrivateKey
	ok := false
	if h.Prefix != nil {
		key, ok = r.keysByPrefix[*h.Prefix]
	} else {
		key, ok = r.keysByIndex[h.Index]
	}
	if !ok {
		return nil, &keyNotFoundError{h}
	}
	d, err := key.TruncatedExchange(h.PublicKey, 1)
	if err != nil {
		return nil, err
	}
	// An empty tag prefix means the client sent nothing to verify.
	if len(h.MessageTagPrefix) > 0 && !d.Check(h.MessageTagPrefix, encodedMessage, 0) {
		return nil, ErrTagPrefixMismatch
	}
	tag := d.Tag(encodedMessage, 0)
	return &ServerChallenge{
		Message:  m,
		Response: base64.URLEncoding.EncodeToString(tag),
	}, nil
}
// keyNotFoundError reports that the handshake selected a key (by prefix or by
// index) that this Responder does not hold.
type keyNotFoundError struct {
	h *handshake
}

// Error describes the failed lookup in terms of the selector that was used.
func (e *keyNotFoundError) Error() string {
	if e.h.Prefix == nil {
		return fmt.Sprintf("no key found with index %d", e.h.Index)
	}
	return fmt.Sprintf("no key found with prefix 0x%02x", *e.h.Prefix)
}
// ErrTagPrefixMismatch is returned when a tag prefix is included in the
// challenge, but it does not verify with the chosen key. This means that the
// public key chosen based on handshake information is not the one the client
// expected.
var ErrTagPrefixMismatch = errors.New("message tag prefix did not match")
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GLOME_H_
#define GLOME_H_
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
/* X25519 key and HMAC tag sizes used throughout the GLOME API. */
#define GLOME_MAX_PUBLIC_KEY_LENGTH 32
#define GLOME_MAX_PRIVATE_KEY_LENGTH 32
#define GLOME_MAX_TAG_LENGTH 32
#ifdef __cplusplus
extern "C" {
#endif
// Generates a new public/private key pair for use with GLOME.
// NOTE(review): return-value conventions (0 on success?) are not visible in
// this header — confirm against the implementation.
int glome_generate_key(uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH],
                       uint8_t public_key[GLOME_MAX_PUBLIC_KEY_LENGTH]);
// Derives the public key from the private key.
int glome_derive_key(const uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH],
                     uint8_t public_key[GLOME_MAX_PUBLIC_KEY_LENGTH]);
// Generates or verifies the GLOME tag for the message. Requires passing in the
// private key of the local peer and the public key of the remote peer.
int glome_tag(bool verify, unsigned char counter,
              const uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH],
              const uint8_t peer_key[GLOME_MAX_PUBLIC_KEY_LENGTH],
              const uint8_t *message, size_t message_len,
              uint8_t tag[GLOME_MAX_TAG_LENGTH]);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // GLOME_H_
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Needs to go last to get size_t definition. */
#include <libpamtest.h>
/* Auth codes fed to the PAM conversation, one per test case below; the
 * entries in `tests` must line up with this array's order. */
const char *authtoks[] = {
    "lyHuaHuCck",  /* Correct code */
    "lyHuaHuCc",   /* Too short */
    "INVALIDCODE", /* Wrong code */
    /* fake passwords that might be provided by openssh-portable/auth-pam.c */
    "\b\n\r\177", "\b\n\r\177INCORRECT", "\b\n\r\177INCORRECT\b\n\r\177",
    NULL /* Terminator */
};
struct pamtest_conv_data conv_data = {
    .in_echo_off = authtoks,
};
/* Expected PAM result for each authtoks entry: only the first succeeds. */
struct pam_testcase tests[] = {
    pam_test(PAMTEST_AUTHENTICATE, PAM_SUCCESS),
    pam_test(PAMTEST_AUTHENTICATE, PAM_AUTH_ERR),
    pam_test(PAMTEST_AUTHENTICATE, PAM_AUTH_ERR),
    pam_test(PAMTEST_AUTHENTICATE, PAM_AUTH_ERR),
    pam_test(PAMTEST_AUTHENTICATE, PAM_AUTH_ERR),
    pam_test(PAMTEST_AUTHENTICATE, PAM_AUTH_ERR),
};
/* Setup GLOME using only PAM parameters. Writes a one-line PAM service file
 * into PAM_WRAPPER_RUNTIME_DIR and runs the testcases against it.
 * Returns 0 on success, 1 on any setup or pamtest failure. */
int test_service() {
  int len;
  enum pamtest_err perr;
  char *runtime_dir, *pam_glome, *service_file;
  char *service = "test";
  char *username = "root";
  FILE *f;
  pam_glome = getenv("PAM_GLOME");
  if (pam_glome == NULL) {
    puts("PAM_GLOME not found");
    return 1;
  }
  runtime_dir = getenv("PAM_WRAPPER_RUNTIME_DIR");
  if (runtime_dir == NULL) {
    puts("PAM_WRAPPER_RUNTIME_DIR not found");
    return 1;
  }
  /* "<runtime_dir>/<service>" plus separator and NUL. */
  len = strlen(runtime_dir) + 1 + strlen(service) + 1;
  service_file = calloc(len, 1);
  if (service_file == NULL) {
    puts("calloc service_file failed");
    return 1;
  }
  snprintf(service_file, len, "%s/%s", runtime_dir, service);
  f = fopen(service_file, "w");
  if (f == NULL) {
    printf("fopen service_file '%s' failed: %s\n", service_file,
           strerror(errno));
    free(service_file); /* fix: the path buffer was leaked on this path */
    return 1;
  }
  free(service_file);
  fprintf(f,
          "auth required %s prompt=https://test.service/ "
          "key="
          "<KEY> "
          "key_version=1 "
          "ephemeral_key="
          "<KEY> "
          "host_id=my-server.local",
          pam_glome);
  fclose(f);
#if defined(OLDSTYLE_RUN_PAMTEST)
  perr = run_pamtest(service, username, &conv_data, tests);
#else
  perr = run_pamtest(service, username, &conv_data, tests, NULL);
#endif
  if (perr != PAMTEST_ERR_OK) {
    puts(pamtest_strerror(perr));
    return 1;
  }
  return 0;
}
/* Setup GLOME using config file and PAM parameters. */
int test_config() {
int len;
enum pamtest_err perr;
char *runtime_dir, *pam_glome, *service_file, *config_file;
char *service = "test";
char *config = "config";
char *username = "root";
FILE *f;
pam_glome = getenv("PAM_GLOME");
if (pam_glome == NULL) {
puts("PAM_GLOME not found");
return 1;
}
runtime_dir = getenv("PAM_WRAPPER_RUNTIME_DIR");
if (runtime_dir == NULL) {
puts("PAM_WRAPPER_RUNTIME_DIR not found");
return 1;
}
len = strlen(runtime_dir) + 1 + strlen(config) + 1;
config_file = calloc(len, 1);
if (config_file == NULL) {
puts("calloc config_file failed");
return 1;
}
snprintf(config_file, len, "%s/%s", runtime_dir, config);
f = fopen(config_file, "w");
if (f == NULL) {
printf("fopen config_file '%s' failed: %s\n", config_file, strerror(errno));
return 1;
}
fprintf(f,
"[service]\n"
"key = "
"<KEY>"
"key-version = 1\n"
"prompt = https://test.service/\n");
fclose(f);
len = strlen(runtime_dir) + 1 + strlen(service) + 1;
service_file = calloc(len, 1);
if (service_file == NULL) {
puts("calloc service_file failed");
return 1;
}
snprintf(service_file, len, "%s/%s", runtime_dir, service);
f = fopen(service_file, "w");
if (f == NULL) {
printf("fopen service_file '%s' failed: %s\n", service_file,
strerror(errno));
return 1;
}
free(service_file);
fprintf(f,
"auth required %s config_path=%s "
"ephemeral-key="
"<KEY> "
"host-id=my-server.local",
pam_glome, config_file);
fclose(f);
free(config_file);
#if defined(OLDSTYLE_RUN_PAMTEST)
perr = run_pamtest(service, username, &conv_data, tests);
#else
perr = run_pamtest(service, username, &conv_data, tests, NULL);
#endif
if (perr != PAMTEST_ERR_OK) {
puts(pamtest_strerror(perr));
return 1;
}
return 0;
}
int main() {
int rc;
rc = test_service();
rc = rc || test_config();
return rc;
}
<file_sep># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python GLOME library.
This module contains the Glome class and generate_keys function.
Example use: Sender
>>> import pyglome
>>> tag_manager = pyglome.Glome(peer_key)
>>> first_tag = tag_manager.tag(first_msg, 0) # 0 as it is the first msg
>>> second_tag = tag_manager.tag(second_msg, 1)
Example use: Receiver
>>> import pyglome
>>> tag_manager = pyglome.Glome(peer_key, my_private_key)
>>> ## Need to have a private key (paired to the public key
>>> ## that the sender use)
>>> try:
... tag_manager.check(tag, msg, counter=0):
>>> except pyglome.IncorrectTagError as wte:
... ## Handle the exception
>>> ## do what you have to do
"""
import os
import hashlib
import hmac
from typing import NamedTuple
from cryptography.hazmat.primitives.asymmetric import x25519
from cryptography.hazmat.primitives import serialization
class KeyPair(NamedTuple):
"""
NamedTuple-Class that stores a private/public key pair.
Attributes:
- private: A private key.
- public: A public key paired with the private one.
"""
private: x25519.X25519PrivateKey
public: x25519.X25519PublicKey
class Error(Exception):
"""Error super-class for any error that is thrown in PyGLOME."""
class TagCheckError(Error):
"""Raised whenever a tag is not correct or the method failed to check it."""
class IncorrectTagError(Error):
"""Raised whenever the tag provided does not match the message and counter."""
class TagGenerationError(Error):
"""Raised whenever a tag could not be generated."""
class ExchangeError(Error):
"""Raised whenever the x25519 key exchange fails."""
def _public_key_encode(public_key: x25519.X25519PublicKey):
return public_key.public_bytes(serialization.Encoding.Raw,
serialization.PublicFormat.Raw)
def _tag(msg: bytes, counter: int, key: bytes) -> bytes:
if not 0 <= counter <= 255:
raise ValueError(f'tag counter (={counter}) must be within [0, 255]')
message = bytes([counter]) + msg # msg: N_x|M_n
digester = hmac.new(key=key, msg=message, digestmod=hashlib.sha256)
return digester.digest()
class Glome:
"""Implement tag managing functionalities for GLOME protocol.
This class is initialized by providing your peer's public key and
optionally your private key. If a private key is not provided, one is
automatically generated making use of `generate_keys`. Provides methods
tag (to generate new tags) and check (to check receiving tags).
"""
MAX_TAG_LEN = 32 # 32 is maximum tag length
MIN_TAG_LEN = 1
def __init__(self,
peer_key: x25519.X25519PublicKey,
my_private_key: x25519.X25519PrivateKey = None,
min_peer_tag_len: int = MAX_TAG_LEN):
"""Initialize Glome class.
Performs the handshake and generates keys.
Args:
peer_key: Your peer's public key.
my_private_key: Your private key.
min_peer_tag_len: Desired length (in bytes) for the tag.
Must be an integer in range 1-32.
Raises:
ValueError: Raised whenever min_peer_tag_len is not in range 1-32.
ExchangeError: Raised whenever null shared secret is derived from
user/peer key pair.
"""
if my_private_key is None:
my_private_key, my_public_key = generate_keys()
else:
my_public_key = my_private_key.public_key()
if not Glome.MIN_TAG_LEN <= min_peer_tag_len <= Glome.MAX_TAG_LEN:
raise ValueError(
f'min_peer_tag_len (={min_peer_tag_len}) is not within '
f'[{Glome.MIN_TAG_LEN}, {Glome.MAX_TAG_LEN}]')
try:
shared_secret = my_private_key.exchange(peer_key)
except ValueError as value_error:
raise ExchangeError(
'Failed to deduce shared secret') from value_error
self._send_key = shared_secret + _public_key_encode(
peer_key) + _public_key_encode(my_public_key)
self._receive_key = shared_secret + _public_key_encode(
my_public_key) + _public_key_encode(peer_key)
self._peer_key = peer_key
self._my_keys = KeyPair(my_private_key, my_public_key)
self._min_peer_tag_len = min_peer_tag_len
@property
def user_keys(self) -> KeyPair:
"""User's private and public keys used in handshake."""
return self._my_keys
@property
def peer_key(self) -> x25519.X25519PublicKey:
"""Peer's public key used in handshake."""
return self._peer_key
def tag(self, msg: bytes, counter: int) -> bytes:
"""Generates a tag from a message and a counter.
Generates a tag matching some provided message and counter.
This tag is generated following GLOME protocol specification
in the context of a communication from the users to theirs peers.
Args:
msg: Message to be transmitted.
counter: Numbers of messages transmitted previously in the
conversation in this direction (i.e. from the user
to the peer). Must be an integer in {0,...,255}.
Returns:
tag: Tag matching counter and msg.
Raises:
TagGenerationError: Raised whenever the method failed to generate tag
due to ValueError in the arguments.
"""
try:
return _tag(msg, counter, self._send_key)
except ValueError as value_error:
raise TagGenerationError('Failed to generate tag') from value_error
def check(self, tag: bytes, msg: bytes, counter: int):
"""Check whether a tag is correct for some message and counter.
Checks if a tag matches some provided message and counter.
The method generates the matching tag following GLOME protocol
specification in the context of a communication from the users'
peers to the users and then is compared with the tag provided.
Args:
tag: Object with the generated tag.
msg: Object containing received message.
counter: Numbers of messages transmitted previously in the
conversation in this direction (i.e. from the peer
to the user).
Returns:
None.
Raises:
TagCheckError: Raised whenever the method fails to check the tag
due to a ValueError in the arguments.
IncorrectTagError: Raised whenever the tag is incorrect.
"""
prefix_length = max(len(tag), self._min_peer_tag_len)
prefix_length = min(prefix_length, Glome.MAX_TAG_LEN)
try:
correct_tag = _tag(msg, counter, self._receive_key)[:prefix_length]
except ValueError as value_error:
raise TagCheckError('Failed to check the tag') from value_error
if not hmac.compare_digest(tag, correct_tag):
raise IncorrectTagError('Tag provided does not match correct tag')
def generate_keys() -> KeyPair:
"""Generates a private/public key pair.
Provides a random key pair based output of os.urandom. The format
matches the one requested by Glome Class.
Args:
None
Returns:
A KeyPair, containing a random private key and the public key derived
from the generated private key
"""
private = x25519.X25519PrivateKey.from_private_bytes(
os.urandom(Glome.MAX_TAG_LEN))
return KeyPair(private, private.public_key())
class AutoGlome:
"""Adds counter managing functionalities for GLOME protocol.
This class is initialized by providing your peer's public key and
optionally your private key. If a private key is not provided, one is
automatically generated making use of `generate_keys`. On initialization,
two counter (sending and receiving) are created and set to 0. Provides
methods tag (to generate new tags) and check (to check receiving tags).
"""
def __init__(self,
peer_key: x25519.X25519PublicKey,
my_private_key: x25519.X25519PrivateKey = None,
*,
min_peer_tag_len: int = Glome.MAX_TAG_LEN,
skippable_range: int = 0):
"""Initialize AutoGlome class.
Performs the handshake, generates keys and counters.
Args:
peer_key: Your peer's public key.
my_private_key: Your private key.
min_peer_tag_len: Desired length (in bytes) for the tag.
Must be an integer in range 1-32. keyword only.
skippable_range: Number of messages that can be missed. keyword only.
Must be non-negative. For more information please go to check method's
documentation.
Raises:
ValueError: Raised whenever min_peer_tag_len is not in range 1-32 or
skippable_length is a negative integer.
ExchangeError: Raised whenever null shared secret is derived from
user/peer key pair.
"""
if skippable_range < 0:
raise ValueError(
f'skippable_range (={skippable_range}) must be non-negative')
self.glome = Glome(peer_key,
my_private_key,
min_peer_tag_len=min_peer_tag_len)
self._sending_counter = 0
self._receiving_counter = 0
self.skippable_range = skippable_range
@property
def sending_counter(self) -> int:
"""Number of tags shared from the user to the peer.
It is incremented each time a new tag is generated. It is always
one byte long. When the counter gets past 255 it overflows at 0.
Setter raises ValueError if provided integer is not in range 0-255.
"""
return self._sending_counter
@sending_counter.setter
def sending_counter(self, value: int):
if not 0 <= value <= 255:
raise ValueError('Counter must be in range 0-255')
self._sending_counter = value
@property
def receiving_counter(self) -> int:
"""Number of tags the user receives from the peer.
It is always one byte long. When the counter gets past 255 it restarts at
0. Every time a message is successfully checked, the receiving_counter is
set to the next value after the last successful one. Note that if
skippable_range is n the counter might be increased by any amount in
range 1-n+1 after a successful check.
Setter raises ValueError if provided counter is not in range 0-255.
"""
return self._receiving_counter
@receiving_counter.setter
def receiving_counter(self, value: int):
if not 0 <= value <= 255:
raise ValueError('Counter must be in range 0-255')
self._receiving_counter = value
@property
def user_keys(self) -> KeyPair:
"""User's private and public keys used in handshake."""
return self.glome.user_keys
@property
def peer_key(self) -> x25519.X25519PublicKey:
"""Peer's public key used in handshake."""
return self.glome.peer_key
def tag(self, msg: bytes) -> bytes:
"""Generates a tag from a message.
Generates a tag matching some provided message and the internal
sending counter. This tag is generated following GLOME protocol
specification in the context of a communication from the users to
theirs peers.
Args:
msg: Message to be transmitted.
Returns:
tag: Tag matching counter and msg.
Raises:
TagGenerationError: Raised whenever the method failed to generate tag
due to ValueError in the arguments.
"""
tag = self.glome.tag(msg, self.sending_counter)
self._sending_counter = (self._sending_counter + 1) % 256
return tag
def check(self, tag: bytes, msg: bytes):
"""Check whether a tag is correct for some message.
Checks if a tag matches some provided message and internal receiving
counter. The method generates the matching tag following GLOME protocol
specification in the context of a communication from the users' peers to
the users and then is compared with the tag provided. If tag checking if
not successful, the receiving counter remains unchanged.
If skippable_range if greater than 0, the method try to check the tag
against all counters in range [receiving_counter, receiving_counter +
skippable_range], in order, until one is successful. If no one is successful,
an exceptions is raised and receiving counter remains unchanged.
Args:
tag: Object with the generated tag.
msg: Object containing received message.
Returns:
None.
Raises:
IncorrectTagError: Raised whenever the tag is incorrect.
"""
old_counter = self._receiving_counter
for _ in range(self.skippable_range + 1):
try:
self.glome.check(tag, msg, self.receiving_counter)
self._receiving_counter = (self._receiving_counter + 1) % 256
return None
except IncorrectTagError:
self._receiving_counter = (self._receiving_counter + 1) % 256
#If no counter matches.
self._receiving_counter = old_counter
raise IncorrectTagError('Tag provided does not match correct tag')
<file_sep>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "crypto.h"
#include <glome.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int is_zeroed(const uint8_t* buf, size_t len) {
int sum = 0;
while (len > 0) {
sum |= buf[--len];
}
return sum == 0;
}
int derive_or_generate_key(uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH],
uint8_t public_key[GLOME_MAX_PUBLIC_KEY_LENGTH]) {
if (is_zeroed(private_key, PRIVATE_KEY_LENGTH)) {
// New key pair needs to be generated...
return glome_generate_key(private_key, public_key);
} else {
// ... unless a non-zero private key is provided.
return glome_derive_key(private_key, public_key);
}
}
static int login_tag(bool verify, const char* host_id, const char* action,
const uint8_t peer_key[GLOME_MAX_PUBLIC_KEY_LENGTH],
const uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH],
uint8_t output[GLOME_MAX_TAG_LENGTH]) {
size_t message_len = strlen(host_id) + 1 + strlen(action) + 1;
char* message = calloc(message_len, 1);
if (message == NULL) {
return -1;
}
int ret = snprintf(message, message_len, "%s/%s", host_id, action);
if (ret < 0) {
free(message);
return -1;
}
if ((size_t)ret >= message_len) {
free(message);
return -1;
}
if (glome_tag(verify, 0, private_key, peer_key, (uint8_t*)message,
strlen(message), output) != 0) {
free(message);
return -1;
}
free(message);
return 0;
}
int get_authcode(const char* host_id, const char* action,
const uint8_t peer_key[GLOME_MAX_PUBLIC_KEY_LENGTH],
const uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH],
uint8_t authcode[GLOME_MAX_TAG_LENGTH]) {
return login_tag(true, host_id, action, peer_key, private_key, authcode);
}
int get_msg_tag(const char* host_id, const char* action,
const uint8_t peer_key[GLOME_MAX_PUBLIC_KEY_LENGTH],
const uint8_t private_key[GLOME_MAX_PRIVATE_KEY_LENGTH],
uint8_t tag[GLOME_MAX_TAG_LENGTH]) {
return login_tag(false, host_id, action, peer_key, private_key, tag);
}
<file_sep># GLOME-Go
**This is not an officially supported Google product.**
This repository contains a Golang implementation for the GLOME protocol. You can
find the library as well as the tests in the folder `glome`.
## Go API
### Note
This API is Alpha. Thus, it might be subject to changes in the future.
### Example
In order for Alice and Bob to communicate, the first step would be to generate some
new keys:
```go
import (
"glome"
"crypto/rand"
)
// Alice generates new random KeyPair
alicePub, alicePriv, err := glome.GenerateKeys(rand.Reader)
if err != nil { [...] }
// Bob generates Private Key from an existing byte array
b := [32]byte{0,2,...,7,6}
bobPriv := glome.PrivateKey(b)
// Bob could have as well generated the key from byte slice
s := b[:]
bobPriv, err := glome.PrivateKeyFromSlice(s)
if err != nil { [...] }
// Bob deduces public key
bobPub, err := bobPriv.Public()
if err != nil { [...] }
```
Suppose that Alice knows `bobPub` and wants to send Bob the message
`msg` and no other message have been shared before. Alice will need to generate
a `Dialog`:
```go
d, err := alicePriv.Exchange(bobPub)
if err != nil { [...] }
firstTag := d.Tag(msg, 0)
secondTag := d.Tag(msg, 1)
```
And Alice will send Bob both `msg`, `firstTag` as well as Alice's public key.
On Bob ends he will need to do the following:
```go
d, err := bobPriv.Exchange(alicePub)
if err != nil { [...] }
valid := d.Check(tag, msg, 0)
if !valid {
// Maybe someone is pretending to be Alice!
// Return an error.
}
// do what you have to do
```
### Documentation
For more information see the in-code documentation.
### Test
Tests module can be execute with `go test`.
<file_sep># GLOME CLI
This is a CLI utility to facilitate GLOME operations from the command line.
## Usage
Generating two key pairs:
```shell
$ glome genkey | tee Alice | glome pubkey >Alice.pub
$ glome genkey | tee Bob | glome pubkey >Bob.pub
```
Alice calculates a tag and send it together with message and counter to Bob:
```shell
$ tag=$(echo "Hello world!" | glome tag --key Alice --peer Bob.pub)
$ echo "${tag?}"
_QuyLz_nkj5exUJscocS8LDnCMszvSmp9wpQuRshi30=
```
Bob can verify that the tag matches:
```shell
$ echo "Hello world!" | glome verify --key Bob --peer Alice.pub --tag "${tag?}"
$ echo $?
0
```
Both parties can agree to shorten the tag to reduce the protocol overhead:
```shell
$ echo "Hello world!" | glome verify --key Bob --peer Alice.pub --tag "${tag:0:12}"
$ echo $?
0
```
CLI also supports ganerating tags for the GLOME Login requests:
```shell
$ glome login --key Bob v1/AYUg8AmJMKdUdIt93LQ-91oNvzoNJjga9OukqY6qm05q0PU=/my-server.local/shell/root/
MT_Zc-hucXRjTXTBEo53ehoeUsFn1oFyVadViXf-I4k=
```
<file_sep># PyGLOME
**This is not an officially supported Google product.**
This repository contains a Python implementation for the GLOME
protocol. You can find the library in the folder pyglome. The test
files can be found in the test folder.
## Python API
### Requirements
- Python >= 3.6
- pyca/cryptography >= 2.5
### Example
We provide a brief example of use. In order for Alice and Bob to communicate,
the first step would be to generate some new keys:
```python
import pyglome
alice_keys = pyglome.generate_keys()
bob_keys = pyglome.generate_keys()
```
Suppose that Alice knows Bob's `public_key` and wants to send Bob the message
`msg` and no other message have been shared before. Alice will need to:
```python
glome = pyglome.Glome(bob_keys.public, alice_keys.private)
first_tag = glome.tag(msg, counter=0)
```
And Alice will send Bob both `msg`, `first_tag` as well as Alice's public key.
On Bob's end he will need to do the following:
```python
glome = pyglome.Glome(alice_keys.public, bob_keys.private)
try:
glome.check(first_tag, msg, counter=0)
except pyglome.TagCheckError as tag_error:
## Handle the exception.
## do what you have to do
```
### Key generation.
Should you want to use a preexisting key, it should match the format
`X25519Private/PublicKey` provided in [pyca/cryptography](https://cryptography.io/en/latest/).
Such a key can be easily read from a bytes object as follows:
```python
from cryptography.hazmat.primitives.asymmetric import x25519
my_private_key = x25519.X25519PrivateKey.from_private_bytes(private_key_bytes)
my_public_key = x25519.X25519PublicKey.from_public_bytes(public_key_bytes)
```
We provide a key generation function `generate_keys` that uses these methods to
create a new key pair from `os.urandom` bytes.
### Documentation
For more information see the in-code documentation.
### Test
In the test folder we have scripts that implement test classes based on unittest. To run all unittest use:
```
python -m test
```
from this directory. If you only want to execute a particular test module, then run:
```
python -m test.my_module_name
```
where `my_module_name` is the name of the test module to be executed (the name of the file without the .py).
To run the fuzzing test use:
```
python -m test.fuzzing_test
```
from this directory.
| ec01f70f02a47608b06e409a1cd9c31c37cfb8c4 | [
"Markdown",
"Python",
"Text",
"Go Module",
"C",
"Go",
"Dockerfile",
"Shell"
] | 57 | Text | google/glome | 48d28f82bd51ae4bccc84fbbee93c375b026596b | 4a7b598dd8f8f6f98117a2733984812f02d1333b |
refs/heads/master | <file_sep>// 1. Copy and paste your prototype in here and refactor into class syntax.
class CubeMaker {
constructor (attributes) {
this.length = attributes.length;
this.width = attributes.width;
this.height = attributes.height;
}
volume() {
return this.length * this.width * this.height;
}
surfaceArea() {
return 2 * (this.length * this.width + this.length * this.height + this.width * this.height);
}
}
const cube = new CubeMaker ({
"length": 4,
"width": 5,
"height": 5,
});
// Test your volume and surfaceArea methods by uncommenting the logs below:
console.log('Classes Test 1:', cube.volume()); // 100
console.log('Classes Test 2:', cube.surfaceArea()); // 130
// Stretch Task: Extend the base class CuboidMaker with a sub class called CubeMaker. Find out the formulas for volume and surface area for cubes and create those methods using the dimension properties from CuboidMaker. Test your work by logging out your volume and surface area.
class CubeMaker2 extends CubeMaker {
constructor(childAttributes) {
super(childAttributes);
}
volume() {
return this.length * this.width * this.height;
}
surfaceArea() {
return 2 * (this.length * this.width + this.length * this.height + this.width * this.height);
}
}
const cube2 = new CubeMaker2 ({
"length": 42,
"width": 9,
"height": 21,
});
console.log('Strech Task 1:', cube2.volume());
console.log('Strech Task 2:', cube2.surfaceArea()); | 53d75e496aa4232caf19ce09755c8c25d82dd239 | [
"JavaScript"
] | 1 | JavaScript | Thomas-Tuttle/Sprint-Challenge--JavaScript | 91277e323f29739f390614facd749034562f9db4 | 3137701b206c91485aa32573c211d8ecf9c858ba |
refs/heads/master | <repo_name>agushendra29/skripsi-climbing-nando<file_sep>/src/router/index.js
import Vue from 'vue'
import Router from 'vue-router'
import Home from '@/components/Home'
import Sop from '@/components/Sop'
import Rute from '@/components/Rute'
import Register from '@/components/Register'
import Kodebooking from '@/components/Kodebooking'
import Registerpendaki from '@/components/Registerpendaki'
import Newsdetail from '@/components/Newsdetail'
Vue.use(Router)
export default new Router({
routes: [{
path: '/',
name: 'Home',
component: Home
},
{
path: '/sop',
name: 'Sop',
component: Sop
},
{
path: '/rute',
name: 'Rute',
component: Rute
},
{
path: '/register',
name: 'Register',
component: Register
},
{
path: '/kodebooking',
name: 'Kodebooking',
component: Kodebooking
},
{
path: '/registerpendaki',
name: 'Registerpendaki',
component: Registerpendaki
},
{
path: '/newsdetail',
name: 'Newsdetail',
component: Newsdetail
}
]
})<file_sep>/src/service/service.js
import axios from 'axios'
const uri = "http://localhost:9093/api/"
export default {
getBooking(dd, yy) {
return axios.get(`${uri}climbing-schedule/${dd}/${yy}`);
},
getScheduler(data) {
return axios.post(`${uri}booking/`, data)
},
getBayes(num) {
return axios.post(`${uri}bayes/run?bookingNumber=${num}`)
},
getPdf(num) {
return axios.get(`${uri}booking/${num}/pdf`)
},
getBookingcode(num) {
return axios.get(`${uri}booking/${num}`)
}
} | 8d5e6f7dc631149aeae4d9182cce414f09689f42 | [
"JavaScript"
] | 2 | JavaScript | agushendra29/skripsi-climbing-nando | 09b3d916181e21c169302922d1806e771bce9074 | 5bd613960e580d40b7db1e0d3d14e8da97dc5479 |
refs/heads/master | <file_sep>package com.example.root.noseapp;
public class AngularMath {
public static double Normalize(double ang)
{
double angle = ang;
while (angle < 0)
{
angle += 2 * Math.PI;
}
while (angle >= 2 * Math.PI)
{
angle -= 2 * Math.PI;
}
return angle;
}
//beaware averaging angles can be tricky than you thought
public static double AverageAngle(double[] ang)
{
double initialAngle = ang[0];
//Console.Error.WriteLine("initialangle = " + ang[0]);
double avgAng = ang[0];
for (int i = 1; i < ang.length; i++)
{
double angle = ang[i];
//Console.Error.WriteLine("angle = " + ang[i]);
if (angle - initialAngle > Math.PI)
{
angle = ang[i] - 2*Math.PI;
}
else if (angle - initialAngle <= -Math.PI)
{
angle = angle + 2*Math.PI;
}
//Console.Error.WriteLine("angle = " + angle);
avgAng += angle;
}
avgAng = avgAng / (double)ang.length;
//Console.Error.WriteLine("avgangle = " + avgAng);
return avgAng;
}
public static double QuantizePhase(double Quantum, double phase)
{
double ansFloor = Math.floor(phase / Quantum) * Quantum;
double ansCeil = Math.ceil(phase / Quantum) * Quantum;
if(Math.abs(ansCeil - phase) < Math.abs(ansFloor - phase))
{
phase = ansCeil;
}
else
{
phase = ansFloor;
}
phase = Normalize(phase);
return phase;
}
}
<file_sep>package com.example.root.noseapp;
import android.content.Context;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import android.util.Log;
public class AudioSpeaker extends Thread {
AudioTrack track1;
int SamplingFreq;
Context mycontext;
short[] samples;
int speakerType;
AudioManager man;
public AudioSpeaker(Context mycontext,short[] samples,int samplingFreq, int speakerType)
{
this.mycontext = mycontext;
man = (AudioManager)mycontext.getSystemService(Context.AUDIO_SERVICE);
SamplingFreq = samplingFreq;
this.samples = samples;
this.speakerType = speakerType;
track1 = new AudioTrack(speakerType,SamplingFreq,AudioFormat.CHANNEL_OUT_MONO,
AudioFormat.ENCODING_PCM_16BIT,samples.length*2,AudioTrack.MODE_STATIC);
track1.write(samples,0,samples.length);
}
// for S6 volume is .01, 1 unit of volume
// for S7 volume is .02, 1 unit of volume
// for pixel use .01 of volume, 1 unit of volume
// for s9 volume is .03, 1 unit of volume (ringtone)
public void play(double vol)
{
try
{
track1.setLoopPoints(0,samples.length,-1);
// setVolume(1);
track1.setVolume((float)vol);
// track1.setStereoVolume((float) .01, (float) .01);
track1.play();
}catch(Exception e)
{
}
}
public void run()
{
track1.setLoopPoints(0,samples.length,-1);
track1.play();
}
public void pause()
{
track1.pause();
}
}
| 93f12c65726eff22aa9ddcd8fa09441238c29159 | [
"Java"
] | 2 | Java | justinklchan/NoseApp | e7f49db16a7069d77c2f621aecfaf6b903165adf | 93c14ce12aa2b0dd77daf5e4e31fac656564865a |
refs/heads/master | <file_sep># manual_submissions_jatc
This is the mock-up form that accompanies the UX review of the JATC Manual Submission Functionality. Other Documents can be found here: https://drive.google.com/drive/folders/0B-8K-zhphHuNWmRkQUtSU2JudG8<file_sep>// Check for bad string while entering Student ID
function studentIdKeytest() {
  // Live keystroke feedback on the Student ID field.
  var value = document.getElementById('studentId').value;
  var errorEl = document.getElementById('studentIdError');
  if (/[a-z]/i.test(value)) {
    // Letters are never valid in a student ID: show the error immediately.
    errorEl.setAttribute('class', 'error');
    errorEl.innerHTML = " Can not use letters";
  } else if (/[0-9]/i.test(value)) {
    // A digit was typed and no letters are present: clear the error.
    errorEl.setAttribute('class', 'hidden');
  }
}
// Validate the Student ID when the field loses focus.
// A valid ID is '@' followed by exactly 8 digits (9 characters total).
// Fixes a gap in the original check: it only verified length == 9 and
// "contains no letters", so strings such as "123456789" or "@@@@@@@@@"
// used to pass even though they do not match the stated format.
function checkStudentId() {
  var studentId = document.getElementById('studentId').value;
  var errorEl = document.getElementById('studentIdError');
  if (/^@\d{8}$/.test(studentId)) {
    // Well-formed ID: clear any previous error.
    errorEl.setAttribute('class', 'hidden');
  } else if (/[a-z]/i.test(studentId)) {
    errorEl.setAttribute('class', 'error');
    errorEl.innerHTML = " Can not use letters";
  } else {
    errorEl.setAttribute('class', 'error');
    errorEl.innerHTML = " Enter '@' followed by 8 digits";
  }
}
// Require that a course has been picked from the dropdown.
function checkCourse() {
  var errorEl = document.getElementById('courseError');
  // The select's placeholder option carries the literal value 'Choose One'.
  var notChosen = document.getElementById('courseChoice').value === 'Choose One';
  if (notChosen) {
    errorEl.setAttribute('class', 'error');
    errorEl.innerHTML = " Choose a course";
  } else {
    errorEl.setAttribute('class', 'hidden');
  }
}
// Require that a submission type has been picked from the dropdown.
function checkSubmissionType() {
  var errorEl = document.getElementById('submissionError');
  // The select's placeholder option carries the literal value 'Choose One'.
  var notChosen = document.getElementById('submissionChoice').value === 'Choose One';
  if (notChosen) {
    errorEl.setAttribute('class', 'error');
    errorEl.innerHTML = " Choose a submission type";
  } else {
    errorEl.setAttribute('class', 'hidden');
  }
}
// Require that a lesson has been picked from the dropdown.
function checkLesson() {
  var errorEl = document.getElementById('lessonError');
  // The select's placeholder option carries the literal value 'Choose One'.
  var notChosen = document.getElementById('lessonChoice').value === 'Choose One';
  if (notChosen) {
    errorEl.setAttribute('class', 'error');
    errorEl.innerHTML = " Choose a lesson";
  } else {
    errorEl.setAttribute('class', 'hidden');
  }
}
// Live keystroke feedback on the Attempt ID field.
function attemptIdKeytest() {
  var value = document.getElementById('attemptId').value;
  var errorEl = document.getElementById('attemptIdError');
  if (/[a-z]/i.test(value)) {
    // Letters are never valid in an attempt ID: show the error immediately.
    errorEl.setAttribute('class', 'error');
    errorEl.innerHTML = " Can not use letters";
  } else if (/[0-9]/i.test(value)) {
    // A digit was typed and no letters are present: clear the error.
    errorEl.setAttribute('class', 'hidden');
  }
}
// Validate the Attempt ID when the field loses focus.
// A valid ID is exactly 10 digits.
// Fixes a gap in the original check: it only verified length == 10 and
// "contains no letters", so a string like "!!!!!!!!!!" used to pass even
// though it contains no digits at all.
function checkAttemptId() {
  var attemptId = document.getElementById('attemptId').value;
  var errorEl = document.getElementById('attemptIdError');
  if (/^\d{10}$/.test(attemptId)) {
    // Well-formed ID: clear any previous error.
    errorEl.setAttribute('class', 'hidden');
  } else if (/[a-z]/i.test(attemptId)) {
    errorEl.setAttribute('class', 'error');
    errorEl.innerHTML = " Can not use letters";
  } else {
    errorEl.setAttribute('class', 'error');
    errorEl.innerHTML = " Must be 10 digits";
  }
}
// Live keystroke feedback on the Percent Correct field.
function percentCorrectKeytest() {
  var value = document.getElementById('percentCorrect').value;
  var errorEl = document.getElementById('percentCorrectError');
  if (/[a-z]/i.test(value)) {
    // Letters are never valid in a percentage: show the error immediately.
    errorEl.setAttribute('class', 'error');
    errorEl.innerHTML = " Can not use letters";
  } else if (/[0-9]/i.test(value)) {
    // A digit was typed and no letters are present: clear the error.
    errorEl.setAttribute('class', 'hidden');
  }
}
// Validate Percent Correct when the field loses focus: must be 1-3 digits
// forming a value in 0..100.
// FIX: the original accepted negative numbers and non-letter symbols
// (e.g. "-5", "1.") because it only rejected letters, values > 100, the
// empty string, and strings longer than 3 chars; it also duplicated the
// same "valid percent" message across three branches.
function checkPercentCorrect() {
    var percentCorrect = document.getElementById('percentCorrect').value;
    var errorEl = document.getElementById('percentCorrectError');
    if (percentCorrect.match(/[a-z]/i)) {
        errorEl.setAttribute('class', 'error');
        errorEl.innerHTML = " Can not use letters";
    }
    // Digits only, 1-3 of them, and numerically at most 100.
    else if (!/^[0-9]{1,3}$/.test(percentCorrect) || parseInt(percentCorrect, 10) > 100) {
        errorEl.setAttribute('class', 'error');
        errorEl.innerHTML = " Enter a valid percent (0-100)";
    }
    else {
        errorEl.setAttribute('class', 'hidden');
    }
}
// Live validation while the first answer-response input is being typed:
// digits are flagged immediately; anything else clears the message.
// FIX: the original only hid the error when a letter a-e was present, so
// the "Can not use numbers" message went stale after the user deleted the
// offending digit (or when the field held only symbols/whitespace).
function answerResponseKeytest() {
    var answerInput = document.getElementsByClassName('answer-input');
    var errorEl = document.getElementById('answerInputError');
    if (answerInput[0].value.match(/[0-9]/i)) {
        errorEl.setAttribute('class', 'error');
        errorEl.innerHTML = " Can not use numbers";
    }
    else {
        errorEl.setAttribute('class', 'hidden');
    }
}
// On blur, compare how many answer characters were entered against how many
// the inputs expect (their `size` attribute), and report any shortfall.
// FIX/cleanup vs the original: the inner `tally(item)` took an unused
// parameter, two byte-identical reducer helpers (`addAnswers`/`addQuestions`)
// did the same addition, and the '#answerResponseBlocks input' collection was
// queried twice.  NOTE(review): the original read values from the
// '#answerResponseBlocks input' collection but sizes from the
// '.answer-input' class collection — this assumes they are the same
// elements in the same order; verify against the markup.
function checkAnswerResponse() {
    var answerBlocks = document.getElementById('answerResponseBlocks').getElementsByTagName('input');
    var errorEl = document.getElementById('answerInputError');
    var answered = 0;   // characters actually typed across all inputs
    var expected = 0;   // characters the inputs are sized to hold
    for (var i = 0; i < answerBlocks.length; i++) {
        answered += answerBlocks[i].value.length;
        expected += answerBlocks[i].size;
    }
    if (answered < expected) {
        errorEl.setAttribute('class', 'error');
        errorEl.innerHTML = " There are missing answers: " + answered + '/' + expected;
    }
    else {
        errorEl.setAttribute('class', 'hidden');
    }
}
// Live validation of the three date fields (month, day, year) while typing.
// NOTE: all three sections write to the single #submissionDateError element
// and run in order, so a later field that validates clean ("hidden") can
// overwrite an earlier field's error message — the year check wins last.
function dateKeyTest() {
var monthDate = document.getElementById('month').value;
var monthDateNumber = Number(document.getElementById('month').value);
var dayDate = document.getElementById('day').value;
var dayDateNumber = Number(document.getElementById('day').value);
var yearDate = document.getElementById('year').value;
// NOTE(review): yearDateNumber is computed but never used below.
var yearDateNumber = Number(document.getElementById('year').value);
// Checks month for errors
// Precedence: letters -> wrong length -> out of range; a final digit match
// clears the error (leaves it untouched for e.g. symbols-only input).
if (monthDate.match(/[a-z]/i)) {
document.getElementById('submissionDateError').setAttribute('class', 'error');
document.getElementById('submissionDateError').innerHTML = " Can not use letters"
}
else if (monthDate.length != 2) {
document.getElementById('submissionDateError').setAttribute('class', 'error');
document.getElementById('submissionDateError').innerHTML = " Use two digit month format: Jan = 01"
}
else if (monthDateNumber > 12) {
document.getElementById('submissionDateError').setAttribute('class', 'error');
document.getElementById('submissionDateError').innerHTML = " Not a month: 01-12"
}
else if (monthDate.match(/[0-9]/i)) {
document.getElementById('submissionDateError').setAttribute('class', 'hidden');
}
// Checks day for errors
// Same precedence as the month checks, with a 01-31 upper bound.
if (dayDate.match(/[a-z]/i)) {
document.getElementById('submissionDateError').setAttribute('class', 'error');
document.getElementById('submissionDateError').innerHTML = " Can not use letters"
}
else if (dayDate.length != 2) {
document.getElementById('submissionDateError').setAttribute('class', 'error');
document.getElementById('submissionDateError').innerHTML = " Use two digit day format: 1st = 01"
}
else if (dayDateNumber > 31) {
document.getElementById('submissionDateError').setAttribute('class', 'error');
document.getElementById('submissionDateError').innerHTML = " Not a day: 01-31"
}
else if (dayDate.match(/[0-9]/i)) {
document.getElementById('submissionDateError').setAttribute('class', 'hidden');
}
// Checks year for errors
// Year has no numeric range check, only a 4-digit length requirement.
if (yearDate.match(/[a-z]/i)) {
document.getElementById('submissionDateError').setAttribute('class', 'error');
document.getElementById('submissionDateError').innerHTML = " Can not use letters"
}
else if (yearDate.length != 4) {
document.getElementById('submissionDateError').setAttribute('class', 'error');
document.getElementById('submissionDateError').innerHTML = " Use four digit year format: 2016, 2020"
}
else if (yearDate.match(/[0-9]/i)) {
document.getElementById('submissionDateError').setAttribute('class', 'hidden');
}
}
// Validate the Student ID when the field loses focus: must be '@' followed
// by exactly 8 digits, per the error message.
// FIX: the original only checked total length (9) and the absence of
// letters, so values like "#12345678" or "@1234!678" passed validation
// even though the message promises "'@' followed by 8 digits".
function checkStudentId() {
    var studentId = document.getElementById('studentId').value;
    var errorEl = document.getElementById('studentIdError');
    if (studentId.match(/[a-z]/i)) {
        errorEl.setAttribute('class', 'error');
        errorEl.innerHTML = " Can not use letters";
    }
    else if (!/^@[0-9]{8}$/.test(studentId)) {
        errorEl.setAttribute('class', 'error');
        errorEl.innerHTML = " Enter '@' followed by 8 digits";
    }
    else {
        errorEl.setAttribute('class', 'hidden');
    }
}
"Markdown",
"JavaScript"
] | 2 | Markdown | chrisspring215/manual_submissions_jatc | 3f4a49d5e5d6121d5b576b70c2e87cb9b6b943ee | 5570d61e2cd30fea14058df75fc9119a265f8bc3 |
refs/heads/master | <file_sep>using System;
using System.Text;
namespace Mastermind
{
// Console implementation of the Mastermind guessing game: the player has
// NumOfAttempts tries to guess a CodeLength-digit secret code whose digits
// are drawn from 1..MaxNumber.  Feedback per attempt: one '+' for each digit
// in the correct position, one '-' for each correct digit in a wrong position.
class Program
{
// Number of digits in the secret code.
private const int CodeLength = 4;
// Digits run 1..MaxNumber.
private const int MaxNumber = 6;
// Guesses allowed per game.
private const int NumOfAttempts = 10;
static void Main(string[] args)
{
string intent = string.Empty;
// Outer loop: one full game per iteration; replay while the player enters "y".
do
{
string randomCodeString = "";
bool brokenCode = false;
Console.WriteLine("******Welcome to Mastermind*******");
// randomCode is indexed by digit value (1..MaxNumber); randomCode[d] encodes
// the 1-based position(s) where digit d occurs in the secret, with multiple
// positions concatenated as decimal digits (e.g. positions 2 and 4 -> 24).
// See GetRandomCode below.
int[] randomCode = new int[MaxNumber + 1];
randomCode = GetRandomCode(ref randomCodeString);
Console.WriteLine("You now have 10 attempts to break the code");
for (int i = 0; i < NumOfAttempts; i++)
{
// Per-attempt tallies of exact-position (+) and wrong-position (-) matches,
// plus per-digit trackers used below to cancel surplus '-' marks when the
// guess repeats a digit more often than the secret contains it.
int numOfPlus = 0;
int numOfMinus = 0;
int[] numberOfPlusTracker = new int[MaxNumber + 1];
int[] numberOfMinusTracker = new int[MaxNumber + 1];
Console.WriteLine($"Attempt {i+1}");
Console.WriteLine("==========");
Console.Write($"Please enter {CodeLength} digits between 1 and {MaxNumber} without space: ");
// Read the guess one keystroke at a time; j is the 1-based guess position.
for (int j = 1; j <= CodeLength; j++)
{
char input = Console.ReadKey().KeyChar;
int number;
bool validInput = Int32.TryParse(input.ToString(), out number);
// NOTE(review): '6' here hard-codes MaxNumber; also any keystrokes typed
// after an invalid one remain in the console buffer for the retried attempt.
if (!validInput || number < 1 || number > 6)
{
Console.WriteLine("Invalid Input. Please try again");
// Retry the same attempt number: undo the for-loop increment.
i--;
break;
}
// Exact match: the secret holds this digit at exactly position j.
if (randomCode[number] == j)
{
numOfPlus++;
numberOfPlusTracker[number]++;
}
// Encoded value > CodeLength means the digit occurs at multiple positions
// (positions are 1..CodeLength, so any multi-position encoding exceeds it);
// walk its decimal digits looking for position j.
else if (randomCode[number] > CodeLength)
{
int checkNumber = randomCode[number];
bool matchFound = false;
while (checkNumber != 0)
{
if (checkNumber % 10 == j)
{
numOfPlus++;
numberOfPlusTracker[number]++;
matchFound = true;
break;
}
checkNumber = checkNumber / 10;
}
if (!matchFound)
{
numOfMinus++;
numberOfMinusTracker[number]++;
}
}
// Digit exists in the secret but not at position j: wrong-position match.
else if (randomCode[number] > 0)
{
numOfMinus++;
numberOfMinusTracker[number]++;
}
}
Console.WriteLine();
// All positions matched: code broken, end this game.
if (numOfPlus == CodeLength)
{
Console.WriteLine("You successfully broke the code");
brokenCode = true;
break;
}
Console.Write("Result for this attempt: ");
// Cancel surplus '-' marks: if the guess referenced digit j more times than
// the secret actually contains it (num = count of its encoded positions,
// i.e. the decimal-digit count of randomCode[j]), subtract the excess.
for (int j = 1; j <= MaxNumber; j++)
{
int num = 0;
if(randomCode[j] > 0) num = Convert.ToInt32(Math.Floor(Math.Log10(randomCode[j]) + 1));
if (num > 0)
{
int surplusNegatives = numberOfPlusTracker[j] + numberOfMinusTracker[j] - num;
if(surplusNegatives > 0) numOfMinus -= surplusNegatives;
}
}
for (int j = 0; j < numOfPlus; j++)
{
Console.Write("+");
}
for (int j = 0; j < numOfMinus; j++)
{
Console.Write("-");
}
Console.WriteLine();
}
// Out of attempts without breaking the code: reveal it.
if (!brokenCode)
{
Console.WriteLine("Random Code is " + randomCodeString);
Console.WriteLine("Better luck next time.");
}
Console.WriteLine();
Console.Write("DO you want to play again? Please y to play again:");
intent = Console.ReadLine();
} while (intent == "y");
}
// Builds the secret code.  Returns an array indexed by digit value where
// entry d encodes the 1-based position(s) of digit d, concatenated as
// decimal digits; randomCodeString receives the plain digit string.
private static int[] GetRandomCode(ref string randomCodeString)
{
StringBuilder sb = new StringBuilder();
int[] randomCode = new int[MaxNumber + 1];
Random rnd = new Random();
for (int i = 0; i < CodeLength; i++)
{
// Next(min, max) upper bound is exclusive, hence MaxNumber + 1.
int randomDigit = rnd.Next(1, MaxNumber + 1);
sb.Append(randomDigit);
// Repeated digit: append this position to the existing encoding.
if(randomCode[randomDigit] != 0)
{
randomCode[randomDigit] = randomCode[randomDigit] * 10 + (i + 1);
}
else
{
randomCode[randomDigit] = i + 1;
}
}
randomCodeString = sb.ToString();
return randomCode;
}
}
}
| b5f644aa670869908a674c504724423e00848753 | [
"C#"
] | 1 | C# | RitReshav/Quadax | 69164926354dfdddf2a12c21ceefec1db66d6703 | 5a963ecde2d5e8d1e4574561b5b20094c87f25ef |
refs/heads/master | <file_sep># deepC compiler tests
## Use Model
```onnx2exe model.onnx```
1. *bundleDir :* dirname("generated exe, i.e. a.out or model.exe");
1. *parameter file(s) :* in bundleDir
1. *input file(s) :* with a path relative to current dir.
1. *output file(s) :* in current dir
## Example
```onnx2cpp mnist.onnx```
```console
> reading onnx model from file mnist.onnx
> Model info:
> ir_vesion : 4
> doc :
> INFO (ONNX): writing model parameter fc.bias to dir ..
> INFO (ONNX): writing model parameter fc.weight to dir ..
> INFO (ONNX): writing model parameter fc2.bias to dir ..
> INFO (ONNX): writing model parameter fc2.weight to dir ..
> running DNNC graph sanity check.
> Writing intermediate file ./mnist.cpp
> Compiling ...
> INFO (ONNX): model files are ready in dir .
```
<file_sep># Copyright 2018 The DNNC Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
#
import os, sys
import deepC.dnnc as dnnc
import onnx
import struct
class pbReader :
    """Reader class for DNNC models in ONNX binary/protobuf format.

    Translates an onnx.ModelProto into a deepC ``dnnc.Graph``: graph
    operator nodes (``addOPNode``), graph inputs/outputs
    (``createTermNode``) and initializers (``addParams``).  When
    ``_writeParamToDisk`` is True, initializer data is written to files in
    the bundle directory and the in-graph vectors are left empty.
    """

    def __init__(self):
        # Warn early if the caller forgot to import deepC.dnnc; nothing in
        # this class works without it.
        dnncModule = sys.modules.get('deepC.dnnc')
        if dnncModule is None:
            print("ERROR (DNNC): could not find dnnc module. Please make sure dnnc is imported before calling ", __name__)
        self._dcGraph = None            # dnnc.Graph produced by main()
        self._bundleDir = None          # destination dir for parameter files
        self._writeParamToDisk = True   # True: dump initializer data to disk

    def __del__(self):
        del self._dcGraph

    def writeParamsToFile(self, name, data):
        """Write one initializer's values to <bundleDir>/<name>, one per line."""
        str_data = '\n'.join([str(d) for d in data])
        if len(str_data) == 0:
            print("ERROR (ONNX): did not find data for initializer ", name)
            return
        paramFile = os.path.join(self._bundleDir, name)
        print("INFO (ONNX): writing model parameter " + name + " to dir " + self._bundleDir + ".")
        # 'with' closes the file even on error; the original's explicit
        # close() inside the with-block was redundant.
        with open(paramFile, "w") as fp:
            fp.write(str_data)

    def addParams(self, param):
        """Convert one ONNX initializer (TensorProto) into a dnnc parameter.

        Typed repeated fields are preferred; when empty, values are unpacked
        from ``raw_data`` with the dtype's struct format.  Returns the created
        ``dnnc.dnnParameters``, or None on error.
        """
        if param is None:
            return None

        if len(param.FindInitializationErrors()) > 0:
            print("WARNING (ONNX): initializer " + param.name + " has following errors.\n")
            print("  ", param.FindInitializationErrors())
            print("  trying to load data with errors.\n")

        # One row per supported TensorProto dtype:
        #   (struct format char, dnnc IR type, typed repeated field,
        #    python cast, dnnc vector constructor)
        # This table replaces 14 near-identical if/elif branches.
        dtype_specs = {
            param.INT8:     ('b', dnnc.IR_DataType_INT8,     'int32_data',  int,   dnnc.vectorInt),
            param.INT16:    ('h', dnnc.IR_DataType_INT16,    'int32_data',  int,   dnnc.vectorInt),
            param.INT32:    ('i', dnnc.IR_DataType_INT32,    'int32_data',  int,   dnnc.vectorInt),
            param.INT64:    ('q', dnnc.IR_DataType_INT64,    'int64_data',  int,   dnnc.vectorInt),
            param.UINT8:    ('B', dnnc.IR_DataType_UINT8,    'uint64_data', int,   dnnc.vectorInt),
            param.UINT16:   ('H', dnnc.IR_DataType_UINT16,   'uint64_data', int,   dnnc.vectorInt),
            param.UINT32:   ('I', dnnc.IR_DataType_UINT32,   'uint64_data', int,   dnnc.vectorInt),
            param.UINT64:   ('L', dnnc.IR_DataType_UINT64,   'uint64_data', int,   dnnc.vectorInt),
            param.FLOAT16:  ('e', dnnc.IR_DataType_FLOAT16,  'float_data',  float, dnnc.vectorFloat),
            param.BFLOAT16: ('e', dnnc.IR_DataType_BFLOAT16, 'float_data',  float, dnnc.vectorFloat),
            param.FLOAT:    ('f', dnnc.IR_DataType_FLOAT,    'float_data',  float, dnnc.vectorFloat),
            param.DOUBLE:   ('d', dnnc.IR_DataType_DOUBLE,   'double_data', float, dnnc.vectorFloat),
            param.STRING:   ('s', dnnc.IR_DataType_STRING,   'string_data', str,   dnnc.vectorStr),
            # BOOL has no typed field in practice here: iterate raw_data bytes.
            param.BOOL:     ('?', dnnc.IR_DataType_BOOL,     'raw_data',    bool,  dnnc.vectorBool),
        }

        spec = dtype_specs.get(param.data_type)
        if spec is None:
            # BUG FIX: the original error path referenced the undefined name
            # 'node' (NameError) and then fell through to crash on a None
            # vector; report the initializer's own name and bail out.
            print("ERROR (ONNX): initializer " + param.name + " type " + str(param.data_type) + " is not valid.")
            return None

        pack_format, param_type, typed_field, cast, vec_ctor = spec
        param_shape = dnnc.vectorSizeT(param.dims)

        # Total element count (product of dims) for raw_data unpacking.
        param_len = 1
        for d in param.dims:
            param_len *= d

        param_vals = [cast(v) for v in getattr(param, typed_field)]
        if len(param_vals) == 0:
            param_vals = struct.unpack(pack_format * param_len, param.raw_data)

        if self._writeParamToDisk:
            self.writeParamsToFile(param.name, param_vals)
            # Data lives on disk; keep an empty vector of the matching family
            # in the graph.  (BUG FIX: the original built a vectorInt here for
            # FLOAT16, inconsistent with every other floating-point type.)
            param_vec = vec_ctor()
        else:
            param_vec = vec_ctor(param_vals)
            if param_vec.size() == 0:
                print("ERROR (ONNX): did not find data for initializer ", param.name)
                return

        param_irData = dnnc.irTypeData(param_type, param_vec)
        dnnc_param = dnnc.dnnParameters(param.name, param_shape, param_irData)
        self._dcGraph.addParameters(dnnc_param)
        return dnnc_param

    def addOPNode(self, node):
        """Add one ONNX NodeProto to the dnnc graph, translating its
        inputs, outputs and attributes.  Returns the dnnc node, or None if
        the op type is not recognized."""
        op_type = dnnc.getOpCode(node.op_type)
        if op_type is dnnc.opInvalid:
            print("ERROR (ONNX):" + node.op_type + " is not a valid graph-node op type.")
            return None

        dcNode = self._dcGraph.addOPNode(node.name, op_type)
        for nd in node.input:
            dcNode.addInput(nd)
        for nd in node.output:
            dcNode.addOutput(nd)

        for attr in node.attribute:
            attr_type = dnnc.IR_DataType_NOTYPE
            attr_vals = []
            attr_vec = None
            # Scalar attributes are wrapped in single-element vectors so the
            # dnnc side sees a uniform container type.
            if attr.type == onnx.AttributeProto.INT:
                attr_type = dnnc.IR_DataType_INT32
                attr_vals.append(attr.i)
                attr_vec = dnnc.vectorInt(attr_vals)
            elif attr.type == onnx.AttributeProto.INTS:
                attr_type = dnnc.IR_DataType_INT32
                for val in attr.ints:
                    attr_vals.append(int(val))
                attr_vec = dnnc.vectorInt(attr_vals)
            elif attr.type == onnx.AttributeProto.FLOAT:
                attr_type = dnnc.IR_DataType_FLOAT
                attr_vals.append(attr.f)
                attr_vec = dnnc.vectorFloat(attr_vals)
            elif attr.type == onnx.AttributeProto.FLOATS:
                attr_type = dnnc.IR_DataType_FLOAT
                for val in attr.floats:
                    attr_vals.append(float(val))
                attr_vec = dnnc.vectorFloat(attr_vals)
            elif attr.type == onnx.AttributeProto.STRING:
                attr_type = dnnc.IR_DataType_STRING
                attr_vals.append(str(attr.s))
                attr_vec = dnnc.vectorStr(attr_vals)
            elif attr.type == onnx.AttributeProto.STRINGS:
                attr_type = dnnc.IR_DataType_STRING
                for val in attr.strings:
                    attr_vals.append(str(val))
                attr_vec = dnnc.vectorStr(attr_vals)
            elif attr.type == onnx.AttributeProto.TENSOR:
                # Integer tensor attribute: prefer the typed repeated field,
                # fall back to struct-unpacking raw_data.
                if (attr.t.data_type == onnx.TensorProto.INT8 or
                        attr.t.data_type == onnx.TensorProto.INT16 or
                        attr.t.data_type == onnx.TensorProto.INT32 or
                        attr.t.data_type == onnx.TensorProto.INT64):
                    attr_type = attr.t.data_type
                    attr_data = None
                    pack_format = 'P'
                    if attr.t.data_type == onnx.TensorProto.INT8:
                        pack_format = 'b'
                    if attr.t.data_type == onnx.TensorProto.INT16:
                        pack_format = 'h'
                    if attr.t.data_type == onnx.TensorProto.INT32:
                        if attr.t.int32_data:
                            attr_data = attr.t.int32_data
                        pack_format = 'i'
                    if attr.t.data_type == onnx.TensorProto.INT64:
                        if attr.t.int64_data:
                            attr_data = attr.t.int64_data
                        pack_format = 'q'
                    if attr_data is None:
                        len = 1
                        for d in attr.t.dims:
                            len *= d
                        attr_data = struct.unpack(pack_format * len, attr.t.raw_data)
                    if attr_data is not None:
                        attr_tensor = dnnc.intTensor(attr.t.dims, attr.name)
                        attr_tensor.load(attr_data)
                        attr_vec = dnnc.vectorTensorInt()
                        attr_vec.push_back(attr_tensor)
                    else:
                        print("ERROR (ONNX): could not extract data for graph-node " +
                              node.name + "\'s attribute " + attr.name + ".\n")
                # Floating-point tensor attribute: same typed-field-first logic.
                elif (attr.t.data_type == onnx.TensorProto.FLOAT16 or
                        attr.t.data_type == onnx.TensorProto.FLOAT or
                        attr.t.data_type == onnx.TensorProto.DOUBLE):
                    attr_type = attr.t.data_type
                    attr_data = None
                    pack_format = 'P'
                    if attr.t.data_type == onnx.TensorProto.FLOAT16:
                        if attr.t.float_data:
                            attr_data = attr.t.float_data
                        pack_format = 'e'
                    if attr.t.data_type == onnx.TensorProto.FLOAT:
                        if attr.t.float_data:
                            attr_data = attr.t.float_data
                        pack_format = 'f'
                    if attr.t.data_type == onnx.TensorProto.DOUBLE:
                        if attr.t.double_data:
                            attr_data = attr.t.double_data
                        pack_format = 'd'
                    if attr_data is None:
                        len = 1
                        for d in attr.t.dims:
                            len *= d
                        attr_data = struct.unpack(pack_format * len, attr.t.raw_data)
                    if attr_data is not None:
                        attr_tensor = dnnc.floatTensor(attr.t.dims, attr.name)
                        attr_tensor.load(attr_data)
                        attr_vec = dnnc.vectorTensorFloat()
                        attr_vec.push_back(attr_tensor)
                    else:
                        print("ERROR (ONNX): could not extract data for graph-node " +
                              node.name + "\'s attribute " + attr.name + ".\n")
                else:
                    print("ERROR (ONNX): attribute tensor's datatype " + str(attr.t.data_type) +
                          " isn't understood.")
            elif attr.type == onnx.AttributeProto.TENSORS:
                attr_type = dnnc.IR_DataType_TENSORS
                attr_vals.append(attr.tensors)
                # NOTE(review): wrapping the TensorProto list directly in a
                # floatTensor looks suspect; kept as-is — verify against a
                # model that actually uses a TENSORS attribute.
                attr_vec = dnnc.vectorTensorFloat(dnnc.floatTensor(attr_vals))
            elif attr.type == onnx.AttributeProto.GRAPH:
                attr_type = dnnc.IR_DataType_GRAPH
                attr_vals.append(attr.g)
                print("ERROR (ONNX): sub-graph in graph-node is not yet supported.")
            elif attr.type == onnx.AttributeProto.GRAPHS:
                attr_type = dnnc.IR_DataType_GRAPH
                attr_vals.append(attr.graphs)
                print("ERROR (ONNX): sub-graph in graph-node is not yet supported.")
            else:
                print("ERROR (ONNX): graph-node " + node.name + "\'s attribute " +
                      attr.name + " type " + str(attr.type) + " is not valid.")
                continue

            if attr_type is dnnc.IR_DataType_NOTYPE or attr_vec is None or attr_vec.size() == 0:
                print("ERROR (ONNX): graph-node " + node.name + "\'s attribute " +
                      attr.name + " has no data.")
                continue

            attr_code = dnnc.getAttrName(attr.name)
            if attr_code is dnnc.attr_invalid:
                print("WARN (ONNX): " + attr.name + " is not a valid graph-node attribute.")
                print("             operator " + node.op_type + " will be added without this attribute.")
            cAttrData = dnnc.irTypeData(attr_type, attr_vec)
            cAttr = dnnc.nodeAttribute(attr_code, cAttrData)
            dcNode.addAttribute(cAttr)
        return dcNode

    def createTermNode(self, term):
        """Translate a graph terminal (input/output ValueInfoProto) into a
        (name, elem_type, shape) tuple, or None if the type is invalid.
        Symbolic dims named 'None' become 0 (unknown)."""
        term_name = term.name
        data_type = dnnc.NOTYPE
        term_shape = []

        if term.type.tensor_type.elem_type:
            data_type = term.type.tensor_type.elem_type
            # BUG FIX: the original wrote 'and' here, a condition that can
            # never hold, so invalid types slipped through; it also crashed
            # concatenating the int data_type into the message.
            if data_type <= dnnc.NOTYPE or data_type >= dnnc.TENSOR:
                print("ERROR (ONNX): Term " + term_name + "\'s type " + str(data_type) + " is not valid")
                return None

        if term.type.tensor_type and term.type.tensor_type.shape:
            shape = term.type.tensor_type.shape.dim
            for dim in shape:
                if dim.dim_param:
                    if dim.dim_param == 'None':
                        term_shape.append(0)
                    else:
                        print("ERROR (ONNX): terminal (input/output) " + term_name + "\'s dim_param "
                              + dim.dim_param + " is not recognized.")
                elif dim.dim_value:
                    term_shape.append(dim.dim_value)
                else:
                    print("ERROR (ONNX): terminal (input/output) " + term_name + " has no dim_param or dim_value")

        return (term_name, data_type, term_shape)

    def main(self, onnx_filename, bundle_dir=None, checker=False, optimize=False):
        """Load an ONNX model file and build the dnnc graph from it.

        bundle_dir defaults to the model file's directory; ``checker`` runs
        ONNX shape inference/verification and ``optimize`` runs every
        available onnx optimizer pass (both best-effort).  Returns the
        populated dnnc.Graph.
        """
        dnncModule = sys.modules.get('deepC.dnnc')
        if dnncModule is None:
            print("ERROR (DNNC): could not find dnnc module. Please make sure dnnc is imported before calling ", __name__)
            return

        print("reading onnx model from file ", onnx_filename)

        self._bundleDir = bundle_dir
        if self._bundleDir is None:
            self._bundleDir = os.path.dirname(onnx_filename)

        model = onnx.load(onnx_filename)
        # BUG FIX: "ir_vesion" typo in the user-facing message.
        print("Model info:\n  ir_version : ", model.ir_version, "\n  doc       :", model.doc_string)

        if optimize:
            print("  Optimization enabled.")
            from onnx import optimizer
            # Apply each pass individually so one failing pass does not
            # abort the others.
            for opt_pass in optimizer.get_available_passes():
                print('    running optimization step : {}'.format(opt_pass.replace("_", " ")))
                try:
                    model = optimizer.optimize(model, [opt_pass])
                except Exception as e:
                    print("        optimization failed." + str(e) + "\n. Abandoning and trying next.")
            print("  optimization done.")

        if checker:
            try:
                print("running ONNX model shape inference engine and verification")
                onnx.checker.check_model(model)
                from onnx import shape_inference
                model = shape_inference.infer_shapes(model)
                onnx.checker.check_model(model)
            except Exception as e:
                print("  failed. moving to next step." + str(e))

        graph = model.graph
        self._dcGraph = dnnc.Graph()
        self._dcGraph.setName(graph.name)

        for node in graph.node:
            dcNode = self.addOPNode(node)

        for terminal in graph.input:
            dcTerm = self.createTermNode(terminal)
            if dcTerm != None and len(dcTerm) == 3:
                self._dcGraph.addInput(dcTerm[0], dcTerm[1], dcTerm[2])

        for terminal in graph.output:
            dcTerm = self.createTermNode(terminal)
            if dcTerm != None and len(dcTerm) == 3:
                self._dcGraph.addOutput(dcTerm[0], dcTerm[1], dcTerm[2])

        for param in graph.initializer:
            self.addParams(param)

        try:
            print("running DNNC graph sanity check.")
            if False == self._dcGraph.sanityCheck():
                print("  FAILED. Please check your model.")
        except Exception as e:
            print("  FAILED.\n" + str(e))

        return self._dcGraph
def main():
    """Command-line entry point: onnx2cpp <model>.onnx [bundle_dir]."""
    if len(sys.argv) < 2:
        print("\nUsage: " + sys.argv[0] + " <onnx_model_file>.onnx [bundle_dir]\n")
        exit(0)

    onnx_file = sys.argv[1]
    # Bundle dir defaults to the model file's own directory.
    bundle_dir = sys.argv[2] if len(sys.argv) >= 3 else os.path.dirname(onnx_file)

    pbReader().main(onnx_file, bundle_dir, checker=False, optimize=False)

if __name__ == "__main__":
    sys.exit(main())
<file_sep>
// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! Flattens the input tensor into a 2D matrix. If input tensor has shape (d_0,
d_1, ... d_n) then
the output will have shape (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X
dn)*/
template <typename T> class Flatten : public baseOperator<T, T, T> {
protected:
  int axis = 1; /*!< Indicate up to which input dimensions (exclusive) should be
      flattened to the outer dimension of the output. The value for axis must be
      in the range [0, R], where R is the rank of the input tensor. When axis = 0,
      the shape of the output tensor is (1, (d_0 X d_1 ... d_n), where the shape
      of the input tensor is (d_0, d_1, ... d_n).*/

public:
  Flatten(std::string name = "opFlatten", int axis = 1)
      : baseOperator<T, T, T>(opFlatten, name) {
    this->axis = axis;
  }

  bool getAttribute(OPATTR attrName, int &obj) override {
    if (attrName != attr_axis)
      return false;
    obj = axis;
    return true;
  }

  bool setAttribute(OPATTR attrName, int obj) override {
    if (attrName != attr_axis)
      return false;
    axis = obj;
    return true;
  }

  tensor<T> compute(tensor<T> a /*!< : N D tensor input of rank >= axis.*/) {
    // Flattening past the tensor's rank is not meaningful.
    if (a.rank() < (size_t)axis) {
      SPDLOG_ERROR("tensor rank or axis not appropriate for Flatten operator.");
      return NULL_TENSOR<T>;
    }
    // Dimensions before the split point multiply into the row count,
    // the rest into the column count.
    const size_t split = (size_t)axis;
    size_t rows = 1, cols = 1;
    for (size_t d = 0; d < a.rank(); d++) {
      (d < split ? rows : cols) *= a.shape()[d];
    }
    std::vector<size_t> flat_shape{rows, cols};
    a.reshape(flat_shape);
    return a;
  }
  /*!<
  \returns a 2D tensor with the contents of the input tensor, with input
  dimensions up to axis flattened to the outer dimension of the output and
  remaining input dimensions flattened into the inner dimension of the output.
  */
};
} // namespace dnnc
<file_sep>import common
import deepC.dnnc as dc
import numpy as np
import unittest
import sys
class LSTM_detailedTest(unittest.TestCase):
    """Compare dc.lstm against pre-computed ONNX reference outputs.

    Every case loads its eight input fixtures (X, W, R, B, sequence_lens,
    initial_h, initial_c, P) plus the expected output Y from .npy files
    under swig/result/LSTM/test_LSTM_<n>/, runs dc.lstm, and checks the
    first output against Y with rtol=atol=1e-3.
    """

    # Positional order in which dc.lstm consumes its eight input tensors.
    _INPUT_NAMES = ('X', 'W', 'R', 'B', 'sequence_lens',
                    'initial_h', 'initial_c', 'P')

    def _run_lstm_case(self, index):
        """Load fixtures for case *index*, run dc.lstm, compare against Y.

        NOTE(review): the original generated tests also built
        activation_alpha/activation_beta/activations/clip/direction/
        hidden_size/input_forget locals but never passed them to dc.lstm;
        that dead code is dropped here.
        """
        base = 'swig/result/LSTM/test_LSTM_{0}/test_LSTM_{0}_'.format(index)
        dc_inputs = []
        for name in self._INPUT_NAMES:
            np_arr = np.load('{}{}.npy'.format(base, name))
            dc_inputs.append(
                dc.array(np_arr.flatten().tolist()).reshape(np_arr.shape))
        expected = np.load(base + 'Y.npy')
        dcr = dc.lstm(*dc_inputs)
        np.testing.assert_allclose(
            expected.flatten(),
            np.array(dcr[0].data()).astype(np.float32),
            rtol=1e-3, atol=1e-3)

    # The decorators below appeared as the corrupted text '<EMAIL>("FAIL")'
    # in the checked-in source (a syntax error); they are restored as
    # @unittest.skip("FAIL") markers for known-failing cases --
    # TODO(review): confirm against the test generator.

    @unittest.skip("FAIL")
    def test_LSTM_1(self):
        """X [7,6,8], W [1,72,8], R [1,72,18], B [1,144] -> Y [7,1,6,18]."""
        self._run_lstm_case(1)

    @unittest.skip("FAIL")
    def test_LSTM_2(self):
        """X [8,4,1], W [2,64,1], R [2,64,16], B [2,128] -> Y [8,2,4,16]."""
        self._run_lstm_case(2)

    @unittest.skip("FAIL")
    def test_LSTM_3(self):
        """X [8,1,4], W [1,56,4], R [1,56,14], B [1,112] -> Y [8,1,1,14]."""
        self._run_lstm_case(3)

    @unittest.skip("FAIL")
    def test_LSTM_4(self):
        """X [2,1,1], W [2,72,1], R [2,72,18], B [2,144] -> Y [2,2,1,18]."""
        self._run_lstm_case(4)

    @unittest.skip("FAIL")
    def test_LSTM_5(self):
        """X [2,3,10], W [2,20,10], R [2,20,5], B [2,40] -> Y [2,2,3,5]."""
        self._run_lstm_case(5)

    @unittest.skip("FAIL")
    def test_LSTM_6(self):
        """X [7,5,9], W [1,64,9], R [1,64,16], B [1,128] -> Y [7,1,5,16]."""
        self._run_lstm_case(6)

    @unittest.skip("FAIL")
    def test_LSTM_7(self):
        """X [6,8,6], W [2,40,6], R [2,40,10], B [2,80] -> Y [6,2,8,10]."""
        self._run_lstm_case(7)

    @unittest.skip("FAIL")
    def test_LSTM_8(self):
        """X [5,1,9], W [2,4,9], R [2,4,1], B [2,8] -> Y [5,2,1,1]."""
        self._run_lstm_case(8)

    @unittest.skip("FAIL")
    def test_LSTM_9(self):
        """X [1,2,9], W [1,52,9], R [1,52,13], B [1,104] -> Y [1,1,2,13]."""
        self._run_lstm_case(9)

    @unittest.skip("FAIL")
    def test_LSTM_10(self):
        """X [9,6,2], W [2,8,2], R [2,8,2], B [2,16] -> Y [9,2,6,2]."""
        self._run_lstm_case(10)

    def tearDown(self):
        return "test finished"
# Run the LSTM test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "core/broadcast.h"
#include "operators/baseOperator.h"
#include <string>
#include <vector>
using namespace Eigen;
namespace dnnc {
/*! Returns the tensor resulted
* from Element-wise max of each of the input tensors (
* with Numpy-style broadcasting support).
*/
template <typename T> class Max : public baseOperator<T, T, T> {
  // Reduce a vector of candidate values to its largest element.
  // An empty vector is an error: it is logged and NULL_TENSOR<T> is
  // returned, mirroring the rest of the operator's error handling.
  // NOTE(review): conversion of NULL_TENSOR<T> to a scalar T depends on
  // project definitions -- confirm.
  T maxEl(std::vector<T> &candidates) {
    if (candidates.size() == 0) {
      SPDLOG_ERROR("Max operator requires non-zero size vector.");
      return NULL_TENSOR<T>;
    }
    T best = candidates[0];
    for (size_t k = 1; k < candidates.size(); k++) {
      if (candidates[k] > best)
        best = candidates[k];
    }
    return best;
  }

public:
  Max(std::string name = "opMax") : baseOperator<T, T, T>(opMax, name) {}

  /*! Element-wise maximum over all input tensors with Numpy-style
   *  broadcasting. Element types are constrained to float/double;
   *  any failure path yields NULL_TENSOR<T>. */
  tensor<T>
  compute(std::vector<tensor<T>> inputs /*!<[float,double]: ND tensors */) {
    if (!(this->template type_check<T, float, double>())) {
      SPDLOG_ERROR("Constrain input and output types to float tensors.");
      return NULL_TENSOR<T>;
    }
    if (inputs.size() == 0) {
      SPDLOG_ERROR("Max operator requires non-zero size input vector.");
      return NULL_TENSOR<T>;
    }
    try {
      // Broadcast all inputs to a common shape, then take the max of the
      // co-located elements at every flat position.
      std::vector<DIMENSION> resultShape = vecBroadcastReShape(inputs);
      tensor<T> result(resultShape);
      for (size_t pos = 0; pos < result.length(); pos++) {
        std::vector<T> column;
        for (size_t t = 0; t < inputs.size(); t++)
          column.push_back(inputs[t][pos]);
        result[pos] = maxEl(column);
      }
      return result;
    } catch (const std::exception &e) {
      SPDLOG_ERROR(
          "operands could not be broadcast together with given shapes!!!");
      return NULL_TENSOR<T>;
    }
  }
};
} // namespace dnnc
<file_sep># dnn Compiler Contributors
## Architect
**<NAME>**
## Lead Developers
## Mentors
1. <NAME>
1. <NAME>
## Committers
1. Hrishikesh
1. <NAME>
1. <NAME>
1. <NAME>
## Advisors
## Reviewers
## Testing
## Documentation
## Full List of Contributors
1. <NAME>
1. <NAME>
## How to cite
Here is the appropriate way to cite deepC in a publication:
```
@misc{Sharma2019,
author = {<NAME> and others},
title = {deepC: vendor independent deep learning library, compiler and inference framework for microcomputers and microcontrollers},
year = {2019},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/ai-techsystems/deepC}},
}
```
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import os, argparse
from tensor_op_dict import *
def assignment(assignment):
    """Emit unittest methods exercising the in-place assignment operators.

    For every operator kind and every (lhs, rhs) operand pairing in
    *assignment*, generate a test method that applies the operator to a
    numpy copy and a dnnc copy and asserts both results agree.
    Returns the generated source as one string.
    """
    chunks = []
    for op_name, operand_map in assignment.items():
        op = operators["assignment_" + op_name]
        chunks.append("\n\t# Assignment " + op_name.title() + "\n")
        for lhs, rhs_candidates in operand_map.items():
            for rhs in rhs_candidates:
                chunks.append(f"\n\t# {lhs} {op} {rhs}\n")
                chunks.append(f"\tdef test_Assignment_{op_name.title()}_{lhs}_{rhs} (self):\n")
                chunks.append(f"\t\ttemp_np = self.np_{tensorOperands[lhs]}.copy()\n")
                if "tensor" in rhs:
                    chunks.append(f"\t\ttemp_np {op} self.np_{tensorOperands[rhs]}\n")
                elif "scalar" in rhs:
                    chunks.append(f"\t\ttemp_np {op} {tensorOperands[rhs]}\n")
                chunks.append(f"\t\ttemp_dc = self.dc_{tensorOperands[lhs]}.copy()\n")
                if "tensor" in rhs:
                    chunks.append(f"\t\ttemp_dc {op} self.dc_{tensorOperands[rhs]}\n")
                elif "scalar" in rhs:
                    chunks.append(f"\t\ttemp_dc {op} {tensorOperands[rhs]}\n")
                chunks.append("\t\tnp.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))\n")
    return "".join(chunks)
def binary(binary):
    """Emit unittest methods exercising the binary operators (+, -, *, ...).

    Each generated method evaluates lhs <op> rhs on the matching numpy and
    dnnc operands and asserts the results agree. Returns the generated
    source as one string.
    """
    chunks = []
    for op_name, operand_map in binary.items():
        op = operators["binary_" + op_name]
        chunks.append("\n\t# Binary " + op_name.title() + "\n")
        for lhs, rhs_candidates in operand_map.items():
            for rhs in rhs_candidates:
                chunks.append(f"\n\t# {lhs} {op} {rhs}\n")
                chunks.append(f"\tdef test_Binary_{op_name.title()}_{lhs}_{rhs} (self):\n")
                chunks.append(f"\t\ttemp_np = self.np_{tensorOperands[lhs]} ")
                if "tensor" in rhs:
                    chunks.append(f"{op} self.np_{tensorOperands[rhs]}\n")
                elif "scalar" in rhs:
                    chunks.append(f"{op} {tensorOperands[rhs]}\n")
                chunks.append(f"\t\ttemp_dc = self.dc_{tensorOperands[lhs]} ")
                if "tensor" in rhs:
                    chunks.append(f"{op} self.dc_{tensorOperands[rhs]}\n")
                elif "scalar" in rhs:
                    chunks.append(f"{op} {tensorOperands[rhs]}\n")
                chunks.append("\t\tnp.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))\n")
    return "".join(chunks)
def unary(unary):
    """Emit unittest methods exercising the unary operators.

    Each generated method applies <op> to the matching numpy and dnnc
    operand and asserts the results agree. Returns the generated source
    as one string.
    """
    out = []
    for op_name, operands in unary.items():
        op = operators["unary_" + op_name]
        out.append("\n\t# Unary " + op_name.title() + "\n")
        for operand in operands:
            target = tensorOperands[operand]
            out.append(f"\n\t# {op} {operand}\n")
            out.append(f"\tdef test_Unary_{op_name.title()}_{operand} (self):\n")
            out.append(f"\t\ttemp_np = {op} self.np_{target}\n")
            out.append(f"\t\ttemp_dc = {op} self.dc_{target}\n")
            out.append("\t\tnp.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))\n")
    return "".join(out)
def comparison(comparison):
    """Emit unittest methods exercising the comparison operators (==, <, ...).

    Mirrors binary(): each generated method evaluates lhs <op> rhs on the
    matching numpy and dnnc operands and asserts the results agree.
    Returns the generated source as one string.
    """
    chunks = []
    for op_name, operand_map in comparison.items():
        op = operators["comparison_" + op_name]
        chunks.append("\n\t# Comparison " + op_name.title() + "\n")
        for lhs, rhs_candidates in operand_map.items():
            for rhs in rhs_candidates:
                chunks.append(f"\n\t# {lhs} {op} {rhs}\n")
                chunks.append(f"\tdef test_Comparison_{op_name.title()}_{lhs}_{rhs} (self):\n")
                chunks.append(f"\t\ttemp_np = self.np_{tensorOperands[lhs]} ")
                if "tensor" in rhs:
                    chunks.append(f"{op} self.np_{tensorOperands[rhs]}\n")
                elif "scalar" in rhs:
                    chunks.append(f"{op} {tensorOperands[rhs]}\n")
                chunks.append(f"\t\ttemp_dc = self.dc_{tensorOperands[lhs]} ")
                if "tensor" in rhs:
                    chunks.append(f"{op} self.dc_{tensorOperands[rhs]}\n")
                elif "scalar" in rhs:
                    chunks.append(f"{op} {tensorOperands[rhs]}\n")
                chunks.append("\t\tnp.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))\n")
    return "".join(chunks)
def main():
    """Generate tensorOperatorsGenerated.py and optionally run it.

    Builds the generated unittest file from a fixed preamble plus the
    selected operator categories (assignment/unary/binary/comparison),
    writes it to disk, and runs it with python3 when --run is given.
    """
    # Preamble of the generated file: license header, imports, and a setUp
    # defining matching numpy/dnnc operand pairs.
    # NOTE(review): the np.bool/np.int/np.float aliases below are removed
    # in NumPy >= 1.24; the generated file needs them updated (bool/int/
    # float) to run on modern NumPy.
    py_file = '''# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for divitional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
# This file is auto generated by tensor_op_gen.py
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class tensorOperatorsGeneratedTest(unittest.TestCase):
	def setUp(self):
		self.np_bool_0_4 = np.arange(5).astype(np.bool)
		self.np_bool_5_9 = np.arange(5,10).astype(np.bool)
		self.np_int_0_4 = np.arange(5).astype(np.int)
		self.np_int_5_9 = np.arange(5,10).astype(np.int)
		self.np_float_0_4 = np.arange(5).astype(np.float)
		self.np_float_5_9 = np.arange(5,10).astype(np.float)
		self.np_double_0_4 = np.arange(5).astype(np.double)
		self.np_double_5_9 = np.arange(5,10).astype(np.double)
		self.dc_bool_0_4 = dc.arange(5).asTypeBool()
		self.dc_bool_5_9 = dc.arange(5,10).asTypeBool()
		self.dc_int_0_4 = dc.arange(5).asTypeInt()
		self.dc_int_5_9 = dc.arange(5,10).asTypeInt()
		self.dc_float_0_4 = dc.arange(5).asTypeFloat()
		self.dc_float_5_9 = dc.arange(5,10).asTypeFloat()
		self.dc_double_0_4 = dc.arange(5).asTypeDouble()
		self.dc_double_5_9 = dc.arange(5,10).asTypeDouble()
'''
    parser = argparse.ArgumentParser(description="generate and run tensor operators' unittests")
    parser.add_argument("-a", "--assignment", action="store_true", help="add assignment tensor operators in unittest file")
    parser.add_argument("-u", "--unary", action="store_true", help="add unary tensor operators in unittest file")
    parser.add_argument("-b", "--binary", action="store_true", help="add binary tensor operators in unittest file")
    parser.add_argument("-c", "--comparison", action="store_true", help="add comparison tensor operators in unittest file")
    parser.add_argument("-r", "--run", action="store_true", help="run the tensor unittest file after generation")
    parser.add_argument("-v", "--verbose", action="store_true", help="increase output verbosity of unittests")
    args = parser.parse_args()
    # With no category flags at all, generate every category.
    if not args.assignment and not args.unary and not args.binary and not args.comparison:
        args.assignment = args.unary = args.binary = args.comparison = True
    # Category generation is best-effort by design: a failure degrades to a
    # comment inside the generated file. The original used bare "except:",
    # which also swallowed KeyboardInterrupt/SystemExit; narrowed to
    # "except Exception:".
    if args.assignment:
        try:
            py_file += assignment(tensorOperators['assignment'])
        except Exception:
            py_file += "\n\n\t# something went wrong while handling Assignment tensor operators.\n\n"
    if args.binary:
        try:
            py_file += binary(tensorOperators['binary'])
        except Exception:
            py_file += "\n\n\t# something went wrong while handling Binary tensor operators.\n\n"
    if args.unary:
        try:
            py_file += unary(tensorOperators['unary'])
        except Exception:
            py_file += "\n\n\t# something went wrong while handling Unary tensor operators.\n\n"
    if args.comparison:
        try:
            py_file += comparison(tensorOperators['comparison'])
        except Exception:
            py_file += "\n\n\t# something went wrong while handling Comparison tensor operators.\n\n"
    # Trailer: tearDown plus the __main__ guard of the generated file.
    py_file += '''
	def tearDown(self):
		return "test finished"
if __name__ == '__main__':
	unittest.main()
'''
    with open("tensorOperatorsGenerated.py", "w") as f:
        f.write(py_file)
    if args.run:
        if args.verbose:
            os.system("python3 tensorOperatorsGenerated.py -v")
        else:
            os.system("python3 tensorOperatorsGenerated.py")
    return
if __name__ == "__main__":
main()<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! \f$ y=(x-x_{0})*x_{scale} \f$*/
/*! Where \f$ x \f$ is a quantized tensor, \f$x_{0}\f$ is the origin,
and \f$x_{scale}\f$ is the scale.*/
/*! The formula shows how the Dequantize Linear works.*/
/*! Constraints: \f$x_{scale}\f$ and \f$x_{0}\f$ must have same shape.
\f$x_{0}\f$ and \f$ x \f$ must have same type (8-bit/32-bit integer tensor)*/
template <typename To, typename Ti>
class DequantizeLinear : public baseOperator<To, Ti, Ti> {
public:
  DequantizeLinear(std::string name = "opDequantizeLinear")
      : baseOperator<To, Ti, Ti>(opDequantizeLinear, name) {}

  /*! Compute y = (x - x_zero_point) * x_scale element-wise.
   *  x_scale and x_zero_point must have identical shapes; on mismatch the
   *  error is logged and NULL_TENSOR is returned (previously the code
   *  logged and fell through, computing with mismatched operands anyway).
   *  \return a tensor of the same shape as the input.
   */
  tensor<To>
  compute(tensor<Ti> &a /*!<N-D quantized input tensor to be de-quantized*/,
          tensor<float> &x_scale /*!<Scalar tensor*/,
          tensor<Ti> &x_zero_point /*!<Scalar tensor*/) {
    if (x_scale.shape() != x_zero_point.shape()) {
      SPDLOG_ERROR(
          "tensor dimensions not appropriate for DequantizeLinear operator.");
      return NULL_TENSOR<To>;
    }
    // BUG FIX: result was declared tensor<float>, which disagreed with the
    // declared return type tensor<To>; declare it as tensor<To>.
    tensor<To> result(a.shape(), a.name());
    // Only element 0 of the scale/zero-point tensors is used: both are
    // documented as scalar tensors.
    for (size_t i = 0; i < a.length(); i++)
      result[i] = (a[i] - x_zero_point[0]) * x_scale[0];
    return result;
  }
};
} // namespace dnnc
<file_sep>#include "operators/Constant.h"
#include "operators/Reshape.h"
#include "operators/Gemm.h"
#include "operators/Relu.h"
using namespace dnnc;
// Prints command-line usage for this generated model binary.
// args: argv from main; args[0] supplies the program name.
void usage(char** args) {
  std::ostream& out = std::cout;
  out << "\nUsage: " << args[0] << " <datafile for input \"0\">" << "\n\n";
  out << "This model has " << 1 << " input(s):\n"
      << "\t 1. \"0\" (shape 714):\n";
  out << "Output(s) will be written in file(s):\n"
      << "\t 1. \"14.out\" (shape 1, 2):\n";
}
// Entry point for a generated 3-layer MLP (714 -> 8 -> 4 -> 2):
// Reshape, then three Gemm+Relu stages. Reads the input tensor from
// argv[1], the weights/biases from files next to the executable, and
// writes the final activation to "14.out".
int main(int argc, char** argv) {
// BUNDLE_DIR = directory containing the executable; parameter files live there.
#define BUNDLE_DIR std::string(argv[0]).substr(0,\
std::string(argv[0]).find_last_of("/")) + "/"
// Require one data-file argument; any "-h..." flag prints usage instead.
if ( argc < 2 || std::string(argv[1]).substr(0,2) == "-h" ) {
usage(argv);
return 1;
}
// Model input: flat vector of 714 floats read from the user-supplied file.
tensor<float> dnnc_0({714});
dnnc_0.read(argv[1]);
// Layer parameters; weight shapes are {out_features, in_features}.
tensor<float> dnnc_fc1_dot_weight({8, 714});
dnnc_fc1_dot_weight.read(BUNDLE_DIR + "fc1.weight");
tensor<float> dnnc_fc1_dot_bias({8});
dnnc_fc1_dot_bias.read(BUNDLE_DIR + "fc1.bias");
tensor<float> dnnc_fc2_dot_weight({4, 8});
dnnc_fc2_dot_weight.read(BUNDLE_DIR + "fc2.weight");
tensor<float> dnnc_fc2_dot_bias({4});
dnnc_fc2_dot_bias.read(BUNDLE_DIR + "fc2.bias");
tensor<float> dnnc_fc3_dot_weight({2, 4});
dnnc_fc3_dot_weight.read(BUNDLE_DIR + "fc3.weight");
tensor<float> dnnc_fc3_dot_bias({2});
dnnc_fc3_dot_bias.read(BUNDLE_DIR + "fc3.bias");
// Constant node supplying the Reshape target shape {-1, 714}.
Constant<int64_t> dnnc___1("dnnc___1");
std::vector<int64_t> dnnc___1_value_vec = {-1,714};
tensor<int64_t> dnnc___1_value({2}); dnnc___1_value.load(dnnc___1_value_vec);
dnnc___1.setAttribute ( attr_value, dnnc___1_value );
tensor<int64_t> dnnc_dnnc___1_7 = dnnc___1.compute ();
// Reshape the flat input to a (batch, 714) matrix for the Gemm stages.
Reshape<float, float, int64_t> dnnc___2("dnnc___2");
tensor<float> dnnc_dnnc___2_8 = dnnc___2.compute ( dnnc_0, dnnc_dnnc___1_7);
// fc1: y = x * W^T + b (transB=1 because weights are stored {out, in}).
Gemm<float, float, float> dnnc___3("dnnc___3");
float dnnc___3_alpha = 1.000000 ;
dnnc___3.setAttribute ( attr_alpha, dnnc___3_alpha );
float dnnc___3_beta = 1.000000 ;
dnnc___3.setAttribute ( attr_beta, dnnc___3_beta );
int32_t dnnc___3_transB = 1 ;
dnnc___3.setAttribute ( attr_transB, dnnc___3_transB );
tensor<float> dnnc_dnnc___3_9 = dnnc___3.compute ( dnnc_dnnc___2_8, dnnc_fc1_dot_weight, dnnc_fc1_dot_bias);
Relu<float, float> dnnc___4("dnnc___4");
tensor<float> dnnc_dnnc___4_10 = dnnc___4.compute ( dnnc_dnnc___3_9);
// fc2: same Gemm configuration as fc1.
Gemm<float, float, float> dnnc___5("dnnc___5");
float dnnc___5_alpha = 1.000000 ;
dnnc___5.setAttribute ( attr_alpha, dnnc___5_alpha );
float dnnc___5_beta = 1.000000 ;
dnnc___5.setAttribute ( attr_beta, dnnc___5_beta );
int32_t dnnc___5_transB = 1 ;
dnnc___5.setAttribute ( attr_transB, dnnc___5_transB );
tensor<float> dnnc_dnnc___5_11 = dnnc___5.compute ( dnnc_dnnc___4_10, dnnc_fc2_dot_weight, dnnc_fc2_dot_bias);
Relu<float, float> dnnc___6("dnnc___6");
tensor<float> dnnc_dnnc___6_12 = dnnc___6.compute ( dnnc_dnnc___5_11);
// fc3: final projection to the 2-way output.
Gemm<float, float, float> dnnc___7("dnnc___7");
float dnnc___7_alpha = 1.000000 ;
dnnc___7.setAttribute ( attr_alpha, dnnc___7_alpha );
float dnnc___7_beta = 1.000000 ;
dnnc___7.setAttribute ( attr_beta, dnnc___7_beta );
int32_t dnnc___7_transB = 1 ;
dnnc___7.setAttribute ( attr_transB, dnnc___7_transB );
tensor<float> dnnc_dnnc___7_13 = dnnc___7.compute ( dnnc_dnnc___6_12, dnnc_fc3_dot_weight, dnnc_fc3_dot_bias);
Relu<float, float> dnnc___8("dnnc___8");
tensor<float> dnnc_dnnc___8_14 = dnnc___8.compute ( dnnc_dnnc___7_13);
// Write the output tensor in a file.
dnnc_dnnc___8_14.write("14.out");
return 0;
}
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! Dropout takes one input floating tensor and produces two tensor outputs,
output (floating tensor) and mask (Tensor<bool>). Depending on whether
it is in test mode or not, the output Y will either be a random dropout
or a simple copy of the input. Note that our implementation of Dropout
does scaling in the training phase, so during testing nothing needs to be
done.*/
/*! Dropout is an inference-time identity: during training it would zero a
 * random fraction (`ratio`) of its input, but the compiler's inference path
 * passes the tensor through unchanged. The ratio is retained only as an
 * operator attribute. */
template <typename T> class Dropout : public baseOperator<T, T, T> {
protected:
  float ratio = 0.5; /*!< Fraction of elements dropped during training. */

public:
  Dropout(std::string name = "opDropout", float ratio = 0.5)
      : baseOperator<T, T, T>(opDropout, name) {
    this->ratio = ratio;
  }
  // Reads the ratio attribute; any other attribute name is rejected.
  bool getAttribute(OPATTR attrName, float &obj) override {
    if (attrName != attr_ratio)
      return false;
    obj = ratio;
    return true;
  }
  // Stores a new ratio; any other attribute name is rejected.
  bool setAttribute(OPATTR attrName, float obj) override {
    if (attrName != attr_ratio)
      return false;
    ratio = obj;
    return true;
  }
  // Identity at inference time; only float/double element types are allowed.
  tensor<T> compute(tensor<T> &a /*!<[float,double]: ND tensor*/) {
    if (this->template type_check<T, float, double>())
      return a;
    SPDLOG_ERROR("Constrain input and output types to float tensors.");
    return NULL_TENSOR<T>;
  }
  /*!<
  \return The output tensor of the same shape and dtype as input.
  */
};
} // namespace dnnc
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
import dnnc_testing
class tensorOperatorsTest(unittest.TestCase):
    """Exercises dnnc tensor operators (assignment, binary, unary and
    comparison) against small constant tensors.

    Fixes over the previous revision:
      * the "Right Shift" assignment checks used ``<<=`` (left shift);
        they now use ``>>=`` as intended,
      * ``np.int`` (removed in NumPy 1.24) is replaced by ``np.int_``.
    """

    def setUp(self):
        # Fresh fixtures per test: some tests mutate these in place.
        self.nullT = dc.array(0)
        self.zeros = dc.zeros(2, 3).asTypeInt()
        self.ones = dc.ones(2, 3).asTypeInt()
        self.f0_4 = dc.arange(5)
        self.f5_9 = dc.arange(10, 5)
        self.np_f0_4 = np.arange(5)
        self.np_f5_9 = np.arange(10, 5)
        self.i0_4 = self.f0_4.asTypeInt()
        self.i5_9 = self.f5_9.asTypeInt()
        # np.int was removed in NumPy 1.24; np.int_ is the supported alias.
        self.np_i0_4 = self.np_f0_4.astype(np.int_)
        self.np_i5_9 = self.np_f5_9.astype(np.int_)
        self.b0_4 = self.f0_4.asTypeBool()
        self.b5_9 = self.f5_9.asTypeBool()

    # assignment operators
    def test_assignments(self):
        """In-place operators (+=, -=, *=, /=, //=, **=, %=, <<=, >>=, &=, |=, ^=)."""
        assert not self.nullT
        # The truth value of an array with more than one element is ambiguous. Use dc.any() or dc.all()
        # assert self.zeros
        # assert self.ones
        # Add
        temp_zeros = self.zeros.copy()
        temp_zeros += self.ones
        dnnc_testing.utils.assert_equal( temp_zeros , self.ones )
        temp_zeros = self.zeros.copy()
        temp_zeros += 1
        dnnc_testing.utils.assert_equal( temp_zeros , self.ones )
        # Sub
        temp_ones = self.ones.copy()
        temp_ones -= self.ones
        dnnc_testing.utils.assert_equal( temp_ones , self.zeros )
        temp = self.f5_9
        temp -= dc.array([5])
        dnnc_testing.utils.assert_allclose( temp , self.f0_4 )
        temp = self.i5_9
        temp -= dc.array([5]).asTypeInt()
        dnnc_testing.utils.assert_equal( temp , self.i0_4 )
        temp = self.b5_9
        temp -= dc.array([5]).asTypeBool()
        dnnc_testing.utils.assert_equal(temp, dc.zeros(5).asTypeBool())
        # Mul
        temp_zeros = self.zeros.copy()
        temp_zeros *= self.ones
        dnnc_testing.utils.assert_equal( temp_zeros , self.zeros )
        temp_ones = self.ones.copy()
        temp_ones *= 0
        dnnc_testing.utils.assert_equal( temp_ones , self.zeros )
        # TrueDiv
        temp_zeros = self.zeros.copy().asTypeFloat()
        temp_zeros /= self.ones.asTypeFloat()
        dnnc_testing.utils.assert_equal( temp_zeros.asTypeFloat() , self.zeros.asTypeFloat() )
        temp_zeros = self.zeros.copy()
        temp_zeros /= 1
        dnnc_testing.utils.assert_equal( temp_zeros.asTypeFloat() , self.zeros.asTypeFloat() )
        # FloorDiv
        temp_zeros = self.zeros.copy()
        temp_zeros //= self.ones
        dnnc_testing.utils.assert_equal( temp_zeros , self.zeros )
        temp_zeros = self.zeros.copy()
        temp_zeros //= 1
        dnnc_testing.utils.assert_equal( temp_zeros , self.zeros )
        # Pow
        temp_zeros = self.zeros.copy()
        temp_zeros **= self.ones
        dnnc_testing.utils.assert_equal( temp_zeros , self.zeros )
        temp_ones = self.ones.copy()
        temp_ones **= 0
        dnnc_testing.utils.assert_equal( temp_ones , self.ones )
        # Mod
        temp_zeros = self.zeros.copy()
        temp_zeros %= self.ones
        dnnc_testing.utils.assert_equal( temp_zeros , self.zeros )
        temp_zeros = self.zeros.copy()
        temp_zeros %= 1
        dnnc_testing.utils.assert_equal( temp_zeros , self.zeros )
        # Left Shift
        temp_zeros = self.zeros.copy()
        temp_zeros <<= self.ones
        dnnc_testing.utils.assert_equal( temp_zeros , self.zeros << self.ones )
        temp_ones = self.ones.copy()
        temp_ones <<= 0
        dnnc_testing.utils.assert_equal( temp_ones , self.ones << 0 )
        # Right Shift
        # BUGFIX: previously used <<= here, so the right-shift in-place
        # operator was never exercised (the test passed only because
        # zeros<<ones == zeros>>ones and ones<<0 == ones>>0).
        temp_zeros = self.zeros.copy()
        temp_zeros >>= self.ones
        dnnc_testing.utils.assert_equal( temp_zeros , self.zeros >> self.ones)
        temp_ones = self.ones.copy()
        temp_ones >>= 0
        dnnc_testing.utils.assert_equal( temp_ones , self.ones >> 0 )
        # And
        temp_zeros = self.zeros.copy()
        temp_zeros &= self.ones
        dnnc_testing.utils.assert_equal( temp_zeros , self.zeros )
        temp_ones = self.ones.copy()
        temp_ones &= 0
        dnnc_testing.utils.assert_equal( temp_ones , self.zeros )
        # Or
        temp_zeros = self.zeros.copy()
        temp_zeros |= self.ones
        dnnc_testing.utils.assert_equal( temp_zeros , self.ones )
        temp_ones = self.ones.copy()
        temp_ones |= 0
        dnnc_testing.utils.assert_equal( temp_ones , self.ones )
        # Xor
        temp_zeros = self.zeros.copy()
        temp_zeros ^= self.ones
        dnnc_testing.utils.assert_equal( temp_zeros , self.ones )
        temp_ones = self.ones.copy()
        temp_ones ^= 1
        dnnc_testing.utils.assert_equal( temp_ones , self.zeros )

    # binary operators
    def test_binary(self):
        """Two-operand operators, mixing tensor/tensor and tensor/scalar."""
        # Add
        dnnc_testing.utils.assert_equal( self.ones , self.zeros+self.ones)
        dnnc_testing.utils.assert_equal( self.ones , self.zeros+1)
        dnnc_testing.utils.assert_equal( self.ones , 1+self.zeros)
        dnnc_testing.utils.assert_equal( self.zeros+self.ones, self.ones)
        # Sub
        dnnc_testing.utils.assert_equal( -self.ones, self.zeros-self.ones)
        # dnnc_testing.utils.assert_equal( self.ones , 1-self.zeros) # Not working for some wierd reason
        dnnc_testing.utils.assert_equal( self.ones, self.ones-self.zeros)
        # Mul
        dnnc_testing.utils.assert_equal( self.ones, self.ones*1)
        dnnc_testing.utils.assert_equal( self.ones, self.ones*self.ones)
        dnnc_testing.utils.assert_equal( self.zeros , 1*self.zeros)
        dnnc_testing.utils.assert_equal( self.zeros, self.zeros*1)
        dnnc_testing.utils.assert_equal( self.zeros, self.ones*0)
        # Left Shift
        dnnc_testing.utils.assert_equal( self.ones<<1, self.ones<<1)
        dnnc_testing.utils.assert_equal( self.ones<<self.ones, self.ones<<self.ones)
        dnnc_testing.utils.assert_equal( self.zeros<<1, self.zeros<<1)
        dnnc_testing.utils.assert_equal( self.ones<<0, self.ones<<0)
        # Right Shift
        dnnc_testing.utils.assert_equal( self.ones>>1, self.ones>>1)
        dnnc_testing.utils.assert_equal( self.ones>>self.ones, self.ones>>self.ones)
        dnnc_testing.utils.assert_equal( self.zeros>>1, self.zeros>>1)
        dnnc_testing.utils.assert_equal( self.ones>>0, self.ones>>0)
        # FloorDiv
        dnnc_testing.utils.assert_equal( self.zeros, self.zeros//self.ones)
        dnnc_testing.utils.assert_equal( self.ones, 1.0//self.ones)
        dnnc_testing.utils.assert_equal( self.zeros, self.zeros//1)
        dnnc_testing.utils.assert_equal( self.ones, self.ones//1)
        # True Div
        dnnc_testing.utils.assert_equal( self.ones.asTypeFloat(), self.ones/self.ones)
        dnnc_testing.utils.assert_equal( self.ones.asTypeFloat(), dc.true_div(self.ones, self.ones))
        dnnc_testing.utils.assert_equal( self.ones, 1.0/self.ones)
        dnnc_testing.utils.assert_equal( self.zeros.asTypeFloat(), self.zeros/1.0)
        dnnc_testing.utils.assert_equal( self.ones.asTypeFloat(), self.ones/1.0)
        # Pow
        dnnc_testing.utils.assert_equal( self.ones , self.ones**self.ones)
        dnnc_testing.utils.assert_equal( self.ones , self.ones**1)
        dnnc_testing.utils.assert_equal( self.ones , self.ones**0)
        # And
        dnnc_testing.utils.assert_equal( self.zeros.asTypeBool() , self.zeros&self.ones)
        dnnc_testing.utils.assert_equal( self.ones , 1&self.ones)
        dnnc_testing.utils.assert_equal( self.zeros , 0&self.ones)
        dnnc_testing.utils.assert_equal( self.ones.asTypeBool() , self.ones&1)
        dnnc_testing.utils.assert_equal( self.zeros.asTypeBool() , self.ones&0)
        # Or
        dnnc_testing.utils.assert_equal( self.ones.asTypeBool() , self.zeros|self.ones)
        dnnc_testing.utils.assert_equal( self.ones , 0|self.ones)
        dnnc_testing.utils.assert_equal( self.ones , 1|self.zeros)
        dnnc_testing.utils.assert_equal( self.ones.asTypeBool() , self.ones|0)
        dnnc_testing.utils.assert_equal( self.ones.asTypeBool() , self.zeros|1)
        # Xor
        dnnc_testing.utils.assert_equal( self.ones.asTypeBool() , self.zeros^self.ones)
        dnnc_testing.utils.assert_equal( self.ones , 0^self.ones)
        dnnc_testing.utils.assert_equal( self.zeros , 0^self.zeros)
        dnnc_testing.utils.assert_equal( self.ones.asTypeBool() , self.ones^0)
        dnnc_testing.utils.assert_equal( self.zeros.asTypeBool() , self.zeros^0)

    # unary operators
    def test_unary(self):
        """Single-operand operators: negation, plus, abs, invert."""
        # Neg
        dnnc_testing.utils.assert_equal( self.i0_4, -(-self.i0_4))
        # Pos
        dnnc_testing.utils.assert_equal( -self.i0_4, +(-self.i0_4))
        # Abs
        dnnc_testing.utils.assert_equal( self.i0_4, abs(-self.i0_4))
        # Invert
        dnnc_testing.utils.assert_equal( self.zeros.asTypeBool() , ~self.ones)
        dnnc_testing.utils.assert_equal( self.ones.asTypeBool() , ~self.zeros)
        dnnc_testing.utils.assert_equal( ~~~self.f5_9 , ~self.f5_9)

    # comparison operators
    def test_comparison(self):
        """Element-wise comparisons; each should yield an all-ones mask."""
        # Less
        lessResult = self.zeros < self.ones
        dnnc_testing.utils.assert_equal(lessResult, self.ones)
        # LessEqual
        lessEqualResult = self.zeros <= self.zeros
        dnnc_testing.utils.assert_equal(lessEqualResult, self.ones)
        # Greater
        greaterResult = self.ones > self.zeros
        dnnc_testing.utils.assert_equal(greaterResult, self.ones)
        # GreaterEqual
        greaterEqualResult = self.ones >= self.ones
        dnnc_testing.utils.assert_equal(greaterEqualResult, self.ones)
        # Equal
        equalResult = self.ones == dc.ones(2,3).asTypeInt()
        dnnc_testing.utils.assert_equal(equalResult, self.ones)
        # NotEqual
        notEqualResult = self.ones != self.zeros
        dnnc_testing.utils.assert_equal(notEqualResult, self.ones)

    def tearDown(self):
        return "test finished"
# Run the operator tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
import sys
class BroadcastTest(unittest.TestCase):
    """Checks that dc arithmetic broadcasts tensors of different ranks the
    same way numpy does."""

    def setUp(self):
        self.len1 = 20
        self.len2 = 40
        rand = np.random.randn
        self.np_a = rand(self.len1).astype(np.float32)
        self.np_b = rand(self.len2).astype(np.float32)
        self.np_c = rand(self.len1).astype(np.float32)
        self.np_d = rand(self.len2).astype(np.float32)
        self.dc_a = dc.array(list(self.np_a))
        self.dc_b = dc.array(list(self.np_b))
        self.dc_c = dc.array(list(self.np_c))
        self.dc_d = dc.array(list(self.np_d))
        self.err = ("operands could not be broadcast together with "
                    "shapes (2,5,2) (4,5,2) ")

    # deepC now uses logger instead of exceptions, so this test is not necessary
    '''
    def test_error_message (self):
        dc_a = dc.reshape(self.dc_a, (2,5,2));
        dc_b = dc.reshape(self.dc_b, (4,5,2));
        np_a = np.reshape(self.np_a,(2,5,2))
        np_b = np.reshape(self.np_b,(4,5,2))
        try:
            np_sum = np.add(np_a,np_b)
        except:
            type, val, tb = sys.exc_info()
            np_err = val.__str__()
            assert (np_err[0:65]==self.err[0:65]), "ASSERT FAILED for numpy error message"
        try:
            dc_sum = dc.add(dc_a,dc_b)
        except:
            type, val, tb = sys.exc_info()
            dc_err = val.__str__()
            assert (dc_err[0:65]==self.err[0:65]), "ASSERT FAILED for dc error message"
            assert (dc_err[0:65]==np_err[0:65]), "ASSERT FAILED for matching numpy and dc error message"
    '''

    def test_Add(self):
        """Adds a (5,4) tensor to a (2,5,4) tensor; compares with numpy."""
        np_lhs = np.reshape(self.np_a, (5, 4))
        np_rhs = np.reshape(self.np_b, (2, 5, 4))
        dc_lhs = dc.reshape(self.dc_a, (5, 4))
        dc_rhs = dc.reshape(self.dc_b, (2, 5, 4))
        expected = np.add(np_lhs, np_rhs)
        actual = dc.add(dc_lhs, dc_rhs)
        np.testing.assert_allclose(expected.flatten(),
                                   np.array(actual.data()).astype(np.float32),
                                   rtol=1e-3, atol=1e-3)

    def test_Maxof4(self):
        """Element-wise max of four mixed-rank tensors vs nested np.maximum."""
        shapes = ((5, 4), (2, 5, 4), (5, 4), (2, 5, 4))
        np_in = [np.reshape(arr, shp) for arr, shp in
                 zip((self.np_a, self.np_b, self.np_c, self.np_d), shapes)]
        dc_in = [dc.reshape(t, shp) for t, shp in
                 zip((self.dc_a, self.dc_b, self.dc_c, self.dc_d), shapes)]
        expected = np.maximum(np.maximum(np_in[0], np_in[1]),
                              np.maximum(np_in[2], np_in[3]))
        actual = dc.max(dc.vectorTensorFloat(dc_in))
        np.testing.assert_allclose(expected.flatten(),
                                   np.array(actual.data()).astype(np.float32),
                                   rtol=1e-3, atol=1e-3)

    def tearDown(self):
        return "test finished"
# Run the broadcast tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
<file_sep># Misc files
<file_sep># Papers, Poseters, Publications, Letters and Books
## deepC Papers
- Paper: [Deep Neural Network Operators](DNNC-operators-paper.pdf), appeared in [Proceedings of AITS Summit, 2019](https://www.amazon.com/Proceedings-AITS-Summit-2019-www-ai-techsystems-com-ebook/dp/B083ZJWFGT)
- Letter: [Gesture Recognition with deepC](IJCRT%20-%20Gesture%20Recognition%20with%20deepC.pdf), appeared in [INTERNATIONAL JOURNAL OF CREATIVE RESEARCH THOUGHTS]()
- Poster: [Deep Neural Network Compiler and Inference Framework for microcontrollers and microcomputers](AITS%20poster.pdf), appeared in [IRISS 2020
14th Inter-Research-Institute Student Seminar in Computer Science](https://events.iitgn.ac.in/2020/IRISS/)
## deepC Citations
- Title: [Artificial Intelligence in the IoT Era: A Review of Edge AI Hardware and Software](https://ieeexplore.ieee.org/abstract/document/9770931/)
- [Download pdf](https://fruct.org/publications/fruct31/files/Sip.pdf)
- Title: [Tiny transformers for environmental sound classification at the edge](https://arxiv.org/abs/2103.12157)
- [Download pdf](https://arxiv.org/pdf/2103.12157)
- Title: [Efficient Edge Analytics: Addressing Cyber-Physical MASINT with Machine Learning on Audio at the Edge](https://repository.lib.fit.edu/handle/11141/3223)
- [Download pdf](https://repository.lib.fit.edu/bitstream/handle/11141/3223/ELLIOTT-DISSERTATION-2020.pdf?sequence=1&isAllowed=y)
## deepC Book
1. deepC Chapter in book [Introduction to TinyML](http://thetinymlbook.com/), available on [Amazon](https://www.amazon.com/dp/B0B662D7ZW/) and other retailers
<file_sep># Loguru issues
### Error:
```
terminate called after throwing an instance of 'std::runtime_error'
what(): locale::facet::_S_create_c_locale name not valid
```
### Fix:
#### If first fix doesn't work, go for second fix
* First Fix:
- Open `~/.profile`
```
gedit ~/.profile
```
- Add the following line
```
export LC_ALL=C; unset LANGUAGE
```
- Save and Reboot.
* Second Fix (Requires SUDO):
- Open `/etc/locale.gen`
```
gedit /etc/locale.gen
```
- Uncomment the following line:
```
en_US.UTF-8 UTF-8
```
- Save(sudo) and Reboot.
### If the above methods don't fix the error, open a new issue.
# Reference
**[GithHub Issues](https://github.com/potree/PotreeConverter/issues/281)**<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
// Constant emits a fixed tensor: the `value` attribute provided at
// construction (or via setAttribute) is returned unchanged by compute().
template <typename To> class Constant : public baseOperator<To, To, To> {
  tensor<To> _data; // the constant payload (attr_value)

public:
  Constant(std::string name = "opConstant", tensor<To> data = NULL_TENSOR<To>)
      : baseOperator<To, To, To>(opConstant, name), _data(data) {}
  // Reads attr_value into obj; any other attribute name is rejected.
  bool getAttribute(OPATTR attrName, tensor<To> &obj) override {
    if (attrName != attr_value)
      return false;
    obj = _data;
    return true;
  }
  // Replaces the stored payload; only attr_value is accepted.
  bool setAttribute(OPATTR attrName, tensor<To> obj) override {
    if (attrName != attr_value)
      return false;
    _data = obj;
    return true;
  }
  // No inputs: simply hands back the stored tensor.
  tensor<To> compute(void) { return _data; }
};
} // namespace dnnc
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
def temp_flatten(x, shape, axis):
    """Reference implementation of ONNX Flatten: collapse the dims of
    `shape` before `axis` into the first output dim, the rest into -1."""
    # np.prod of an empty slice is 1, so axis == 0 needs no special case.
    lead = int(np.prod(shape[:axis]))
    return np.reshape(x, (lead, -1))
class FlattenTest(unittest.TestCase):
    """Compares dc.flatten against the numpy reference (temp_flatten) for
    bool/int/float/double inputs of rank 1-4.

    Fixes over the previous revision:
      * ``np.bool`` / ``np.int`` were removed in NumPy 1.24 and raised
        AttributeError; the supported ``np.bool_`` / ``np.int_`` scalar
        types are used instead,
      * the sixteen near-identical test bodies are factored into _check.
    """

    def setUp(self):
        self.len = 48
        self.np_bool_a = np.random.randn(self.len).astype(np.bool_)
        self.dc_bool_a = dc.array(list(self.np_bool_a))
        self.np_int_a = np.random.randn(self.len).astype(np.int_)
        self.dc_int_a = dc.array(list(self.np_int_a))
        self.np_float_a = np.random.randn(self.len).astype(np.float32)
        self.dc_float_a = dc.array(list(self.np_float_a))
        self.np_double_a = np.random.randn(self.len).astype(np.float64)
        self.dc_double_a = dc.array(list(self.np_double_a))

    def _check(self, np_a, dc_a, shape, axis, dtype, reshape=True):
        """Flatten both arrays at `axis` and compare values and shapes.

        When `reshape` is True, both inputs are first reshaped to `shape`
        (the 1-D cases pass the raw arrays with reshape=False, matching
        the original tests).
        """
        if reshape:
            np_a = np.reshape(np_a, shape)
            dc_a = dc.reshape(dc_a, shape)
        npr = temp_flatten(np_a, shape, axis)
        dcr = dc.flatten(dc_a, axis)
        np.testing.assert_allclose(npr.flatten(),
                                   np.array(dcr.data()).astype(dtype),
                                   rtol=1e-3, atol=1e-3)
        np.testing.assert_equal(npr.shape, dcr.shape())

    def test_Flatten1D_bool (self):
        self._check(self.np_bool_a, self.dc_bool_a, (1, 48), 0, np.bool_,
                    reshape=False)

    def test_Flatten1D_int (self):
        self._check(self.np_int_a, self.dc_int_a, (1, 48), 0, np.int_,
                    reshape=False)

    def test_Flatten1D_float (self):
        self._check(self.np_float_a, self.dc_float_a, (1, 48), 0, np.float32,
                    reshape=False)

    def test_Flatten1D_double (self):
        self._check(self.np_double_a, self.dc_double_a, (1, 48), 0, np.float64,
                    reshape=False)

    def test_Flatten2D_bool (self):
        self._check(self.np_bool_a, self.dc_bool_a, (8, 6), 2, np.bool_)

    def test_Flatten2D_int (self):
        self._check(self.np_int_a, self.dc_int_a, (8, 6), 2, np.int_)

    def test_Flatten2D_float (self):
        self._check(self.np_float_a, self.dc_float_a, (8, 6), 2, np.float32)

    def test_Flatten2D_double (self):
        self._check(self.np_double_a, self.dc_double_a, (8, 6), 2, np.float64)

    def test_Flatten3D_bool (self):
        self._check(self.np_bool_a, self.dc_bool_a, (4, 4, 3), 2, np.bool_)

    def test_Flatten3D_int (self):
        self._check(self.np_int_a, self.dc_int_a, (4, 4, 3), 2, np.int_)

    def test_Flatten3D_float (self):
        self._check(self.np_float_a, self.dc_float_a, (4, 4, 3), 2, np.float32)

    def test_Flatten3D_double (self):
        self._check(self.np_double_a, self.dc_double_a, (4, 4, 3), 2, np.float64)

    def test_Flatten4D_bool (self):
        self._check(self.np_bool_a, self.dc_bool_a, (4, 2, 2, 3), 3, np.bool_)

    def test_Flatten4D_int (self):
        self._check(self.np_int_a, self.dc_int_a, (4, 2, 2, 3), 3, np.int_)

    def test_Flatten4D_float (self):
        self._check(self.np_float_a, self.dc_float_a, (4, 2, 2, 3), 3, np.float32)

    def test_Flatten4D_double (self):
        self._check(self.np_double_a, self.dc_double_a, (4, 2, 2, 3), 3, np.float64)

    def tearDown(self):
        return "test finished"
# Run the flatten tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class FloorTest(unittest.TestCase):
    """Compare dc.floor against np.floor for float32 and float64 tensors
    of one to four dimensions."""

    def setUp(self):
        self.len = 48
        self.np_float_a = np.random.randn(self.len).astype(np.float32)
        self.dc_float_a = dc.array(list(self.np_float_a))
        self.np_double_a = np.random.randn(self.len).astype(np.float64)
        self.dc_double_a = dc.array(list(self.np_double_a))

    def _check_float(self, shape=None):
        # Optionally reshape both operands, apply floor on each side and
        # compare the flattened results element-wise.
        np_a, dc_a = self.np_float_a, self.dc_float_a
        if shape is not None:
            np_a = np.reshape(np_a, shape)
            dc_a = dc.reshape(dc_a, shape)
        np.testing.assert_allclose(
            np.floor(np_a).flatten(),
            np.array(dc.floor(dc_a).data()).astype(np.float32),
            rtol=1e-3, atol=1e-3)

    def _check_double(self, shape=None):
        # float64 counterpart of _check_float.
        np_a, dc_a = self.np_double_a, self.dc_double_a
        if shape is not None:
            np_a = np.reshape(np_a, shape)
            dc_a = dc.reshape(dc_a, shape)
        np.testing.assert_allclose(
            np.floor(np_a).flatten(),
            np.array(dc.floor(dc_a).data()).astype(np.float64),
            rtol=1e-3, atol=1e-3)

    def test_Floor1D_float(self):
        self._check_float()

    def test_Floor1D_double(self):
        self._check_double()

    def test_Floor2D_float_1(self):
        self._check_float((3, 16))

    def test_Floor2D_float_2(self):
        self._check_float((6, 8))

    def test_Floor2D_float_3(self):
        self._check_float((12, 4))

    def test_Floor2D_double_1(self):
        self._check_double((3, 16))

    def test_Floor2D_double_2(self):
        self._check_double((6, 8))

    def test_Floor2D_double_3(self):
        self._check_double((12, 4))

    def test_Floor3D_float_1(self):
        self._check_float((4, 4, 3))

    def test_Floor3D_float_2(self):
        self._check_float((8, 2, 3))

    def test_Floor3D_float_3(self):
        self._check_float((2, 4, 6))

    def test_Floor3D_double_1(self):
        self._check_double((4, 4, 3))

    def test_Floor3D_double_2(self):
        self._check_double((8, 2, 3))

    def test_Floor3D_double_3(self):
        self._check_double((2, 4, 6))

    def test_Floor4D_float(self):
        self._check_float((4, 2, 2, 3))

    def test_Floor4D_double(self):
        self._check_double((4, 2, 2, 3))

    def tearDown(self):
        # Return value is ignored by unittest; kept for suite-wide parity.
        return "test finished"
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class DivTest(unittest.TestCase):
    """Element-wise division: dc.div vs. numpy.true_divide for int, float32
    and float64 operands in one to four dimensions.

    Fix: the ``np.int`` alias used by the original code was deprecated in
    NumPy 1.20 and removed in NumPy 1.24, so ``setUp`` raised
    ``AttributeError`` on modern NumPy.  The builtin ``int`` is the exact
    drop-in replacement (``np.int`` was an alias for it).
    """

    def setUp(self):
        self.len = 48
        # randn() yields standard-normal floats; truncating to int keeps the
        # original integer test data (mostly -1, 0 and 1 — zeros appear on
        # both the numpy and the dnnc side alike).
        self.np_int_a = np.random.randn(self.len).astype(int)
        self.np_int_b = np.random.randn(self.len).astype(int)
        self.dc_int_a = dc.array(list(self.np_int_a))
        self.dc_int_b = dc.array(list(self.np_int_b))
        self.np_float_a = np.random.randn(self.len).astype(np.float32)
        self.np_float_b = np.random.randn(self.len).astype(np.float32)
        self.dc_float_a = dc.array(list(self.np_float_a))
        self.dc_float_b = dc.array(list(self.np_float_b))
        self.np_double_a = np.random.randn(self.len).astype(np.float64)
        self.np_double_b = np.random.randn(self.len).astype(np.float64)
        self.dc_double_a = dc.array(list(self.np_double_a))
        self.dc_double_b = dc.array(list(self.np_double_b))

    def _check(self, npr, dcr, dtype):
        """Compare the flattened numpy reference against the dnnc result,
        cast to *dtype* exactly as the original assertions did."""
        np.testing.assert_allclose(npr.flatten(),
                                   np.array(dcr.data()).astype(dtype),
                                   rtol=1e-3, atol=1e-3)

    def test_Div1D_int(self):
        self._check(np.true_divide(self.np_int_a, self.np_int_b),
                    dc.div(self.dc_int_a, self.dc_int_b), int)

    def test_Div1D_float(self):
        self._check(np.true_divide(self.np_float_a, self.np_float_b),
                    dc.div(self.dc_float_a, self.dc_float_b), np.float32)

    def test_Div1D_double(self):
        self._check(np.true_divide(self.np_double_a, self.np_double_b),
                    dc.div(self.dc_double_a, self.dc_double_b), np.float64)

    def test_Div2D_int(self):
        np_a = np.reshape(self.np_int_a, (6, 8))
        np_b = np.reshape(self.np_int_b, (6, 8))
        dc_a = dc.reshape(self.dc_int_a, (6, 8))
        dc_b = dc.reshape(self.dc_int_b, (6, 8))
        self._check(np.true_divide(np_a, np_b), dc.div(dc_a, dc_b), int)

    def test_Div2D_float(self):
        np_a = np.reshape(self.np_float_a, (6, 8))
        np_b = np.reshape(self.np_float_b, (6, 8))
        dc_a = dc.reshape(self.dc_float_a, (6, 8))
        dc_b = dc.reshape(self.dc_float_b, (6, 8))
        self._check(np.true_divide(np_a, np_b), dc.div(dc_a, dc_b), np.float32)

    def test_Div2D_double(self):
        np_a = np.reshape(self.np_double_a, (6, 8))
        np_b = np.reshape(self.np_double_b, (6, 8))
        dc_a = dc.reshape(self.dc_double_a, (6, 8))
        dc_b = dc.reshape(self.dc_double_b, (6, 8))
        self._check(np.true_divide(np_a, np_b), dc.div(dc_a, dc_b), np.float64)

    def test_Div3D_int(self):
        np_a = np.reshape(self.np_int_a, (4, 4, 3))
        np_b = np.reshape(self.np_int_b, (4, 4, 3))
        dc_a = dc.reshape(self.dc_int_a, (4, 4, 3))
        dc_b = dc.reshape(self.dc_int_b, (4, 4, 3))
        self._check(np.true_divide(np_a, np_b), dc.div(dc_a, dc_b), int)

    def test_Div3D_float(self):
        np_a = np.reshape(self.np_float_a, (4, 4, 3))
        np_b = np.reshape(self.np_float_b, (4, 4, 3))
        dc_a = dc.reshape(self.dc_float_a, (4, 4, 3))
        dc_b = dc.reshape(self.dc_float_b, (4, 4, 3))
        self._check(np.true_divide(np_a, np_b), dc.div(dc_a, dc_b), np.float32)

    def test_Div3D_double(self):
        np_a = np.reshape(self.np_double_a, (4, 4, 3))
        np_b = np.reshape(self.np_double_b, (4, 4, 3))
        dc_a = dc.reshape(self.dc_double_a, (4, 4, 3))
        dc_b = dc.reshape(self.dc_double_b, (4, 4, 3))
        self._check(np.true_divide(np_a, np_b), dc.div(dc_a, dc_b), np.float64)

    def test_Div4D_int(self):
        np_a = np.reshape(self.np_int_a, (4, 2, 2, 3))
        np_b = np.reshape(self.np_int_b, (4, 2, 2, 3))
        dc_a = dc.reshape(self.dc_int_a, (4, 2, 2, 3))
        dc_b = dc.reshape(self.dc_int_b, (4, 2, 2, 3))
        self._check(np.true_divide(np_a, np_b), dc.div(dc_a, dc_b), int)

    def test_Div4D_float(self):
        np_a = np.reshape(self.np_float_a, (4, 2, 2, 3))
        np_b = np.reshape(self.np_float_b, (4, 2, 2, 3))
        dc_a = dc.reshape(self.dc_float_a, (4, 2, 2, 3))
        dc_b = dc.reshape(self.dc_float_b, (4, 2, 2, 3))
        self._check(np.true_divide(np_a, np_b), dc.div(dc_a, dc_b), np.float32)

    def test_Div4D_double(self):
        np_a = np.reshape(self.np_double_a, (4, 2, 2, 3))
        np_b = np.reshape(self.np_double_b, (4, 2, 2, 3))
        dc_a = dc.reshape(self.dc_double_a, (4, 2, 2, 3))
        dc_b = dc.reshape(self.dc_double_b, (4, 2, 2, 3))
        self._check(np.true_divide(np_a, np_b), dc.div(dc_a, dc_b), np.float64)

    def tearDown(self):
        # Return value is ignored by unittest; kept for suite-wide parity.
        return "test finished"
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class SubTest(unittest.TestCase):
    """Element-wise subtraction: dc.sub vs. numpy.subtract across mixed
    bool/int/float32/float64 operand combinations, in 1 to 4 dimensions.

    Fix: the ``np.bool`` and ``np.int`` aliases used by the original code
    were deprecated in NumPy 1.20 and removed in NumPy 1.24, so ``setUp``
    raised ``AttributeError`` on modern NumPy.  The builtins ``bool`` and
    ``int`` are the exact drop-in replacements (the aliases pointed at
    them).
    """

    def setUp(self):
        self.len = 48
        self.np_bool_a = np.random.randn(self.len).astype(bool)
        self.np_bool_b = np.random.randn(self.len).astype(bool)
        self.dc_bool_a = dc.array(list(self.np_bool_a))
        self.dc_bool_b = dc.array(list(self.np_bool_b))
        self.np_int_a = np.random.randn(self.len).astype(int)
        self.np_int_b = np.random.randn(self.len).astype(int)
        self.dc_int_a = dc.array(list(self.np_int_a))
        self.dc_int_b = dc.array(list(self.np_int_b))
        self.np_float_a = np.random.randn(self.len).astype(np.float32)
        self.np_float_b = np.random.randn(self.len).astype(np.float32)
        self.dc_float_a = dc.array(list(self.np_float_a))
        self.dc_float_b = dc.array(list(self.np_float_b))
        self.np_double_a = np.random.randn(self.len).astype(np.float64)
        self.np_double_b = np.random.randn(self.len).astype(np.float64)
        self.dc_double_a = dc.array(list(self.np_double_a))
        self.dc_double_b = dc.array(list(self.np_double_b))

    def _check(self, npr, dcr):
        """Compare the flattened numpy reference against the dnnc result."""
        np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()),
                                   rtol=1e-3, atol=1e-3)

    def _check_nd(self, np_a, np_b, dc_a, dc_b, shape):
        """Reshape both operand pairs to *shape*, subtract and compare."""
        self._check(
            np.subtract(np.reshape(np_a, shape), np.reshape(np_b, shape)),
            dc.sub(dc.reshape(dc_a, shape), dc.reshape(dc_b, shape)))

    def test_Sub1D_bool_float(self):
        self._check(np.subtract(self.np_bool_a, self.np_float_b),
                    dc.sub(self.dc_bool_a, self.dc_float_b))

    def test_Sub1D_float_bool(self):
        self._check(np.subtract(self.np_float_a, self.np_bool_b),
                    dc.sub(self.dc_float_a, self.dc_bool_b))

    def test_Sub1D_bool_double(self):
        self._check(np.subtract(self.np_bool_a, self.np_double_b),
                    dc.sub(self.dc_bool_a, self.dc_double_b))

    def test_Sub1D_double_bool(self):
        self._check(np.subtract(self.np_double_a, self.np_bool_b),
                    dc.sub(self.dc_double_a, self.dc_bool_b))

    def test_Sub1D_int_int(self):
        self._check(np.subtract(self.np_int_a, self.np_int_b),
                    dc.sub(self.dc_int_a, self.dc_int_b))

    def test_Sub1D_int_float(self):
        self._check(np.subtract(self.np_int_a, self.np_float_b),
                    dc.sub(self.dc_int_a, self.dc_float_b))

    def test_Sub1D_float_int(self):
        self._check(np.subtract(self.np_float_a, self.np_int_b),
                    dc.sub(self.dc_float_a, self.dc_int_b))

    def test_Sub1D_int_double(self):
        self._check(np.subtract(self.np_int_a, self.np_double_b),
                    dc.sub(self.dc_int_a, self.dc_double_b))

    def test_Sub1D_double_int(self):
        self._check(np.subtract(self.np_double_a, self.np_int_b),
                    dc.sub(self.dc_double_a, self.dc_int_b))

    def test_Sub1D_float_double(self):
        self._check(np.subtract(self.np_float_a, self.np_double_b),
                    dc.sub(self.dc_float_a, self.dc_double_b))

    def test_Sub1D_double_float(self):
        self._check(np.subtract(self.np_double_a, self.np_float_b),
                    dc.sub(self.dc_double_a, self.dc_float_b))

    def test_Sub1D_float_float(self):
        self._check(np.subtract(self.np_float_a, self.np_float_b),
                    dc.sub(self.dc_float_a, self.dc_float_b))

    def test_Sub1D_double_double(self):
        self._check(np.subtract(self.np_double_a, self.np_double_b),
                    dc.sub(self.dc_double_a, self.dc_double_b))

    def test_Sub2D_int_int(self):
        self._check_nd(self.np_int_a, self.np_int_b,
                       self.dc_int_a, self.dc_int_b, (6, 8))

    def test_Sub2D_float_float(self):
        self._check_nd(self.np_float_a, self.np_float_b,
                       self.dc_float_a, self.dc_float_b, (6, 8))

    def test_Sub2D_double_double(self):
        self._check_nd(self.np_double_a, self.np_double_b,
                       self.dc_double_a, self.dc_double_b, (6, 8))

    def test_Sub3D_int_int(self):
        self._check_nd(self.np_int_a, self.np_int_b,
                       self.dc_int_a, self.dc_int_b, (4, 4, 3))

    def test_Sub3D_float_float(self):
        self._check_nd(self.np_float_a, self.np_float_b,
                       self.dc_float_a, self.dc_float_b, (4, 4, 3))

    def test_Sub3D_double_double(self):
        self._check_nd(self.np_double_a, self.np_double_b,
                       self.dc_double_a, self.dc_double_b, (4, 4, 3))

    def test_Sub4D_int_int(self):
        self._check_nd(self.np_int_a, self.np_int_b,
                       self.dc_int_a, self.dc_int_b, (4, 2, 2, 3))

    def test_Sub4D_float_float(self):
        self._check_nd(self.np_float_a, self.np_float_b,
                       self.dc_float_a, self.dc_float_b, (4, 2, 2, 3))

    def test_Sub4D_double_double(self):
        self._check_nd(self.np_double_a, self.np_double_b,
                       self.dc_double_a, self.dc_double_b, (4, 2, 2, 3))

    def tearDown(self):
        # Return value is ignored by unittest; kept for suite-wide parity.
        return "test finished"
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
<file_sep># Copyright 2018 The DNNC Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
#
############################
# Description:
# DNNC CPP FIle generator
#############################
import os, sys
import deepC.dnnc as dnnc
import deepC.compiler.read_onnx as read_onnx
class dnncCpp:
    """Write a C++ source file for a given DNNC graph."""

    def __init__(self):
        self.deleteMe = ""

    def main(self, dc_graph, bundle_dir, cpp_file):
        """Generate the C++ code for *dc_graph* into bundle_dir/cpp_file."""
        target = bundle_dir + os.path.sep + cpp_file
        print("Writing C++ file ", target)
        generator = dnnc.cppCodeGen(dc_graph, bundle_dir, cpp_file)
        generator.write()
def main():
    """Command-line entry point: convert an ONNX model into a C++ file.

    Usage: <prog> <onnx_model_file>.onnx [bundle_dir]

    Returns:
        (bundle_dir, cpp_file): directory and file name of the generated
        C++ source.
    """
    onnx_file = None
    if len(sys.argv) >= 2:
        onnx_file = sys.argv[1]
    if onnx_file is None:
        print("\nUsage: " + sys.argv[0] + " <onnx_model_file>.onnx [bundle_dir] \n")
        # Fix: use sys.exit instead of the site-provided exit() builtin,
        # which is not guaranteed to exist (e.g. under `python -S`).
        sys.exit(0)
    bundle_dir = None
    if len(sys.argv) >= 3:
        bundle_dir = sys.argv[2]
    else:
        bundle_dir = os.path.dirname(onnx_file)
    # Fall back to the current directory when the derived path is empty
    # (e.g. a bare file name was given).
    if not bundle_dir:
        bundle_dir = os.getcwd()
    cpp_file = os.path.splitext(os.path.basename(onnx_file))[0] + '.cpp'
    # Parse the ONNX protobuf into a DNNC graph, then emit C++ for it.
    parser = read_onnx.pbReader()
    dcGraph = parser.main(onnx_file, bundle_dir, optimize=False, checker=False)
    cppCodeGen = dnncCpp()
    cppCodeGen.main(dcGraph, bundle_dir, cpp_file)
    print("INFO (ONNX): model files are ready in dir " + bundle_dir)
    return (bundle_dir, cpp_file)
if __name__ == "__main__":
    # Fix: main() returns a (bundle_dir, cpp_file) tuple.  Passing that to
    # sys.exit() prints the tuple to stderr and exits with status 1, so
    # every successful run looked like a failure.  Call main() and let the
    # interpreter exit 0 normally.
    main()
<file_sep># generate split test header file only if it does not yet exist
# in order to prevent a rebuild every time cmake is configured
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
  file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
  foreach(i RANGE 1 999)
    file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h
      "#ifdef EIGEN_TEST_PART_${i}\n"
      "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n"
      "#else\n"
      "#define CALL_SUBTEST_${i}(FUNC)\n"
      "#endif\n\n"
    )
  endforeach()
endif()

# check if we have a Fortran compiler
include("../cmake/language_support.cmake")

workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS)

if(EIGEN_Fortran_COMPILER_WORKS)
  enable_language(Fortran OPTIONAL)
  if(NOT CMAKE_Fortran_COMPILER)
    set(EIGEN_Fortran_COMPILER_WORKS OFF)
  endif()
endif()

if(NOT EIGEN_Fortran_COMPILER_WORKS)
  # search for a default Lapack library to complete Eigen's one
  find_package(LAPACK QUIET)
endif()

# configure blas/lapack (use Eigen's ones)
set(EIGEN_BLAS_LIBRARIES eigen_blas)
set(EIGEN_LAPACK_LIBRARIES eigen_lapack)

# Fix: corrected the user-visible "realword" typo in the cache docstring.
set(EIGEN_TEST_MATRIX_DIR "" CACHE STRING "Enable testing of realworld sparse matrices contained in the specified path")
if(EIGEN_TEST_MATRIX_DIR)
  if(NOT WIN32)
    message(STATUS "Test realworld sparse matrices: ${EIGEN_TEST_MATRIX_DIR}")
    add_definitions( -DTEST_REAL_CASES="${EIGEN_TEST_MATRIX_DIR}" )
  else(NOT WIN32)
    message(STATUS "REAL CASES CAN NOT BE CURRENTLY TESTED ON WIN32")
  endif(NOT WIN32)
endif(EIGEN_TEST_MATRIX_DIR)

set(SPARSE_LIBS " ")

# Optional sparse backends: each one, when found, defines its support macro,
# extends SPARSE_LIBS and records itself in the tested/missing backend list.
find_package(Cholmod)
if(CHOLMOD_FOUND)
  add_definitions("-DEIGEN_CHOLMOD_SUPPORT")
  include_directories(${CHOLMOD_INCLUDES})
  set(SPARSE_LIBS ${SPARSE_LIBS} ${CHOLMOD_LIBRARIES} ${EIGEN_BLAS_LIBRARIES} ${EIGEN_LAPACK_LIBRARIES})
  set(CHOLMOD_ALL_LIBS ${CHOLMOD_LIBRARIES} ${EIGEN_BLAS_LIBRARIES} ${EIGEN_LAPACK_LIBRARIES})
  ei_add_property(EIGEN_TESTED_BACKENDS "Cholmod, ")
else()
  ei_add_property(EIGEN_MISSING_BACKENDS "Cholmod, ")
endif()

find_package(Umfpack)
if(UMFPACK_FOUND)
  add_definitions("-DEIGEN_UMFPACK_SUPPORT")
  include_directories(${UMFPACK_INCLUDES})
  set(SPARSE_LIBS ${SPARSE_LIBS} ${UMFPACK_LIBRARIES} ${EIGEN_BLAS_LIBRARIES})
  set(UMFPACK_ALL_LIBS ${UMFPACK_LIBRARIES} ${EIGEN_BLAS_LIBRARIES})
  ei_add_property(EIGEN_TESTED_BACKENDS "UmfPack, ")
else()
  ei_add_property(EIGEN_MISSING_BACKENDS "UmfPack, ")
endif()

find_package(SuperLU 4.0)
if(SUPERLU_FOUND)
  add_definitions("-DEIGEN_SUPERLU_SUPPORT")
  include_directories(${SUPERLU_INCLUDES})
  set(SPARSE_LIBS ${SPARSE_LIBS} ${SUPERLU_LIBRARIES} ${EIGEN_BLAS_LIBRARIES})
  set(SUPERLU_ALL_LIBS ${SUPERLU_LIBRARIES} ${EIGEN_BLAS_LIBRARIES})
  ei_add_property(EIGEN_TESTED_BACKENDS "SuperLU, ")
else()
  ei_add_property(EIGEN_MISSING_BACKENDS "SuperLU, ")
endif()

find_package(PASTIX QUIET COMPONENTS METIS SCOTCH)
# check that the PASTIX found is a version without MPI
find_path(PASTIX_pastix_nompi.h_INCLUDE_DIRS
  NAMES pastix_nompi.h
  HINTS ${PASTIX_INCLUDE_DIRS}
)
if (NOT PASTIX_pastix_nompi.h_INCLUDE_DIRS)
  message(STATUS "A version of Pastix has been found but pastix_nompi.h does not exist in the include directory."
                 " Because Eigen tests require a version without MPI, we disable the Pastix backend.")
endif()
if(PASTIX_FOUND AND PASTIX_pastix_nompi.h_INCLUDE_DIRS)
  add_definitions("-DEIGEN_PASTIX_SUPPORT")
  include_directories(${PASTIX_INCLUDE_DIRS_DEP})
  if(SCOTCH_FOUND)
    include_directories(${SCOTCH_INCLUDE_DIRS})
    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${SCOTCH_LIBRARIES})
  elseif(METIS_FOUND)
    include_directories(${METIS_INCLUDE_DIRS})
    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${METIS_LIBRARIES})
  else(SCOTCH_FOUND)
    ei_add_property(EIGEN_MISSING_BACKENDS "PaStiX, ")
  endif(SCOTCH_FOUND)
  set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES_DEP} ${ORDERING_LIBRARIES})
  set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES_DEP})
  ei_add_property(EIGEN_TESTED_BACKENDS "PaStiX, ")
else()
  ei_add_property(EIGEN_MISSING_BACKENDS "PaStiX, ")
endif()

if(METIS_FOUND)
  add_definitions("-DEIGEN_METIS_SUPPORT")
  include_directories(${METIS_INCLUDE_DIRS})
  ei_add_property(EIGEN_TESTED_BACKENDS "METIS, ")
else()
  ei_add_property(EIGEN_MISSING_BACKENDS "METIS, ")
endif()

find_package(SPQR)
if(SPQR_FOUND AND CHOLMOD_FOUND AND (EIGEN_Fortran_COMPILER_WORKS OR LAPACK_FOUND) )
  add_definitions("-DEIGEN_SPQR_SUPPORT")
  include_directories(${SPQR_INCLUDES})
  set(SPQR_ALL_LIBS ${SPQR_LIBRARIES} ${CHOLMOD_LIBRARIES} ${EIGEN_LAPACK_LIBRARIES} ${EIGEN_BLAS_LIBRARIES} ${LAPACK_LIBRARIES})
  set(SPARSE_LIBS ${SPARSE_LIBS} ${SPQR_ALL_LIBS})
  ei_add_property(EIGEN_TESTED_BACKENDS "SPQR, ")
else()
  ei_add_property(EIGEN_MISSING_BACKENDS "SPQR, ")
endif()

option(EIGEN_TEST_NOQT "Disable Qt support in unit tests" OFF)
if(NOT EIGEN_TEST_NOQT)
  find_package(Qt4)
  if(QT4_FOUND)
    include(${QT_USE_FILE})
    ei_add_property(EIGEN_TESTED_BACKENDS "Qt4 support, ")
  else()
    ei_add_property(EIGEN_MISSING_BACKENDS "Qt4 support, ")
  endif()
endif(NOT EIGEN_TEST_NOQT)

if(TEST_LIB)
  add_definitions("-DEIGEN_EXTERN_INSTANTIATIONS=1")
endif(TEST_LIB)

set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Official")
add_custom_target(BuildOfficial)

ei_add_test(rand)
ei_add_test(meta)
ei_add_test(numext)
ei_add_test(sizeof)
ei_add_test(dynalloc)
ei_add_test(nomalloc)
ei_add_test(first_aligned)
ei_add_test(nullary)
ei_add_test(mixingtypes)
ei_add_test(packetmath "-DEIGEN_FAST_MATH=1")
ei_add_test(unalignedassert)
ei_add_test(vectorization_logic)
ei_add_test(basicstuff)
ei_add_test(constructor)
ei_add_test(linearstructure)
ei_add_test(integer_types)
ei_add_test(unalignedcount)
if(NOT EIGEN_TEST_NO_EXCEPTIONS)
  ei_add_test(exceptions)
endif()
ei_add_test(redux)
ei_add_test(visitor)
ei_add_test(block)
ei_add_test(corners)
ei_add_test(swap)
ei_add_test(resize)
ei_add_test(conservative_resize)
ei_add_test(product_small)
ei_add_test(product_large)
ei_add_test(product_extra)
ei_add_test(diagonalmatrices)
ei_add_test(adjoint)
ei_add_test(diagonal)
ei_add_test(miscmatrices)
ei_add_test(commainitializer)
ei_add_test(smallvectors)
ei_add_test(mapped_matrix)
ei_add_test(mapstride)
ei_add_test(mapstaticmethods)
ei_add_test(array)
ei_add_test(array_for_matrix)
ei_add_test(array_replicate)
ei_add_test(array_reverse)
ei_add_test(ref)
ei_add_test(is_same_dense)
ei_add_test(triangular)
ei_add_test(selfadjoint)
ei_add_test(product_selfadjoint)
ei_add_test(product_symm)
ei_add_test(product_syrk)
ei_add_test(product_trmv)
ei_add_test(product_trmm)
ei_add_test(product_trsolve)
ei_add_test(product_mmtr)
ei_add_test(product_notemporary)
ei_add_test(stable_norm)
ei_add_test(permutationmatrices)
ei_add_test(bandmatrix)
ei_add_test(cholesky)
ei_add_test(lu)
ei_add_test(determinant)
ei_add_test(inverse)
ei_add_test(qr)
ei_add_test(qr_colpivoting)
ei_add_test(qr_fullpivoting)
ei_add_test(upperbidiagonalization)
ei_add_test(hessenberg)
ei_add_test(schur_real)
ei_add_test(schur_complex)
ei_add_test(eigensolver_selfadjoint)
ei_add_test(eigensolver_generic)
ei_add_test(eigensolver_complex)
ei_add_test(real_qz)
ei_add_test(eigensolver_generalized_real)
ei_add_test(jacobi)
ei_add_test(jacobisvd)
ei_add_test(bdcsvd)
ei_add_test(householder)
ei_add_test(geo_orthomethods)
ei_add_test(geo_quaternion)
ei_add_test(geo_eulerangles)
ei_add_test(geo_parametrizedline)
ei_add_test(geo_alignedbox)
ei_add_test(geo_hyperplane)
ei_add_test(geo_transformations)
ei_add_test(geo_homogeneous)
ei_add_test(stdvector)
ei_add_test(stdvector_overload)
ei_add_test(stdlist)
ei_add_test(stdlist_overload)
ei_add_test(stddeque)
ei_add_test(stddeque_overload)
ei_add_test(sparse_basic)
ei_add_test(sparse_block)
ei_add_test(sparse_vector)
ei_add_test(sparse_product)
ei_add_test(sparse_ref)
ei_add_test(sparse_solvers)
ei_add_test(sparse_permutations)
ei_add_test(simplicial_cholesky)
ei_add_test(conjugate_gradient)
ei_add_test(incomplete_cholesky)
ei_add_test(bicgstab)
ei_add_test(lscg)
ei_add_test(sparselu)
ei_add_test(sparseqr)
ei_add_test(umeyama)
ei_add_test(nesting_ops "${CMAKE_CXX_FLAGS_DEBUG}")
ei_add_test(zerosized)
ei_add_test(dontalign)
ei_add_test(evaluators)
if(NOT EIGEN_TEST_NO_EXCEPTIONS)
  ei_add_test(sizeoverflow)
endif()
ei_add_test(prec_inverse_4x4)
ei_add_test(vectorwiseop)
ei_add_test(special_numbers)
ei_add_test(rvalue_types)
ei_add_test(dense_storage)
ei_add_test(ctorleak)
ei_add_test(mpl2only)
ei_add_test(inplace_decomposition)
ei_add_test(half_float)
ei_add_test(array_of_string)

add_executable(bug1213 bug1213.cpp bug1213_main.cpp)

check_cxx_compiler_flag("-ffast-math" COMPILER_SUPPORT_FASTMATH)
if(COMPILER_SUPPORT_FASTMATH)
  set(EIGEN_FASTMATH_FLAGS "-ffast-math")
else()
  check_cxx_compiler_flag("/fp:fast" COMPILER_SUPPORT_FPFAST)
  if(COMPILER_SUPPORT_FPFAST)
    set(EIGEN_FASTMATH_FLAGS "/fp:fast")
  endif()
endif()

ei_add_test(fastmath " ${EIGEN_FASTMATH_FLAGS} ")

# # ei_add_test(denseLM)

if(QT4_FOUND)
  ei_add_test(qtvector "" "${QT_QTCORE_LIBRARY}")
endif(QT4_FOUND)

if(UMFPACK_FOUND)
  ei_add_test(umfpack_support "" "${UMFPACK_ALL_LIBS}")
endif()

if(SUPERLU_FOUND)
  ei_add_test(superlu_support "" "${SUPERLU_ALL_LIBS}")
endif()

if(CHOLMOD_FOUND)
  ei_add_test(cholmod_support "" "${CHOLMOD_ALL_LIBS}")
endif()

if(PARDISO_FOUND)
  ei_add_test(pardiso_support "" "${PARDISO_ALL_LIBS}")
endif()

if(PASTIX_FOUND AND (SCOTCH_FOUND OR METIS_FOUND))
  ei_add_test(pastix_support "" "${PASTIX_ALL_LIBS}")
endif()

if(SPQR_FOUND AND CHOLMOD_FOUND)
  ei_add_test(spqr_support "" "${SPQR_ALL_LIBS}")
endif()

if(METIS_FOUND)
  ei_add_test(metis_support "" "${METIS_LIBRARIES}")
endif()

string(TOLOWER "${CMAKE_CXX_COMPILER}" cmake_cxx_compiler_tolower)
if(cmake_cxx_compiler_tolower MATCHES "qcc")
  set(CXX_IS_QCC "ON")
endif()

ei_add_property(EIGEN_TESTING_SUMMARY "CXX: ${CMAKE_CXX_COMPILER}\n")
if(CMAKE_COMPILER_IS_GNUCXX AND NOT CXX_IS_QCC)
  execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version COMMAND head -n 1 OUTPUT_VARIABLE EIGEN_CXX_VERSION_STRING OUTPUT_STRIP_TRAILING_WHITESPACE)
  ei_add_property(EIGEN_TESTING_SUMMARY "CXX_VERSION: ${EIGEN_CXX_VERSION_STRING}\n")
endif()
ei_add_property(EIGEN_TESTING_SUMMARY "CXX_FLAGS: ${CMAKE_CXX_FLAGS}\n")
ei_add_property(EIGEN_TESTING_SUMMARY "Sparse lib flags: ${SPARSE_LIBS}\n")

option(EIGEN_TEST_EIGEN2 "Run whole Eigen2 test suite against EIGEN2_SUPPORT" OFF)
mark_as_advanced(EIGEN_TEST_EIGEN2)
if(EIGEN_TEST_EIGEN2)
  message(WARNING "The Eigen2 test suite has been removed")
endif()

# boost MP unit test
find_package(Boost)
if(Boost_FOUND)
  include_directories(${Boost_INCLUDE_DIRS})
  ei_add_test(boostmultiprec "" "${Boost_LIBRARIES}")
  ei_add_property(EIGEN_TESTED_BACKENDS "Boost.Multiprecision, ")
else()
  ei_add_property(EIGEN_MISSING_BACKENDS "Boost.Multiprecision, ")
endif()

# CUDA unit tests
option(EIGEN_TEST_CUDA "Enable CUDA support in unit tests" OFF)
option(EIGEN_TEST_CUDA_CLANG "Use clang instead of nvcc to compile the CUDA tests" OFF)

if(EIGEN_TEST_CUDA_CLANG AND NOT CMAKE_CXX_COMPILER MATCHES "clang")
  message(WARNING "EIGEN_TEST_CUDA_CLANG is set, but CMAKE_CXX_COMPILER does not appear to be clang.")
endif()

if(EIGEN_TEST_CUDA)
  find_package(CUDA 5.0)
  if(CUDA_FOUND)
    set(CUDA_PROPAGATE_HOST_FLAGS OFF)
    if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
      set(CUDA_NVCC_FLAGS "-ccbin ${CMAKE_C_COMPILER}" CACHE STRING "nvcc flags" FORCE)
    endif()
    if(EIGEN_TEST_CUDA_CLANG)
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 --cuda-gpu-arch=sm_30")
    endif()
    cuda_include_directories(${CMAKE_CURRENT_BINARY_DIR})
    set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
    ei_add_test(cuda_basic)
    unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
  endif(CUDA_FOUND)
endif(EIGEN_TEST_CUDA)

file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/failtests)
add_test(NAME failtests WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/failtests COMMAND ${CMAKE_COMMAND} ${Eigen_SOURCE_DIR} -G "${CMAKE_GENERATOR}" -DEIGEN_FAILTEST=ON)

option(EIGEN_TEST_BUILD_DOCUMENTATION "Test building the doxygen documentation" OFF)
IF(EIGEN_TEST_BUILD_DOCUMENTATION)
  add_dependencies(buildtests doc)
ENDIF()
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! The General Matrix Multiplication formula is*/
/*! \f$ Y=\alpha\;A'\;B'+\beta\;C\f$ */
/*! Where */
/*! \f$ A'=transpose(A)\;,\;\;if\;A_{trans}=1\;;\\A'=A\;,\;\;if\;A_{trans}=0 \f$
*/
/*! And */
/*! \f$ B'=transpose(B)\;,\;\;if\;B_{trans}=1\;;\\B'=B\;,\;\;if\;B_{trans}=0 \f$
*/
/*! Input tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N)
or (N, K), input tensor C is broadcastable to shape (M, N), and output tensor Y
has shape (M, N). A will be transposed before doing the computation if attribute
transA is non-zero, same for B and transB.\n This operator supports
unidirectional broadcasting (tensor C should be
unidirectional broadcastable to tensor A * B)*/
template <typename To, typename Ti1, typename Ti2>
class Gemm : public baseOperator<To, Ti1, Ti2> {
protected:
  float alpha =
      1.0; /*!< Scalar multiplier for the product of input tensors A * B */
  float beta = 1.0; /*!< Scalar multiplier for input tensor C */
  int transA = 0;   /*!< Whether A should be transposed */
  int transB = 0;   /*!< Whether B should be transposed */

  /*! Return {v[1], v[0]} for a rank-2 shape; shapes of any other rank are
   *  returned unchanged. */
  inline std::vector<size_t> swap0And1(std::vector<size_t> v) {
    return (v.size() == 2) ? std::vector<size_t>({v[1], v[0]}) : v;
  }

public:
  /*! Construct a Gemm operator; attribute defaults (alpha=1, beta=1,
   *  transA=0, transB=0) follow the ONNX Gemm specification. */
  Gemm(std::string name = "opGemm", float alpha = 1.0, float beta = 1.0,
       int transA = 0, int transB = 0)
      : baseOperator<To, Ti1, Ti2>(opGemm, name) {
    this->alpha = alpha;
    this->beta = beta;
    this->transA = transA;
    this->transB = transB;
  }
  /*! Read an integer attribute (transA / transB). Returns false for any
   *  attribute this operator does not carry. */
  bool getAttribute(OPATTR attrName, int &obj) override {
    if (attrName == attr_transA) {
      obj = transA;
      return true;
    } else if (attrName == attr_transB) {
      obj = transB;
      return true;
    }
    return false;
  }
  /*! Read a float attribute (alpha / beta). */
  bool getAttribute(OPATTR attrName, float &obj) override {
    if (attrName == attr_alpha) {
      obj = alpha;
      return true;
    } else if (attrName == attr_beta) {
      obj = beta;
      return true;
    }
    return false;
  }
  /*! Set an integer attribute (transA / transB). */
  bool setAttribute(OPATTR attrName, int obj) override {
    if (attrName == attr_transA) {
      transA = obj;
      return true;
    } else if (attrName == attr_transB) {
      transB = obj;
      return true;
    }
    return false;
  }
  /*! Set a float attribute (alpha / beta). */
  bool setAttribute(OPATTR attrName, float obj) override {
    if (attrName == attr_alpha) {
      alpha = obj;
      return true;
    } else if (attrName == attr_beta) {
      beta = obj;
      return true;
    }
    return false;
  }
  /*! Compute Y = alpha * A' * B' + beta * C (A'/B' optionally transposed).
   *  Rank-1 inputs are promoted to rank-2 row/column vectors before the
   *  multiplication; C is unidirectionally broadcast to (M, N). */
  tensor<To> compute(tensor<Ti1> a/*!<Input tensor A. The shape of A should be (M, K)
                    if \f$ A_{trans} \f$ is 0, or (K, M) if \f$ A_{trans} \f$
                    is non-zero.*/,
                     tensor<Ti1> b/*!<Input tensor B. The shape of B should be (K, N)
                    if \f$ B_{trans} \f$ is 0, or (N, K) if \f$ B_{trans} \f$
                    is non-zero.*/,
                     tensor<Ti1> c/*!<Input tensor C. The shape of C should be
                    unidirectional broadcastable to (M, N)*/) override {
    // Effective (post-transpose) shapes used for the rank-promotion logic.
    std::vector<size_t> aShape = transA ? swap0And1(a.shape()) : a.shape();
    std::vector<size_t> bShape = transB ? swap0And1(b.shape()) : b.shape();
    // Promote a rank-1 A to a row or column vector compatible with B.
    if (a.rank() == 1 && b.rank() == 2) {
      if (aShape[0] == bShape[0])
        aShape = {1, aShape[0]};
      else if (bShape[0] == 1)
        aShape = {aShape[0], 1};
      // Undo the logical transpose before reshaping the stored tensor.
      aShape = transA ? swap0And1(aShape) : aShape;
      a.reshape(aShape);
    }
    // Promote a rank-1 B to a row or column vector compatible with A.
    if (a.rank() == 2 && b.rank() == 1) {
      if (aShape[1] == bShape[0])
        bShape = {bShape[0], 1};
      else if (aShape[1] == 1)
        bShape = {1, bShape[0]};
      bShape = transB ? swap0And1(bShape) : bShape;
      b.reshape(bShape);
    }
    if (a.rank() != 2 || b.rank() != 2) {
      SPDLOG_ERROR("tensor dimensions not appropriate for Gemm operator.");
      return NULL_TENSOR<To>;
    }
    if (!(this->template type_check<Ti1, float, double, int>())) {
      SPDLOG_ERROR(
          "Constrain input and output types to float, double and int tensors.");
      return NULL_TENSOR<To>;
    }
    // C is broadcast up to the product shape (M, N).
    std::vector<size_t> targetShape = {aShape[0], bShape[1]};
    tensor<Ti1> broadcastedC = broadcast(c, targetShape);
    tensor<Ti1> result(broadcastedC.shape(), broadcastedC.name());
    DNNC_EIGEN_MATRIX(eigenMatrixA, Ti1, a);
    DNNC_EIGEN_MATRIX(eigenMatrixB, Ti1, b);
    DNNC_EIGEN_MATRIX(eigenMatrixC, Ti1, broadcastedC);
    Matrix<Ti1, Dynamic, Dynamic, RowMajor> eResult(broadcastedC.shape()[0],
                                                    broadcastedC.shape()[1]);
    try {
      // One branch per transA/transB combination.
      if (transA == 0 && transB == 0) {
        eResult = alpha * (eigenMatrixA * eigenMatrixB) + beta * eigenMatrixC;
      } else if (transA == 1 && transB == 0) {
        eResult = alpha * ((eigenMatrixA.transpose()) * eigenMatrixB) +
                  beta * eigenMatrixC;
      } else if (transA == 0 && transB == 1) {
        eResult = alpha * (eigenMatrixA * (eigenMatrixB.transpose())) +
                  beta * eigenMatrixC;
      } else if (transA == 1 && transB == 1) {
        eResult =
            alpha * ((eigenMatrixA.transpose()) * (eigenMatrixB.transpose())) +
            beta * eigenMatrixC;
      }
    } catch (...) {
      SPDLOG_ERROR("tensor dimensions not appropriate for Gemm operator.");
      return NULL_TENSOR<To>;
    }
    result.load(eResult.data());
    // perform type conversion
    if (!(this->template type_check<To, Ti1>()))
      return result.template asType<To>();
    return result;
  }
  /*!<
  \return The output tensor of the same shape and type as tensor C.
  */
};
} // namespace dnnc
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <cmath>
#include <string>
using namespace Eigen;
namespace dnnc {
/*! Local response normalization. This Normalizes over local input regions. The
* local region is defined across the channels.*/
/*! For an element \f$X[n, c, d1, ..., dk]\f$ in a tensor of shape \f$ (N * C *
* D1 * D2* ...* Dk)\f$, its region is \f$\left \{ X[n, i, d1, ..., dk] \mid
* max(0, c - floor((size - 1) / 2)) \leq i \leq min(C - 1, c + ceil((size -
* 1) / 2)) \right \}\f$ */
/*! Define \f$square\_sum[n, c, d1, ..., dk] = \sum_{i=max(0, c - floor((size -
* 1) / 2))}^{min(C - 1, c + ceil((size - 1) / 2))}(X[n, i, d1, ..., dk] ^ 2)\f$
*/
/*! and then apply \f$ Y[n, c, d1, ..., dk] = X[n, c, d1, ...,
* dk]/\begin{pmatrix}bias + \alpha*square\_sum[n, c, d1, ..., dk]
* \end{pmatrix}^{beta} \f$ */
template <typename T> class LRN : public baseOperator<T, T, T> {
protected:
  float alpha = 0.0001; /*!< Scaling parameter.*/
  float beta = 0.75;    /*!< The exponent.*/
  float bias = 1.0;     /*!< Additive offset inside the normalization term.*/
  int size; /*!< (Required) The number of channels to sum over.*/
public:
  LRN(int size, std::string name = "opLRN", float alpha = 0.0001,
      float beta = 0.75, float bias = 1.0)
      : baseOperator<T, T, T>(opLRN, name) {
    this->alpha = alpha;
    this->beta = beta;
    this->bias = bias;
    this->size = size;
  }
  /*! Read a float attribute (alpha / beta / bias). */
  bool getAttribute(OPATTR attrName, float &obj) override {
    if (attrName == attr_alpha) {
      obj = alpha;
      return true;
    } else if (attrName == attr_beta) {
      obj = beta;
      return true;
    } else if (attrName == attr_bias) {
      obj = bias;
      return true;
    }
    return false;
  }
  /*! Read an integer attribute (size). */
  bool getAttribute(OPATTR attrName, int &obj) override {
    if (attrName == attr_size) {
      obj = size;
      return true;
    }
    return false;
  }
  /*! Set a float attribute (alpha / beta / bias). */
  bool setAttribute(OPATTR attrName, float obj) override {
    if (attrName == attr_alpha) {
      alpha = obj;
      return true;
    } else if (attrName == attr_beta) {
      beta = obj;
      return true;
    } else if (attrName == attr_bias) {
      bias = obj;
      return true;
    }
    return false;
  }
  /*! Set an integer attribute (size). */
  bool setAttribute(OPATTR attrName, int obj) override {
    if (attrName == attr_size) {
      size = obj;
      return true;
    }
    return false;
  }
  /*! Apply local response normalization across the channel axis, per the
   *  formula in the class documentation above:
   *  Y[n,c,d...] = X[n,c,d...] / (bias + alpha/size * square_sum)^beta,
   *  where square_sum is taken per element position over the channel window
   *  [c - floor((size-1)/2), c + ceil((size-1)/2)] clamped to [0, C-1].
   *
   *  Fixes over the previous implementation: the local spatial-element count
   *  used to be named "size", shadowing the "size" attribute (so the window
   *  and the alpha/size divisor were wrong), and square_sum was accumulated
   *  over the whole batch and all spatial positions instead of per element. */
  tensor<T> compute(tensor<T> input/*!< Input data tensor from the previous operator;
  dimensions for image case are \f$(N * C * H * W)\f$, where N is the batch size,
  C is the number of channels, and H and W are the height and the width of the data.
  For non image case, the dimensions are in the form of \f$(N * C * D1 * D2* ...* Dn)\f$,
  where N is the batch size.*/) {
    if (!(this->template type_check<T, float, double>())) {
      SPDLOG_ERROR("Constrain input and output types to float tensors.");
      return NULL_TENSOR<T>;
    }
    tensor<T> result(input.shape(), input.name());
    std::vector<size_t> original_shape = input.shape();
    // Collapse trailing spatial dimensions D1..Dk into one axis so the
    // tensor can be addressed as (N, C, spatial).
    size_t spatial_size = 1;
    for (size_t i = 2; i < input.rank(); i++) {
      spatial_size *= input.shape()[i];
    }
    std::vector<size_t> shape{input.shape()[0], input.shape()[1],
                              spatial_size};
    input.reshape(shape);
    result.reshape(shape);
    int channels = int(input.shape()[1]);
    // Integer window extents: floor((size-1)/2) below, ceil((size-1)/2)
    // above (for size >= 1, ceil((size-1)/2) == size/2).
    int half_below = (size - 1) / 2;
    int half_above = size / 2;
    for (size_t n = 0; n < input.shape()[0]; n++) {
      for (int c = 0; c < channels; c++) {
        int temp1 = c - half_below;
        int lower = (0 > temp1) ? 0 : temp1;
        int temp2 = c + half_above;
        int upper = ((channels - 1) < temp2) ? (channels - 1) : temp2;
        for (size_t k = 0; k < spatial_size; k++) {
          // square_sum for this element position, over the channel window.
          T sq_sum = 0;
          for (int i = lower; i <= upper; i++) {
            sq_sum += input(n, i, k) * input(n, i, k);
          }
          result(n, c, k) =
              input(n, c, k) / pow((bias + alpha / size * sq_sum), beta);
        }
      }
    }
    result.reshape(original_shape);
    return result;
  }
  /*!<
  \return Output tensor, which has the shape and type as input tensor
  */
};
} // namespace dnnc
<file_sep> # Contribution Guidelines
## Code Guide
## Code Review
### How to Merge Pull Request (permissions needed)
**Steps**
1. clone repo, fetch PR, checkout
1. inspect,
1. compile and run tests,
1. merge.
1. and push
**Git Receipe for the steps**
```
set PullRequest=75; # PR number on github.com
set branch=operators
git clone https://github.com/ai-techsystems/dnnCompiler.git -b $branch
cd dnnCompiler/
git fetch origin pull/${PullRequest}/head:prBranch
git checkout prBranch
# make sure compile and test goes through
make
git checkout $branch
git merge --no-ff prBranch
# resolve conflicts
# make sure compile and test goes through, again
make
git push origin $branch
```
## Document
## Committer Guide
#### Forking:
* Go to **[dnnCompiler](https://github.com/ai-techsystems/dnnCompiler)**
* Click **Fork** to your own repository.
- This will take 10 sec or so.
- Now you will be redirected to a copy of **dnnCompiler** under your username
- And it will be written :
> your_username/dnnCompiler
> forked from ai-techsystems/dnnCompiler
* Click on the **Clone or Download button** and copy the link.
- It will look like (https://github.com/your_username/dnnCompiler.git)
* Go to your terminal and go to any directory under which you want to clone the repo and open terminal.
- Paste the link you copied after typing `git clone `. It will look like this :
```console
git clone https://github.com/your_username/dnnCompiler.git
```
#### Changing branch
- Go inside the repo
```console
cd dnnCompiler
```
* Now you will be inside the repository.
- Check how many branches this repository has.
```console
git branch -r
```
- You will see something like:
```bash
origin/HEAD -> origin/master
origin/master
origin/operators
```
- Check on which branch you are currently on
```console
git branch
```
- You will see something like:
```bash
* master
operators
```
- The `*` shows your current branch.
- Change the branch to the operators as all the newer development is done on that branch.
```console
git checkout operators
```
- You will see something like
```bash
Switched to a new branch 'operators'
Branch 'operators' set up to track remote branch 'operators' from 'origin'.
```
- Now if you do
```console
git branch
```
- You will see:
```bash
master
* operators
```
- Now you are on operators branch.
#### Update code
* Change the code inside the repo where you want to change.
#### Backing up uncommitted work:
* But first back up your current work:
```console
git stash
```
#### Add synchronization steps to get latest updates from `AITS dnnCompiler`
* Now you will have to setup your repo so that it can sync new updates from the original **dnnCompiler** repo under **AITS**. As there will be other developers working on that. To do that you have to set **dnnCompiler** repo of **AITS** as an **upstream**.
* In the top level under your local **dnnCompiler** repo, open terminal.
- Add a remote upstream of the original **dnnCompiler** (You only need to do this upstream setup once! But **fetching** and **merging** should be done everytime)
```console
git remote add upstream https://github.com/ai-techsystems/dnnCompiler
```
- This will add original **dnnCompiler** as upstream.
- To fetch the latest updates from the **dnnCompiler** repo from **AITS**, use
```console
git fetch upstream
```
- You will see something like
```bash
From https://github.com/ai-techsystems/dnnCompiler
* [new branch] master -> upstream/master
* [new branch] operators -> upstream/operators
```
* Now based on which branch you are currently on, you have to merge `origin/branch_name` with `upstream/branch_name`. **Origin** means your forked local repo, and **Upstream** means the original repo from **AITS** here.
#### Merging the update from upstream
* If you followed all previous steps, you will be currently on `origin/operators` branch, if you haven't you will be on `origin/master` branch. To check which branch you are on currently, see the above steps. In the next steps, I am assuming you are on `origin/operators` branch.
* Now we will merge the upstream operators branch.
```console
git merge upstream/operators
```
- This will update your repo with the latest update from upstream repo. If you are already upto date, you will see something like this.
```bash
Already up to date.
```
- Else every update will be merged from operators branch.
* We will not merge the `upstream/master` as it is not required, but if you want to do that too, follow the steps below.
- First change to master branch
```console
git checkout master
```
- If you did `git fetch` previously, don't bother to do that again, or do a `git fetch upstream`.
- Then merge master branch
```console
git merge upstream/master
```
- Now your master branch will also be updated, before you forget, go back to `operators` branch, as we will modify that only.
```console
git checkout operators
```
- Now both of your branches are synchronized with the latest update from **AITS dnnCompiler** repo.
* Now your repo is synchronized with the latest update from upstream. Now sync your forked repo with upstream. Till now you synced your local repo with upstream, but not published it in your github forked repo, to do that simply type
```console
git push
```
* Now everything is in sync.
#### Get uncommitted code back
* Now get back the local changes you saved earlier with `git stash` command.
```console
git stash pop
```
#### Push your modified code to your forked repo in GitHub
* Now you will have your uncommitted work over the synced repo, just as you wanted. Do more modifications if required. And then do the usual commands to push your changes in your forked repo.
```console
git add .
git commit -m "commit message"
git push
```
* This will update your forked repo with your additions. Now, if you want them to be added to the **AITS dnnCompiler** repo, see the Pull request section below.
## Pull Request
* If you followed previous instructions, you will have a forked repo which has the latest update from **AITS dnnCompiler** with your further modifications.
* Now go to your forked repo in GitHub in your browser.
* Change branch from master to operators.
* You will see something like
> Your branch is ahead of ai-techsystems:operators by n commits.
* Click on **pull request**
* You will be taken to a new page where in the top you can see
> merge [operator branch] [aits dnnCompiler] <-- [operator branch] [your_username dnnCompiler]
* You will also be able to see the changes you made in the comparison of files below that.
* Now click on **create pull request**
* It's done!
<file_sep>
# Copyright 2018 The AITS DNNC Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
from generate_output import *
# Table of ONNX test-model specifications, keyed by operator name.
# Each entry holds Python source strings consumed by the generator
# (imported from generate_output — presumably eval'd/exec'd there; confirm):
#   'nodes'        - helper.make_node call(s) building the graph node(s)
#   'inputs'       - list of graph-input value_infos
#   'outputs'      - list of graph-output value_infos
#   'declarations' - optional setup statements some entries need (e.g. Constant, If)
operators = {}
operators['Abs'] = {
 'nodes':['helper.make_node("Abs", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Acos'] = {
 'nodes':['helper.make_node("Acos", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Acosh'] = {
 'nodes':['helper.make_node("Acosh", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Add'] = {
 'nodes':['helper.make_node("Add", ["A","B"], ["C"])'],
 'inputs':'[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 4)),helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['And'] = {
 'nodes':['helper.make_node("And", ["A","B"], ["C"])'],
 'inputs':'[helper.make_tensor_value_info("A", TensorProto.BOOL, (2, 3, 4)),helper.make_tensor_value_info("B", TensorProto.BOOL, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("C", TensorProto.BOOL, (2, 3, 4))]'
}
# NOTE(review): per the ONNX spec, ArgMax/ArgMin output INT64 with the chosen
# axis reduced (keepdims=1 -> (1, 3, 4)); the FLOAT (2, 3, 4) output declared
# here looks inconsistent — confirm the generator tolerates it.
operators['ArgMax'] = {
 'nodes': ['helper.make_node("ArgMax", ["0"], ["1"], axis = 0 , keepdims = 1)'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['ArgMin'] = {
 'nodes': ['helper.make_node("ArgMin", ["0"], ["1"], axis = 0 , keepdims = 1)'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Asin'] = {
 'nodes':['helper.make_node("Asin", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Asinh'] = {
 'nodes':['helper.make_node("Asinh", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Atan'] = {
 'nodes':['helper.make_node("Atan", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Atanh'] = {
 'nodes':['helper.make_node("Atanh", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
# 2x2 kernel, no padding, stride 1: 32 -> 31 in each spatial dimension.
operators['AveragePool'] = {
 'nodes': ['helper.make_node("AveragePool", ["0"], ["1"], kernel_shape=[2,2])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1, 3, 32, 32))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (1, 3, 31, 31))]'
}
operators['BatchNormalization'] = {
 'nodes': ["helper.make_node('BatchNormalization',inputs=['x', 's', 'bias', 'mean', 'var'],outputs=['y'])"],
 'inputs':'[helper.make_tensor_value_info("x", TensorProto.FLOAT, (2, 3)), helper.make_tensor_value_info("s", TensorProto.FLOAT, (1,2)), helper.make_tensor_value_info("bias", TensorProto.FLOAT, (1,2)), helper.make_tensor_value_info("mean", TensorProto.FLOAT, (1,2)), helper.make_tensor_value_info("var", TensorProto.FLOAT, (1,2))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3))]',
}
operators['BitShift'] = {
 'nodes':['helper.make_node("BitShift", ["A","B"], ["C"], direction="LEFT")'],
 'inputs':'[helper.make_tensor_value_info("A", TensorProto.UINT16, (2, 3, 4)),helper.make_tensor_value_info("B", TensorProto.UINT16, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("C", TensorProto.UINT16, (2, 3, 4))]'
}
# to = 1 is TensorProto.FLOAT, matching the declared output dtype.
operators['Cast'] = {
 'nodes':['helper.make_node("Cast", ["input"], ["output"], to = 1)'],
 'inputs':'[helper.make_tensor_value_info("input", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("output", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Ceil'] = {
 'nodes':['helper.make_node("Ceil", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Clip'] = {
 'nodes':['helper.make_node("Clip", ["0"], ["1"], min= -1.0, max= 1.0)'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Concat'] = {
 'nodes': ['helper.make_node("Concat", ["0"], ["1"], axis = 0)'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
# Constant takes no graph inputs; the tensor value comes from the
# 'declarations' entry (values) baked into the node attribute.
operators['Constant'] = {
 'nodes':['helper.make_node("Constant", [], ["1"], value = onnx.helper.make_tensor(name="const_tensor", data_type=onnx.TensorProto.FLOAT, dims=values.shape, vals=values.flatten().astype(float)))'],
 'inputs':'[]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (5,5))]',
 'declarations':['values=np.random.randn(5, 5).astype(np.float32)']
}
operators['Compress'] = {
 'nodes': ['helper.make_node("Compress", ["input", "condition"], ["output"] , axis = 1)'],
 'inputs':'[helper.make_tensor_value_info("input", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("condition", TensorProto.FLOAT, (2, 3, 4)) ]',
 'outputs':'[helper.make_tensor_value_info("output", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['ConstantOfShape'] = {
 'nodes':['helper.make_node("ConstantOfShape", ["0"], ["1"], value=helper.make_tensor("value", TensorProto.FLOAT, [1], [6]))'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Conv'] = {
 'nodes':['helper.make_node("Conv", ["0", "1"], ["2"], dilations=[1, 1], kernel_shape=[3, 3])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1, 1, 6, 6)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (1, 1, 3, 3))]',
 'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (1, 1, 4, 4))]'
}
operators['ConvInteger'] = {
 'nodes':['helper.make_node("ConvInteger", ["0", "1"], ["2"], dilations=[1, 1], kernel_shape=[3, 3])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1, 1, 6, 6)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (1, 1, 3, 3))]',
 'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (1, 1, 4, 4))]'
}
operators['ConvTranspose'] = {
 'nodes':['helper.make_node("ConvTranspose", ["0", "1"], ["2"], dilations=[1, 1], kernel_shape=[3, 3])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1, 1, 3, 3)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (1, 2, 3, 3))]',
 'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (1, 2, 5, 5))]'
}
operators['Cos'] = {
 'nodes':['helper.make_node("Cos", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Cosh'] = {
 'nodes':['helper.make_node("Cosh", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['CumSum'] = {
 'nodes':['helper.make_node("CumSum", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (5,))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (5,))]'
}
# ONNX doesn't seem to recognize opset 11 operators, even though they are listed on the operator page
operators['DepthToSpace'] = {
 'nodes':['helper.make_node("DepthToSpace", ["0"], ["1"], blocksize=2)'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1, 4, 2, 3))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (1, 1, 4, 6))]'
}
operators['DequantizeLinear'] = {
 'nodes':['helper.make_node("DequantizeLinear", ["0", "1"], ["2"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (5,)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (1,))]',
 'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (5,))]'
}
operators['Div'] = {
 'nodes':['helper.make_node("Div", ["0", "1"], ["2"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Dropout'] = {
 'nodes':['helper.make_node("Dropout", ["0"], ["1"], ratio=0.3)'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (4,))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (4,))]'
}
operators['Elu'] = {
 'nodes':['helper.make_node("Elu", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (4,))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (4,))]'
}
# NOTE(review): ONNX Equal outputs BOOL, not FLOAT — verify against the spec.
operators['Equal'] = {
 'nodes':['helper.make_node("Equal", ["0", "1"], ["2"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Erf'] = {
 'nodes':['helper.make_node("Erf", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Exp'] = {
 'nodes':['helper.make_node("Exp", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Expand'] = {
 'nodes':['helper.make_node("Expand", ["0", "1"], ["2"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (3, 1)), helper.make_tensor_value_info("1", TensorProto.INT64, (2, 1, 6))]',
 'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2, 1, 6))]'
}
operators['EyeLike'] = {
 'nodes':['helper.make_node("EyeLike", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (5, 5))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (5, 5))]'
}
operators['Flatten'] = {
 'nodes':['helper.make_node("Flatten", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (5, 4, 3, 2))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (5, 24))]'
}
operators['Floor'] = {
 'nodes':['helper.make_node("Floor", ["0"], ["1"])'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
# The empty first output name ("") skips GRU's optional Y output; only Y_h
# ("3") is declared as a graph output.
operators['GRU'] = {
 'nodes':['helper.make_node("GRU", ["0", "1", "2"], ["", "3"], hidden_size=5)'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1, 3, 2)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (1, 3 * 5, 2)), helper.make_tensor_value_info("2", TensorProto.FLOAT, (1, 3 * 5, 5))]',
 'outputs':'[helper.make_tensor_value_info("3", TensorProto.FLOAT, (1, 3, 5))]'
}
operators['Gather'] = {
 'nodes':['helper.make_node("Gather",["data","indices"],["y"],axis=0)'],
 'inputs':'[helper.make_tensor_value_info("data", TensorProto.FLOAT, (3, 2)), helper.make_tensor_value_info("indices", TensorProto.INT64, (2,2))]',
 'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(2,2,2))]'
}
operators['Gemm'] = {
 'nodes':['helper.make_node("Gemm",["A","B","C"],["Y"],alpha=1.22,beta=5.3,transA=1,transB=0)'],
 'inputs':'[helper.make_tensor_value_info("A", TensorProto.FLOAT, (5, 3)), helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,3)),helper.make_tensor_value_info("C", TensorProto.FLOAT, (3,3))]',
 'outputs':'[helper.make_tensor_value_info("Y",TensorProto.FLOAT,(3,3))]'
}
operators['GlobalAveragePool'] = {
 'nodes':['helper.make_node("GlobalAveragePool",["x"],["y"])'],
 'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(2,3,4,5))]',
 'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(2,3,1,1))]'
}
operators['GlobalLpPool'] = {
 'nodes':['helper.make_node("GlobalLpPool",["x"],["y"],p=3)'],
 'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(2,3,4,5))]',
 'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(2,3,1,1))]'
}
operators['GlobalMaxPool'] = {
 'nodes':['helper.make_node("GlobalMaxPool",["x"],["y"])'],
 'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(2,3,4,5))]',
 'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(2,3,1,1))]'
}
# NOTE(review): ONNX Greater outputs BOOL, not FLOAT — verify against the spec.
operators['Greater'] = {
 'nodes':['helper.make_node("Greater",["A","B"],["C"])'],
 'inputs':'[helper.make_tensor_value_info("A",TensorProto.FLOAT,(2,3)), helper.make_tensor_value_info("B",TensorProto.FLOAT,(2,3))]',
 'outputs':'[helper.make_tensor_value_info("C",TensorProto.FLOAT,(2,3))]'
}
operators['HardSigmoid'] = {
 'nodes':['helper.make_node("HardSigmoid",["X"],["Y"],alpha=0.2,beta=0.2)'],
 'inputs':'[helper.make_tensor_value_info("X",TensorProto.FLOAT,(2,3))]',
 'outputs':'[helper.make_tensor_value_info("Y",TensorProto.FLOAT,(2,3))]'
}
operators['Hardmax'] = {
 'nodes':['helper.make_node("Hardmax",["X"],["Y"],axis=1)'],
 'inputs':'[helper.make_tensor_value_info("X",TensorProto.FLOAT,(2,3))]',
 'outputs':'[helper.make_tensor_value_info("Y",TensorProto.FLOAT,(2,3))]'
}
operators['Identity'] = {
 'nodes':['helper.make_node("Identity",["X"],["Y"])'],
 'inputs':'[helper.make_tensor_value_info("X",TensorProto.FLOAT,(3,3,5))]',
 'outputs':'[helper.make_tensor_value_info("Y",TensorProto.FLOAT,(3,3,5))]'
}
# If requires subgraph attributes; the 'declarations' entry builds one shared
# sub_graph used for both branches (it references inputs/outputs, which are
# presumably in scope in the generator — confirm there).
operators['If'] = {
 'nodes':['helper.make_node("If", ["0"], ["1"], else_branch=sub_graph, then_branch=sub_graph)'],
 'inputs':'[helper.make_tensor_value_info("0", TensorProto.BOOL, [1]), helper.make_tensor_value_info("2", TensorProto.BOOL, [1])]',
 'outputs':'[helper.make_tensor_value_info("1", TensorProto.BOOL, [1])]',
 'declarations':['sub_graph = helper.make_graph([helper.make_node("And", ["0", "2"], ["1"])], "graph", inputs, outputs)']
}
operators['InstanceNormalization'] = {
 'nodes':['helper.make_node("InstanceNormalization",["x","s","bias"],["y"],epsilon=1e-05)'],
 'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(3,4,2,2)), helper.make_tensor_value_info("s",TensorProto.FLOAT,(3,)), helper.make_tensor_value_info("bias",TensorProto.FLOAT,(3,))]',
 'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(3,4,2,2))]'
}
operators['IsInf'] = {
 'nodes':['helper.make_node("IsInf",["X"],["Y"],detect_negative=1,detect_positive=1)'],
 'inputs':'[helper.make_tensor_value_info("X",TensorProto.FLOAT,(1,))]',
 'outputs':'[helper.make_tensor_value_info("Y",TensorProto.BOOL,(1,))]'
}
operators['IsNaN'] = {
 'nodes':['helper.make_node("IsNaN",["X"],["Y"])'],
 'inputs':'[helper.make_tensor_value_info("X",TensorProto.FLOAT,(1,))]',
 'outputs':'[helper.make_tensor_value_info("Y",TensorProto.BOOL,(1,))]'
}
operators['LRN'] = {
 'nodes':['helper.make_node("LRN",["x"],["y"],alpha = 0.0001,beta = 0.75,bias = 1.0,size = 3)'],
 'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(2,3,4,5))]',
 'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(2,3,4,5))]'
}
operators['LSTM'] = {
'nodes':['helper.make_node("LSTM",["x","w","r"],["y"],activations=["sigmoid","tanh","relu"],direction="reverse",hidden_size=3)'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(3,3,2)),helper.make_tensor_value_info("w",TensorProto.FLOAT,(1,4*3,2)),helper.make_tensor_value_info("r",TensorProto.FLOAT,(1,4*3,3))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(3,1,3,3))]'
}
operators['LeakyRelu'] = {
'nodes':['helper.make_node("LeakyRelu",["x"],["y"],alpha=0.1)'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(1,))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(1,))]'
}
operators['Less'] = {
'nodes':['helper.make_node("Less",["x","y"],["z"])'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(1,)),helper.make_tensor_value_info("y",TensorProto.FLOAT,(1,))]',
'outputs':'[helper.make_tensor_value_info("z",TensorProto.BOOL,(1,))]'
}
operators['Log'] = {
'nodes':['helper.make_node("Log",["x"],["y"])'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(1,))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(1,))]'
}
operators['LogSoftmax'] = {
'nodes':['helper.make_node("LogSoftmax",["x"],["y"],axis=1)'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(2,3))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(2,3))]'
}
# ---- ONNX operator test-case table (Loop .. QLinearConv) ----
# Same scheme as above: values are source strings consumed by create_testcase().
# Loop's second node input is "" — the optional termination condition is
# deliberately omitted; the loop body is the Add graph from 'declarations'.
operators['Loop'] = {
'nodes':['helper.make_node("Loop", ["0", "", "1", "3"], ["2"], body=graph)'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.INT64, (1,)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (4, 5)),helper.make_tensor_value_info("3", TensorProto.FLOAT, (4, 5))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (4, 5))]',
'declarations': ['graph = helper.make_graph([helper.make_node("Add", ["1", "3"], ["2"])], "graph", inputs, outputs)']
}
operators['LpNormalization'] = {
'nodes':[' helper.make_node("LpNormalization", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['LpPool'] = {
'nodes':[' helper.make_node("LpPool", ["0"], ["1"], kernel_shape=[2])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4, 1))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['MatMul'] = {
'nodes':['helper.make_node("MatMul", ["0", "1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 4, 3))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2, 3, 3))]'
}
operators['MatMulInteger'] = {
'nodes':['helper.make_node("MatMulInteger", ["0", "1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.INT64, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.INT64, (2, 4, 3))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.INT64, (2, 3, 3))]'
}
operators['Max'] = {
'nodes':['helper.make_node("Max", ["0", "1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['MaxPool'] = {
'nodes':[' helper.make_node("MaxPool", ["0"], ["1"], kernel_shape=[2])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1, 3, 32))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (1, 3, 31))]'
}
operators['MaxRoiPool'] = {
'nodes':[' helper.make_node("MaxRoiPool", ["0","1"], ["2"], pooled_shape=[4,5])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4, 5)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (3,5))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (3, 3, 1, 1))]'
}
operators['MaxUnpool'] = {
'nodes':[' helper.make_node("MaxUnpool", ["0","1"], ["2"], kernel_shape=[2])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4, 5)),helper.make_tensor_value_info("1", TensorProto.INT64, (2, 3, 4, 5))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2, 3, 4, 5))]'
}
operators['Mean'] = {
'nodes':['helper.make_node("Mean", ["0", "1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Min'] = {
'nodes':['helper.make_node("Min", ["0", "1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Mod'] = {
'nodes':[' helper.make_node("Mod", ["0","1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Mul'] = {
'nodes':[' helper.make_node("Mul", ["0","1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Multinomial'] = {
'nodes':[' helper.make_node("Multinomial", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3))]'
}
operators['Neg'] = {
'nodes':[' helper.make_node("Neg", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['NonMaxSuppression'] = {
'nodes':[' helper.make_node("NonMaxSuppression", ["0","1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 3))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (4, 3))]'
}
# NOTE(review): ONNX "NonZero" returns INT64 indices; FLOAT is declared here —
# confirm against the generator's expectations.
operators['NonZero'] = {
'nodes':[' helper.make_node("NonZero", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Not'] = {
'nodes':['helper.make_node("Not", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.BOOL, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.BOOL, (2, 3, 4))]'
}
operators['OneHot'] = {
'nodes':['helper.make_node("OneHot", ["0", "1","2"], ["3"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (10,)), helper.make_tensor_value_info("2", TensorProto.FLOAT, (1, 3))]',
'outputs':'[helper.make_tensor_value_info("3", TensorProto.FLOAT, (3, 4, 5))]'
}
operators['Or'] = {
'nodes':['helper.make_node("Or", ["0","1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.BOOL, (2, 3, 4)),helper.make_tensor_value_info("1", TensorProto.BOOL, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.BOOL, (2, 3, 4))]'
}
operators['PRelu'] = {
'nodes':[' helper.make_node("PRelu", ["0","1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 3))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2, 3, 4))]'
}
# QLinearConv's eight quantization inputs are written with backslash line
# continuations *inside* the single-quoted 'inputs' string.
operators['QLinearConv'] = {
'nodes':['helper.make_node("QLinearConv",["x", "x_scale", "x_zero_point", "w", "w_scale", "w_zero_point", "y_scale", "y_zero_point"],["y"])'],
'inputs':'[helper.make_tensor_value_info("x", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("x_scale", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("x_zero_point", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("w", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("w_scale", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("w_zero_point", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("y_scale", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("y_zero_point", TensorProto.FLOAT, (2,3,4))]',
'outputs':'[helper.make_tensor_value_info("y", TensorProto.FLOAT, (2,3,4))]'
}
# ---- ONNX operator test-case table (Pow, QLinearMatMul, QuantizeLinear) ----
operators['Pow'] = {
'nodes':['helper.make_node("Pow",["X","Y"],["Z"],)'],
'inputs':'[helper.make_tensor_value_info("X",TensorProto.FLOAT,(2,3,4)), helper.make_tensor_value_info("Y", TensorProto.FLOAT,(2,3,4))]',
'outputs':'[helper.make_tensor_value_info("Z",TensorProto.FLOAT,(2,3,4))]'
}
operators['QLinearMatMul'] = {
'nodes':['helper.make_node("QLinearMatMul",["a", "a_scale", "a_zero_point", "b", "b_scale", "b_zero_point", "y_scale", "y_zero_point"],["y"],)'],
'inputs':'[helper.make_tensor_value_info("a", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("a_scale", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("a_zero_point", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("b", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("b_scale", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("b_zero_point", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("y_scale", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("y_zero_point", TensorProto.FLOAT, (2,3,4))]',
'outputs':'[helper.make_tensor_value_info("y", TensorProto.FLOAT, (2,3,4))]'
}
# QuantizeLinear uses a triple-quoted 'inputs' string, so the embedded newlines
# become part of the generated source verbatim.
operators['QuantizeLinear']= {
'nodes':['helper.make_node("QuantizeLinear",["x","y_scale","y_zero_point"],["y"])'],
'inputs':'''[helper.make_tensor_value_info("x",TensorProto.FLOAT,(2,3,4)),
helper.make_tensor_value_info("y_scale",TensorProto.FLOAT,(2,3,4)),
helper.make_tensor_value_info("y_zero_point",TensorProto.FLOAT,(2,3,4))]''',
'outputs':'[helper.make_tensor_value_info("y", TensorProto.FLOAT, (2,3,4))]'
}
# ---- ONNX operator test-case table (RNN, Random*, Reduce*, Pad) ----
# RNN's first node output is "" — the all-steps output Y is skipped and only
# the last hidden state (second output slot, named "Y" here) is produced.
operators['RNN'] = {
'nodes':['helper.make_node("RNN",["X","W","R","B"],["","Y"])'],
'inputs':'[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("W", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("R", TensorProto.FLOAT, (2,3,4)),\
helper.make_tensor_value_info("B", TensorProto.FLOAT, (2,3,4))]',
'outputs':'[helper.make_tensor_value_info("Y",TensorProto.FLOAT,(1,2))]'
}
# NOTE(review): Reciprocal is element-wise, yet input shape (1,2) differs from
# output shape (2,3) below — confirm this is intentional test data.
operators['Reciprocal'] = {
'nodes':['helper.make_node("Reciprocal",["X"],["Y"],)'],
'inputs':'[helper.make_tensor_value_info("X",TensorProto.FLOAT,(1,2))]',
'outputs':'[helper.make_tensor_value_info("Y",TensorProto.FLOAT,(2,3))]'
}
operators['ReduceL1'] = {
'nodes':['helper.make_node("ReduceL1",["D"],["R"],axes=[0],keepdims=1)'],
'inputs':'[helper.make_tensor_value_info("D",TensorProto.FLOAT,(1,2,3))]',
'outputs':'[helper.make_tensor_value_info("R",TensorProto.FLOAT,(1,2,3))]'
}
operators['RandomUniformLike']={
'nodes':['helper.make_node("RandomUniformLike",inputs=["x"],outputs=["y"],low=0.0,high=1.0,dtype=TensorProto.FLOAT)'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(1,2,))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(1,2,))]'
}
operators['ReduceL2'] = {
'nodes':['helper.make_node("ReduceL2",["D"],["R"],axes=[0],keepdims=1)'],
'inputs':'[helper.make_tensor_value_info("D",TensorProto.FLOAT,(1,2,3))]',
'outputs':'[helper.make_tensor_value_info("R",TensorProto.FLOAT,(1,2,3))]'
}
operators['ReduceLogSum'] = {
'nodes':['helper.make_node("ReduceLogSum",["D"],["R"],axes=[0,1],keepdims=0)'],
'inputs':'[helper.make_tensor_value_info("D",TensorProto.FLOAT,(1,2,3))]',
'outputs':'[helper.make_tensor_value_info("R",TensorProto.FLOAT,(1,2))]'
}
operators['ReduceLogSumExp'] = {
'nodes':['helper.make_node("ReduceLogSumExp",["D"],["R"],axes=[0],keepdims=1)'],
'inputs':'[helper.make_tensor_value_info("D",TensorProto.FLOAT,(1,2,3))]',
'outputs':'[helper.make_tensor_value_info("R",TensorProto.FLOAT,(1,2,3))]'
}
# NOTE(review): ONNX "RandomUniform" takes no inputs; one is declared here —
# presumably tolerated by the generator. Verify.
operators['RandomUniform']={
'nodes':['helper.make_node("RandomUniform",inputs=["x"],outputs=["y"],low=0.0,high=1.0,dtype=TensorProto.FLOAT)'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(1,2,))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(1,2,))]'
}
operators['ReduceMax'] = {
'nodes':['helper.make_node("ReduceMax",["D"],["R"],axes=[0],keepdims=1)'],
'inputs':'[helper.make_tensor_value_info("D",TensorProto.FLOAT,(1,2,3))]',
'outputs':'[helper.make_tensor_value_info("R",TensorProto.FLOAT,(1,2,3))]'
}
operators['ReduceMean'] = {
'nodes':['helper.make_node("ReduceMean",["D"],["R"],axes=[0],keepdims=1)'],
'inputs':'[helper.make_tensor_value_info("D",TensorProto.FLOAT,(1,2,3))]',
'outputs':'[helper.make_tensor_value_info("R",TensorProto.FLOAT,(1,2,3))]'
}
# Pad: pads are [x1_begin, x2_begin, ..., x1_end, x2_end, ...] per ONNX.
operators['Pad'] = {
'nodes':['helper.make_node("Pad",["x"],["y"],mode="constant",pads=[0, 0, 1, 3, 0, 0, 2, 4])'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(1, 3, 4, 5))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(1, 3, 7, 12))]'
}
operators['RandomNormalLike']={
'nodes':['helper.make_node("RandomNormalLike",inputs=["x"],outputs=["y"],mean=0.0,scale=1.0)'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(1, 2))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(1, 2))]'
}
operators['ReduceMin'] = {
'nodes':['helper.make_node("ReduceMin",["D"],["R"],axes=[0],keepdims=1)'],
'inputs':'[helper.make_tensor_value_info("D",TensorProto.FLOAT,(1,2,3))]',
'outputs':'[helper.make_tensor_value_info("R",TensorProto.FLOAT,(1,2,3))]'
}
operators['RandomNormal']={
'nodes':['helper.make_node("RandomNormal",inputs=["x"],outputs=["y"],mean=0.0,scale=1.0)'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(1,2,))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(1,2,))]'
}
operators['ReduceProd'] = {
'nodes':['helper.make_node("ReduceProd",["D"],["R"],axes=[0],keepdims=1)'],
'inputs':'[helper.make_tensor_value_info("D",TensorProto.FLOAT,(1,2,3))]',
'outputs':'[helper.make_tensor_value_info("R",TensorProto.FLOAT,(1,2,3))]'
}
operators['ReduceSum'] = {
'nodes':['helper.make_node("ReduceSum",["D"],["R"],axes=[0],keepdims=1)'],
'inputs':'[helper.make_tensor_value_info("D",TensorProto.FLOAT,(1,2,3))]',
'outputs':'[helper.make_tensor_value_info("R",TensorProto.FLOAT,(1,2,3))]'
}
operators['ReduceSumSquare'] = {
'nodes':['helper.make_node("ReduceSumSquare", ["0"], ["1"], keepdims=1)'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (3, 2, 2))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (1, 1, 1))]'
}
# ---- ONNX operator test-case table (Relu .. Softplus) ----
operators['Relu'] = {
'nodes':['helper.make_node("Relu",["x"],["y"])'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(2,3,4))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(2,3,4))]'
}
operators['Reshape'] = {
'nodes':['helper.make_node("Reshape", ["0", "1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("1", TensorProto.INT64, (2,))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (3, 8))]'
}
operators['Resize'] = {
'nodes':['helper.make_node("Resize", ["0", "1"], ["2"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1, 1, 2, 4)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (4,))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (1, 1, 1, 2))]'
}
operators['ReverseSequence'] = {
'nodes':['helper.make_node("ReverseSequence", ["0", "1"], ["2"], batch_axis=1, time_axis=0)'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (4, 4)), helper.make_tensor_value_info("1", TensorProto.INT64, (4,))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (4, 4))]'
}
operators['RoiAlign'] = {
'nodes':['helper.make_node("RoiAlign", ["0", "1", "2"], ["3"], output_height=5, output_width=5, sampling_ratio=2)'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1, 1, 10, 10)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (3, 4)), helper.make_tensor_value_info("2", TensorProto.INT64, (3,))]',
'outputs':'[helper.make_tensor_value_info("3", TensorProto.FLOAT, (3, 1, 5, 5))]'
}
operators['Round'] = {
'nodes': ['helper.make_node("Round", inputs=["0"], outputs=["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (1, 4))]'
}
# Scan builds a running-sum body graph (Add + Identity) in 'declarations'; the
# declarations strings deliberately use onnx.helper, not the bare helper alias.
operators['Scan'] = {
'nodes':['onnx.helper.make_node("Scan",inputs=["initial", "x"],outputs=["y", "z"],num_scan_inputs=1,body=scan_body)'],
'inputs':'[helper.make_tensor_value_info("initial",TensorProto.FLOAT,(1,2)), helper.make_tensor_value_info("x",TensorProto.FLOAT,(1,3,2))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(1,2)), helper.make_tensor_value_info("z",TensorProto.FLOAT,(1,3,2))]',
'declarations':[
'sum_in = onnx.helper.make_tensor_value_info("sum_in", onnx.TensorProto.FLOAT, [2])',
"next = onnx.helper.make_tensor_value_info('next', onnx.TensorProto.FLOAT, [2])",
"sum_out = onnx.helper.make_tensor_value_info('sum_out', onnx.TensorProto.FLOAT, [2])",
"scan_out = onnx.helper.make_tensor_value_info('scan_out', onnx.TensorProto.FLOAT, [2])",
"add_node = onnx.helper.make_node('Add',inputs=['sum_in', 'next'],outputs=['sum_out'])",
"id_node = onnx.helper.make_node('Identity',inputs=['sum_out'],outputs=['scan_out'])",
"scan_body = onnx.helper.make_graph([add_node, id_node],'scan_body',[sum_in, next],[sum_out, scan_out])"]
}
# NOTE(review): Scatter's shapes look inconsistent (data (1,6) vs y (1,5);
# indices (1,3) vs updates (1,2)) — confirm against the intended test.
operators['Scatter'] = {
'nodes':["onnx.helper.make_node('Scatter',inputs=['data', 'indices', 'updates'],outputs=['y'],axis=1)"],
'inputs':'[helper.make_tensor_value_info("data",TensorProto.FLOAT,(1,6)), helper.make_tensor_value_info("indices",TensorProto.INT32,(1,3)), helper.make_tensor_value_info("updates",TensorProto.FLOAT,(1,2))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(1,5))]'
}
operators['Selu'] = {
'nodes':["onnx.helper.make_node('Selu',inputs=['x'],outputs=['y'],alpha=2.0,gamma=3.0)"],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(1,3))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(1,3))]'
}
# NOTE(review): ONNX "Shape" yields a 1-D INT64 tensor of length rank(x); the
# (3,4,5) INT32 output declared here mirrors the input instead — verify.
operators['Shape'] = {
'nodes':["onnx.helper.make_node('Shape',inputs=['x'],outputs=['y'])"],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(3,4,5))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.INT32,(3,4,5))]'
}
operators['Sigmoid'] = {
'nodes':['helper.make_node("Sigmoid", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Sin'] = {
'nodes':['helper.make_node("Sin", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Shrink'] = {
'nodes':['helper.make_node("Shrink", ["0"], ["1"], bias = 0.0 , lambd = 0.5)'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Size'] = {
'nodes':['helper.make_node("Size", ["0"] , ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
# Slice spells its node and inputs with triple-quoted strings, so the embedded
# newlines appear verbatim in the generated test source.
operators['Slice'] = {
'nodes':['''helper.make_node("Slice",["data",
"starts",
"ends",
"axes",
"steps"],["output"],
)'''],
'inputs':'''[helper.make_tensor_value_info("data", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("starts", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("ends", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("axes", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("steps", TensorProto.FLOAT, (2, 3, 4))]''',
'outputs':'[helper.make_tensor_value_info("output", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Sign'] = {
'nodes':['helper.make_node("Sign", ["0"] , ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Sinh'] = {
'nodes':['helper.make_node("Sinh", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]'
}
operators['Softmax'] = {
'nodes':['helper.make_node("Softmax",["x"],["y"],axis=1)'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(2,3,4))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(2,3,4))]'
}
operators['Softplus'] = {
'nodes':['helper.make_node("Softplus",["x"],["y"])'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(2,3,4))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(2,3,4))]'
}
# Test-case spec for the ONNX "Sub" operator (element-wise A - B).
# Bug fix: the 'inputs' string previously read
#   '[...("A"...)], [...("B"...)]'
# which, when eval'd by the generator, yields a TUPLE of two one-element lists
# instead of the single list of two ValueInfoProtos that every other entry in
# this table produces. Joined into one list to match the sibling entries.
operators['Sub'] = {
'nodes':['helper.make_node("Sub", ["A","B"], ["C"])'],
'inputs':'[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3, 4))]'
}
# ---- ONNX operator test-case table (Tan .. SpaceToDepth) ----
operators['Tan'] = {
'nodes':['helper.make_node("Tan", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (4, 5, 6))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (4, 5, 6))]'
}
operators['Tanh'] = {
'nodes':['helper.make_node("Tanh", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3))]'
}
operators['Sqrt'] = {
'nodes':['helper.make_node("Sqrt", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (4, 5, 6))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (4, 5, 6))]'
}
operators['Squeeze'] = {
'nodes':['helper.make_node("Squeeze", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1,3,4,5))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (1,3,4,5))]'
}
# Unsqueeze with axes=[] is a no-op expansion; input and output shapes match.
operators['Unsqueeze'] = {
'nodes':['helper.make_node("Unsqueeze", ["0"], ["1"], axes=[])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (3,4,5))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (3,4,5))]'
}
operators['Sum'] = {
'nodes':['helper.make_node("Sum", ["0", "1"], ["3"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (4,3,2)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (4,3,2))]',
'outputs':'[helper.make_tensor_value_info("3", TensorProto.FLOAT, (4,3,2))]'
}
operators['Xor'] = {
'nodes':['helper.make_node("Xor", ["x", "y"], ["z"])'],
'inputs':'[helper.make_tensor_value_info("x", TensorProto.BOOL, (2, 3)), helper.make_tensor_value_info("y", TensorProto.BOOL, (2, 3))]',
'outputs':'[helper.make_tensor_value_info("z", TensorProto.BOOL, (2, 3))]'
}
operators['Softsign'] = {
'nodes':['helper.make_node("Softsign",["x"],["y"])'],
'inputs':'[helper.make_tensor_value_info("x",TensorProto.FLOAT,(3,4,5))]',
'outputs':'[helper.make_tensor_value_info("y",TensorProto.FLOAT,(3,4,5))]'
}
operators['ThresholdedRelu'] = {
'nodes':[' helper.make_node("ThresholdedRelu", ["0"], ["1"], alpha=2.0)'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (3,4,5))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (3,4,5))]'
}
operators['Upsample'] = {
'nodes':['helper.make_node("Upsample", ["0","1"], ["2"], mode="nearest")'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2,2)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (1,4))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (4,6))]'
}
operators['TopK'] = {
'nodes':[' helper.make_node("TopK", ["0","1"], ["2","3"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2,3,4)), helper.make_tensor_value_info("1", TensorProto.INT64, (1,1))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2,3,4)), helper.make_tensor_value_info("3", TensorProto.FLOAT, (2,3,4))]'
}
# NOTE(review): Split's node outputs are ["1","2"], yet "1" is declared under
# 'inputs' and only "2" under 'outputs' — looks like "1" belongs in 'outputs'.
# Verify how the generator consumes these lists before changing.
operators['Split'] = {
'nodes':[' helper.make_node("Split", ["0"], ["1","2"], axis=1) '],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2,6)), helper.make_tensor_value_info("1", TensorProto.FLOAT, (2,3))]',
'outputs':'[helper.make_tensor_value_info("2", TensorProto.FLOAT, (2,3))]'
}
operators['Transpose'] = {
'nodes':['helper.make_node("Transpose", ["A"],["B"])'],
'inputs': '[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("B", TensorProto.FLOAT, (3, 2, 4))]'
}
operators['Where'] = {
'nodes':['helper.make_node("Where", ["A","B","C"], ["D"])'],
'inputs': '[helper.make_tensor_value_info("A", TensorProto.BOOL, (2, 3, 4)), helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3, 4))]',
'outputs':'[helper.make_tensor_value_info("D", TensorProto.FLOAT, (2, 3, 4))]'
}
# NOTE(review): "(2, 3, 4)*2" is tuple repetition, i.e. a 6-D output shape
# (2,3,4,2,3,4) — confirm this is the intended Tile result and not a mistaken
# attempt at per-axis scaling.
operators['Tile'] = {
'nodes':['helper.make_node("Tile", ["A","B"],["C"])'],
'inputs': ' [helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 4)), helper.make_tensor_value_info("B", TensorProto.INT64, (2, 2))]',
'outputs':'[helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3, 4)*2)]'
}
operators['StringNormalizer'] = {
'nodes': ['helper.make_node("StringNormalizer", ["0"], ["1"])'],
'inputs':'[helper.make_tensor_value_info("0", TensorProto.STRING, (1, 4))]',
'outputs':'[helper.make_tensor_value_info("1", TensorProto.STRING, (1, 3))]'
}
# TfIdfVectorizer needs the numpy arrays from 'declarations' in scope when the
# node string is eval'd (ngram_counts / ngram_indexes / pool_int64s).
operators['TfIdfVectorizer'] = {
'nodes': ['helper.make_node("TfIdfVectorizer", ["0"], ["1"], mode="TF", min_gram_length=2, max_gram_length=2, max_skip_count=0, ngram_counts=ngram_counts, ngram_indexes=ngram_indexes, pool_int64s=pool_int64s)'],
'inputs': '[helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 6))]',
'outputs': '[helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 7))]',
'declarations': ['ngram_counts = np.array([0, 4]).astype(np.int64)',
'ngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)',
'pool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype(np.int64)']
}
operators['SpaceToDepth'] = {
'nodes':['helper.make_node("SpaceToDepth", ["0"], ["1"], blocksize=2)'],'inputs':'[helper.make_tensor_value_info("0", TensorProto.FLOAT, (1, 1, 4, 6))]','outputs':'[helper.make_tensor_value_info("1", TensorProto.FLOAT, (1, 4, 2, 3))]'
}
# Drive the generator: emit one test case per operator spec in the table above.
# Idiom cleanup: iterate items() instead of keys() + repeated indexing, and use
# dict.get() with a default instead of `'k' in d.keys()` membership checks.
for operator, operator_info in operators.items():
    # Optional setup statements (e.g. sub-graph declarations); default: none.
    declarations = operator_info.get('declarations', [])
    # NOTE(review): 'node_params' is collected here but was never forwarded to
    # create_testcase in the original either — confirm whether it should be
    # passed through or the key retired.
    node_params = operator_info.get('node_params', [])
    create_testcase(operator, operator_info['inputs'], operator_info['outputs'],
                    operator_info['nodes'], declarations)
<file_sep># Developer's getting started guide
## Contents
* **[Abstract](#abstract)**
* **[Setting up repository](#setting-up-repository)**
* **[Add new operators](#add-new-operators)**
* **[Why use Eigen](#why-use-eigen)**
* **[Add documentation for the operators](#add-documentation-for-the-operators)**
* **[Add operators in python interface](#add-operators-in-python-interface)**
* **[Add unittests for operator testing](#add-unittests-for-operator-testing)**
* **[Work-Flow (Usage and commands for updating code)](#work-flow)**
* **[Pull latest updates](#pull-latest-updates)**
* **[Create pull request](#create-pull-request)**
## Abstract
#### Our work is divided into 5 parts:
1. Implementing these **[ONNX operators](https://github.com/onnx/onnx/blob/rel-1.5.0/docs/Operators.md)**.
* We are working on **[dnnc-operators](https://github.com/ai-techsystems/dnnc-operators)** repo in C++. Our development status is mentioned **[here](https://github.com/ai-techsystems/dnnc-operators/blob/master/README.md)**
	* You can study [MatMul.h](https://github.com/ai-techsystems/dnnc-operators/blob/master/include/operators/MatMul.h), [Add.h](https://github.com/ai-techsystems/dnnc-operators/blob/master/include/operators/Add.h) and [ThresholdedRelu.h](https://github.com/ai-techsystems/dnnc-operators/blob/master/include/operators/ThresholdedRelu.h) to get a demo of what we are trying to achieve.
* Here is a **[Tutorial](https://dritchie.github.io/csci2240/assignments/eigen_tutorial.pdf)** for eigen library.
* Here is more **[indepth discussion of philosophy and features of Eigen](http://downloads.tuxfamily.org/eigen/eigen_CGLibs_Giugno_Pisa_2013.pdf)**
	* Also check out **[Eigen documentation](http://eigen.tuxfamily.org/dox/)**
2. Test the above **[ONNX operators](https://github.com/onnx/onnx/blob/rel-1.5.0/docs/Operators.md)**.
* Here are the **[test cases](https://github.com/ai-techsystems/dnnc-operators/tree/master/src/operators)**
* After making this branch stable it will be merged in the official repo **[dnnCompiler](https://github.com/ai-techsystems/dnnCompiler/tree/operators)**.
3. Add documentation to the operators with the help of **[Doxygen](http://www.doxygen.nl/index.html)**.
* Here is a **[tutorial for Doxygen](https://www.youtube.com/watch?v=44Ja2X_fzv4)**
4. Implement SWIG for python interface.
* Here is a **[SWIG Tutorial](http://www.swig.org/tutorial.html)**.
* DNNC [operators](https://github.com/ai-techsystems/dnnc-operators/tree/master/include/operators) and tensors should be implemented for the python interface with **[SWIG](http://www.swig.org/exec.html)**.
* To understand how we are wrapping operators written in cpp, with python see [usage](#usage) guide below.
* Check out **[Numpy](https://docs.scipy.org/doc/numpy/reference/)** for the implementation of our tensor and it's simplicity.
5. Test the operators with python unittest.
* Here is the guide of **[python unittest](https://docs.python.org/3/library/unittest.html)**.
* For reference go to **[test / swig](https://github.com/ai-techsystems/dnnCompiler/tree/master/test/swig)** and see [MatMul.py](https://github.com/ai-techsystems/dnnCompiler/blob/master/test/swig/MatMul.py), [Exp.py](https://github.com/ai-techsystems/dnnCompiler/blob/master/test/swig/Exp.py) to get a demo.
## Setting up repository
### Forking:
* Go to **[dnnCompiler](https://github.com/ai-techsystems/dnnCompiler)**
* Click **Fork** to your own repository.
- This will take 10 sec or so.
- Now you will be redirected to a copy of **dnnCompiler** under your username
- And it will be written :
> your_username/dnnCompiler
> forked from ai-techsystems/dnnCompiler
* Choose active development branch (e.g. `operators`), click on the **Clone or Download button** and copy the link.
- It will look like (https://github.com/your_username/dnnCompiler.git)
* Choose active development Go to your terminal and go to any directory under which you want to clone the repo and open terminal.
- Paste the link you copied after typing `git clone `. It will look like this :
```console
git clone --single-branch -b operators https://github.com/your_username/dnnCompiler.git
```
### Changing branch
- Go inside the repo
```console
cd dnnCompiler
```
* Now you will be inside the repository.
- Check how many branches this repository has.
```console
git branch -r
```
- You will see something like:
```bash
origin/HEAD -> origin/master
origin/master
origin/operators
```
- Check on which branch you are currently on
```console
git branch
```
- You will see something like:
```bash
* master
operators
```
- The `*` shows your current branch.
- Change the branch to the operators as all the newer development is done on that branch.
```console
git checkout operators
```
- You will see something like
```bash
Switched to a new branch 'operators'
Branch 'operators' set up to track remote branch 'operators' from 'origin'.
```
- Now if you do
```console
git branch
```
- You will see:
```bash
master
* operators
```
- Now you are on operators branch.
#### Add synchronization steps to get latest updates from `AITS dnnCompiler`
* Now you will have to setup your repo so that it can sync new updates from the original **dnnCompiler** repo under **AITS**. As there will be other developers working on that. To do that you have to set **dnnCompiler** repo of **AITS** as an **upstream**.
- Add a remote upstream of the original **dnnCompiler** (You only need to do this upstream setup once! But **fetching** and **merging** should be done everytime)
```console
git remote add upstream https://github.com/ai-techsystems/dnnCompiler
```
- This will add original **dnnCompiler** as upstream.
#### Update code
* Now you are set to change and update your code.
## Add new operators
This is a tutorial for adding new operator implementation in C++ using Eigen and Swig for interface to Python. Video explains in more detail how implementation is carried out for each operator.
1. Create header file (.h) in **[include / operators](https://github.com/ai-techsystems/dnnCompiler/tree/master/include/operators)** (_see other files for example_)
2. Create test file (.cpp) in **[src / operators](https://github.com/ai-techsystems/dnnCompiler/tree/master/src/operators)** (_see other files for example_)
3. Compile and run .cpp file.
For reference look at this tutorial, and just watch till **8:33 minutes**, as after that he shows how to add them in swig, but the process of adding the operators in swig has changed to a much easier and more convenient way.
[<img src="https://img.youtube.com/vi/2CITO2SEAfE/maxresdefault.jpg" width="100%">](https://youtu.be/2CITO2SEAfE)
---
## Why use Eigen
#### Below is a snippet code only for **2D**. One uses Eigen, and another just uses loop.
<details>
<summary>With Eigen</summary>
```cpp
tensor<T> eigen_compute(tensor<T> &a, tensor<T> &b){
if (a.shape() != b.shape())
throw std::invalid_argument(
"tensor dimenions not appropriate for Div operator.");
if (a.rank() == 2 && b.rank() == 2) {
tensor<T> result(a.shape()[0], b.shape()[1]);
DNNC_EIGEN_MATRIX(eigenMatrixA, a);
DNNC_EIGEN_MATRIX(eigenMatrixB, b);
Matrix<T, Dynamic, Dynamic, RowMajor> eResult =
eigenMatrixA.array() / eigenMatrixB.array();
result.load(eResult.data());
return result;
}
return tensor<T>();
}
```
</details>
<details>
<summary>Without Eigen</summary>
```cpp
tensor<T> without_eigen_compute(tensor<T> &a, tensor<T> &b) {
if (a.shape() != b.shape())
throw std::invalid_argument(
"tensor dimenions not appropriate for Div operator.");
tensor<T> result(a.shape(), a.name());
for (size_t i = 0; i < a.length(); i++)
result[i] = a[i] / b[i];
return result;
}
```
</details>
### Now let's see the performance
<details>
<summary>Random array generation function</summary>
```cpp
void generate_random(float* a,int size){
srand(time(0));
int i;
for (i=0;i<size;i++){
a[i]=rand();
}
}
```
</details>
#### Going with relatively small matrix
<details>
<summary>Small matrix input</summary>
```cpp
int main() {
float d1[100],d2[100];
generate_random(d1,100);
generate_random(d2,100);
tensor<float> a(10, 10);
a.load(d1);
tensor<float> b(10, 10);
b.load(d2);
Div<float> m("localOpName");
clock_t t;
t = clock();
auto result_1 = m.without_eigen_compute(a, b);
t = clock() - t;
double time_taken_1 = ((double)t)/CLOCKS_PER_SEC;
t = clock();
auto result_2 = m.eigen_compute(a, b);
t = clock() - t;
double time_taken_2 = ((double)t)/CLOCKS_PER_SEC;
std::cout << time_taken_1 << " seconds took without eigen " << std::endl;
std::cout << time_taken_2 << " seconds took with eigen" << std::endl;
return 0;
}
```
</details>
##### Here Eigen is **~10x** faster than looping
#### Going with relatively large matrix
<details>
<summary>Large matrix input</summary>
```cpp
int main() {
float d1[1000000],d2[1000000];
generate_random(d1,1000000);
generate_random(d2,1000000);
tensor<float> a(1000, 1000);
a.load(d1);
tensor<float> b(1000, 1000);
b.load(d2);
Div<float> m("localOpName");
clock_t t;
t = clock();
auto result_1 = m.without_eigen_compute(a, b);
t = clock() - t;
double time_taken_1 = ((double)t)/CLOCKS_PER_SEC;
t = clock();
auto result_2 = m.eigen_compute(a, b);
t = clock() - t;
double time_taken_2 = ((double)t)/CLOCKS_PER_SEC;
std::cout << time_taken_1 << " seconds took without eigen " << std::endl;
std::cout << time_taken_2 << " seconds took with eigen" << std::endl;
return 0;
```
</details>
##### Here Eigen is **~2x** faster than looping
#### Eigen is excellent in memory handling and efficiency, rather than us looping through the tensor.
## Add documentation for the operators
This is a tutorial for documenting your operator implementation in C++.
We will be using [Doxygen](http://www.doxygen.nl/index.html) for our documentation purpose.
Install doxygen in your system by following this [tutorial](https://www.youtube.com/watch?v=44Ja2X_fzv4).
Here's how to run doxygen.
```console
doxygen doxygen.cfg
```
This will create a 'docs' folder outside your local repo folder
Search for 'index.html' in docs/html and run it on your browser.
#### Steps to follow for documentation
1. This is how to put documentation for the operator class. Notice the '!' in the comment block.
```cpp
/*! <Put your operator description here>
...
*/
template <typename T> class <operator> : public baseOperator<T> {
...
};
```
2. Here's how you can put formulas in your operator [link](http://www.doxygen.nl/manual/formulas.html).
We will be using MathJax, so there is no need to install LaTeX on your system. [You can use this site to help generate LaTeX code](https://www.codecogs.com/latex/eqneditor.php).
```cpp
/*! \f$ \max (0,\min(1,alpha*x+beta)) \f$
*/
template <typename T> class HardSigmoid : public baseOperator<T> {
```
3. You can implement all your member functions and protected attributes
Here's a full [manual](http://www.doxygen.nl/manual/docblocks.html#cppblock) for documentation using doxygen.
I will be giving quick examples to document attributes and member functions.
Notice the '!<' in the comment block. Attributes-
```cpp
float epsilon = 1e-05; /*!< In case variance goes to zero and to avoid division by zero. */
```
Member functions- documenting the inputs and outputs
```cpp
tensor<T> compute(tensor<T> &input /*!< [float,double]: ND tensor of shape ( NxCxD1xD2…Dk ).*/){
...
}
/*!<
\return The output tensor of the same shape as input.
*/
```
Note that this is only for class members. For documenting non-members and static members see point 1
You can look at **[include / operators / InstanceNormalization.h](../include/operators/InstanceNormalization.h)** for a full example.
You might want to delete the docs folder outside your local repo after work.
## Add operators in python interface
#### Operator Interface Automation:
We are currently automating the `dnnc.i` and `dnnc_api.cpp` file, to save you some time and repetitive work.
In the process of automation we will be needing two files,
* **[swig / dnnc.api](../swig/dnnc.api)** (pseudo cpp/python file which you will be adding your operators to)
* **[swig / op_gen.py](../swig/op_gen.py)** (which will generate `dnnc_swig_externs.h` and `dnnc_api.cpp` file from the above `dnnc.api` file)
#### op_gen.py is integrated in Makefile, so running make at the top-level or in [swig /](../swig) will generate required files.
* So here is the **[Guide](#guide)** to follow while writing **dnnc.api**, there are some examples shown below.
* After adding your operator inside dnnc.api, run make clean to clean previous compilations
```console
make clean
```
* Then run make again, to compile it with your addition.
```console
make
```
    - This will generate the required swig files and compile them so that we can use them from the python interface too.
##### Explicit Usage of automation:
```console
python op_gen.py
```
#### I have tried to pick and write some diverse examples below to give you an idea how the `dnnc.api` file will look like.
---
##### MatMul and Add operators has input and output of same dtypes
```cpp
tensor<output> matmul(tensor<input> &a, tensor<input> &b) {
MatMul<input> op;
return op.compute(a, b);
dtype = {
"float" : "float",
"int" : "int"
}
}
tensor<output> add(tensor<input> &a, tensor<input> &b) {
Add<input> op;
return op.compute(a, b);
dtype = {
"float" : "float",
"int" : "int"
}
}
```
---
##### DequantizeLinear takes b tensor as float, and it's fixed, so declared the b tensor as `<float>`, instead of `<input>`
```cpp
tensor<output> dequantize_linear(tensor<input> &a, tensor<float> &b, tensor<input> &c) {
DequantizeLinear<input> op;
return op.compute(a, b, c);
dtype = {
"float" : "int"
}
}
```
---
##### Elu has fixed input and output, `<float>` only, either you can write `<float>` instead of `<input>` and `<output>`, or specify dtype, both works.
```cpp
tensor<output> elu(tensor<input> &a, float alpha = 1.0) {
Elu<input> op("localOpName", alpha);
return op.compute(a);
dtype = {
"float" : "float"
}
}
```
---
##### Equal only outputs in `<bool>`
```cpp
tensor<output> equal(tensor<input> &a, tensor<input> &b) {
Equal<input> op;
return op.compute(a, b);
dtype = {
"bool" : "bool",
"bool" : "int",
"bool" : "float"
}
}
```
---
##### This should give you a rough idea how the dnnc.api file will look like. If you like to see the whole picture, see below
<details>
<summary>Example</summary>
```cpp
tensor<output> matmul(tensor<input> &a, tensor<input> &b) {
MatMul<input> op;
return op.compute(a, b);
dtype = {
"float" : "float",
"int" : "int"
}
}
tensor<output> add(tensor<input> &a, tensor<input> &b) {
Add<input> op;
return op.compute(a, b);
dtype = {
"float" : "float",
"int" : "int"
}
}
tensor<output> dequantize_linear(tensor<input> &a, tensor<float> &b, tensor<input> &c) {
DequantizeLinear<input> op;
return op.compute(a, b, c);
dtype = {
"float" : "int"
}
}
tensor<output> elu(tensor<input> &a, float alpha = 1.0) {
Elu<input> op("localOpName", alpha);
return op.compute(a);
dtype = {
"float" : "float"
}
}
tensor<output> equal(tensor<input> &a, tensor<input> &b) {
Equal<input> op;
return op.compute(a, b);
dtype = {
"bool" : "float",
"bool" : "int",
"bool" : "bool"
}
}
```
</details>
### Guide :
* Everything except **dtype** block is a cpp block, and **dtype** is a python dictionary which contains all kinds of input output datatype combination possible for the operators:
```python
dtype = {
"output1" : "input1",
"output2" : "input2",
"output2" : "input1",
...
}
```
* Everything inside `dnnc.api` is **whitespace** and **newline** sensitive, so try to keep the structure similar.
* Make sure to add a blank line between 2 operators.
* Don't leave any blank lines inside operators' functions.
* Don't leave more than one blank line anywhere.
* Use comment syntax (`/*` or `*/`) in the same line as the code. See the example below
```cpp
tensor<output> less_equal(tensor<input> &a, tensor<input> &b) {
LessEqual<input> op;
return op.compute(a, b);
dtype = {
"bool" : "bool",
"bool" : "int",
"bool" : "float",
"bool" : "double"
}
}
/* The below operators need to change according to the above operators */
tensor<float> thresholded_relu(tensor<float> &a) {
ThresholdedRelu<float> op;
return op.compute(a);
}
/* tensor<output> logical_xor(tensor<input> &a, tensor<input> &b) {
Xor<input> op;
return op.compute(a, b);
dtype = {
"bool" : "double",
"bool" : "float",
"bool" : "bool",
"bool" : "int"
}
} */
tensor<output> transpose(tensor<input> &a) {
Transpose<input> op;
return op.compute(a);
dtype = {
"double" : "double",
"float" : "float",
"int" : "int",
"bool" : "bool"
}
}
```
## Add unittests for operator testing
#### Test Case Automation:
##### We have created 2 files which will keep track of our operators, which passes or fails the test cases:
* **[test / swig / passingTests.txt](../test/swig/passingTests.txt)**
* **[test / swig / failingTests.txt](../test/swig/failingTests.txt)**
##### We have created 2 python scripts to run the tests at ease:
* **[test / run_all.py](../test/run_all.py)** (It will run all the testcases mentioned on the `passingTests.txt`)
* **[test / run_one.py](../test/run_one.py)** (It will run only one testcase operator at a time)
##### Why do we need them?
At some point in dnnCompiler development, a pull request will only be accepted when the make command builds successfully. Currently, in the top-level make, `run_all.py` is already integrated. You can check that with the command
```console
make TEST
```
This will help us to get rid of the tension when it comes to merging an update, whether the update will break the functionality or not.
#### How to add your unittest
* Go to **[test / swig /](https://github.com/ai-techsystems/dnnCompiler/tree/operators/test/swig)**
* Here are all the **[python unittest](https://docs.python.org/3/library/unittest.html)** files. Go add yours too by looking at others as demo.
* you can run them by (if your operator name is MatMul.py)
```console
python MatMul.py
```
## Work-FLow
* For adding new operators you have to add your code as mentioned in **[Add new operators](#add-new-operators)**
* **[include / operators /](https://github.com/ai-techsystems/dnnCompiler/tree/master/include/operators)** (The .h file)
* **[src / operators /](https://github.com/ai-techsystems/dnnCompiler/tree/master/src/operators)** (The .cpp file)
* Now to wrap them in python interface go to **[swig / ](https://github.com/ai-techsystems/dnnCompiler/tree/operators/swig)** folder
* Look for a file named **[dnnc.api](https://github.com/ai-techsystems/dnnCompiler/blob/master/swig/dnnc.api)**
* It's a pseudo (cpp/python) code. There are some things which you need to remember before adding your operator in this file. Head towards **[guide](#guide-)** section to learn how to add your operator inside **[dnnc.api](https://github.com/ai-techsystems/dnnCompiler/blob/master/swig/dnnc.api)** file.
* After that, run **make clean** followed by **make** in the same directory.
```console
make clean
```
```console
make
```
* If everything went fine, go to **[test / swig /](https://github.com/ai-techsystems/dnnCompiler/tree/operators/test/swig)**
* Here are all the **[python unittest](https://docs.python.org/3/library/unittest.html)** files. Go add yours too by looking at others as demo.
* To test your **unittest** file, there are 2 ways.
- `Option 1`: Inside **[test / swig /](https://github.com/ai-techsystems/dnnCompiler/tree/operators/test/swig)** (If your operator is **Reciprocal.py**) run the following command:
```console
python Reciprocal.py
```
- `Option 2`: Inside **[test /](https://github.com/ai-techsystems/dnnCompiler/tree/operators/test/)** (If your operator is **Reciprocal.py**) run the following command:
```console
python run_one.py Reciprocal.py
```
* If your operator's unittest was successful, go to **[test / swig / passingTests.txt](https://github.com/ai-techsystems/dnnCompiler/blob/master/test/swig/passingTests.txt)** and append your operator's unittest name there, in a new line.
* If your operator's unittest was unsuccessful, go to **[test / swig / failingTests.txt](https://github.com/ai-techsystems/dnnCompiler/blob/master/test/swig/failingTests.txt)** and append your operator's unittest name there, in a new line.
* After that go to **[test /](https://github.com/ai-techsystems/dnnCompiler/tree/operators/test/)** and run the following command, which will run all the passing tests listed in the **[test / swig / passingTests.txt](https://github.com/ai-techsystems/dnnCompiler/blob/master/test/swig/passingTests.txt)**. If you added your operator there, your unittest will run too. Command:
```console
python run_all.py
```
- If everything goes well, you have successfully added your operator and integrated it with python.
## Pull latest updates
* If you don't want to keep any changes you made, and just pull the upstream, use this:
```console
git fetch upstream
```
* Followed by
```console
git reset --hard upstream/operators
```
* To read more, go to this **[StackOverflow link](https://stackoverflow.com/questions/1125968/how-do-i-force-git-pull-to-overwrite-local-files)**.
* If you want to keep your work, and pull update from upstream, follow below.
#### Backing up uncommitted work:
* First back up your current work:
```console
git stash
```
#### Pull latest updates from `AITS dnnCompiler`
* Remember you added Upstream while setting up your repo, we will be using that now. If you haven't done that yet, go to [this section](#add-synchronization-steps-to-get-latest-updates-from-aits-dnncompiler)
- To fetch the latest updates from the **dnnCompiler** repo from **AITS**, use
```console
git fetch upstream
```
- You will see something like
```bash
From https://github.com/ai-techsystems/dnnCompiler
* [new branch] master -> upstream/master
* [new branch] operators -> upstream/operators
```
* Now based on which branch you are currently on, you have to merge `origin/branch_name` with `upstream/branch_name`. **Origin** means your forked local repo, and **Upstream** means the original repo from **AITS** here.
#### Merging the update from upstream
* If you followed all previous steps, you will be currently on `origin/operators` branch.
* Now we will merge the upstream operators branch.
```console
git merge upstream/operators
```
- There can be 2 possibilities:
1. If you are already upto date, you will see something like this.
```bash
Already up to date.
```
2. If there was some updates from upstream repo, you will see somthing like this.
```bash
Updating 5e128bb..daa1019
Fast-forward
include/operators/Reciprocal.h | 19 +++++++++++++++++--
src/operators/Reciprocal.cpp | 13 +++++++++++++
swig/dnnc.api | 12 ++++++++++++
swig/dnnc_api.cpp | 15 +++++++++++++++
swig/dnnc_swig_externs.h | 4 ++++
5 files changed, 61 insertions(+), 2 deletions(-)
```
- Else every update will be merged from operators branch.
* We will not merge the `upstream/master` as it is not required, but if you want to do that too, follow the steps below.
- First change to master branch
```console
git checkout master
```
- If you did `git fetch` previously, don't bother to do that again, or do a `git fetch upstream`.
- Then merge master branch
```console
git merge upstream/master
```
- Now your master branch will also be updated, before you forget, go back to `operators` branch, as we will modify that only.
```console
git checkout operators
```
- Now both of your branches are synchronized with the latest update from **AITS dnnCompiler** repo.
* Now your repo is synchronized with the latest update from upstream. Now sync your forked repo with upstream. Till now you synced your local repo with upstream, but not published it in your github forked repo, to do that simply type
```console
git push
```
* Now everything is in sync.
#### Get uncomitted code back
* Now get back the local changes you saved earlier with `git stash` command.
```console
git stash pop
```
* Here 2 things can happen:
- Either it will merge your saved work with recent update automatically, which will say like this, and doesn't need attention:
```bash
On branch operators
Your branch is ahead of 'origin/operators' by 25 commits.
(use "git push" to publish your local commits)
Changes not staged for commit:
(use "git add <file>..." to update what will be committed)
(use "git restore <file>..." to discard changes in working directory)
modified: docs/DeveloperGettingStartedGuide.md
no changes added to commit (use "git add" and/or "git commit -a")
```
- Or it will show conflict while merge like this, this **needs your attention**.
```bash
Auto-merging swig/dnnc_swig_externs.h
CONFLICT (content): Merge conflict in swig/dnnc_swig_externs.h
Auto-merging swig/dnnc_api.cpp
CONFLICT (content): Merge conflict in swig/dnnc_api.cpp
Auto-merging swig/dnnc.api
CONFLICT (content): Merge conflict in swig/dnnc.api
Auto-merging include/operators/Or.h
```
#### Resolve merge conflict issue
In the previous step, if you have faced the **merge conflict**, this is what you need to do:
* See the above message, says you have conflict in 3 files. So if you open these 3 files, you will see something like this:
```cpp
<<<<<<< Updated upstream
#include "operators/Mod.h"
#include "operators/Mul.h"
#include "operators/Neg.h"
#include "operators/Not.h"
#include "operators/NotEqual.h"
#include "operators/Or.h"
#include "operators/Pow.h"
=======
#include "operators/Reciprocal.h"
>>>>>>> Stashed changes
```
- What this means is,
```cpp
<<<<<<< Updated upstream
// the code you fetched from the upstream/or remote repository
=======
// the code you wrote earlier and stashed, which now creates
// merge conflict upon doing `git stash pop`
>>>>>>> Stashed changes
```
- So, change what necessary, and delete those symbols, git creates this to show you where the conflict is. So after removing conflict, the snippet will look like this:
```cpp
#include "operators/Mod.h"
#include "operators/Mul.h"
#include "operators/Neg.h"
#include "operators/Not.h"
#include "operators/NotEqual.h"
#include "operators/Or.h"
#include "operators/Pow.h"
#include "operators/Reciprocal.h"
```
* By doing this procedure to every file, which is showing conflict, you can manage to resolve the conflict.
#### Push your modified code to your forked repo in GitHub
* Now you will have your uncommitted work over the synced repo, just as you wanted. Do more modifications if required. And then do the usual commands to push your changes in your forked repo.
```console
git add .
git commit -m "commit message"
git push
```
* This will update your forked repo with your additions. Now if you want them to be added in the **AITS dnnCompiler** repo, see the Pull request section below.
## Create pull request
* If you followed previous instructions, you will have a forked repo which has the latest update from **AITS dnnCompiler** with your further modifications.
* Now go to your forked repo in GitHub in your browser.
* Change branch from master to operators.
* You will see something like
> Your branch is ahead of n commits of ai-techsystems:operators.
* Click on **pull request**
* You will be taken to a new page where in the top you can see
> merge [operator branch] [aits dnnCompiler] <-- [operator branch] [your_username dnnCompiler]
* You will also be able to see the changes you made in the comparison of files below that.
* Now click on **create pull request**
* It's done!
<file_sep>#!/usr/bin/env python3
import os, sys, glob
import subprocess
import unittest
import deepC
# This test runs compiler as a user would run on command line.
class mnistTest(unittest.TestCase):
def setUp(self):
self.debug=False;
test_dir = os.path.join("compiler", "mnist")
for wcard in [ '*bias', '*weight', '*out', '*cpp', '*exe']:
for filename in glob.glob(os.path.join(test_dir, wcard)):
os.remove(filename)
compile_scr = os.path.join(os.path.dirname(deepC.__file__), "compiler", "onnx2exe.py")
image_file = os.path.join(test_dir, "image.txt")
self.commands = [
# delete last generated file.
"rm -f " + os.path.join(test_dir, "mnist.exe"),
# compile onnx into exe
"python3 " + compile_scr + " " + os.path.join(test_dir, "mnist.onnx"),
# check executable file.
"ls -l " + os.path.join(test_dir, "mnist.exe"),
# run it with relative path.
os.path.join(test_dir, "mnist.exe") + " " + image_file,
# run it in bundle dir (current dir)
"cd " + test_dir + "; ./mnist.exe image.txt",
# run it with absolute path
os.path.join(os.getcwd(), test_dir, "mnist.exe") + " " + image_file
]
def test_runModel(self):
test_env = os.environ.copy();
if ( "PYTHONPATH" not in test_env ) :
test_env["PYTHONPATH"]=os.path.abspath(os.path.join(os.getcwd(),'../../'))
else:
test_env["PYTHONPATH"]+=":"+os.path.abspath(os.path.join(os.getcwd(),'../../'))
for cmd in self.commands:
test_proc=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, env=test_env)
try:
outs, errs = test_proc.communicate(timeout=30)
except subprocess.TimeoutExpired:
test_proc.kill()
outs, errs = test_proc.communicate()
if ( self.debug ):
print(cmd)
print(outs.decode())
def tearDown(self):
return "test finished"
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
template <typename To, typename Ti1, typename Ti2>
class Conv : public baseOperator<To, Ti1, Ti2> {
  // Conv attributes (mirror the ONNX Conv operator attributes)
protected:
  std::string auto_pad;           // padding policy: "NOTSET", "VALID", "SAME_UPPER" or "SAME_LOWER"
  std::vector<int> dilations;     // dilation per spatial axis; empty means default of 1 per axis
  int group;                      // number of channel groups the input is split into
  std::vector<int> kernel_shape;  // kernel size per axis; empty means inferred from W
  std::vector<int> pads;          // explicit per-side padding (used when auto_pad == "NOTSET")
  std::vector<int> strides;       // stride per spatial axis; empty means default of 1 per axis
public:
Conv(std::string name = "opConv", std::string auto_pad = "NOTSET",
std::vector<int> dilations = {}, int group = 1,
std::vector<int> kernel_shape = {},
// The shape of the convolution kernel. If not present, should be
// inferred from input W.
std::vector<int> pads = {}, std::vector<int> strides = {})
: baseOperator<To, Ti1, Ti2>(opConv, name) {
this->auto_pad = auto_pad;
this->dilations = dilations;
this->group = group;
this->kernel_shape = kernel_shape;
this->pads = pads;
this->strides = strides;
}
bool getAttribute(OPATTR attrName, std::vector<int> &obj) override {
if (attrName == attr_kernel_shape) {
obj = kernel_shape;
return true;
} else if (attrName == attr_pads) {
obj = pads;
return true;
} else if (attrName == attr_strides) {
obj = strides;
return true;
} else if (attrName == attr_dilations) {
obj = dilations;
return true;
}
return false;
}
bool getAttribute(OPATTR attrName, int &obj) override {
if (attrName == attr_group) {
obj = group;
return true;
}
return false;
}
bool getAttribute(OPATTR attrName, std::string &obj) override {
if (attrName == attr_auto_pad) {
obj = auto_pad;
return true;
}
return false;
}
bool setAttribute(OPATTR attrName, std::string obj) override {
if (attrName == attr_auto_pad) {
auto_pad = obj;
return true;
}
return false;
}
bool setAttribute(OPATTR attrName, std::vector<int> obj) override {
if (attrName == attr_dilations) {
dilations = obj;
return true;
}
if (attrName == attr_kernel_shape) {
kernel_shape = obj;
return true;
}
if (attrName == attr_pads) {
pads = obj;
return true;
}
if (attrName == attr_strides) {
strides = obj;
return true;
}
return false;
}
bool setAttribute(OPATTR attrName, int obj) override {
if (attrName == attr_group) {
group = obj;
return true;
}
return false;
}
tensor<To> compute(tensor<Ti1> &X, tensor<Ti1> &W,
tensor<Ti1> &B = NULL_TENSOR<Ti1>) {
//
// N - batch size
// C = number of channels
// H - Image height
// W - Image width
// M - number of feature maps
//
// X is Input data tensor from previous layer; has size (N x C x H x W),
// In General, Input data tensor size could be (N x C x D1 x D2 ... x Dn).
// For now supporting only for (N x C x H x W)
// W is the weight tensor that will be used in the convolutions; has size
// (M x C/group x kH x kW)
// B it the optional 1D bias to be added to the convolution, has size of M
// The output dimensions are functions of the kernel size, stride size, and
// pad lengths.
// Result shape is N x M x C x rH x rW
// Result rH and rW formula R =(X-K+2P)/S + 1
// Padding required on either side for same shape P = (X(S-1) - S + K)/2
std::stringstream errMsg;
//
// basic initializations
//
// batch size
size_t batchSize = X.shape()[0];
// channels
size_t numChannels = X.shape()[1];
// data height and width
size_t X_h = X.shape()[2];
size_t X_w = X.shape()[3];
// numFeatureMaps
size_t numFeatureMaps = W.shape()[0];
// result shape
std::vector<size_t> resultShape;
resultShape.push_back(batchSize);
resultShape.push_back(numFeatureMaps);
resultShape.push_back(numChannels);
//
// Process and check the arguments and inputs
//
// bias
if (B.length() != numFeatureMaps) {
errMsg << "Bias length (" << B.length()
<< "is different than number of feature maps (" << numFeatureMaps
<< ")" << std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
// channels and groups
if (W.shape()[1] != numChannels / group) {
errMsg << "Weight tensor shape along second axis " << W.shape()[1]
<< "doesn't match " << numChannels / group
<< "(input channels/group)" << std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
// stride
if (strides.empty()) {
// the stride defaults is 1 along each spatial axis.
strides.push_back(1);
strides.push_back(1);
} else if (strides.size() != 2) {
errMsg << "stride expected along 2 spatial axes, specified along"
<< strides.size() << "spatial axes" << std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
// dilations
if (dilations.empty()) {
dilations = std::vector<int>(W.rank() - 2);
// dilations defaults is 1 along each spatial axis
for (size_t axis = 0; axis < W.rank() - 2; axis++) {
dilations[axis] = 1;
}
} else if (dilations.size() != 2) {
errMsg << "stride expected along 2 spatial axes, specified along"
<< dilations.size() << "spatial axes" << std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
// Kernel
std::vector<size_t> kernelShape(W.rank());
kernelShape[0] = numFeatureMaps;
kernelShape[1] = numChannels;
for (size_t axis = 2; axis < W.rank(); axis++) {
if (kernel_shape.empty()) {
kernelShape[axis] = (W.shape()[axis]) * dilations[axis - 2];
} else {
kernelShape[axis] = kernel_shape[axis];
}
}
tensor<Ti1> kernel(kernelShape);
for (size_t featureMap = 0; featureMap < numFeatureMaps; featureMap++) {
for (size_t filterChannel = 0; filterChannel < numChannels;
filterChannel++) {
int channelIndx = filterChannel / group;
for (size_t i = 0; i < kernelShape[2]; i++) {
for (size_t j = 0; j < kernelShape[3]; j++) {
if (((i + 1) % dilations[0] == 0) &&
((j + 1) % dilations[1] == 0)) {
kernel(featureMap, filterChannel, i, j) =
W(featureMap, channelIndx, (((i + 1) / dilations[0]) - 1),
(((j + 1) / dilations[1]) - 1));
} else {
kernel(featureMap, filterChannel, i, j) = 0;
}
}
}
}
}
// auto_pad
char padType = '\0';
if (auto_pad == "VALID") {
// no padding
padType = 'N';
for (size_t axis = 2; axis < X.rank(); axis++) {
if (X.shape()[axis] <= kernelShape[axis]) {
errMsg << "Kernel is too big for the given input and paddings"
<< std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
resultShape.push_back(
((X.shape()[axis] - kernelShape[axis]) / strides[axis - 2]) + 1);
}
if (!pads.empty()) {
errMsg << "auto_pad and pads attribute can't be used simultaneously"
<< std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
} else if (auto_pad == "SAME_UPPER") {
padType = 'U';
resultShape.push_back(X.shape()[2]);
resultShape.push_back(X.shape()[3]);
// add extra padding at the end to match the
// output spatial size with the input
if (!pads.empty()) {
errMsg << "auto_pad and pads attribute can't be used simultaneously"
<< std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
} else if (auto_pad == "SAME_LOWER") {
padType = 'L';
resultShape.push_back(X.shape()[2]);
resultShape.push_back(X.shape()[3]);
// add extra padding at the beginning to match the
// output spatial size with the input
if (!pads.empty()) {
errMsg << "auto_pad and pads attribute can't be used simultaneously"
<< std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
} else if (auto_pad == "NOTSET") {
padType = 'P';
if (pads.empty()) {
errMsg << "explicit pads expected when auto_pad is \"NOTSET\""
<< std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
for (size_t axis = 2; axis < X.rank(); axis++) {
if ((X.shape()[axis] + pads[axis] + pads[axis - 2]) <=
kernelShape[axis]) {
errMsg << "Kernel is too big for the given input and paddings"
<< std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
resultShape.push_back(((X.shape()[axis] - kernelShape[axis] +
pads[axis] + pads[axis - 2]) /
strides[axis - 2]) +
1);
}
} else {
errMsg << "auto_pad must be either \"NOTSET\", \"SAME_UPPER\", "
"\"SAME_LOWER\" or \"VALID\""
<< std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
// pads
size_t padsSize = 2 * (X.rank() - 2);
if (!pads.empty()) {
if (pads.size() != (2 * (X.rank() - 2))) {
errMsg << "pads expected format is [x1_begin, x2_begin...x1_end, "
"x2_end,...]"
<< "found " << pads.size() << " elements ( expected " << padsSize
<< std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
// above and below code is changed by Gunjan
for (size_t i = 0; i < padsSize; i++) {
if (pads[i] < 0) {
errMsg << "pads value at index " << i << " is less than 0 ("
<< pads[i] << ")" << std::endl;
SPDLOG_ERROR(errMsg.str().c_str());
return NULL_TENSOR<To>;
}
}
} else {
pads = std::vector<int>(padsSize);
int total_padding_required;
if (padType == 'N') {
for (size_t i = 0; i < padsSize; i++) {
pads[i] = 0;
}
} else if (padType == 'U') {
for (size_t i = 0; i < X.rank() - 2; i++) {
total_padding_required = X.shape()[i + 2] * (strides[i] - 1) -
strides[i] + W.shape()[i + 2];
pads[i] = pads[i + X.rank() - 2] = total_padding_required / 2;
if ((total_padding_required % 2) != 0) {
pads[i + X.rank() - 2] = pads[i + X.rank() - 2] + 1;
}
}
} else if (padType == 'L') {
for (size_t i = 0; i < X.rank() - 2; i++) {
total_padding_required = X.shape()[i + 2] * (strides[i] - 1) -
strides[i] + W.shape()[i + 2];
pads[i] = pads[i + X.rank() - 2] = total_padding_required / 2;
if ((total_padding_required % 2) != 0) {
pads[i] = pads[i] + 1;
}
}
}
}
// work out the result
tensor<To> result(resultShape);
tensor<Ti1> paddedInput({X_h + pads[0] + pads[2], X_w + pads[1] + pads[3]});
tensor<Ti1> convImage({resultShape[3], resultShape[4]});
tensor<Ti1> filter({kernelShape[2], kernelShape[3]});
std::vector<size_t> __pads;
for (size_t i = 0; i < pads.size(); i++) {
__pads.push_back((size_t)pads[i]); // need to do this for type conversion
}
for (size_t batchIndx = 0; batchIndx < batchSize; batchIndx++) {
for (size_t channelIndx = 0; channelIndx < numChannels; channelIndx++) {
// padded input image
for (size_t hIndx = 0; hIndx < X_h + __pads[0] + __pads[2]; hIndx++) {
for (size_t wIndx = 0; wIndx < X_w + __pads[1] + __pads[3]; wIndx++) {
if (hIndx < __pads[0] || hIndx >= (X_h + __pads[0]) ||
wIndx < __pads[1] || wIndx >= (X_w + __pads[1])) {
paddedInput(hIndx, wIndx) = 0;
} else {
paddedInput(hIndx, wIndx) = X(
batchIndx, channelIndx, hIndx - __pads[0], wIndx - __pads[1]);
}
}
}
for (size_t featureMapIndx = 0; featureMapIndx < numFeatureMaps;
featureMapIndx++) {
// convolve
for (size_t hIndx = 0; hIndx < resultShape[3]; hIndx = hIndx + 1) {
for (size_t wIndx = 0; wIndx < resultShape[4]; wIndx = wIndx + 1) {
result(batchIndx, featureMapIndx, channelIndx, hIndx, wIndx) = 0;
if (B != NULL_TENSOR<Ti1>) {
result(batchIndx, featureMapIndx, channelIndx, hIndx, wIndx) =
B(featureMapIndx);
}
for (size_t i = 0; i < kernelShape[2]; i++) {
for (size_t j = 0; j < kernelShape[3]; j++) {
result(batchIndx, featureMapIndx, channelIndx, hIndx,
wIndx) += kernel(featureMapIndx, channelIndx, i, j) *
paddedInput(hIndx * (size_t)strides[1] + i,
wIndx * (size_t)strides[1] + j);
}
}
}
}
// end convolve
}
}
}
return result;
}
}; // template class
} // namespace dnnc
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#include "operators/baseOperator.h"
//#define DNNC_OPERATOR_TEST 1
#ifdef DNNC_OPERATOR_TEST
#include <iostream>
namespace dnnc {
template <typename T> class fakeOperatorTest : public baseOperator<T> {
public:
  fakeOperatorTest() : baseOperator<T>(opAbs) {}
  /*! Build the Eigen view matching the tensor's rank and, for ranks 1 and 2,
   *  print it to stdout. Ranks 3 and 4 only construct the view (printing is
   *  intentionally disabled); any other rank is a no-op. */
  void testEigenMatrix(tensor<T> &t) {
    switch (t.rank()) {
    case 1: {
      DNNC_EIGEN_VECTOR(eigenVector, t);
      std::cout << eigenVector << "\n";
      break;
    }
    case 2: {
      DNNC_EIGEN_MATRIX(eigenMatrix, t);
      std::cout << eigenMatrix << "\n";
      break;
    }
    case 3: {
      DNNC_EIGEN_TENSOR(eigenTensor, t);
      // std::cout << eigenTensor << "\n";
      break;
    }
    case 4: {
      DNNC_EIGEN_TENSOR4D(eigenTensor4D, t);
      // std::cout << eigenTensor4D << "\n";
      break;
    }
    default:
      break;
    }
  }
};
} // namespace dnnc
using namespace dnnc;
int main() {
tensor<float> tf({3, 4});
fakeOperatorTest<float> fotf;
fotf.testEigenMatrix(tf);
tensor<double> td({3, 4});
fakeOperatorTest<double> fotd;
fotd.testEigenMatrix(td);
tensor<int> ti({3, 4});
fakeOperatorTest<int> foti;
foti.testEigenMatrix(ti);
tensor<float> tf1({2, 3, 4, 5});
fakeOperatorTest<float> fotf1;
fotf1.testEigenMatrix(tf1);
tensor<double> td1({3, 4, 6, 7});
fakeOperatorTest<double> fotd1;
fotd1.testEigenMatrix(td1);
return 0;
}
#endif
<file_sep>import os
separator = os.path.sep
def create_testcase (op_name, inputs, outputs, nodes, declarations):
    """Generate and immediately run a single-operator ONNX testcase script.

    Writes ``../testcases/<op_name>/<op_name>_generator.py`` containing the
    given graph description, then executes it with the current interpreter.
    The generated script builds an ONNX model, saves it under
    ``../testcases/<op_name>/`` and parses it into ``../gold_files/``.

    Args:
        op_name: operator name; used for the directory, file and graph names.
        inputs: Python source fragment (string) pasted verbatim as the
            ``inputs = ...`` line of the generated script.
        outputs: source fragment for the ``outputs = ...`` line.
        nodes: iterable of source fragments, each one node-construction call
            appended to the generated script's ``nodes`` list.
        declarations: iterable of extra source lines emitted before ``nodes``.
    """
    import subprocess
    import sys

    py_file = '''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
\n\n'''
    py_file += 'import os, sys\n\n'
    py_file += 'import numpy as np\n'
    py_file += 'separator = os.path.sep\n\n'
    py_file += 'from onnx import *\n'
    py_file += 'sys.path.append(".."+separator+".."+separator+".."+separator+".."+separator+"python/parser")\n'
    py_file += 'from onnx_parser import *\n\n'
    py_file += 'op_name = \'' + op_name + '\'\n\n'
    py_file += 'inputs = ' + inputs + '\n'
    py_file += 'outputs = ' + outputs + '\n'
    for declaration in declarations:
        py_file += declaration + '\n'
    py_file += 'nodes = []\n'
    for node in nodes:
        py_file += 'nodes.append(' + node + ')\n'
    py_file += 'graph = helper.make_graph(nodes, op_name+"_graph", inputs, outputs)\n'
    py_file += 'opset = (OperatorSetIdProto(version=11),)\n'
    py_file += 'model = helper.make_model(graph, opset_imports=opset)\n'
    py_file += 'onnx.checker.check_model(model)\n'
    py_file += 't_prefix = ".." + separator + "testcases" + separator + op_name + separator + op_name\n'
    py_file += 'g_prefix = ".." + separator + "gold_files" + separator + op_name\n'
    py_file += 'onnx.save(model, t_prefix+".onnx")\n'
    py_file += 'parse(t_prefix+".onnx", g_prefix+".sym", onnx_output_file=t_prefix+".txt")\n'

    testcase_dir = ".." + separator + "testcases" + separator + op_name
    # exist_ok avoids the check-then-create race of the old exists()/makedirs pair.
    os.makedirs(testcase_dir, exist_ok=True)
    file_name = testcase_dir + separator + op_name + "_generator.py"
    with open(file_name, 'w') as f:
        f.write(py_file)
    # Run with the interpreter executing this script: a bare "python" may not
    # be on PATH (python3-only systems), and the list form is safe for paths
    # containing spaces. Exit status is ignored, as os.system's was.
    subprocess.run([sys.executable, file_name])
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
'''
This file is required by tensorOperatorsGenerator.py
The tensorOperators dictionary below is nested as
"category" : {
    "operator" : {
        "left operand" : [
            "right operand 1",
            "right operand 2",
            ...
        ],
    },
},
except for the "unary" category, which maps each operator
directly to a list of operands (no right-operand level).
'''
tensorOperators = {
"assignment" : {
"add" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"sub" : {
"bool_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"mul" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"true_div" : {
"bool_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"int_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Dnnc != Numpy
# "float_tensor_2", # Dnnc != Numpy
# "int_scalar", # Dnnc != Numpy
# "int_tensor_2", # Dnnc != Numpy
],
},
"floor_div" : {
"bool_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"lshift" : {
"bool_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"int_tensor_1" : [
"bool_scalar",
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
},
"rshift" : {
"bool_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"int_tensor_1" : [
"bool_scalar",
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
},
"power" : {
"bool_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"and" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
},
"or" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
},
"xor" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
},
},
"binary" : {
"add" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"sub" : {
"bool_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"mul" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"true_div" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Dnnc != Numpy
# "float_tensor_2", # Dnnc != Numpy
# "int_scalar", # Dnnc != Numpy
# "int_tensor_2", # Dnnc != Numpy
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Dnnc != Numpy
# "float_tensor_2", # Dnnc != Numpy
# "int_scalar", # Dnnc != Numpy
# "int_tensor_2", # Dnnc != Numpy
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Dnnc != Numpy
# "float_tensor_2", # Dnnc != Numpy
# "int_scalar", # Dnnc != Numpy
# "int_tensor_2", # Dnnc != Numpy
],
},
"floor_div" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"mod" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"lshift" : {
"bool_tensor_1" : [
"bool_scalar",
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
},
"rshift" : {
"bool_tensor_1" : [
"bool_scalar",
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
},
"power" : {
"bool_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"and" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
},
"or" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
},
"xor" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
# "bool_scalar", # Numpy doesn't support
# "bool_tensor_2", # Numpy doesn't support
# "float_scalar", # Numpy doesn't support
# "float_tensor_2", # Numpy doesn't support
# "int_scalar", # Numpy doesn't support
# "int_tensor_2", # Numpy doesn't support
],
},
},
"unary" : {
"pos" : [
# "bool_tensor_1", # Numpy doesn't support
"int_tensor_1",
"float_tensor_1",
],
"neg" : [
# "bool_tensor_1", # Numpy doesn't support
"int_tensor_1",
"float_tensor_1",
],
},
"comparison" : {
"greater_than" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"greater_equal" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"less_than" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"less_equal" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"equal" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
"not_equal" : {
"bool_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"int_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
"float_tensor_1" : [
"bool_scalar",
"bool_tensor_2",
"float_scalar",
"float_tensor_2",
"int_scalar",
"int_tensor_2",
],
},
},
}
# Don't change the section below, until you know what you are doing
# Maps "<category>_<op>" keys (category and op names as used in the
# tensorOperators table above) to the Python operator token the generator
# emits for that combination.
operators = {
    "assignment_add" : "+=",
    "assignment_sub" : "-=",
    "assignment_mul" : "*=",
    "assignment_true_div" : "/=",
    "assignment_floor_div" : "//=",
    "assignment_mod" : "%=",
    "assignment_power" : "**=",
    "assignment_lshift" : "<<=",
    "assignment_rshift" : ">>=",
    "assignment_and" : "&=",
    "assignment_or" : "|=",
    "assignment_xor" : "^=",
    "binary_add" : "+",
    "binary_sub" : "-",
    "binary_mul" : "*",
    "binary_true_div" : "/",
    "binary_floor_div" : "//",
    "binary_mod" : "%",
    "binary_power" : "**",
    "binary_lshift" : "<<",
    "binary_rshift" : ">>",
    "binary_and" : "&",
    "binary_or" : "|",
    "binary_xor" : "^",
    "unary_pos" : "+",
    "unary_neg" : "-",
    "unary_invert" : "~",
    "comparison_greater_than" : ">",
    "comparison_greater_equal" : ">=",
    "comparison_less_than" : "<",
    "comparison_less_equal" : "<=",
    "comparison_equal" : "==",
    "comparison_not_equal" : "!=",
    }
tensorOperands = {
"bool_scalar" : "True",
"bool_tensor_1" : "bool_0_4",
"bool_tensor_2" : "bool_5_9",
"int_scalar" : "5",
"float_scalar" : "5.0",
"int_tensor_2" : "int_5_9",
"float_tensor_2" : "float_5_9",
"int_tensor_1" : "int_0_4",
"float_tensor_1" : "float_0_4",
}<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! \f$
* f(x)=\alpha\times(e^{x}-1),\;\;\;for\;x<0\;;\\f(x)=x,\;\;\;for\;x\geq0\;;
* \f$*/
/*! The formula shows how the Elu operator works.*/
/*! And this formulation became part of dnn compiler operator implementation.
* The operator is O(n) where n = Number of elements in the tensor*/
template <typename T> class Elu : public baseOperator<T, T, T> {
protected:
  float alpha = 1.0; /*!< Coefficient of ELU (scale of the negative part). */

public:
  /*! \param name  operator instance name.
   *  \param alpha ELU coefficient \f$\alpha\f$ (default 1.0). */
  Elu(std::string name = "opElu", float alpha = 1.0)
      : baseOperator<T, T, T>(opElu, name) {
    this->alpha = alpha;
  }
  /*! Fetch the alpha attribute; returns false for any other attribute. */
  bool getAttribute(OPATTR attrName, float &obj) override {
    if (attrName == attr_alpha) {
      obj = alpha;
      return true;
    }
    return false;
  }
  /*! Set the alpha attribute; returns false for any other attribute. */
  bool setAttribute(OPATTR attrName, float obj) override {
    if (attrName == attr_alpha) {
      alpha = obj;
      return true;
    }
    return false;
  }
  /*! Element wise Elu-Function:
   *  alpha*(e^x - 1) for x < 0, identity otherwise. */
  static T elu_function(T x, float alpha) {
    return (x < 0) ? (alpha * (exp(x) - 1.)) : x;
  }
  /*! Apply ELU element wise.
   *  The computation works over the flattened data, so tensors of any rank
   *  are supported; the former rank==1 restriction contradicted the
   *  documented "ND tensor" contract and has been lifted.
   *  \return tensor of the same shape (and name) as the input, or
   *          NULL_TENSOR if T is not float/double. */
  tensor<T> compute(tensor<T> &a /*!<[float,double]: ND tensor*/) {
    if (!(this->template type_check<T, float, double>())) {
      SPDLOG_ERROR("Constrain input and output types to float tensors.");
      return NULL_TENSOR<T>;
    }
    tensor<T> result(a.shape(), a.name());
    DNNC_EIGEN_ARRAY_MAP(eigenVector, T, a);
    DNNC_EIGEN_VECTOR_CTOR(T) eResult;
    // Bind alpha so the unary functor matches Eigen's one-argument signature.
    auto c0 = std::bind(elu_function, std::placeholders::_1, alpha);
    eResult.array() = eigenVector.array().unaryExpr(c0);
    result.load(eResult.data());
    return result;
  }
  /*!<
  \return The output tensor of the same shape as input.
  */
};
} // namespace dnnc
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "core/tensor.h"
namespace dnnc {
class irTypeData {
protected:
IR_DataType _type = IR_DataType::NOTYPE;
size_t *_ref; /*<! reference count of _data */
void *_data = 0x0;
public:
irTypeData(IR_DataType ty, std::vector<int> &d) : _type(ty) {
assert(ty == IR_DataType::INT8 || ty == IR_DataType::INT16 ||
ty == IR_DataType::INT32 || ty == IR_DataType::INT64);
_ref = new size_t;
*_ref = 1;
_data = new std::vector<int>(d.begin(), d.end());
}
irTypeData(IR_DataType ty, std::vector<float> &d) : _type(ty) {
assert(ty == IR_DataType::FLOAT || ty == IR_DataType::FLOAT16 ||
ty == IR_DataType::DOUBLE);
_ref = new size_t;
*_ref = 1;
_data = new std::vector<float>(d.begin(), d.end());
}
irTypeData(IR_DataType ty, std::vector<std::string> &d) : _type(ty) {
assert(ty == IR_DataType::STRING);
_ref = new size_t;
*_ref = 1;
_data = new std::vector<std::string>(d.begin(), d.end());
}
irTypeData(IR_DataType ty, std::vector<tensor<bool>> &d) : _type(ty) {
assert(ty == IR_DataType::BOOL);
_ref = new size_t;
*_ref = 1;
_data = new std::vector<tensor<bool>>(d.begin(), d.end());
}
irTypeData(IR_DataType ty, std::vector<tensor<int>> &d)
: _type(IR_DataType::TENSOR_INT) {
assert(ty == IR_DataType::INT8 || ty == IR_DataType::INT16 ||
ty == IR_DataType::INT32 || ty == IR_DataType::INT64);
_ref = new size_t;
*_ref = 1;
_data = new std::vector<tensor<int>>(d.begin(), d.end());
}
irTypeData(IR_DataType ty, std::vector<tensor<float>> &d)
: _type(IR_DataType::TENSOR_FLOAT) {
assert(ty == IR_DataType::FLOAT || ty == IR_DataType::FLOAT16 ||
ty == IR_DataType::DOUBLE);
_ref = new size_t;
*_ref = 1;
_data = new std::vector<tensor<float>>(d.begin(), d.end());
}
/// \brief copy constructor
irTypeData(const irTypeData &other) {
_ref = other._ref;
_type = other._type;
_data = other._data;
(*_ref)++;
}
/// \brief Assignment Operator
irTypeData &operator=(const irTypeData &other) {
if (this == &other)
return *this;
_ref = other._ref;
_type = other._type;
_data = other._data;
(*_ref)++;
return *this;
}
~irTypeData() {
if (_ref)
--(*_ref);
if (_ref && *_ref == 0 && _data) {
free(_ref);
switch (_type) {
case IR_DataType::INT8:
case IR_DataType::INT16:
case IR_DataType::INT32:
case IR_DataType::INT64:
delete static_cast<std::vector<int> *>(_data);
break;
case IR_DataType::UINT8:
case IR_DataType::UINT16:
case IR_DataType::UINT32:
case IR_DataType::UINT64:
delete static_cast<std::vector<unsigned int> *>(_data);
break;
case IR_DataType::FLOAT:
case IR_DataType::FLOAT16:
case IR_DataType::DOUBLE:
delete static_cast<std::vector<float> *>(_data);
break;
case IR_DataType::STRING:
delete static_cast<std::vector<std::string> *>(_data);
break;
case IR_DataType::TENSOR_BOOL:
delete static_cast<std::vector<tensor<bool>> *>(_data);
break;
case IR_DataType::TENSOR_INT:
delete static_cast<std::vector<tensor<int>> *>(_data);
break;
case IR_DataType::TENSOR_FLOAT:
delete static_cast<std::vector<tensor<double>> *>(_data);
break;
default:
assert(false && "irTypeData object created without type");
break;
}
}
}
#ifndef SWIGPYTHON
operator std::vector<int>() const {
if (_type != IR_DataType::INT8 && _type != IR_DataType::INT16 &&
_type != IR_DataType::INT32 && _type != IR_DataType::INT64)
throw std::bad_cast();
std::vector<int> ivec = *static_cast<std::vector<int> *>(_data);
return ivec;
}
operator std::vector<unsigned int>() const {
if (_type != IR_DataType::UINT8 && _type != IR_DataType::UINT16 &&
_type != IR_DataType::UINT32 && _type != IR_DataType::UINT64)
throw std::bad_cast();
std::vector<unsigned int> uivec =
*static_cast<std::vector<unsigned int> *>(_data);
return uivec;
}
operator std::vector<float>() const {
if (_type != IR_DataType::FLOAT16 && _type != IR_DataType::FLOAT &&
_type != IR_DataType::DOUBLE)
throw std::bad_cast();
std::vector<float> fvec = *static_cast<std::vector<float> *>(_data);
return fvec;
}
operator std::vector<std::string>() const {
if (_type != IR_DataType::STRING)
throw std::bad_cast();
std::vector<std::string> svec =
*static_cast<std::vector<std::string> *>(_data);
return svec;
}
operator std::string() const {
if (_type != IR_DataType::STRING)
throw std::bad_cast();
std::vector<std::string> svec =
*static_cast<std::vector<std::string> *>(_data);
return svec[0];
}
operator std::vector<tensor<bool>>() const {
if (_type != IR_DataType::TENSOR_BOOL)
throw std::bad_cast();
std::vector<tensor<bool>> tbvec =
*static_cast<std::vector<tensor<bool>> *>(_data);
if (tbvec.size() == 0)
throw std::out_of_range("vector of tensor_int with size 0");
return tbvec;
}
operator std::vector<tensor<int>>() const {
if (_type != IR_DataType::TENSOR_INT)
throw std::bad_cast();
std::vector<tensor<int>> tivec =
*static_cast<std::vector<tensor<int>> *>(_data);
if (tivec.size() == 0)
throw std::out_of_range("vector of tensor<int> with size 0");
return tivec;
}
operator std::vector<tensor<double>>() const {
if (_type != IR_DataType::TENSOR_FLOAT)
throw std::bad_cast();
std::vector<tensor<double>> tfvec =
*static_cast<std::vector<tensor<double>> *>(_data);
if (tfvec.size() == 0)
throw std::out_of_range("vector of tensor<float> with size 0");
return tfvec;
}
#endif
IR_DataType type() { return _type; }
};
} // namespace dnnc
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#include "codegen/cppCodeGen.h"
#include "graph/inferType.h"
#include <assert.h>
#include <fstream>
#include <regex>
#include <sys/stat.h>
#include <unistd.h>
// \brief Generate the complete C++ source file for the graph.
// Emits parameters, inputs and operators (in that order) into one body
// string, then writes includes + usage() + main() around it.
bool dnnc::cppCodeGen::write() {
  // std::cout << "DBG: " << _graph.getName() << " \n\t";
  // Run type inference first so each node carries a concrete dtype
  // before any code is emitted.
  inferDataType typeInference(_graph);
  typeInference.main();
  std::string code = "";
  // 1. model parameters (weights etc.)
  for (dnnParameters param : _graph.parameters()) {
    code += write(param);
  }
  // 2. model inputs; argv_index maps command-line data files to inputs.
  size_t argv_index = 1;
  for (ioNode *term : _graph.inputs()) {
    code += write(*term, argv_index);
  }
  // 3. operators, in graph iteration order.
  for (node *n : _graph) {
    if (n->ntype() == node::OPERATOR)
      code += write(*dynamic_cast<opNode *>(n));
  }
  // OUTPUTs are written with operators.
  std::ofstream out(_bundleDir.size()
                        ? (_bundleDir + FS_PATH_SEPARATOR + _outFile)
                        : _outFile);
  if (!out.is_open() || out.fail()) {
    std::cerr << "ERROR (CODEGEN): could not open file " + _outFile +
                     " to write.\n";
    return false;
  }
  out << writeIncludes() << "\n";
  out << writeUsageFunction() << "\n";
  out << writeMainFunction(code) << "\n";
  out.close();
  // NOTE(review): returns whether any body code was generated, not whether
  // the stream write succeeded - confirm callers expect this.
  return code.length();
}
// \brief sanitize a graph name into a legal C++ identifier by
// rewriting every '.' as "_dot_".
std::string dnnc::cppCodeGen::cppName(std::string str) {
  std::string result = str;
  size_t pos = 0;
  while ((pos = result.find('.', pos)) != std::string::npos) {
    result.replace(pos, 1, "_dot_");
    pos += 5; // continue past the inserted "_dot_"
  }
  return result;
}
// \brief Graph inputs that have no backing parameter file in the bundle
// dir - i.e. the inputs the generated binary must read from argv.
std::vector<dnnc::ioNode *> dnnc::cppCodeGen::modelInputs() {
  std::vector<ioNode *> ins;
  for (ioNode *term : _graph.inputs())
    if (paramFile(term->name()).empty())
      ins.push_back(term);
  return ins;
}
// \brief get parameter file, given term/param name.
// Returns the full path when a file with that name exists on disk
// (under _bundleDir when set, else the current dir), otherwise "".
std::string dnnc::cppCodeGen::paramFile(std::string param_name) {
  // check if there is a param file to load in the bundle dir.
  struct stat buffer;
  std::string param_file =
      (_bundleDir.size() ? _bundleDir + FS_PATH_SEPARATOR : "") + param_name;
  return (stat(param_file.c_str(), &buffer) == 0) ? param_file : "";
}
// \brief C++ variable name for a graph node: prefix + sanitized node
// name, with the first output name appended for operator nodes.
std::string dnnc::cppCodeGen::nodeName(node *n) {
  std::string base = _prefix + cppName(n->name());
  if (n->ntype() == node::OPERATOR)
    return base + "_" + static_cast<opNode *>(n)->outputs()[0];
  // INPUT and OUTPUT terms map directly to their sanitized graph name.
  assert(n->ntype() == node::INPUT || n->ntype() == node::OUTPUT);
  return base;
}
// \brief render a shape vector as a comma-separated list,
// e.g. {1, 10} -> "1, 10".
std::string dnnc::cppCodeGen::shapeStr(std::vector<DIMENSION> shapeVec) {
  std::string joined;
  for (size_t idx = 0; idx < shapeVec.size(); ++idx) {
    if (idx)
      joined += ", ";
    joined += std::to_string(shapeVec[idx]);
  }
  return joined;
}
// \brief emit one #include line per collected operator header,
// followed by the dnnc using-directive.
std::string dnnc::cppCodeGen::writeIncludes() {
  std::string result;
  for (size_t i = 0; i < _includes.size(); ++i)
    result += "#include \"" + _includes[i] + "\"\n";
  result += "\n\nusing namespace dnnc;\n\n";
  return result;
}
// Use Model:
//       _bundleDir        : dirname("generated exe, i.e. a.out");
//       parameter file(s) : in _bundleDir
//       input file(s)     : with a path relative to current dir.
//       output file(s)    : in current dir
// \brief Emit the usage() helper for the generated program: command-line
// synopsis, the list of expected input data files, and the output files
// the program will produce.
std::string dnnc::cppCodeGen::writeUsageFunction() {
  std::string code = "void usage(char** args) {\n";
  code += _tab + "std::cout << \"\\nUsage: \" << args[0] <<\n";
  std::vector<dnnc::ioNode *> modelIns = modelInputs();
  // NOTE(review): with more than one input the emitted "<datafile ...>"
  // fragments land on one long source line (no "\n" between iterations);
  // the generated code still compiles - confirm this is intentional.
  for (ioNode *term : modelIns)
    if (paramFile(term->name()).empty())
      code += _tab + _tab + "\" <datafile for input \\\"" + term->name() +
              "\\\">\" <<";
  code += "\n" + _tab + _tab + "\"\\n\\n\";\n\n";
  code += _tab + "std::cout << \"This model has \" << " +
          std::to_string(modelIns.size()) + " << \" input(s):\\n\";\n";
  size_t inIndex = 1;
  for (ioNode *term : modelIns)
    if (paramFile(term->name()).empty())
      code += _tab + "std::cout << \"\\t " + std::to_string(inIndex++) +
              ". \\\"" + term->name() + "\\\" (shape " +
              shapeStr(term->shape()) + "):\\n\";\n\n";
  code += _tab + "std::cout << \"Output(s) will be written in file(s):\\n\";\n";
  size_t outIndex = 1;
  for (ioNode *term : _graph.outputs())
    code += _tab + "std::cout << \"\\t " + std::to_string(outIndex++) +
            ". \\\"" + term->name() + ".out\\\" (shape " +
            shapeStr(term->shape()) + "):\\n\";\n";
  code += "}\n";
  return code;
}
// \brief Emit main() for the generated program: a BUNDLE_DIR macro that
// resolves to the directory of the running executable, an argc check
// (one data file per model input, or -h for usage), then the generated
// body, then return 0.
std::string dnnc::cppCodeGen::writeMainFunction(std::string body) {
  std::string code = "int main(int argc, char** argv) {\n\n";
  // BUNDLE_DIR = dirname(argv[0]) + path separator
  code += "#define BUNDLE_DIR std::string(argv[0]).substr(0,\\\n";
  code += "               std::string(argv[0]).find_last_of(\"" +
          std::string(FS_PATH_SEPARATOR) + "\")) + \"" +
          std::string(FS_PATH_SEPARATOR) + "\"\n\n";
  size_t nInputs = modelInputs().size();
  code += _tab + "if ( argc < " + std::to_string(nInputs + 1) +
          " || std::string(argv[1]).substr(0,2) == \"-h\" ) {\n";
  code += _tab + _tab + "usage(argv);\n";
  code += _tab + _tab + "return 1;\n";
  code += _tab + "}\n\n";
  code += body + "\n";
  code += _tab + "return 0;\n";
  code += "}\n";
  return code;
}
// \brief Emit a C++ declaration + initialization for one piece of IR data
// (an attribute value or parameter). Scalars become plain variables,
// multi-element data becomes std::vector, tensors get a shape vector plus
// a tensor object (optionally loaded from a bundled file \a fname).
std::string dnnc::cppCodeGen::initializeData(irTypeData dtype, std::string name,
                                             std::string fname) {
  std::string varType;  // int, float, std::vector<float> etc
  std::string initData; // = {1.3, 1.5} etc
  std::string code;     // vector<int> value = {1, 4, 6};
  switch (dtype.type()) {
  case IR_DataType::INT8:
  case IR_DataType::INT16:
  case IR_DataType::INT32:
  case IR_DataType::INT64: {
    varType = getDNNC_IRTypeStr(dtype.type());
    std::vector<int> values = std::vector<int>(dtype);
    if (values.size() == 0)
      return code; // nothing to emit for empty data
    if (values.size() == 1) {
      // single value -> plain scalar variable
      initData = std::to_string(values[0]);
    } else {
      // multiple values -> brace-initialized std::vector
      for (auto el : values)
        initData += (initData.size() ? "," : "{") + std::to_string(el);
      initData += values.size() ? "}" : "";
      varType = "std::vector<" + varType + ">";
    }
    code = _tab + varType + " " + name + " = " + initData + " ;\n";
    break;
  }
  case IR_DataType::UINT8:
  case IR_DataType::UINT16:
  case IR_DataType::UINT32:
  case IR_DataType::UINT64: {
    varType = getDNNC_IRTypeStr(dtype.type());
    std::vector<unsigned int> values = std::vector<unsigned int>(dtype);
    if (values.size() == 0)
      return code;
    if (values.size() == 1) {
      initData = std::to_string(values[0]);
    } else {
      for (auto el : values) {
        initData += (initData.size() ? "," : "{") + std::to_string(el);
      }
      initData += values.size() ? "}" : "";
      varType = "std::vector<" + varType + ">";
    }
    code = _tab + varType + " " + name + " = " + initData + " ;\n";
    break;
  }
  case IR_DataType::FLOAT:
  case IR_DataType::FLOAT16:
  case IR_DataType::DOUBLE: {
    varType = getDNNC_IRTypeStr(dtype.type());
    std::vector<float> values = std::vector<float>(dtype);
    if (values.size() == 0)
      return code;
    if (values.size() == 1) {
      initData = std::to_string(values[0]);
    } else {
      for (auto el : values) {
        initData += (initData.size() ? "," : "{") + std::to_string(el);
      }
      initData += values.size() ? "}" : "";
      varType = "std::vector<" + varType + ">";
    }
    code = _tab + varType + " " + name + " = " + initData + " ;\n";
    break;
  }
  case IR_DataType::STRING:
    varType = "std::string";
    // FIX: emit a quoted string literal; the raw value produced
    // uncompilable generated code like: std::string x = hello ;
    initData = "\"" + std::string(dtype) + "\"";
    code = _tab + varType + " " + name + " = " + initData + " ;\n";
    break;
  case IR_DataType::TENSOR_BOOL:
    // TODO:
    break;
  case IR_DataType::TENSOR_INT: {
    // first tensor's elements give the shape/content vector to emit
    tensor<int> values = std::vector<tensor<int>>(dtype)[0];
    if (values.length() == 0)
      return code;
    std::string initShape;
    for (auto el : values) {
      initShape += (initShape.size() ? "," : "{") + std::to_string(el);
    }
    initShape += values.length() ? "}" : "";
    std::string initVec = name + "_vec";
    initData = "std::vector<int64_t> " + initVec + " = " + initShape + ";\n";
    varType = getDNNC_IRTypeStr(dtype.type());
    code = _tab + initData;
    code += _tab + varType + " " + name + "({" +
            std::to_string(values.length()) + "}); " + name + ".load(" +
            initVec + ");\n";
    if (fname.size()) {
      // FIX: BUNDLE_DIR is a macro in the generated file and must stay
      // outside the quotes (cf. write(ioNode&)); previously this emitted
      // .read("BUNDLE_DIR +file") with the macro inside the literal.
      code += _tab + name + ".read(BUNDLE_DIR + \"" + fname + "\");\n";
    }
    break;
  }
  case IR_DataType::TENSOR_FLOAT: {
    tensor<double> values = std::vector<tensor<double>>(dtype)[0];
    if (values.length() == 0)
      return code;
    std::string initShape;
    for (auto el : values) {
      initShape += (initShape.size() ? "," : "{") + std::to_string(el);
    }
    initShape += values.length() ? "}" : "";
    std::string initVec = name + "_vec";
    initData = "std::vector<double> " + initVec + " = " + initShape + ";\n";
    varType = getDNNC_IRTypeStr(dtype.type());
    code = _tab + initData;
    code += _tab + varType + " " + name + "({" +
            std::to_string(values.length()) + "}); " + name + ".load(" +
            initVec + ");\n";
    if (fname.size()) {
      // FIX: same BUNDLE_DIR quoting correction as the TENSOR_INT case.
      code += _tab + name + ".read(BUNDLE_DIR + \"" + fname + "\");\n";
    }
    break;
  }
  default:
    assert(false && "irTypeData object created without type");
    break;
  }
  return code;
}
// \brief emit initialization code for one model parameter; parameters
// backed by a file in the bundle dir are loaded from disk, others are
// emitted inline from their IR data.
std::string dnnc::cppCodeGen::write(dnnParameters param) {
  std::string fname;
  if (!paramFile(param.name()).empty())
    fname = param.name();
  std::string varName = _prefix + cppName(param.name());
  return initializeData(param.data(), varName, fname);
}
// \brief Emit declaration + load code for one graph input: declares the
// tensor with its static shape, then reads it either from a bundled
// parameter file (BUNDLE_DIR + "name") or from the data file passed as
// the next command-line argument. \a index advances only in the argv case.
std::string dnnc::cppCodeGen::write(ioNode &term, size_t &index) {
  // TODO: don't write this ioNode, if graph has initialier
  // with the same name.
  std::string dtype = getDNNC_DataTypeStr(term.dtype());
  std::string code = _tab + "tensor<" + dtype + "> " + nodeName(&term) + "({" +
                     shapeStr(term.shape()) + "})" + ";\n";
  std::string param_file = paramFile(term.name());
  code += _tab + nodeName(&term) + ".read(" +
          (param_file.size() ? "BUNDLE_DIR + \"" + term.name() + "\""
                             : "argv[" + std::to_string(index++) + "]") +
          ");\n";
  return code;
}
// \brief Emit code for one operator node: records its header in
// _includes, resolves its input/output nodes, and dispatches on arity to
// the matching writer (constant / unary / binary / ternary / custom).
std::string dnnc::cppCodeGen::write(opNode &computeNode) {
  std::string code;
  assert(computeNode.ntype() == node::OPERATOR);
  assert(computeNode.symbol() != opInvalid);
  std::string opCode = getOpCodeStr(computeNode.symbol());
  // collect the operator header exactly once for writeIncludes()
  std::string include_file = "operators/" + opCode + ".h";
  if (std::find(_includes.begin(), _includes.end(), include_file) ==
      _includes.end())
    _includes.push_back(include_file);
  std::string opName = computeNode.name();
  assert(opName.length());
  std::vector<node *> ins, outs;
  // Constant ops legitimately have no inputs; everything else must
  // resolve both input and output nodes or codegen is skipped.
  if ((computeNode.symbol() != opConstant &&
       false == computeNode.inputNodes(_graph, ins)) ||
      false == computeNode.outputNodes(_graph, outs)) {
    std::cerr
        << "ERROR (CODEGEN): cound not find all nodes for " << opName << ",\n"
        << "                 an instance of " << opCode << ".\n"
        << "                 Please check model's sanity and try again.\n";
    return code;
  }
  // dispatch on the declared input/output counts of the node
  std::vector<std::string> nodeIns = computeNode.inputs();
  std::vector<std::string> nodeOuts = computeNode.outputs();
  if (nodeIns.size() == 0) {
    code = writeConstantOperator(computeNode, outs);
  } else if (nodeIns.size() == 1 && nodeOuts.size() == 1) {
    code = writeUnaryOperator(computeNode, ins, outs);
  } else if (nodeIns.size() == 2 && nodeOuts.size() == 1) {
    code = writeBinaryOperator(computeNode, ins, outs);
  } else if (nodeIns.size() == 3 && nodeOuts.size() == 1) {
    code = writeTernaryOperator(computeNode, ins, outs);
  } else {
    code = writeCustomOperator(computeNode, ins, outs);
  }
  return code + "\n";
}
// \brief Emit code for a zero-input (Constant) operator: instantiate the
// operator object, forward its attributes, call compute(), and write the
// result to <name>.out when it is a graph output.
std::string dnnc::cppCodeGen::writeConstantOperator(opNode &computeNode,
                                                    std::vector<node *> &outs) {
  // std::cout << "DBG: " << outs.size() << "\n";
  std::string code;
  assert(outs.size() == 1);
  std::string opCode = getOpCodeStr(computeNode.symbol());
  std::string opName = computeNode.name();
  assert(opName.length());
  std::string outType = getDNNC_DataTypeStr(computeNode.dtype());
  // Step 1: Instantiate opterator
  code += "\n";
  code +=
      _tab + opCode + "<" + outType + "> " + opName + "(\"" + opName + "\");\n";
  // Step 2: Add attribute
  for (nodeAttribute attr : computeNode) {
    std::string attrName = getAttrNameStr(attr.name());
    std::string attrVar = opName + "_" + attrName;
    code += initializeData(attr.data(), attrVar);
    code += _tab + opName + ".setAttribute ( attr_" + attrName + ", " +
            attrVar + " );\n";
  }
  // Step 3: Add compute function.
  std::string outTensor = nodeName(&computeNode);
  code += _tab + "tensor<" + outType + "> " + outTensor + " = " + opName +
          ".compute ();\n";
  if (_graph.isOutput(computeNode.outputs()[0])) {
    code += "\n" + _tab + "// Write the output tensor in a file.\n";
    code += _tab + outTensor + ".write(\"" + computeNode.outputs()[0] +
            ".out\");\n";
  }
  return code;
}
// \brief Emit code for a one-input operator: instantiate with
// <outType, inType> template args, forward attributes, call compute()
// on the single input, and write the result when it is a graph output.
std::string dnnc::cppCodeGen::writeUnaryOperator(opNode &computeNode,
                                                 std::vector<node *> &ins,
                                                 std::vector<node *> &outs) {
  // std::cout << "DBG: " << computeNode.name() << " " << ins.size() << " " <<
  // outs.size() << "\n";
  std::string code;
  assert(ins.size() == 1);
  std::string opCode = getOpCodeStr(computeNode.symbol());
  std::string opName = computeNode.name();
  assert(opName.length());
  std::string outType = getDNNC_DataTypeStr(computeNode.dtype());
  std::string inType = getDNNC_DataTypeStr(ins[0]->dtype());
  // Step 1: Instantiate opterator
  code += "\n";
  code += _tab + opCode + "<" + outType + ", " + inType + "> " + opName +
          "(\"" + opName + "\");\n";
  // Step 2: Add attribute
  for (nodeAttribute attr : computeNode) {
    std::string attrName = getAttrNameStr(attr.name());
    std::string attrVar = opName + "_" + attrName;
    code += initializeData(attr.data(), attrVar);
    code += _tab + opName + ".setAttribute ( attr_" + attrName + ", " +
            attrVar + " );\n";
  }
  // Step 3: Add compute function.
  std::string outTensor = nodeName(&computeNode);
  code += _tab + "tensor<" + outType + "> " + outTensor + " = " + opName +
          ".compute ( " + nodeName(ins[0]) + ");\n";
  if (_graph.isOutput(computeNode.outputs()[0])) {
    code += "\n" + _tab + "// Write the output tensor in a file.\n";
    code += _tab + outTensor + ".write(\"" + computeNode.outputs()[0] +
            ".out\");\n";
  }
  return code;
}
// \brief Emit code for a two-input operator: instantiate with
// <outType, in1Type, in2Type> template args, forward attributes, call
// compute() on both inputs, and write the result when it is a graph output.
std::string dnnc::cppCodeGen::writeBinaryOperator(opNode &computeNode,
                                                  std::vector<node *> &ins,
                                                  std::vector<node *> &outs) {
  // std::cout << "DBG: " << computeNode.name() << " " << ins.size() << " " <<
  // outs.size() << "\n";
  std::string code;
  assert(ins.size() == 2);
  std::string opCode = getOpCodeStr(computeNode.symbol());
  std::string opName = computeNode.name();
  assert(opName.length());
  std::string outType = getDNNC_DataTypeStr(computeNode.dtype());
  std::string in1Type = getDNNC_DataTypeStr(ins[0]->dtype());
  std::string in2Type = getDNNC_DataTypeStr(ins[1]->dtype());
  // Step 1: Instantiate opterator
  code += "\n";
  code += _tab + opCode + "<" + outType + ", " + in1Type + ", " + in2Type +
          "> " + opName + "(\"" + opName + "\");\n";
  // Step 2: Add attribute
  for (nodeAttribute attr : computeNode) {
    std::string attrName = getAttrNameStr(attr.name());
    std::string attrVar = opName + "_" + attrName;
    code += initializeData(attr.data(), attrVar);
    code += _tab + opName + ".setAttribute ( attr_" + attrName + ", " +
            attrVar + " );\n";
  }
  // Step 3: Add compute function.
  std::string outTensor = nodeName(&computeNode);
  code += _tab + "tensor<" + outType + "> " + outTensor + " = " + opName +
          ".compute ( " + nodeName(ins[0]) + ", " + nodeName(ins[1]) + ");\n";
  if (_graph.isOutput(computeNode.outputs()[0])) {
    code += "\n" + _tab + "// Write the output tensor in a file.\n";
    code += _tab + outTensor + ".write(\"" + computeNode.outputs()[0] +
            ".out\");\n";
  }
  return code;
}
// \brief Emit code for a three-input operator: instantiate, forward
// attributes, call compute() on all three inputs, and write the result
// when it is a graph output.
// NOTE(review): only in1Type/in2Type appear in the emitted template args
// (same two-input form as writeBinaryOperator); the third input's dtype
// is never used - confirm the ternary operator templates expect this.
std::string dnnc::cppCodeGen::writeTernaryOperator(opNode &computeNode,
                                                   std::vector<node *> &ins,
                                                   std::vector<node *> &outs) {
  // std::cout << "DBG: " << computeNode.name() << " " << ins.size() << " " <<
  // outs.size() << "\n";
  std::string code;
  assert(ins.size() == 3);
  std::string opCode = getOpCodeStr(computeNode.symbol());
  std::string opName = computeNode.name();
  assert(opName.length());
  std::string outType = getDNNC_DataTypeStr(computeNode.dtype());
  std::string in1Type = getDNNC_DataTypeStr(ins[0]->dtype());
  std::string in2Type = getDNNC_DataTypeStr(ins[1]->dtype());
  // Step 1: Instantiate opterator
  code += "\n";
  code += _tab + opCode + "<" + outType + ", " + in1Type + ", " + in2Type +
          "> " + opName + "(\"" + opName + "\");\n";
  // Step 2: Add attribute
  for (nodeAttribute attr : computeNode) {
    std::string attrName = getAttrNameStr(attr.name());
    std::string attrVar = opName + "_" + attrName;
    code += initializeData(attr.data(), attrVar);
    code += _tab + opName + ".setAttribute ( attr_" + attrName + ", " +
            attrVar + " );\n";
  }
  // Step 3: Add compute function.
  std::string outTensor = nodeName(&computeNode);
  code += _tab + "tensor<" + outType + "> " + outTensor + " = " + opName +
          ".compute ( " + nodeName(ins[0]) + ", " + nodeName(ins[1]) + ", " +
          nodeName(ins[2]) + ");\n";
  if (_graph.isOutput(computeNode.outputs()[0])) {
    code += "\n" + _tab + "// Write the output tensor in a file.\n";
    code += _tab + outTensor + ".write(\"" + computeNode.outputs()[0] +
            ".out\");\n";
  }
  return code;
}
// \brief Fallback for arities codegen does not handle yet: emits a
// comment block into the generated source pointing users at the issue
// tracker. \a ins and \a outs are unused, kept for a uniform signature.
std::string dnnc::cppCodeGen::writeCustomOperator(opNode &computeNode,
                                                  std::vector<node *> &ins,
                                                  std::vector<node *> &outs) {
  std::string opCode = getOpCodeStr(computeNode.symbol());
  std::string code =
      _tab + "// operator " + opCode + " is not supported yet.\n";
  // FIX: grammar in the emitted message ("a enhancement" -> "an enhancement")
  code += _tab + "// Please file an enhancement request at \n";
  code += _tab + "// https://github.com/ai-techsystems/deepC/issues \n";
  return code;
}
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
#pragma once
#include <string>
#include <typeinfo>
namespace dnnc {
// enum for target machine.
#define DNNC_Basic_DType \
/*<! This code is for ONNX TensorProto.DataType \
Reference: onnx/onnx.proto3, line 319 */ \
NOTYPE = 0, /*!< invalid */ \
FLOAT, /*!< float */ \
UINT8, /*!< uint8_t */ \
INT8, /*!< int8_t */ \
UINT16, /*!< uint16_t */ \
INT16, /*!< int16_t */ \
INT32, /*!< int32_t */ \
INT64, /*!< int64_t */ \
STRING, /*!< string */ \
BOOL, /*!< bool */ \
\
/*!< IEEE754 half-precision floating-point format (16 bits wide). \
This format has 1 sign bit, 5 exponent bits, and 10 mantissa \
bits.*/ \
FLOAT16, /*! half-float */ \
DOUBLE, /*! double precision, aka float64 */ \
UINT32, /*! uint32_t */ \
UINT64, /*! uint64_t */ \
COMPLEX64, /*!< complex with float32 real and imaginary components */ \
COMPLEX128, /*!< complex with float64 real and imaginary components */ \
\
/*!< Non-IEEE floating-point format based on IEEE754 single-precision \
floating-point number truncated to 16 bits. \
This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits. \
*/ \
BFLOAT16
// Basic scalar data types, mirroring ONNX TensorProto.DataType ordinals.
enum DNNC_DataType { DNNC_Basic_DType };
/*!< reserved for advanced usage for nodes that represent multiple/generic
 * types. Don't use it, if you don't know what it means.
 * */
// IR-level types: the basic scalars plus aggregate kinds (tensors, graph).
enum class IR_DataType {
  DNNC_Basic_DType,
  TENSOR_BOOL,
  TENSOR_INT,
  TENSOR_FLOAT,
  GRAPH,
};
// C type-name lookup table indexed by a lowercase letter minus 'a'
// (see the per-entry letter comments); entries with "" are unused codes.
// NOTE(review): the letter/type pairing matches Itanium C++ ABI type
// mangling codes - confirm before relying on that correspondence.
static const char *dtype_str[] = {
    "int8_t",      /* a */
    "bool",        /* b */
    "char",        /* c */
    "double",      /* d */
    "long double", /* e */
    "float",       /* f */
    "",            /* g */
    "uint8_t",     /* h */
    "int32_t",     /* i */
    "uint32_t",    /* j */
    "",            /* k */
    "int64_t",     /* l */
    "uint64_t",    /* m */
    "",            /* n */
    "",            /* o */
    "",            /* p */
    "",            /* q */
    "",            /* r */
    "int16_t",     /* s */
    "uint16_t",    /* t */
    "",            /* u */
    "",            /* v */
    "",            /* w */
    "",            /* x */
    "",            /* y */
    ""             /* z */
};
// Overloads mapping a C++ value's static type to its dnnc type-name string.
inline const char *dTypeName(int8_t v) { return dtype_str[0]; }
inline const char *dTypeName(bool v) { return dtype_str[1]; }
inline const char *dTypeName(char v) { return dtype_str[2]; }
inline const char *dTypeName(double v) { return dtype_str[3]; }
inline const char *dTypeName(long double v) { return dtype_str[4]; }
inline const char *dTypeName(float v) { return dtype_str[5]; }
inline const char *dTypeName(uint8_t v) { return dtype_str[7]; }
inline const char *dTypeName(int32_t v) { return dtype_str[8]; }
inline const char *dTypeName(uint32_t v) { return dtype_str[9]; }
inline const char *dTypeName(int64_t v) { return dtype_str[11]; }
inline const char *dTypeName(uint64_t v) { return dtype_str[12]; }
inline const char *dTypeName(int16_t v) { return dtype_str[18]; }
inline const char *dTypeName(uint16_t v) { return dtype_str[19]; }
#ifdef __APPLE__
// On macOS, size_t is unsigned long (distinct from uint64_t's typedef).
inline const char *dTypeName(unsigned long v) { return dtype_str[12]; }
#endif
// Map a value's static type to its DNNC_DataType enum via its name string.
// NOTE(review): this calls getDNNC_DataType(std::string), declared only
// below - relies on the compiler deferring the unqualified lookup to
// instantiation time; confirm this builds on all supported compilers.
template <typename T> DNNC_DataType getDNNC_DataType(T var) {
  std::string type_str = dTypeName(var);
  return getDNNC_DataType(type_str);
}
// String <-> enum conversions and type-promotion ordering (defined in .cpp).
DNNC_DataType getDNNC_DataType(std::string stype);
std::string getDNNC_DataTypeStr(DNNC_DataType dtype);
IR_DataType getDNNC_IRType(std::string stype);
std::string getDNNC_IRTypeStr(IR_DataType dtype);
bool typePrecedence(DNNC_DataType ty1, DNNC_DataType ty2);
} // namespace dnnc
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! ReduceL1 operator: sum of absolute values along the given axes. */
template <typename To, typename Ti>
class ReduceL1 : public baseOperator<To, Ti, Ti> {
  // ReduceL1 attributes
protected:
  std::vector<int> axes = {};  // axes to reduce; empty means reduce all
  int keepdims = 1;            // keep reduced axes as size-1 dims when nonzero
public:
  // NOTE(review): both assignments below are self-assignments (the names
  // resolve to the members themselves, there are no such parameters) and
  // have no effect - confirm whether ctor parameters were intended.
  ReduceL1(std::string name = "opReduceL1")
      : baseOperator<To, Ti, Ti>(opReduceL1, name) {
    this->axes = axes;
    this->keepdims = keepdims;
  }
  /*! Read the keepdims attribute; returns false for other attributes. */
  bool getAttribute(OPATTR attrName, int &obj) override {
    if (attrName == attr_keepdims) {
      obj = keepdims;
      return true;
    }
    return false;
  }
  bool setAttribute(OPATTR attrName, int obj) override {
    if (attrName == attr_keepdims) {
      keepdims = obj;
      return true;
    }
    return false;
  }
  // NOTE(review): the axes list is keyed on attr_axis (singular) - verify
  // this matches the attribute name used by the importer for ReduceL1.
  bool getAttribute(OPATTR attrName, std::vector<int> &obj) override {
    if (attrName == attr_axis) {
      obj = axes;
      return true;
    }
    return false;
  }
  bool setAttribute(OPATTR attrName, std::vector<int> obj) override {
    if (attrName == attr_axis) {
      axes = obj;
      return true;
    }
    return false;
  }
  /*! Compute the L1-norm reduction. Supports ranks 1-4; `arr` marks which
   *  axes are reduced (also rejecting out-of-range or duplicate axes), and
   *  `dimensions` becomes the output shape per the keepdims setting. */
  tensor<To> compute(tensor<Ti> a /*!< : N D tensor input*/) override {
    int rank = a.rank();
    int reductions = axes.size();
    // arr[i] == 1 iff axis i is reduced; doubles as a duplicate detector.
    std::vector<int> arr(rank, 0);
    for (int i = 0; i < axes.size(); i++) {
      if (axes.at(i) >= rank || arr.at(axes.at(i)) >= 1) {
        SPDLOG_ERROR("Inputted axes not appropriate for Reduce operator.");
        return NULL_TENSOR<To>;
      } else {
        arr.at(axes.at(i))++;
      }
    }
    // empty axes list => reduce over every axis
    if (reductions == 0) {
      for (int i = 0; i < arr.size(); i++) {
        arr.at(i) = 1;
      }
    }
    // build the output shape: kept axes pass through; reduced axes become
    // size 1 when keepdims, else disappear; fully-reduced => shape {1}.
    std::vector<unsigned long> dimensions;
    for (int j = 0; j < arr.size(); j++) {
      if (arr.at(j) == 0) {
        dimensions.push_back(a.shape()[j]);
      } else if (keepdims) {
        dimensions.push_back(1);
      }
    }
    if (dimensions.size() == 0) {
      dimensions.push_back(1);
    }
    if (rank < reductions) {
      SPDLOG_ERROR("tensor dimenions not appropriate for Reduce operator.");
      return NULL_TENSOR<To>;
    }
    // rank-4 case: Eigen Tensor reduction over 0..4 axes
    if (rank == 4) {
      tensor<To> result(dimensions);
      DNNC_EIGEN_TENSOR4D_MAP(tensor4D, Ti, a);
      // tensor4D = tensor4D.abs();
      if (reductions == 0) {
        std::array<int, 4> dims = {0, 1, 2, 3};
        Tensor<To, 0, RowMajor> b = tensor4D.abs().sum(dims);
        result.load(b.data());
      } else if (reductions == 1) {
        std::array<int, 1> dims;
        std::copy_n(axes.begin(), reductions, dims.begin());
        Tensor<To, 3, RowMajor> b = tensor4D.abs().sum(dims);
        result.load(b.data());
      } else if (reductions == 2) {
        std::array<int, 2> dims;
        std::copy_n(axes.begin(), reductions, dims.begin());
        Tensor<To, 2, RowMajor> b = tensor4D.abs().sum(dims);
        result.load(b.data());
      } else if (reductions == 3) {
        std::array<int, 3> dims;
        std::copy_n(axes.begin(), reductions, dims.begin());
        Tensor<To, 1, RowMajor> b = tensor4D.abs().sum(dims);
        result.load(b.data());
      } else if (reductions == 4) {
        std::array<int, 4> dims;
        std::copy_n(axes.begin(), reductions, dims.begin());
        Tensor<To, 0, RowMajor> b = tensor4D.abs().sum(dims);
        result.load(b.data());
      }
      return result;
    } else if (rank == 3) {
      tensor<To> result(dimensions);
      DNNC_EIGEN_TENSOR_MAP(tensor, Ti, a);
      // tensor = tensor.abs();
      if (reductions == 0) {
        std::array<int, 3> dims = {0, 1, 2};
        Tensor<To, 0, RowMajor> b = tensor.abs().sum(dims);
        result.load(b.data());
      } else if (reductions == 1) {
        std::array<int, 1> dims;
        std::copy_n(axes.begin(), reductions, dims.begin());
        Tensor<To, 2, RowMajor> b = tensor.abs().sum(dims);
        result.load(b.data());
      } else if (reductions == 2) {
        std::array<int, 2> dims;
        std::copy_n(axes.begin(), reductions, dims.begin());
        Tensor<To, 1, RowMajor> b = tensor.abs().sum(dims);
        result.load(b.data());
      } else if (reductions == 3) {
        std::array<int, 3> dims;
        std::copy_n(axes.begin(), reductions, dims.begin());
        Tensor<To, 0, RowMajor> b = tensor.abs().sum(dims);
        result.load(b.data());
      }
      return result;
    }
    // rank-2 case: Eigen Matrix column/row-wise absolute sums
    if (rank == 2) {
      DNNC_EIGEN_MATRIX(matrix, Ti, a);
      tensor<To> result(dimensions);
      if (reductions == 2 || reductions == 0) {
        Matrix<To, 1, Dynamic, RowMajor> colReduced =
            matrix.cwiseAbs().colwise().sum();
        Matrix<To, 1, RowMajor> fullReduced =
            colReduced.cwiseAbs().rowwise().sum();
        result.load(fullReduced.data());
      } else if (axes[0] == 0) {
        Matrix<To, 1, Dynamic, RowMajor> colReduced =
            matrix.cwiseAbs().colwise().sum();
        result.load(colReduced.data());
      } else if (axes[0] == 1) {
        Matrix<To, 1, Dynamic, RowMajor> rowReduced =
            matrix.cwiseAbs().rowwise().sum();
        result.load(rowReduced.data());
      }
      return result;
    }
    if (rank == 1) {
      DNNC_EIGEN_VECTOR(vector, Ti, a);
      tensor<To> result(dimensions);
      Matrix<To, 1, RowMajor> b = vector.cwiseAbs().rowwise().sum();
      result.load(b.data());
      return result;
    }
    // NOTE(review): ranks > 4 fall through to returning the input tensor
    // unreduced (and only compile when To == Ti) - confirm intended.
    return a;
    // CHANGE return-type and args
  }
};
} // namespace dnnc<file_sep>#include <iostream>
#include <fstream>
#include <vector>
#include <Eigen/Core>
#include "../../BenchTimer.h"
using namespace Eigen;
#ifndef SCALAR
#error SCALAR must be defined
#endif
typedef SCALAR Scalar;
typedef Matrix<Scalar,Dynamic,Dynamic> Mat;
EIGEN_DONT_INLINE
void gemm(const Mat &A, const Mat &B, Mat &C)
{
  // C += A * B. noalias() tells Eigen the result does not alias the operands,
  // avoiding a temporary; kept out-of-line so the benchmark times a real call.
  C.noalias() += A * B;
}
EIGEN_DONT_INLINE
double bench(long m, long n, long k)
{
  // Benchmark a single (m, n, k) GEMM with random operands and return the
  // achieved GFLOP/s based on the best of `tries` timed runs.
  Mat A(m,k);
  Mat B(k,n);
  Mat C(m,n);
  A.setRandom();
  B.setRandom();
  C.setZero();
  BenchTimer t;
  // `up` is a work budget used to scale repetitions so each timing run does a
  // comparable amount of total work regardless of problem size.
  double up = 1e8*4/sizeof(Scalar);
  double tm0 = 4, tm1 = 10;
  if(NumTraits<Scalar>::IsComplex)
  {
    // Complex arithmetic does more work per flop-count unit; reduce budget.
    up /= 4;
    tm0 = 2;
    tm1 = 4;
  }
  double flops = 2. * m * n * k; // multiply+add count of a dense GEMM
  long rep = std::max(1., std::min(100., up/flops) );
  long tries = std::max(tm0, std::min(tm1, up/flops) );
  BENCH(t, tries, rep, gemm(A,B,C));
  // 1e-9 converts flops/second to GFLOP/s; t.best() is the fastest try.
  return 1e-9 * rep * flops / t.best();
}
int main(int argc, char **argv)
{
  // Read (m, n, k) triplets from gemm_settings.txt and benchmark each one.
  std::ifstream settings("gemm_settings.txt");
  std::vector<double> gflops;
  long rows, cols, depth;
  for (; settings >> rows >> cols >> depth; )
  {
    //std::cerr << " Testing " << rows << " " << cols << " " << depth << std::endl;
    gflops.push_back(bench(rows, cols, depth));
  }
  // Emit every result on a single line as an Eigen row vector.
  std::cout << RowVectorXd::Map(gflops.data(), gflops.size());
  return 0;
}
<file_sep>#include "operators/Constant.h"
#include "operators/Reshape.h"
#include "operators/Gemm.h"
#include "operators/Relu.h"
#include "operators/LogSoftmax.h"
using namespace dnnc;
// Print invocation help for the generated model driver. Output is
// byte-identical to the original multi-statement version.
void usage(char** args) {
  std::cout << "\nUsage: " << args[0]
            << " <datafile for input \"0\">"
            << "\n\n"
            << "This model has " << 1 << " input(s):\n"
            << "\t 1. \"0\" (shape 784):\n"
            << "Output(s) will be written in file(s):\n"
            << "\t 1. \"10.out\" (shape 1, 10):\n";
}
int main(int argc, char** argv) {
// BUNDLE_DIR: directory containing this executable; weight files are
// expected to live next to the binary.
#define BUNDLE_DIR std::string(argv[0]).substr(0,\
               std::string(argv[0]).find_last_of("/")) + "/"
  // Exactly one data-file argument is required; -h* prints usage.
  if ( argc < 2 || std::string(argv[1]).substr(0,2) == "-h" ) {
    usage(argv);
    return 1;
  }
  // Network input: 784 floats read from argv[1]
  // (presumably a flattened 28x28 image — confirm with the model source).
  tensor<float> dnnc_0({784});
  dnnc_0.read(argv[1]);
  // Trained parameters, loaded from files beside the binary.
  tensor<float> dnnc_fc_dot_weight({100, 784});
  dnnc_fc_dot_weight.read(BUNDLE_DIR + "fc.weight");
  tensor<float> dnnc_fc_dot_bias({100});
  dnnc_fc_dot_bias.read(BUNDLE_DIR + "fc.bias");
  tensor<float> dnnc_fc2_dot_weight({10, 100});
  dnnc_fc2_dot_weight.read(BUNDLE_DIR + "fc2.weight");
  tensor<float> dnnc_fc2_dot_bias({10});
  dnnc_fc2_dot_bias.read(BUNDLE_DIR + "fc2.bias");
  // Constant {-1, 784}: target shape for the Reshape below.
  Constant<int64_t> dnnc___1("dnnc___1");
  std::vector<int64_t> dnnc___1_value_vec = {-1,784};
  tensor<int64_t> dnnc___1_value({2}); dnnc___1_value.load(dnnc___1_value_vec);
  dnnc___1.setAttribute ( attr_value, dnnc___1_value );
  tensor<int64_t> dnnc_dnnc___1_5 = dnnc___1.compute ();
  // Reshape the input to (batch, 784).
  Reshape<float, float, int64_t> dnnc___2("dnnc___2");
  tensor<float> dnnc_dnnc___2_6 = dnnc___2.compute ( dnnc_0, dnnc_dnnc___1_5);
  // First fully-connected layer: y = x * W^T + b (transB = 1).
  Gemm<float, float, float> dnnc___3("dnnc___3");
  float dnnc___3_alpha = 1.000000 ;
  dnnc___3.setAttribute ( attr_alpha, dnnc___3_alpha );
  float dnnc___3_beta = 1.000000 ;
  dnnc___3.setAttribute ( attr_beta, dnnc___3_beta );
  int32_t dnnc___3_transB = 1 ;
  dnnc___3.setAttribute ( attr_transB, dnnc___3_transB );
  tensor<float> dnnc_dnnc___3_7 = dnnc___3.compute ( dnnc_dnnc___2_6, dnnc_fc_dot_weight, dnnc_fc_dot_bias);
  Relu<float, float> dnnc___4("dnnc___4");
  tensor<float> dnnc_dnnc___4_8 = dnnc___4.compute ( dnnc_dnnc___3_7);
  // Second fully-connected layer producing the 10 class scores.
  Gemm<float, float, float> dnnc___5("dnnc___5");
  float dnnc___5_alpha = 1.000000 ;
  dnnc___5.setAttribute ( attr_alpha, dnnc___5_alpha );
  float dnnc___5_beta = 1.000000 ;
  dnnc___5.setAttribute ( attr_beta, dnnc___5_beta );
  int32_t dnnc___5_transB = 1 ;
  dnnc___5.setAttribute ( attr_transB, dnnc___5_transB );
  tensor<float> dnnc_dnnc___5_9 = dnnc___5.compute ( dnnc_dnnc___4_8, dnnc_fc2_dot_weight, dnnc_fc2_dot_bias);
  // LogSoftmax along axis 1 converts scores to log-probabilities.
  LogSoftmax<float, float> dnnc___6("dnnc___6");
  int32_t dnnc___6_axis = 1 ;
  dnnc___6.setAttribute ( attr_axis, dnnc___6_axis );
  tensor<float> dnnc_dnnc___6_10 = dnnc___6.compute ( dnnc_dnnc___5_9);
  // Write the output tensor in a file.
  dnnc_dnnc___6_10.write("10.out");
  return 0;
}
<file_sep>import os
def main() -> None:
    """Build the dnnc Docker image and compile deepC inside a container.

    Uses ``sudo`` on POSIX hosts and plain ``docker`` on Windows.
    """
    # Dockerfile is at base directory
    os.chdir("..")
    # Mac and Linux
    if os.name == "posix":
        os.system('sudo docker build -t dnnc .')
        os.system('sudo docker run -it dnnc /bin/bash -c "cd /dnnCompiler/deepC && make clean && make"')
    # Windows
    elif os.name == "nt":
        os.system('docker build -t dnnc .')
        # don't use single quotes inside the command, always use double quotes; similar problem listed below
        # https://stackoverflow.com/questions/24673698/unexpected-eof-while-looking-for-matching-while-using-sed
        os.system('docker run -it dnnc /bin/bash -c "cd /dnnCompiler/deepC && make clean && make"')
# Run only when executed as a script.
if __name__ == "__main__":
    main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import os, sys
import numpy as np

separator = os.path.sep

import onnx  # needed: `from onnx import *` does not bind the `onnx` name used below
from onnx import *

sys.path.append(".."+separator+".."+separator+".."+separator+".."+separator+"python/parser")
# from onnx_parser import *

# Build a single-node ONNX model containing a forward LSTM and save it to
# disk; the commented-out parse() call feeds it to the dnnc onnx parser.
op_name = 'LSTM'

# LSTM hyper-parameters (ONNX LSTM spec: 4 gates i,o,f,c; 3 peephole weights).
seq_length = 3
batch_size = 3
input_size = 4
hidden_size = 3
num_directions = 1
weight_scale = 0.1
number_of_gates = 4
number_of_peepholes = 3  # was assigned twice; duplicate removed

inputs = [helper.make_tensor_value_info('X',TensorProto.FLOAT,[seq_length, batch_size, input_size]),
          helper.make_tensor_value_info('W',TensorProto.FLOAT,[num_directions, number_of_gates*hidden_size, input_size]),
          helper.make_tensor_value_info('R',TensorProto.FLOAT,[num_directions, number_of_gates*hidden_size, hidden_size]),
          helper.make_tensor_value_info('B',TensorProto.FLOAT,[num_directions, 2*number_of_gates*hidden_size]),
          helper.make_tensor_value_info('sequence_lens',TensorProto.INT32,[batch_size]),
          helper.make_tensor_value_info('initial_h',TensorProto.FLOAT,[num_directions, batch_size, hidden_size]),
          helper.make_tensor_value_info('initial_c',TensorProto.FLOAT,[num_directions, batch_size, hidden_size]),
          helper.make_tensor_value_info('P',TensorProto.FLOAT,[num_directions, number_of_peepholes*hidden_size])]
outputs = [helper.make_tensor_value_info('Y',TensorProto.FLOAT,[seq_length, num_directions, batch_size, hidden_size])]

# First LSTM output (concatenated hidden states) is skipped via the empty name;
# only 'Y' is produced.
nodes = []
nodes.append(helper.make_node('LSTM',inputs=['X', 'W', 'R', 'B', 'sequence_lens', 'initial_h', 'initial_c', 'P'], outputs=['', 'Y'], activations=["sigmoid","tanh","tanh"], direction="forward", hidden_size=3))

graph = helper.make_graph(nodes, op_name+"_graph", inputs, outputs)
opset = (OperatorSetIdProto(version=11),)
model = helper.make_model(graph, opset_imports=opset)
onnx.checker.check_model(model)

# Output paths: the model next to the test dir, gold files under gold_files/.
t_prefix = ".." + separator + separator + op_name + separator + op_name
g_prefix = ".." + separator + "gold_files" + separator + op_name
onnx.save(model, t_prefix+".onnx")
# parse(t_prefix+".onnx", g_prefix+".sym", onnx_output_file=t_prefix+".txt")
<file_sep># deepC on 
## Bring your own Model (BYOM)
1. Train your machine learning model.
1. [Convert it to onnx](https://github.com/onnx/tutorials#converting-to-onnx-format)
## deepC Compile to C++
Download and install deepC and run ```onnx2cpp``` command
```% onnx2cpp <model>.onnx```
## Embed deepC model
Use setup and loop to include C++ file compiled by deepC
1. [setup()](https://www.arduino.cc/reference/en/language/structure/sketch/setup/)
1. [loop()](https://www.arduino.cc/reference/en/language/structure/sketch/loop/)
1. [Build on Arduino](https://github.com/arduino/Arduino/wiki/Build-Process)
Consult [Arduino Reference](https://www.arduino.cc/reference/en/) to complete your [application/sketch](https://www.arduino.cc/en/tutorial/sketch).
## Run
1. Upload the sketch: Sketch -> Upload
1. Open the Serial Monitor: Tools -> Serial Monitor

### KPNS
1. [Replace Eigen lib of deepC with Arduino port](https://github.com/bolderflight/Eigen), in case you run into Eigen errors.
### Reference
1. [Getting Started with Arduino products](https://www.arduino.cc/en/Guide/HomePage)
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include <graph/graph.h>
#include <set>
namespace dnnc {
/*! Forward data-type inference pass: pushes input/constant dtypes through the
 *  operator nodes of a graph, keeping the higher-precedence type at each node.
 */
class inferDataType {
protected:
  graph &_graph; // graph whose operator nodes get their dtype inferred

  /*! Depth-first propagation of \p inType into \p computeNode and every
   *  operator node downstream of it. VISITING/VISITED marks make the walk
   *  terminate on cycles and avoid re-visiting shared subgraphs. */
  bool propagate(opNode &computeNode, DNNC_DataType inType) {
    if (computeNode.isMarked(node::VISITED))
      return true;
    // cycle detected.
    if (computeNode.isMarked(node::VISITING))
      return true;
    computeNode.mark(node::VISITING);

    assert(computeNode.ntype() == node::OPERATOR);
    assert(computeNode.symbol() != opInvalid);

    std::vector<node *> outs;
    if (false == computeNode.outputNodes(_graph, outs)) {
      // typo fixed: "cound" -> "could"
      std::cerr << "ERROR (TYPE INFER): could not find all nodes for "
                << computeNode.name() << ",\n";
    }

    // infer data type and set it on the node: keep whichever of the incoming
    // and current dtypes has higher precedence.
    // TODO: performance tuneup. do not propagate forward, if
    //       1. old and new dtype are same.
    //       2. inType is NOTYPE
    computeNode.dtype(typePrecedence(inType, computeNode.dtype())
                          ? inType
                          : computeNode.dtype());

    for (auto next : outs) {
      if (next->ntype() == node::OPERATOR)
        propagate(*dynamic_cast<opNode *>(next), computeNode.dtype());
    }

    computeNode.mark(node::VISITED);
    return true;
  }

public:
  inferDataType(graph &graph) : _graph(graph) {}

  /*! Run inference over the whole graph: seed from graph inputs first, then
   *  from Constant nodes (which carry their own dtype). Returns false for an
   *  empty graph or if any propagation reported failure. */
  bool main() {
    bool inferred = bool(_graph.nNodes());
    _graph.resetNodeMarks();
    for (ioNode *n : _graph.inputs()) {
      std::vector<node *> nextLevelNodes;
      if (n->outputNodes(_graph, nextLevelNodes))
        for (node *next : nextLevelNodes) {
          if (next->ntype() == node::OPERATOR) {
            inferred &= propagate(*dynamic_cast<opNode *>(next), n->dtype());
          }
        }
    }
    for (node *n : _graph) {
      if (n->ntype() == node::OPERATOR && n->symbol() == opConstant)
        inferred &= propagate(*dynamic_cast<opNode *>(n), n->dtype());
    }
    return inferred;
  }
}; // class inferDataType
} // namespace dnnc
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common; # DNNC path setup
import deepC.dnnc as dc
import numpy as np
import unittest, random, math
def temp_softsign(x):
    """NumPy reference for softsign: x / (1 + |x|)."""
    denom = 1 + np.abs(x)
    return x / denom
def temp_erf(x):
    """Element-wise math.erf as float32 (NumPy itself has no erf ufunc)."""
    erf_elementwise = np.vectorize(math.erf)
    return erf_elementwise(x).astype(np.float32)
class nnScalarOperatorsTest(unittest.TestCase):
    """Compare deepC (dc) scalar math operators against NumPy references.

    All fixtures are positive Python-int scalars so inverse-trig domains
    hold where needed; commented-out tests are presumably unsupported or
    unstable ops — confirm before re-enabling.
    """

    def setUp(self):
        # Three random positive scalars fed identically to numpy and dc.
        self.random_number1 = random.randrange(20, 50, 3)
        self.random_number2 = random.randrange(200, 500, 1)
        self.random_number3 = random.randrange(10, 500, 2)
        # self.np_a = np.array(self.random_number1).astype(np.float32)
        # self.np_b = np.array(self.random_number2).astype(np.float32)
        # self.dc_a = dc.array([self.random_number1])
        # self.dc_b = dc.array([self.random_number2])
        self.np_a = self.random_number1
        self.np_b = self.random_number2
        self.dc_a = self.random_number1
        self.dc_b = self.random_number2

    def test_nnScalar_asin (self):
        # asin only defined on [-1, 1], so fixed inputs are used here.
        np.testing.assert_allclose(np.arcsin(1), dc.asin(1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.arcsin(0), dc.asin(0), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.arcsin(-1), dc.asin(-1), rtol=1e-3, atol=1e-3)

    def test_nnScalar_acos (self):
        # acos only defined on [-1, 1], so fixed inputs are used here.
        np.testing.assert_allclose(np.arccos(1), dc.acos(1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.arccos(0), dc.acos(0), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.arccos(-1), dc.acos(-1), rtol=1e-3, atol=1e-3)

    def test_nnScalar_atan (self):
        np.testing.assert_allclose(np.arctan(self.random_number1), dc.atan(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.arctan(self.random_number2), dc.atan(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.arctan(self.random_number3), dc.atan(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_asinh (self):
        np.testing.assert_allclose(np.arcsinh(self.random_number1), dc.asinh(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.arcsinh(self.random_number2), dc.asinh(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.arcsinh(self.random_number3), dc.asinh(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_acosh (self):
        np.testing.assert_allclose(np.arccosh(self.random_number1), dc.acosh(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.arccosh(self.random_number2), dc.acosh(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.arccosh(self.random_number3), dc.acosh(self.random_number3), rtol=1e-3, atol=1e-3)

    # def test_nnScalar_atanh (self):
    #     np.testing.assert_allclose(np.arctanh(self.random_number1), dc.atanh(self.random_number1), rtol=1e-3, atol=1e-3)
    #     np.testing.assert_allclose(np.arctanh(self.random_number2), dc.atanh(self.random_number2), rtol=1e-3, atol=1e-3)
    #     np.testing.assert_allclose(np.arctanh(self.random_number3), dc.atanh(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_sin (self):
        np.testing.assert_allclose(np.sin(self.random_number1), dc.sin(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.sin(self.random_number2), dc.sin(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.sin(self.random_number3), dc.sin(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_cos (self):
        np.testing.assert_allclose(np.cos(self.random_number1), dc.cos(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.cos(self.random_number2), dc.cos(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.cos(self.random_number3), dc.cos(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_tan (self):
        np.testing.assert_allclose(np.tan(self.random_number1), dc.tan(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.tan(self.random_number2), dc.tan(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.tan(self.random_number3), dc.tan(self.random_number3), rtol=1e-3, atol=1e-3)

    # def test_nnScalar_sinh (self):
    #     np.testing.assert_allclose(np.sinh(self.random_number1), dc.sinh(self.random_number1), rtol=1e-3, atol=1e-3)
    #     np.testing.assert_allclose(np.sinh(self.random_number2), dc.sinh(self.random_number2), rtol=1e-3, atol=1e-3)
    #     np.testing.assert_allclose(np.sinh(self.random_number3), dc.sinh(self.random_number3), rtol=1e-3, atol=1e-3)

    # def test_nnScalar_cosh (self):
    #     np.testing.assert_allclose(np.cosh(self.random_number1), dc.cosh(self.random_number1), rtol=1e-3, atol=1e-3)
    #     np.testing.assert_allclose(np.cosh(self.random_number2), dc.cosh(self.random_number2), rtol=1e-3, atol=1e-3)
    #     np.testing.assert_allclose(np.cosh(self.random_number3), dc.cosh(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_tanh (self):
        np.testing.assert_allclose(np.tanh(self.random_number1), dc.tanh(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.tanh(self.random_number2), dc.tanh(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.tanh(self.random_number3), dc.tanh(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_erf (self):
        np.testing.assert_allclose(temp_erf(self.random_number1), dc.erf(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(temp_erf(self.random_number2), dc.erf(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(temp_erf(self.random_number3), dc.erf(self.random_number3), rtol=1e-3, atol=1e-3)

    # def test_nnScalar_exp (self):
    #     np.testing.assert_allclose(np.exp(self.random_number1), dc.exp(self.random_number1), rtol=1e-3, atol=1e-3)
    #     np.testing.assert_allclose(np.exp(self.random_number2), dc.exp(self.random_number2), rtol=1e-3, atol=1e-3)
    #     np.testing.assert_allclose(np.exp(self.random_number3), dc.exp(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_log (self):
        np.testing.assert_allclose(np.log(self.random_number1), dc.log(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.log(self.random_number2), dc.log(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.log(self.random_number3), dc.log(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_logical_not (self):
        np.testing.assert_allclose(np.logical_not(self.random_number1), dc.logical_not(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.logical_not(self.random_number2), dc.logical_not(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.logical_not(self.random_number3), dc.logical_not(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_sign (self):
        np.testing.assert_allclose(np.sign(self.random_number1), dc.sign(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.sign(self.random_number2), dc.sign(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(np.sign(self.random_number3), dc.sign(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_softsign (self):
        np.testing.assert_allclose(temp_softsign(self.random_number1), dc.softsign(self.random_number1), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(temp_softsign(self.random_number2), dc.softsign(self.random_number2), rtol=1e-3, atol=1e-3)
        np.testing.assert_allclose(temp_softsign(self.random_number3), dc.softsign(self.random_number3), rtol=1e-3, atol=1e-3)

    def test_nnScalar_max (self):
        npr = np.maximum(self.np_a, self.np_b)
        dcr = dc.max([self.dc_a,self.dc_b])
        np.testing.assert_allclose(npr, np.array(dcr).astype(np.float32),rtol=1e-3, atol=1e-3)

    def test_nnScalar_min (self):
        npr = np.minimum(self.np_a, self.np_b)
        dcr = dc.min([self.dc_a,self.dc_b])
        np.testing.assert_allclose(npr, np.array(dcr).astype(np.float32),rtol=1e-3, atol=1e-3)

    def tearDown(self):
        return "test finished"
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
<file_sep>import os, sys
DNNC_ROOT=os.path.abspath(os.path.dirname(__file__) +
os.path.sep + '..' +
os.path.sep + '..')
sys.path.append(DNNC_ROOT);
import argparse
import deepC.dnnc as dc
import numpy as np
def test_multiply(a, b):
    """Smoke-test dc.matmul on two tensors; the result is discarded."""
    product = dc.matmul(a, b)
    #print(product)
def test_non_detailed():
    """Smoke-test tensor creation, reshape, conversion and matmul APIs.

    No assertions — this only verifies the dc calls run without raising.
    """
    t1=dc.array(2,3)
    t2=dc.array(3,2)
    mul = dc.matmul(t1,t2)
    #print ("multiplication : " , mul.to_string())
    t3 = dc.array(2,3,4)
    #print("old shape", t1.shape())
    # reshape accepts a vectorSizeT, varargs, a tuple or a list
    new_shape = dc.vectorSizeT([2,12])
    t3.reshape(new_shape)
    t3.reshape(4,6)
    t3.reshape((4,6))
    t3.reshape([4,6])
    #print("new shape", t1.shape())
    py_list = list(t3); # convert tensor to python list
    py_tuple = tuple(t3); # convert tensor to python tuple
    np_ary = t3.numpy(); # convert to numpy array
    #t4 = dc.thresholded_relu(t1);
    #print("relu", t4.to_string())
    #replace first few values in tensor with new values.
    data = dc.vectorFloat([1.0, 2.0, 3.0, 4.0])
    t3.load(data)
    #print(t3.to_string())
    # constructors from python lists and factory helpers
    arr = dc.array([1, 2])
    #print(arr)
    arr2D = dc.array([[1, 2], [10, 20]]).asTypeInt()
    #print(arr2D)
    arrRand = dc.random(2, 3);
    #print(arrRand)
    empty = dc.empty(3, 2);
    #print(empty)
    zeros = dc.zeros(2, 2);
    #print(zeros);
    ones = dc.ones(2, 2);
    #print(ones)
    ranges = dc.arange(15, 3, 2)
    #print(ranges)
    dc.reshape(arr2D, (1,4))
    #3D MatMul Test1
    a = dc.array(2, 2, 2)
    b = dc.array(2, 2, 2)
    adata = dc.vectorFloat([1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0])
    bdata = dc.vectorFloat([8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0])
    a.load(adata)
    b.load(bdata)
    test_multiply(a,b)
    #3D MatMul Test2
    a = dc.array(2, 2 ,3)
    b = dc.array(2 ,3, 2)
    adata = dc.vectorFloat([1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0])
    bdata = dc.vectorFloat([12.0,11.0,10.0,9.0,8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0])
    a.load(adata)
    b.load(bdata)
    test_multiply(a,b)
def test_detailed():
    """Exercise tensor indexing/slicing (read and write) and basic arithmetic.

    No assertions — this only verifies the dc calls run without raising.
    """
    t_dc = dc.array([[0,1,2],[3,4,5],[6,7,8],[9,10,11]])
    t_dc1 = dc.array([[21,22],[23,24],[26,27]])
    # read-only indexing / slicing forms (results discarded)
    t_dc[2]
    t_dc[2,1]
    int(t_dc[2,1])
    t_dc[2:3,:]
    t_dc[2]
    t_dc[2,:]
    t_dc[2:3,1:2]
    t_dc[1,::2]
    t_dc[1:2:1,1:2]
    t_dc[1:2:1,...]
    t_dc[...,1]
    t_dc[...,::-2]
    t_dc[1:2:-1,::-2]
    # element / slice assignment, including assigning another tensor
    t_dc[2:3,1:2] = 30
    t_dc[2,1] = 1
    t_dc[0:3,0:2] = t_dc1
    t1 = dc.array(2,3).asTypeFloat()
    t2 = dc.array(2,3).asTypeInt()
    add = dc.add(t1,t1)
    add = t1 + t1
    #print ("addition : " , add.to_string())
def main():
    """CLI entry point: always run the quick tests, and the detailed set
    unless the -dev flag asks to skip it."""
    parser = argparse.ArgumentParser(description="basic testing of deepC.dnnc")
    parser.add_argument("-dev", "--developer", action="store_true", help="skip testing binary operators only for faster development purposes")
    opts = parser.parse_args()
    test_non_detailed()
    if opts.developer:
        return
    test_detailed()
# Run only when executed as a script.
if __name__ == "__main__":
    main()
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
def Isinf(np_a, detect_positive, detect_negative):
    """NumPy reference for ONNX IsInf with detect_positive/detect_negative flags.

    Returns a boolean array marking the requested infinities (or an all-zero
    array when both flags are off, matching the original float return).

    Fix: the original mutated the caller's array in place (np_a[np_a<0] = 0);
    we now work on a copy so the input is left untouched.
    """
    a = np.array(np_a, copy=True)  # never clobber the caller's data
    if (detect_positive and not(detect_negative)):
        a[a < 0] = 0  # drop -inf so only +inf survives isinf
        return np.isinf(a)
    elif (detect_negative and not(detect_positive)):
        a[a > 0] = 0  # drop +inf so only -inf survives isinf
        return np.isinf(a)
    elif (not(detect_positive) and not(detect_negative)):
        # original semantics preserved: float zeros, not booleans
        return np.zeros_like(a)
    else:
        return np.isinf(a)
class IsInfTest(unittest.TestCase):
    """Compare dc.isinf against the NumPy reference Isinf() for 1D-4D
    shapes and several detect_positive/detect_negative combinations."""

    def setUp(self):
        # 24 random values with five +inf and five -inf entries planted.
        self.len = 24
        self.np_a = np.random.randn(self.len)
        self.np_a.ravel()[np.random.choice(self.np_a.size, 5, replace=False)] = np.inf
        self.np_a.ravel()[np.random.choice(self.np_a.size, 5, replace=False)] = -np.inf
        self.dc_a = dc.array(list(self.np_a))
        # default flags: detect only negative infinity
        self.detect_positive = 0
        self.detect_negative = 1

    def test_IsInf1D (self):
        npr = Isinf(self.np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(self.dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr, np.array(dcr.data()))

    def test_IsInf2D_1 (self):
        np_a = np.reshape(self.np_a, (6,4))
        dc_a = dc.reshape(self.dc_a, (6,4))
        npr = Isinf(np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def test_IsInf2D_2 (self):
        np_a = np.reshape(self.np_a, (3,8))
        dc_a = dc.reshape(self.dc_a, (3,8))
        npr = Isinf(np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def test_IsInf2D_3 (self):
        np_a = np.reshape(self.np_a, (12,2))
        dc_a = dc.reshape(self.dc_a, (12,2))
        npr = Isinf(np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def test_IsInf3D_1 (self):
        np_a = np.reshape(self.np_a, (2,4,3))
        dc_a = dc.reshape(self.dc_a, (2,4,3))
        npr = Isinf(np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def test_IsInf3D_2 (self):
        np_a = np.reshape(self.np_a, (2,2,6))
        dc_a = dc.reshape(self.dc_a, (2,2,6))
        npr = Isinf(np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def test_IsInf3D_3 (self):
        np_a = np.reshape(self.np_a, (4,2,3))
        dc_a = dc.reshape(self.dc_a, (4,2,3))
        npr = Isinf(np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def test_IsInf3D_4 (self):
        # detect both signs of infinity
        np_a = np.reshape(self.np_a, (4,2,3))
        dc_a = dc.reshape(self.dc_a, (4,2,3))
        self.detect_positive = 1
        npr = Isinf(np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def test_IsInf3D_5 (self):
        # detect positive infinity only
        np_a = np.reshape(self.np_a, (4,2,3))
        dc_a = dc.reshape(self.dc_a, (4,2,3))
        self.detect_positive = 1
        self.detect_negative = 0
        npr = Isinf(np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def test_IsInf4D_1 (self):
        np_a = np.reshape(self.np_a, (2,2,2,3))
        dc_a = dc.reshape(self.dc_a, (2,2,2,3))
        npr = Isinf(np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def test_IsInf4D_2 (self):
        np_a = np.reshape(self.np_a, (2,2,1,6))
        dc_a = dc.reshape(self.dc_a, (2,2,1,6))
        npr = Isinf(np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def test_IsInf4D_3 (self):
        # detect positive infinity only
        np_a = np.reshape(self.np_a, (2,2,2,3))
        dc_a = dc.reshape(self.dc_a, (2,2,2,3))
        self.detect_positive = 1
        self.detect_negative = 0
        npr = Isinf(np_a,self.detect_positive,self.detect_negative)
        dcr = dc.isinf(dc_a,self.detect_positive,self.detect_negative)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def tearDown(self):
        return "test finished"
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! Generate a 2D tensor (matrix) with ones on the diagonal and zeros
everywhere else. Only 2D tensors are supported, i.e. input T1 must
be of rank 2. The shape of the output tensor is the same as the input
tensor. The data type can be specified by the 'dtype' argument. If
'dtype' is not specified, then the type of input tensor is used. By
default, the main diagonal is populated with ones, but attribute
'k' can be used to populate upper or lower diagonals*/
/*! The formula for fillung up Eye Like matrix is*/
/*! \f$ A_{(i,j)}=1\;,\;\;\;for\;\;i=j-k\;;\\A_{(i,j)}=0\;,\;\;\;for\;\;i\neq
* j-k \f$*/
template <typename T> class EyeLike : public baseOperator<T, T, T> {
protected:
  int k = 0; /*!< (Optional) Index of the diagonal to be populated
                  with ones. Default is 0. If T2 is the output, this
                  op sets T2[i, i+k] = 1. k = 0 populates the main
                  diagonal, k > 0 populates an upper diagonal, and
                  k < 0 populates a lower diagonal.*/

public:
  /*! Construct with an operator name and the diagonal offset k. */
  EyeLike(std::string name = "opEyeLike", int k = 0)
      : baseOperator<T, T, T>(opEyeLike, name) {
    this->k = k;
  }

  /*! Read attr_k into obj; returns false for any other attribute. */
  bool getAttribute(OPATTR attrName, int &obj) override {
    if (attrName == attr_k) {
      obj = k;
      return true;
    }
    return false;
  }

  /*! Set attr_k from obj; returns false for any other attribute. */
  bool setAttribute(OPATTR attrName, int obj) override {
    if (attrName == attr_k) {
      k = obj;
      return true;
    }
    return false;
  }

  tensor<T> compute(tensor<T> &a /*!< 2D input tensor to copy shape, and
                                      optionally, type information from*/) {
    // Only rank-2 inputs are supported.
    if (a.rank() != 2) {
      SPDLOG_ERROR("tensor dimenions not appropriate for EyeLike operator.");
      return NULL_TENSOR<T>;
    }
    int row = a.shape()[0];
    int col = a.shape()[1];
    tensor<T> result(a.shape(), a.name());
    // DNNC_EIGEN_VECTOR_CTOR(T) eResult(row,col);
    Matrix<T, Dynamic, Dynamic, RowMajor> eResult(row, col);
    // Ones exactly where i == j - k, zeros everywhere else.
    for (int i = 0; i < row; i++) {
      for (int j = 0; j < col; j++) {
        eResult(i, j) = (i == (j - k)) ? 1. : 0.;
      }
    }
    result.load(eResult.data());
    return result;
  }
  /*!<
  \return The output tensor of the same shape as input.
  */
};
} // namespace dnnc
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
// normalize(https://en.wikipedia.org/wiki/Norm_(mathematics))
// Eigen cwise unsupported-tensors(written TODO in original doc)
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! Given a matrix, apply Lp-normalization along the provided axis.*/
/*! The formula for Lp-norm is given by:
\f$ \left \| x \right \|_{1} = \sum_{i=1}^{n}\left | x_{i} \right | \f$ */
/* \f$ \left \| x \right \|_{2} = \sum_{i=1}^{n}\sqrt{\left ( x_{i} \right
* )^{2}} \f$ */
template <typename T> class LpNormalization : public baseOperator<T, T, T> {
  // LpNormalization attributes
protected:
  int p = 2;     /*!< p value of the Lp norm used to pool over the input data.
                      Only L1 norm and L2 norm are supported */
  int axis = -1; /*!< axis to apply normalization.
                  * Since axis is int it can be 0 or 1(-1 indicates last axis
                  * i.e. 1). */
public:
  /*! Construct the operator with the norm order p and the normalization
      axis. */
  LpNormalization(std::string name = "opLpNormalization", int p = 2,
                  int axis = -1)
      : baseOperator<T, T, T>(opLpNormalization, name) {
    this->p = p;
    this->axis = axis;
  }
  /*! Fetch an integer attribute (attr_p or attr_axis).
      \return true when the attribute name was recognized. */
  bool getAttribute(OPATTR attrName, int &obj) override {
    if (attrName == attr_p) {
      obj = p;
      return true;
    } else if (attrName == attr_axis) {
      obj = axis;
      return true;
    }
    return false;
  }
  /*! Store an integer attribute (attr_p or attr_axis).
      \return true when the attribute name was recognized. */
  bool setAttribute(OPATTR attrName, int obj) override {
    if (attrName == attr_p) {
      p = obj;
      return true;
    } else if (attrName == attr_axis) {
      axis = obj;
      return true;
    }
    return false;
  }
  /*! Normalize each column (axis 0) or each row (axis 1 / -1) of a 2D
      tensor by its L1 or L2 norm.
      \return tensor of the same shape, or NULL_TENSOR on invalid input. */
  tensor<T> compute(tensor<T> &a /*!<[float,double]: 2D tensor*/) {
    if (!(this->template type_check<T, float, double>())) {
      SPDLOG_ERROR("Constrain input and output types to float tensors.");
      return NULL_TENSOR<T>;
    }
    if (a.rank() != 2) {
      SPDLOG_ERROR("Constrain input and output types should be matrix.");
      return NULL_TENSOR<T>;
    }
    if (p != 2 && p != 1) {
      SPDLOG_ERROR("Constrain input(norm) not supported.");
      return NULL_TENSOR<T>;
    }
    tensor<T> result(a.shape(), a.name());
    DNNC_EIGEN_MATRIX(eigenMatrixA, T, a);
    int rows = int(a.shape()[0]);
    int cols = int(a.shape()[1]);
    if (axis == 0) {
      // Normalize every column by its Lp norm.
      for (int i = 0; i < cols; i++) {
        // Accumulate in T (not float): keeps full precision for double
        // tensors. std::abs picks the floating-point overload; the original
        // unqualified abs() could bind to ::abs(int) and truncate.
        T sum = 0;
        for (int j = 0; j < rows; j++)
          sum += (p == 1) ? std::abs(eigenMatrixA(j, i))
                          : eigenMatrixA(j, i) * eigenMatrixA(j, i);
        T denom = (p == 1) ? sum : std::sqrt(sum);
        for (int j = 0; j < rows; j++)
          result(j, i) = eigenMatrixA(j, i) / denom;
      }
    } else if (axis == 1 || axis == -1) {
      // Normalize every row by its Lp norm (-1 means the last axis, i.e. 1).
      for (int i = 0; i < rows; i++) {
        T sum = 0;
        for (int j = 0; j < cols; j++)
          sum += (p == 1) ? std::abs(eigenMatrixA(i, j))
                          : eigenMatrixA(i, j) * eigenMatrixA(i, j);
        T denom = (p == 1) ? sum : std::sqrt(sum);
        for (int j = 0; j < cols; j++)
          result(i, j) = eigenMatrixA(i, j) / denom;
      }
    }
    // NOTE(review): any other axis value falls through and returns an
    // unfilled tensor, matching the original behavior — confirm axis is
    // validated upstream.
    return result;
  }
  /*!<
  \return The output matrix after normalization.
  */
};
} // namespace dnnc
<file_sep>#define EIGEN_USE_SYCL
#include <SYCL/sycl.hpp>
#include <iostream>
#include "tensor_benchmarks.h"
using Eigen::array;
using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;
// Simple functions
// Build a SYCL queue for the selected device, installing an async-exception
// handler that rethrows and logs every captured exception.
template <typename device_selector>
cl::sycl::queue sycl_queue() {
  return cl::sycl::queue(device_selector(), [=](cl::sycl::exception_list l) {
    for (const auto& e : l) {
      try {
        std::rethrow_exception(e);
      } catch (const cl::sycl::exception& ex) {
        // Catch by const reference: the original caught by value, which
        // copies (and can slice) the exception object.
        std::cout << ex.what() << std::endl;
      }
    }
  });
}
// Benchmark registration helper: defines BM_<FUNC>, which runs the named
// BenchmarkSuite member on a SYCL GPU device and registers it over the
// problem-size range [10, 5000]. Timing is paused while the queue/device
// are being set up so only the kernel work is measured.
#define BM_FuncGPU(FUNC)                                       \
  static void BM_##FUNC(int iters, int N) {                    \
    StopBenchmarkTiming();                                     \
    cl::sycl::queue q = sycl_queue<cl::sycl::gpu_selector>();  \
    Eigen::SyclDevice device(q);                               \
    BenchmarkSuite<Eigen::SyclDevice, float> suite(device, N); \
    suite.FUNC(iters);                                         \
  }                                                            \
  BENCHMARK_RANGE(BM_##FUNC, 10, 5000);

// Instantiate the GPU benchmarks for broadcasting and coefficient-wise ops.
BM_FuncGPU(broadcasting);
BM_FuncGPU(coeffWiseOp);
<file_sep># HOW TO USE THIS FILE TO BUILD python extension package
# command: python3 setup.py build_ext --inplace
#
## TODO: remove -g flag from compilation.
import os

# --- build environment -------------------------------------------------
# distutils debug switches plus a toolchain pinned to clang++-8 for the
# compile, link and shared-object steps.
os.environ["DISTUTILS_DEBUG"]="1"
os.environ["Py_DEBUG"]="0"
os.environ["CC"] = "clang++-8"
os.environ["CXX"] = "clang++-8"
os.environ["LINKCC"] = "clang++-8"
os.environ["LDSHARED"] = "clang++-8 -shared"
os.environ["OPT"] = "-fwrapv -O2 -Wall"
os.environ["CFLAGS"] = "-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -fstack-protector-strong -Wformat -Werror=format-security -flto "
os.environ["PY_CFLAGS"] = "-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -fstack-protector-strong -Wformat -Werror=format-security -flto "
os.environ["PY_CORE_CFLAGS"] = "-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -O2 -Wall -fstack-protector-strong -Wformat -Werror=format-security -flto -fuse-linker-plugin -ffat-lto-objects -std=c99 -Wextra -Wno-unused-result -Wno-unused-parameter -Wno-missing-field-initializers -Wno-cast-function-type -IObjects -IInclude -IPython -I. -I../Include -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DPy_BUILD_CORE"
# NOTE(review): CC is unconditionally set to "clang++-8" a few lines above,
# so this branch is dead code — confirm whether it was meant to honor a
# user-provided CC instead.
if ( os.environ["CC"] != "clang++-8" ):
    os.environ["CFLAGS"] += "-fuse-linker-plugin -ffat-lto-objects"

from distutils.core import setup, Extension

# Regenerate the operator wrappers before building the extension.
import op_gen
op_gen.main()

# DNNC variables
DNNC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
SRC = os.path.join(DNNC_ROOT, 'src')
INCLUDES = os.path.join(DNNC_ROOT, 'include')
ISYSTEM = os.path.join(DNNC_ROOT, 'packages', 'eigen-eigen-323c052e1731')

# Pre-built object files (produced by the C++ build) linked into the module.
OTHER_OBJECTS = [
    os.path.join(SRC, 'core', 'obj', 'datatypes.o'),
    os.path.join(SRC, 'operators', 'obj', 'opTypes.o'),
    os.path.join(SRC, 'graph', 'obj', 'node.o'),
    os.path.join(SRC, 'graph', 'obj', 'graph.o'),
    os.path.join(SRC, 'codegen', 'obj', 'cppCodeGen.o')
]

NAME = 'deepC'
EXT = '_'+NAME
# NOTE(review): distutils expects version as a string; a float here relies
# on implicit str() — consider VERSION = '0.1'.
VERSION = 0.1
_DEBUG=False
_DEBUG_LEVEL = 0

# Long description is taken from the repository README.
long_description = ""
with open("../README.md", "r") as fh:
    long_description = fh.read()

install_requires = []
install_requires.extend([
    'numpy',
    'onnx',
])

# SWIG extension module (_deepC): interface file plus the C++ glue sources.
# NOTE(review): distutils recognizes 'c++' for `language`; 'C++17' is
# non-standard — the actual standard is selected by -std=c++17 below.
dnnc_module = Extension(EXT,
        language='C++17',
        sources=['dnnc.i', 'dnnc_api.cpp', 'dnnc_pyutils.cpp'],
        define_macros=[('NDEBUG', '1')],
        include_dirs =['../include'],
        extra_compile_args=['-isystem' + ISYSTEM,
            '-O3', '-Wall', '-std=c++17', '-fPIC', '-march=native', '-msse2' ],
        extra_objects=OTHER_OBJECTS,
        swig_opts=['-c++', '-Wall', '-I'+INCLUDES],
        )

setup(
    name=NAME,
    version=VERSION,
    description="deepC: Deep Neural Network Compiler",
    long_description=long_description,
    author='<NAME> et. al.',
    author_email='<EMAIL>',
    url='https://github.com/ai-techsystems/dnnCompiler',
    keywords='AITS deepC machine learning',
    classifiers=[
        'Development Status :: 0.1 - prealpha',
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX :: Linux',
        'Operating System :: Unix',
    ],
    ext_packages = [NAME],
    ext_modules = [dnnc_module],
    py_modules = ['dnnc'],
)
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
def prelu_util(x, slope):
    """Reference PReLU: scale each negative entry of x by the slope.

    slope is either a single-element sequence (one shared slope for every
    element) or a per-element sequence the same length as x. The input is
    copied; x itself is not modified.
    """
    out = x.copy()
    shared = len(slope) == 1
    for idx, val in enumerate(out):
        if val < 0:
            out[idx] = val * (slope[0] if shared else slope[idx])
    return out
class PReluTest(unittest.TestCase):
    """Checks dc.prelu against the numpy reference (prelu_util) for 1D-4D
    inputs, with both per-element and single (broadcast) slopes."""

    def setUp(self):
        self.len = 24
        self.np_a = np.random.randn(self.len).astype(np.float32)
        self.dc_a = dc.array(list(self.np_a))
        self.np_slope_1 = np.random.randn(1).astype(np.float32)
        self.dc_slope_1 = dc.array(list(self.np_slope_1))
        self.np_slope = np.random.randn(self.len).astype(np.float32)
        self.dc_slope = dc.array(list(self.np_slope))
        self.prelu_true = prelu_util(self.np_a, self.np_slope)
        self.prelu_true_1 = prelu_util(self.np_a, self.np_slope_1)

    def _check(self, expected, dc_result):
        # Compare the flattened numpy reference against a deepC tensor.
        actual = np.array(dc_result.data()).astype(np.float32)
        np.testing.assert_allclose(expected.flatten(), actual,
                                   rtol=1e-3, atol=1e-3)

    def test_prelu_1d (self):
        self._check(self.prelu_true, dc.prelu(self.dc_a, self.dc_slope))

    def test_prelu_1d_broadcast (self):
        self._check(self.prelu_true_1, dc.prelu(self.dc_a, self.dc_slope_1))

    def test_prelu_2d (self):
        shaped_a = dc.reshape(self.dc_a, (6, 4))
        shaped_slope = dc.reshape(self.dc_slope, (6, 4))
        self._check(np.reshape(self.prelu_true, (6, 4)),
                    dc.prelu(shaped_a, shaped_slope))

    def test_prelu_2d_broadcast (self):
        shaped_a = dc.reshape(self.dc_a, (6, 4))
        self._check(self.prelu_true_1.copy(),
                    dc.prelu(shaped_a, self.dc_slope_1))

    def test_prelu_3d (self):
        shaped_a = dc.reshape(self.dc_a, (2, 4, 3))
        shaped_slope = dc.reshape(self.dc_slope, (2, 4, 3))
        self._check(np.reshape(self.prelu_true, (2, 4, 3)),
                    dc.prelu(shaped_a, shaped_slope))

    def test_prelu_3d_broadcast (self):
        shaped_a = dc.reshape(self.dc_a, (2, 4, 3))
        self._check(self.prelu_true_1.copy(),
                    dc.prelu(shaped_a, self.dc_slope_1))

    def test_prelu_4d (self):
        shaped_a = dc.reshape(self.dc_a, (2, 3, 2, 2))
        shaped_slope = dc.reshape(self.dc_slope, (2, 3, 2, 2))
        self._check(np.reshape(self.prelu_true, (2, 3, 2, 2)),
                    dc.prelu(shaped_a, shaped_slope))

    def test_prelu_4d_broadcast (self):
        shaped_a = dc.reshape(self.dc_a, (2, 1, 4, 3))
        self._check(self.prelu_true_1.copy(),
                    dc.prelu(shaped_a, self.dc_slope_1))

    def tearDown(self):
        return "test finished"
# Run the PRelu test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
<file_sep>import os, sys, fnmatch
import unittest
import deepC.compiler.read_onnx as read_onnx
import deepC.dnnc as dnnc
class unitOperatorsTest(unittest.TestCase):
def find(self, pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
def test_readAllOnnxFiles(self):
debug = False;
if debug == False :
# mute stdout
sys_stdout = sys.stdout
f = open(os.devnull, 'w')
sys.stdout = f
onnx_files = self.find('*.onnx', 'parser')
for onnx_file in onnx_files:
if ( debug ):
print("testing ", onnx_file, flush=True)
cpp_file = os.path.splitext(os.path.basename(onnx_file))[0]+'.cpp'
bundle_dir = os.path.dirname(onnx_file);
parser = read_onnx.pbReader()
dc_graph = parser.main(onnx_file)
cppCode = dnnc.cppCodeGen(dc_graph, bundle_dir, cpp_file);
cppCode.write();
dc_graph.destroy();
# remove generated cpp files.
for cpp_file in self.find('*.cpp', 'parser'):
os.remove(cpp_file)
# unmute stdout
if debug == False :
sys.stdout = sys_stdout
print("read %d files." %len(onnx_files))
assert(len(onnx_files)==130)
<file_sep># Copyright 2018 The DNNC Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
############################
# Description:
# DNNC AOT Compiler script
#############################
import os, sys
import deepC.dnnc as dnnc
import deepC.compiler.onnx2cpp as onnx2cpp
class compilerWrapper:
    """Compiler class for models in ONNX binary/protobuf format."""

    def __init__ (self):
        # Locate the headers bundled with the installed deepC package.
        pkg_root = os.path.abspath(os.path.dirname(dnnc.__file__))
        self.inc_path = "-I " + os.path.join(pkg_root, "include")
        self.isys_path = "-isystem " + os.path.join(pkg_root, "packages", "eigen-eigen-323c052e1731")
        self.compiler = "g++"
        self.cpp_flags = "-O3"

    def cmd (self, cppFile, exeFile):
        """Assemble the full compiler command line as a single string."""
        parts = [self.compiler, self.cpp_flags, self.inc_path,
                 self.isys_path, cppFile, '-o', exeFile]
        return ' '.join(parts)

    def compile(self, cppFile):
        """Compile the generated C++ file.

        Returns the executable path on success, or None when the compiler
        exits with a non-zero status (diagnostics are echoed to stderr).
        """
        from subprocess import PIPE, run
        exeFile = os.path.splitext(cppFile)[0] + ".exe"
        command = self.cmd(cppFile, exeFile)
        print(command)
        proc = run(command, stdout=PIPE, stderr=PIPE,
                   universal_newlines=True, shell=True)
        sys.stdout.write(proc.stdout)
        if proc.returncode:
            sys.stderr.write(proc.stderr)
            sys.stderr.write("\ndnnc compilation failed. please file this bug with model/script file at\n")
            sys.stderr.write(" https://github.com/ai-techsystems/dnnCompiler/issues\n")
            return None
        return exeFile
# Use Model:
# _bundleDir : dirname("generated exe, i.e. a.out");
# parameter file(s) : in _bundleDir
# input file(s) : with a path relative to current dir.
# output file(s) : in current dir
def main():
    """CLI entry point: compile an ONNX model into an executable.

    argv: <onnx_model_file>.onnx [bundle_dir] [compile_flags]
    Prints a usage message and exits when the model file is missing.
    """
    onnx_file = None
    if len(sys.argv) >= 2:
        onnx_file = sys.argv[1]
    compile_flags = None
    if len(sys.argv) >= 4:
        compile_flags = sys.argv[3]
        # NOTE(review): argv[3] is blanked, presumably so onnx2cpp.main()
        # (which reads sys.argv itself) does not see the flags — but
        # compile_flags is never used afterwards. Confirm intent.
        sys.argv[3] = None
    if ( onnx_file is None ) :
        print("\nUsage: "+sys.argv[0]+ " <onnx_model_file>.onnx [bundle_dir] [compile_flags] \n")
        exit(0)
    # onnx2cpp returns where the generated C++ bundle lives.
    (bundleDir, cppFile) = onnx2cpp.main();
    onnxCC = compilerWrapper();
    exe = onnxCC.compile(os.path.join(bundleDir, cppFile));
    if ( exe is not None and exe ):
        print("model executable ", exe);
    else:
        print("\nUsage: "+sys.argv[0]+ " <onnx_model_file>.onnx \n")

if __name__ == "__main__":
    sys.exit(main())
<file_sep># How to Use LLVM
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
template <typename To, typename Ti1, typename Ti2>
class LSTM : public baseOperator<To, Ti1, Ti2> {
  //
  // Type Constraints
  //
  // Ti1, To : tensor(float16), tensor(float), tensor(double)
  // Constrain input and output types to float tensors.
  //
  // Ti2 : tensor(int32)
  // Constrain seq_lens to integer tensor.
  //
  //
  //
  // LSTM attributes
  //
protected:
  std::vector<float> activation_alpha;
  std::vector<float> activation_beta;
  // Optional scaling values used by some activation functions. The values are
  // consumed in the order of activation functions, for example (f, g, h) in
  // LSTM. Default values are the same as of corresponding ONNX operators.
  std::vector<std::string> activations;
  // A list of 3 (or 6 if bidirectional) activation functions for input, output,
  // forget, cell, and hidden.
  float clip;
  // Cell clip threshold. Clipping bounds the elements of a tensor in the range
  // of [-threshold, +threshold] and is applied to the input of activations. No
  // clip if not specified.
  std::string direction;
  // Whether the RNN is forward, reverse, or bidirectional. Default is forward
  int hidden_size;
  // Number of neurons in the hidden layer
  int input_forget;
  // Couple input and forget gate if 1. Default is 0;
  int num_directions;
  // 2 - bidirectional, 1 - unidirectional

public:
  /*! Validates and stores the operator attributes. Invalid values are
      reported via SPDLOG_ERROR; construction still completes. */
  LSTM(std::string name = "opLSTM", std::vector<float> activation_alpha = {},
       std::vector<float> activation_beta = {},
       std::vector<std::string> activations = {}, float clip = 0,
       std::string direction = "forward", int hidden_size = 0,
       int input_forget = 0)
      : baseOperator<To, Ti1, Ti2>(opLSTM, name) {
    std::stringstream errMsg;
    std::vector<std::string> supported_activations = {"Relu", "Tanh",
                                                      "Sigmoid"};
    std::vector<std::string> valid_directions = {"forward", "reverse",
                                                 "bidirectional"};
    num_directions = (direction == "bidirectional") ? 2 : 1;

    // alpha and beta for activations: at most one value per activation
    // function (3 per direction).
    if (activation_alpha.size() > 3 * num_directions) {
      errMsg << "Number of activation_alpha values (" << activation_alpha.size()
             << ") is more than " << 3 * num_directions << " for ";
      if (num_directions == 1) {
        errMsg << "unidirectional LSTM";
      } else {
        errMsg << "bidiretional LSTM";
      }
      errMsg << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
    }
    this->activation_alpha = activation_alpha;

    if (activation_beta.size() > 3 * num_directions) {
      errMsg << "Number of activation_beta values (" << activation_beta.size()
             << ") is more than " << 3 * num_directions << " for ";
      if (num_directions == 1) {
        errMsg << "unidirectional LSTM";
      } else {
        errMsg << "bidiretional LSTM";
      }
      errMsg << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
    }
    this->activation_beta = activation_beta;

    // activation functions: each requested name must be one of the
    // supported ones. BUGFIX: the original searched `activations` itself
    // but compared against supported_activations.end() (iterators from
    // different containers), and logged unconditionally on every
    // iteration; now we search the supported list and log only on a miss.
    for (auto activation : activations) {
      if (std::find(supported_activations.begin(), supported_activations.end(),
                    activation) == supported_activations.end()) {
        errMsg << activation << " is not a supported activation funtion"
               << std::endl;
        SPDLOG_ERROR(errMsg.str().c_str());
      }
    }
    this->activations = activations;

    // clip
    this->clip = clip;

    // direction
    if (std::find(valid_directions.begin(), valid_directions.end(),
                  direction) == valid_directions.end()) {
      errMsg << direction << " is not a valid direction" << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
    }
    this->direction = direction;

    // hidden_size?
    if (hidden_size < 0) {
      errMsg << hidden_size << " is not a valid value for hidden_size"
             << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
    }
    this->hidden_size = hidden_size;

    // input_forget?
    if ((input_forget < 0) || (input_forget > 1)) {
      errMsg << input_forget
             << " is not a valid value for input_forget ( must be 0 or 1)"
             << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
    }
    this->input_forget = input_forget;
  }
  /*! Fetch float-vector attributes (activation_alpha / activation_beta). */
  bool getAttribute(OPATTR attrName, std::vector<float> &obj) override {
    if (attrName == attr_activation_alpha) {
      obj = activation_alpha;
      return true;
    } else if (attrName == attr_activation_beta) {
      obj = activation_beta;
      return true;
    }
    return false;
  }
  /*! Fetch the activation-function name list. */
  bool getAttribute(OPATTR attrName, std::vector<std::string> &obj) override {
    if (attrName == attr_activations) {
      obj = activations;
      return true;
    }
    return false;
  }
  /*! Fetch the clip threshold. */
  bool getAttribute(OPATTR attrName, float &obj) override {
    if (attrName == attr_clip) {
      obj = clip;
      return true;
    }
    return false;
  }
  /*! Fetch the direction string. */
  bool getAttribute(OPATTR attrName, std::string &obj) override {
    if (attrName == attr_direction) {
      obj = direction;
      return true;
    }
    return false;
  }
  /*! Fetch integer attributes (hidden_size / input_forget). */
  bool getAttribute(OPATTR attrName, int &obj) override {
    if (attrName == attr_hidden_size) {
      obj = hidden_size;
      return true;
    } else if (attrName == attr_input_forget) {
      obj = input_forget;
      return true;
    }
    return false;
  }
  /*! Store float-vector attributes (activation_alpha / activation_beta). */
  bool setAttribute(OPATTR attrName, std::vector<float> obj) override {
    if (attrName == attr_activation_alpha) {
      activation_alpha = obj;
      return true;
    } else if (attrName == attr_activation_beta) {
      activation_beta = obj;
      return true;
    }
    return false;
  }
  /*! Store the activation-function name list. */
  bool setAttribute(OPATTR attrName, std::vector<std::string> obj) override {
    if (attrName == attr_activations) {
      activations = obj;
      return true;
    }
    return false;
  }
  /*! Store the clip threshold. */
  bool setAttribute(OPATTR attrName, float obj) override {
    if (attrName == attr_clip) {
      clip = obj;
      return true;
    }
    return false;
  }
  /*! Store the direction string. */
  bool setAttribute(OPATTR attrName, std::string obj) override {
    if (attrName == attr_direction) {
      direction = obj;
      return true;
    }
    return false;
  }
  /*! Store integer attributes (hidden_size / input_forget). */
  bool setAttribute(OPATTR attrName, int obj) override {
    if (attrName == attr_hidden_size) {
      hidden_size = obj;
      return true;
    } else if (attrName == attr_input_forget) {
      input_forget = obj;
      return true;
    }
    return false;
  }
  // Logistic sigmoid used as the default gate activation f.
  static Ti1 sigmoid_func(Ti1 x) { return (1 / (1 + exp(-x))); }
  //
  // The compute funtion returns vector with 3 optional tensor outputs
  //
  // tensor<T> Y [seq_length, num_directions, batch_size, hidden_size]
  // A tensor that concats all the intermediate output values of the hidden.
  //
  // tensor<T> Y_h [num_directions, batch_size, hidden_size]
  // The last output value of the hidden
  //
  // tensor<T> Y_c [num_directions, batch_size, hidden_size]
  // The last output value of the cell.
  //
  std::vector<tensor<To>> compute(
      //
      // Mandatory Inputs
      //
      tensor<Ti1> &X,
      // [seq_length, batch_size, input_size]
      // The input sequences 3-D tensor.
      tensor<Ti1> &W,
      // [num_directions, 4*hidden_size, input_size]
      // The weight tensor for the gates.
      // Concatenation of `W[iofc]` and `WB[iofc]` (if bidirectional) along
      // dimension 0
      tensor<Ti1> &R,
      // [num_directions, 4*hidden_size, hidden_size]
      // The recurrence weight tensor.
      // Concatenation of `R[iofc]` and `RB[iofc]` (if bidirectional) along
      // dimension 0.
      //
      // Optional Inputs
      //
      tensor<Ti1> &B = NULL_TENSOR<Ti1>,
      // [num_directions, 8*hidden_size]
      // The bias tensor for input gate.
      // Concatenation of `[Wb[iofc], Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]`
      // (if bidirectional) along dimension 0.
      tensor<Ti2> &sequence_lens = NULL_TENSOR<Ti2>,
      // [batch_size]
      // Lengths of the sequences in a batch.
      // If unspecified - assumed all sequences in the batch to have length
      // `seq_length`
      tensor<Ti1> &initial_h = NULL_TENSOR<Ti1>,
      // [num_directions, batch_size, hidden_size]
      // Initial value of the hidden. If unspecified - assumed to be 0
      tensor<Ti1> &initial_c = NULL_TENSOR<Ti1>,
      // [num_directions, batch_size, hidden_size]
      // Initial value of the cell. If unspecified - assumed to be 0
      tensor<Ti1> &P = NULL_TENSOR<Ti1>
      // [num_directions, batch_size, hidden_size]
      // The weight tensor for peepholes. If unspecified - assumed to be 0
      )
  {
    std::vector<tensor<To>> retVal;
    if (X.rank() != 3 || W.rank() != 3) {
      // NOTE(review): this only reports the problem; execution continues
      // and will index a malformed tensor below — consider returning early.
      std::cout << "Dimension Error" << std::endl;
    }
    int batch = X.shape()[1];
    // Iterate over the sequence dimension, one time step at a time.
    for (int i = 0; i < X.shape()[0]; i++) {
      tensor<Ti1> Y = X.slice(0, i, i);
      Map<Matrix<Ti1, Dynamic, Dynamic, RowMajor>> mat_X(
          this->tensorMem(Y), Y.shape()[1], Y.shape()[2]);
      if (num_directions == 2 || direction.compare("bidirectional") == 0) {
        // Bidirectional path is not implemented yet (see TODO below);
        // the forward/backward weight slices are mapped but unused.
        tensor<Ti1> W1 = W.slice(0, 0, 0);
        Map<Matrix<Ti1, Dynamic, Dynamic, RowMajor>> mat_W(
            this->tensorMem(W1), W.shape()[1], W.shape()[2]);
        tensor<Ti1> W2 = W.slice(0, 1, 1);
        Map<Matrix<Ti1, Dynamic, Dynamic, RowMajor>> mat_Wb(
            this->tensorMem(W2), W.shape()[1], W.shape()[2]);
      } else {
        // Unidirectional: gate rows of W are stacked in ONNX order i,o,f,c.
        int hidden = W.shape()[1] / 4;
        Map<Matrix<Ti1, Dynamic, Dynamic, RowMajor>> mat_W(
            this->tensorMem(W), W.shape()[1], W.shape()[2]);
        DNNC_EIGEN_MATRIX_CTOR(Ti1)
        mat_Ht = DNNC_EIGEN_MATRIX_CTOR(Ti1)::Zero(batch, hidden);
        DNNC_EIGEN_MATRIX_CTOR(Ti1)
        mat_Ct = DNNC_EIGEN_MATRIX_CTOR(Ti1)::Zero(batch, hidden);
        DNNC_EIGEN_VECTOR_CTOR(Ti1)
        mat_Pt = DNNC_EIGEN_VECTOR_CTOR(Ti1)::Zero(3 * hidden);
        // Gate pre-activations from the input: Xt * W[gate]^T.
        DNNC_EIGEN_MATRIX_CTOR(Ti1)
        Xi = mat_X * mat_W.topRows(hidden).transpose();
        DNNC_EIGEN_MATRIX_CTOR(Ti1)
        Xo = mat_X * mat_W.middleRows(W.shape()[1] / 4, hidden).transpose();
        DNNC_EIGEN_MATRIX_CTOR(Ti1)
        Xf = mat_X * mat_W.middleRows(2 * W.shape()[1] / 4, hidden).transpose();
        DNNC_EIGEN_MATRIX_CTOR(Ti1)
        Xc = mat_X * mat_W.bottomRows(hidden).transpose();
        if (B != NULL_TENSOR<Ti1>) {
          // B holds [Wb[iofc], Rb[iofc]]: eight hidden-sized segments.
          DNNC_EIGEN_ARRAY_MAP(mat_B, Ti1, B);
          Xi = Xi.rowwise() + mat_B.leftCols(hidden);
          Xo = Xo.rowwise() + mat_B.middleCols(B.shape()[1] / 8, hidden);
          Xf = Xf.rowwise() + mat_B.middleCols(2 * B.shape()[1] / 8, hidden);
          Xc = Xc.rowwise() + mat_B.middleCols(3 * B.shape()[1] / 8, hidden);
          Xi = Xi.rowwise() + mat_B.middleCols(4 * B.shape()[1] / 8, hidden);
          Xo = Xo.rowwise() + mat_B.middleCols(5 * B.shape()[1] / 8, hidden);
          Xf = Xf.rowwise() + mat_B.middleCols(6 * B.shape()[1] / 8, hidden);
          Xc = Xc.rowwise() + mat_B.rightCols(hidden);
        }
        if (initial_h != NULL_TENSOR<Ti1>) {
          // Add the recurrent contribution Ht-1 * R[gate]^T.
          Map<Matrix<Ti1, Dynamic, Dynamic, RowMajor>> mat_H(
              this->tensorMem(initial_h), initial_h.shape()[1],
              initial_h.shape()[2]);
          Map<Matrix<Ti1, Dynamic, Dynamic, RowMajor>> mat_R(
              this->tensorMem(R), R.shape()[1], R.shape()[2]);
          mat_Ht = mat_H;
          Xi += mat_H * mat_R.topRows(hidden).transpose();
          Xo += mat_H * mat_R.middleRows(R.shape()[1] / 4, hidden).transpose();
          Xf += mat_H *
                mat_R.middleRows(2 * R.shape()[1] / 4, hidden).transpose();
          Xc += mat_H * mat_R.bottomRows(hidden).transpose();
        }
        if (initial_c != NULL_TENSOR<Ti1> && P != NULL_TENSOR<Ti1>) {
          Map<Matrix<Ti1, Dynamic, Dynamic, RowMajor>> mat_C(
              this->tensorMem(initial_c), initial_c.shape()[1],
              initial_c.shape()[2]);
          DNNC_EIGEN_ARRAY_MAP(mat_P, Ti1, P);
          mat_Ct = mat_C;
          mat_Pt = mat_P;
        }
        // Peephole terms: Pi (.) Ct-1 and Pf (.) Ct-1.
        Xi.array() +=
            (mat_Ct.array().rowwise() * mat_Pt.leftCols(hidden).array());
        Xf.array() +=
            (mat_Ct.array().rowwise() * mat_Pt.rightCols(hidden).array());
        Xi = Xi.unaryExpr(&sigmoid_func);
        Xf = Xf.unaryExpr(&sigmoid_func);
        Xc.array() = tanh(Xc.array());
        // Ct = ft (.) Ct-1 + it (.) ct
        mat_Ct.array() =
            (mat_Ct.array() * Xf.array() + Xi.array() * Xc.array());
        // NOTE(review): P.shape()[1] is read unconditionally here even when
        // P was not supplied (NULL_TENSOR) — confirm that a null tensor
        // reports a usable shape.
        Xo.array() += (mat_Ct.array().rowwise() *
                       mat_Pt.middleCols(P.shape()[1] / 3, hidden).array());
        Xo = Xo.unaryExpr(&sigmoid_func);
        // Ht = ot (.) h(Ct)
        mat_Ht.array() = (Xo.array() * tanh(mat_Ct.array()));
        tensor<To> ret({Y.shape()[1], W.shape()[1] / 4});
        ret.load(mat_Ht.data());
        retVal.push_back(ret);
      }
      //
      // Process Attributes and Inputs
      //
      //
      // Compute
      //
      // Compute Equations (Default: f=Sigmoid, g=Tanh, h=Tanh):
      // it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)
      // ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)
      // ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
      // Ct = ft (.) Ct-1 + it (.) ct
      // ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)
      // Ht = ot (.) h(Ct)
    }
    // TODO: bidirectional, sequence_len, nondefault activations
    return retVal;
  }
}; // class LSTM
} // namespace dnnc
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#include "core/tensor.h"
#include <float.h>
#include <iostream>
#include <stdint.h>
using namespace dnnc;
// File-scope definition of the NULL_TENSOR variable template: a
// default-constructed tensor used as the "no tensor" sentinel value.
template <typename T> tensor<T> NULL_TENSOR;
//#define DNNC_TENSOR_TEST 1
#ifdef DNNC_TENSOR_TEST
template <typename T> void print_tensor(tensor<T> &v) {
std::cout << v << "\n";
}
// Smoke-test tensor<T>: construct a 2x3x4x5 tensor, fill it, and print
// its size, shape, first/last elements and full contents.
template <typename T> void type_test() {
  std::cout << "==================================\n";
  size_t x1 = 2, x2 = 3, x3 = 4, x4 = 5;
  tensor<T> t1({x1, x2, x3, x4});
  // BUGFIX: fill the WHOLE tensor. The original initialized only the first
  // x1*x2 elements, so printing t1(1, 2, 3, 4) and the full tensor below
  // read uninitialized memory.
  size_t sz = x1 * x2 * x3 * x4;
  for (size_t i = 0; i < sz; i++)
    t1[i] = static_cast<T>(i + sz);
  std::cout << "size " << t1.length() << std::endl;
  const std::vector<DIMENSION> shape = t1.shape();
  std::cout << "shape (";
  for (size_t i = 0; i < shape.size(); i++)
    std::cout << shape[i] << ", ";
  std::cout << ")\n";
  std::cout << std::to_string(t1[0]) << std::endl; // print first element
  std::cout << std::to_string(t1(1, 2, 3, 4))
            << std::endl; // print last element
  print_tensor(t1);
  std::cout << "==================================\n";
}
// Drive type_test over every supported element type, then demonstrate
// asType() conversion (float -> int) on a small ones-initialized tensor.
int main() {
  std::cout << "short------\n";
  type_test<short>();
  std::cout << "int8_t------\n";
  type_test<int8_t>();
  std::cout << "int16_t------\n";
  type_test<int16_t>();
  std::cout << "int32_t------\n";
  type_test<int32_t>();
  std::cout << "int64_t------\n";
  type_test<int64_t>();
  std::cout << "float------\n";
  type_test<float>();
  std::cout << "double------\n";
  type_test<double>();
  // need tests for float11_t, float16_t and float64_t
  //
  std::vector<size_t> shape = {2, 2};
  // 2x2 tensor of ones, then element-type conversion float -> int.
  tensor<float> t1(shape, "", dnnc::INIT_ONE);
  auto t2 = t1.asType<int>();
  std::cout << t1 << "\n";
  std::cout << t2 << "\n";
  return 0;
}
<file_sep># dnn Compiler Documentation
1. [Developer Getting Started Guide](DeveloperGettingStartedGuide.md)
1. [How to Contribute](contribute.md)
1. [Parser Documentation](ParserDocumentation.md)
1. [High Level Design Doc](highLevelDesign.md)
1. [How to Find Memory Leaks](howToFindMemoryLeaks.md)
1. [How to Profile to Speed Up](howToProfile.md)
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
template <typename To, typename Ti1, typename Ti2>
class Reshape : public baseOperator<To, Ti1, Ti2> {
protected:
  /*! Product of all entries of the shape tensor. */
  int64_t shape_length(tensor<int64_t> &shape) {
    int64_t product = 1;
    for (size_t i = 0; i < shape.length(); i++)
      product *= shape[i];
    return product;
  }

public:
  Reshape(std::string name = "opReshape")
      : baseOperator<To, Ti1, Ti2>(opReshape, name) {}
  /*! Reshape `input` to the dimensions given in `shape`.
      A 0 entry keeps the corresponding input dimension; at most one -1
      entry is inferred so the total element count matches. */
  tensor<To> compute(tensor<Ti1> input, tensor<int64_t> shape) {
    size_t nDims = shape.shape().size();
    // Pass 1: a 0 dimension means "copy the input's dimension unchanged".
    for (size_t d = 0; d < nDims; d++) {
      if (shape[d] == 0 && input.rank() > d)
        shape[d] = input.shape()[d];
    }
    // Pass 2: a -1 dimension is inferred from the remaining dimensions.
    for (size_t d = 0; d < nDims; d++) {
      if (shape[d] == -1) {
        shape[d] = 1; // neutralize before taking the product of the rest
        shape[d] = static_cast<int64_t>(input.length() / shape_length(shape));
      }
    }
    tensor<To> reshaped = input.template asType<To>();
    std::vector<size_t> newDims = shape.asType<size_t>();
    reshaped.reshape(newDims);
    return reshaped;
  }
};
<file_sep>
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
# Build a single-op ReduceLogSum ONNX model, save it as a testcase and
# emit the parsed gold files used by the dnnc regression suite.
import os, sys
import numpy as np
# Bind the bare module name explicitly: `from onnx import *` below does
# NOT bind `onnx`, yet `onnx.checker` / `onnx.save` are used further down.
import onnx
separator = os.path.sep
from onnx import *
sys.path.append(".."+separator+".."+separator+".."+separator+".."+separator+"python/parser")
from onnx_parser import *

op_name = 'ReduceLogSum'

# Single-node graph: R = ReduceLogSum(D, axes=(0,1), keepdims=0).
inputs = [helper.make_tensor_value_info("D",TensorProto.FLOAT,(1,2,3))]
outputs = [helper.make_tensor_value_info("R",TensorProto.FLOAT,(1,2))]
nodes = []
nodes.append(helper.make_node("ReduceLogSum",["D"],["R"],axes=[0,1],keepdims=0))
graph = helper.make_graph(nodes, op_name+"_graph", inputs, outputs)
opset = (OperatorSetIdProto(version=11),)
model = helper.make_model(graph, opset_imports=opset)
onnx.checker.check_model(model)

# Write the .onnx testcase plus the parsed .sym / .txt gold outputs.
t_prefix = ".." + separator + "testcases" + separator + op_name + separator + op_name
g_prefix = ".." + separator + "gold_files" + separator + op_name
onnx.save(model, t_prefix+".onnx")
parse(t_prefix+".onnx", g_prefix+".sym", onnx_output_file=t_prefix+".txt")
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! Element-wise logical NOT over an N-D tensor: each input element is
 *  cast to bool and negated, and the result is stored as To. */
template <typename To, typename Ti>
class Not : public baseOperator<To, Ti, Ti> {
  // Not attributes
public:
  Not(std::string name = "opNot") : baseOperator<To, Ti, Ti>(opNot, name) {}
  /*! \param a [bool]: N-D tensor input
   *  \return tensor with the same shape (and name) as the input, holding
   *          the boolean negation of each element. */
  tensor<To> compute(tensor<Ti> &a /*!< [bool]: N D tensor input*/) {
    // This check is for ONNX standard
    // if (!(this->template type_check<T, bool>() ))
    //   SPDLOG_ERROR("Constrain input tensors to bool types.");
    // Result mirrors the input's shape and name.
    tensor<To> result(a.shape(), a.name());
    DNNC_EIGEN_ARRAY_MAP(eigenVector, Ti, a);
    DNNC_EIGEN_VECTOR_CTOR(To) eResult;
    // Cast to bool, negate element-wise, then copy into the result tensor.
    eResult.array() = !eigenVector.template cast<bool>().array();
    result.load(eResult.data());
    return result;
  }
  /*!<
  \return The output tensor of the same shape as input with dtype bool.
  */
};
} // namespace dnnc
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class sliceAndIndexTest(unittest.TestCase):
    """Check dnnc tensor __getitem__/__setitem__ against numpy semantics
    for indexing, slicing, mixed forms and Ellipsis, on 1-D through 4-D
    integer tensors."""

    def setUp(self):
        # np.int was deprecated in numpy 1.20 and removed in 1.24; the
        # builtin int is its documented replacement (same platform dtype).
        self.np_1D = np.arange(12).astype(int)
        self.dc_1D = dc.arange(12).asTypeInt()
        self.np_2D = np.arange(12).reshape(4, 3).astype(int)
        self.dc_2D = dc.arange(12).reshape(4, 3).asTypeInt()
        self.np_3D = np.arange(48).reshape(4, 3, 4).astype(int)
        self.dc_3D = dc.arange(48).reshape(4, 3, 4).asTypeInt()
        self.np_4D = np.arange(96).reshape(4, 3, 2, 4).astype(int)
        self.dc_4D = dc.arange(96).reshape(4, 3, 2, 4).asTypeInt()

    def test_getitem(self):
        """Reads via indexing/slicing/Ellipsis must match numpy exactly."""
        # Indexing
        np.testing.assert_array_equal(self.np_1D[2], self.dc_1D[2])
        np.testing.assert_array_equal(self.np_2D[2], (self.dc_2D[2]).numpy())
        np.testing.assert_array_equal(self.np_3D[2,1], (self.dc_3D[2,1]).numpy())
        np.testing.assert_array_equal(self.np_4D[1,2], (self.dc_4D[1,2]).numpy())
        # special case as dnnc prints python datatype, but numpy prints numpy datatype
        np.testing.assert_array_equal(int(self.np_2D[2,1]), int(self.dc_2D[2,1]))
        # Slicing
        np.testing.assert_array_equal(self.np_1D[2:3], (self.dc_1D[2:3]).numpy())
        np.testing.assert_array_equal(self.np_1D[2:], (self.dc_1D[2:]).numpy())
        np.testing.assert_array_equal(self.np_1D[::3], (self.dc_1D[::3]).numpy())
        np.testing.assert_array_equal(self.np_2D[2:3,:], (self.dc_2D[2:3,:]).numpy())
        np.testing.assert_array_equal(self.np_2D[2:,1:], (self.dc_2D[2:,1:]).numpy())
        np.testing.assert_array_equal(self.np_2D[:,::3], (self.dc_2D[:,::3]).numpy())
        np.testing.assert_array_equal(self.np_3D[2:3,:,1:2], (self.dc_3D[2:3,:,1:2]).numpy())
        np.testing.assert_array_equal(self.np_3D[2:,1:,1:2], (self.dc_3D[2:,1:,1:2]).numpy())
        np.testing.assert_array_equal(self.np_3D[:,::3,1:2], (self.dc_3D[:,::3,1:2]).numpy())
        np.testing.assert_array_equal(self.np_4D[2:3,:,:1:2,::2], (self.dc_4D[2:3,:,:1:2,::2]).numpy())
        np.testing.assert_array_equal(self.np_4D[2:,1:,:1:2,::2], (self.dc_4D[2:,1:,:1:2,::2]).numpy())
        np.testing.assert_array_equal(self.np_4D[:,::3,:1:2], (self.dc_4D[:,::3,:1:2]).numpy())
        # Slicing with Indexing
        np.testing.assert_array_equal(self.np_2D[2:,1], (self.dc_2D[2:,1]).numpy())
        np.testing.assert_array_equal(self.np_2D[2,::3], (self.dc_2D[2,::3]).numpy())
        np.testing.assert_array_equal(self.np_3D[2:,1,::2], (self.dc_3D[2:,1,::2]).numpy())
        ## BUG numpy has shape with even null tensors
        # np.testing.assert_array_equal(self.np_3D[2,::3,3:1], (self.dc_3D[2,::3,3:1]).numpy())
        # np.testing.assert_array_equal(self.np_4D[2:,1,0,2:1:3], (self.dc_4D[2:,1,0,2:1:3]).numpy())
        np.testing.assert_array_equal(self.np_4D[2,::3,0:-1:3], (self.dc_4D[2,::3,0:-1:3]).numpy())
        # Ellipsis with Slicing
        np.testing.assert_array_equal(self.np_2D[...,::-1], (self.dc_2D[...,::-1]).numpy())
        np.testing.assert_array_equal(self.np_2D[1:,...], (self.dc_2D[1:,...]).numpy())
        np.testing.assert_array_equal(self.np_3D[...,::-1], (self.dc_3D[...,::-1]).numpy())
        np.testing.assert_array_equal(self.np_3D[1:,...], (self.dc_3D[1:,...]).numpy())
        np.testing.assert_array_equal(self.np_4D[...,::-1], (self.dc_4D[...,::-1]).numpy())
        np.testing.assert_array_equal(self.np_4D[1:,...], (self.dc_4D[1:,...]).numpy())
        # Ellipsis with Indexing
        np.testing.assert_array_equal(self.np_2D[...,1], (self.dc_2D[...,1]).numpy())
        np.testing.assert_array_equal(self.np_2D[2,...], (self.dc_2D[2,...]).numpy())
        np.testing.assert_array_equal(self.np_3D[...,1], (self.dc_3D[...,1]).numpy())
        np.testing.assert_array_equal(self.np_3D[2,...], (self.dc_3D[2,...]).numpy())
        np.testing.assert_array_equal(self.np_4D[...,1], (self.dc_4D[...,1]).numpy())
        np.testing.assert_array_equal(self.np_4D[2,...], (self.dc_4D[2,...]).numpy())

    def test_setitem(self):
        """Writes via indexing/slicing/Ellipsis must leave the dnnc tensor
        equal to the identically-mutated numpy array."""
        # Indexing
        self.np_2D[2] = 200
        self.dc_2D[2] = 200
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_2D[1,2] = 500
        self.dc_2D[1,2] = 500
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_3D[2,1] = 200
        self.dc_3D[2,1] = 200
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_3D[1,2] = 500
        self.dc_3D[1,2] = 500
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_4D[2,1] = 200
        self.dc_4D[2,1] = 200
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())
        self.np_4D[1,2] = 500
        self.dc_4D[1,2] = 500
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())
        # Slicing
        self.np_2D[2:3,:] = [500, 200, 30]
        self.dc_2D[2:3,:] = [500, 200, 30]
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_2D[2:,1:] = [50, 30]
        self.dc_2D[2:,1:] = [50, 30]
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_2D[:,::3] = 25
        self.dc_2D[:,::3] = 25
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_2D[:,::-2] = 45
        self.dc_2D[:,::-2] = 45
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_3D[2:3,:,1] = 500
        self.dc_3D[2:3,:,1] = 500
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_3D[2:,1:] = 30
        self.dc_3D[2:,1:] = 30
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_3D[:,::3] = 25
        self.dc_3D[:,::3] = 25
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_3D[:,::3] = 45
        self.dc_3D[:,::3] = 45
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_4D[2:3,:,1] = 500
        self.dc_4D[2:3,:,1] = 500
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())
        self.np_4D[2:,1:] = 30
        self.dc_4D[2:,1:] = 30
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())
        self.np_4D[:,::3] = 25
        self.dc_4D[:,::3] = 25
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())
        self.np_4D[:,0:1:-1] = 45
        self.dc_4D[:,0:1:-1] = 45
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())
        # Slicing with Indexing
        self.np_2D[2:,1] = 65
        self.dc_2D[2:,1] = 65
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_2D[2,::3] = 75
        self.dc_2D[2,::3] = 75
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_3D[2:,1] = 65
        self.dc_3D[2:,1] = 65
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_3D[2,::3] = 75
        self.dc_3D[2,::3] = 75
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_4D[2:,1] = 65
        self.dc_4D[2:,1] = 65
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())
        self.np_4D[2,::3] = 75
        self.dc_4D[2,::3] = 75
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())
        # Ellipsis with Slicing
        self.np_2D[...,::-1] = 62
        self.dc_2D[...,::-1] = 62
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_2D[1:,...] = 73
        self.dc_2D[1:,...] = 73
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_3D[...,::-1] = 62
        self.dc_3D[...,::-1] = 62
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_3D[1:,...] = 73
        self.dc_3D[1:,...] = 73
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_4D[...,::-1] = 62
        self.dc_4D[...,::-1] = 62
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())
        self.np_4D[1:,...] = 73
        self.dc_4D[1:,...] = 73
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())
        # Ellipsis with Indexing
        self.np_2D[...,1] = 63
        self.dc_2D[...,1] = 63
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_2D[2,...] = 71
        self.dc_2D[2,...] = 71
        np.testing.assert_array_equal(self.np_2D, (self.dc_2D).numpy())
        self.np_3D[...,1] = 63
        self.dc_3D[...,1] = 63
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_3D[2,...] = 71
        self.dc_3D[2,...] = 71
        np.testing.assert_array_equal(self.np_3D, (self.dc_3D).numpy())
        self.np_4D[...,1] = 63
        self.dc_4D[...,1] = 63
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())
        self.np_4D[2,...] = 71
        self.dc_4D[2,...] = 71
        np.testing.assert_array_equal(self.np_4D, (self.dc_4D).numpy())

    def tearDown(self):
        return "test finished"
if __name__ == '__main__':
unittest.main()<file_sep># how to run this script: python setup.py bdist_wheel
# how to test install: python -m pip install ~/dnnc/master/deepC/dist/dnnc-0.1-py3-none-any.whl --root pip_install_test
# Build metadata for the deepC python distribution.
import os, sys, glob
import shutil, errno, subprocess, multiprocessing
import setuptools

NAME = 'deepC'
# Keep the version a string: setuptools only coerces non-string versions
# with a deprecation warning, and string form avoids float-repr surprises.
VERSION = '0.13'

# Long description for PyPI, taken verbatim from the README.
long_description = ""
with open("README.md", "r") as fh:
    long_description = fh.read()
# Distribution subclass that tags the wheel with the build platform:
# the package ships a compiled extension, so it is never "pure python".
class binaryDist(setuptools.dist.Distribution):
    def is_pure(self):
        """Never pure: a native _dnnc.so is bundled."""
        return False

    def has_ext_modules(self):
        """Report native extensions so bdist_wheel emits a platform tag."""
        return True
from setuptools.command.build_ext import build_ext as buildext
class make_build(setuptools.Command):
    """Command that delegates the native build to the project Makefile."""

    description = "build the native dnnc library via make"
    # user_options is part of the distutils/setuptools Command protocol;
    # without it, invoking the command from the setup.py command line
    # fails during option parsing.  It also makes -j/--jobs overridable.
    user_options = [('jobs=', 'j', 'number of parallel make jobs')]

    def initialize_options(self):
        # Default to one make job per CPU.
        self.jobs = multiprocessing.cpu_count()

    def finalize_options(self):
        # Command-line values arrive as strings; normalize to int.
        self.jobs = int(self.jobs)

    def run(self):
        # Compile the sources in parallel first, then the final targets.
        cmd = "make CC=g++ SRC -j " + str(self.jobs)
        subprocess.call(cmd, shell=True)
        cmd = "make CC=g++ all "
        subprocess.call(cmd, shell=True)
class build_ext(buildext):
    """Override the default extension build to reuse the Makefile build."""

    def run(self):
        # All native compilation is handled by the make_build command.
        self.run_command('make_build')
# Create links to the src dirs inside deepC for proper installation.
def link_dir(dir_name):
    """Symlink <repo>/<dir_name> into the package dir (NAME/<dir_name>).

    An already-existing link is tolerated; any other OSError propagates.
    """
    try:
        os.symlink(os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                dir_name)),
                   os.path.join(NAME, dir_name))
    except OSError as e:
        if e.errno != errno.EEXIST:
            # Bare raise keeps the original traceback (raise e would not).
            raise

link_dir('include')
link_dir('packages')
# add source files for model compiler
def source_files(directory):
    """Return the paths of every file under *directory*, recursively,
    in os.walk order."""
    return [
        os.path.join(dirpath, fname)
        for dirpath, _subdirs, fnames in os.walk(directory)
        for fname in fnames
    ]
# Wire the custom commands into setuptools.
cmdclass = {
    'make_build': make_build,
    'build_ext': build_ext,
}

ext_modules = [
    setuptools.Extension(
        name=str(NAME + ".dnnc"),
        # List the bundled source files individually.  The previous code
        # passed [str(list) + str(list)], i.e. a single bogus path built
        # from the repr of two lists; the custom build_ext never compiles
        # these, but the manifest should still be well formed.
        sources=source_files('include') + source_files('packages'))
]

packages = setuptools.find_packages()

tests_require = []
# NOTE(review): 'unittest' is a standard-library module, not an
# installable distribution — kept for fidelity, but likely a mistake.
tests_require.append('unittest')
tests_require.append('mnist')

install_requires = []
install_requires.extend([
    'onnx==1.5.0',
])
# setuptools entry point: package metadata plus the native-build hooks
# (make_build / build_ext / binaryDist) defined above.
setuptools.setup(
    name=NAME,
    version=VERSION,
    description="DeepC: Deep Neural Network Compiler",
    long_description_content_type="text/markdown",
    long_description=long_description,
    packages=packages,
    ext_modules=ext_modules,
    cmdclass=cmdclass,  # route extension builds through the Makefile
    include_package_data=True,
    # Ship the prebuilt shared object and the bundled headers/sources.
    package_data={'':['_dnnc.so'] +
                  source_files('include') +
                  source_files('packages')
                  },
    install_requires=install_requires,
    tests_require=tests_require,
    author='<NAME> et. al.',
    author_email='<EMAIL>',
    url='https://github.com/ai-techsystems/deepC',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Programming Language :: Python :: 3',
        'Programming Language :: C++',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX :: Linux',
        'Operating System :: Unix',
    ],
    python_requires='>=3.5',
    distclass=binaryDist,  # force a platform-tagged (non-pure) wheel
    entry_points={
        'console_scripts': [
            'onnx-cpp = deepC.compiler.onnx2cpp:main',
            'compile-onnx = deepC.compiler.onnx2exe:main',
        ]
    },
    dependency_links=[]
)
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include <graph/graph.h>
namespace dnnc {
/*! cppCodeGen works on the DNNC Directed Acyclic Graph to generate C++
 * code as an intermediate step for debugging purposes. We skip the step
 * of AST (Abstract Syntax Tree) generation for now. It'll be easy enough
 * to generate AST, when needed in future.
 * */
class cppCodeGen {
protected:
  std::string _tab = " ";        // indentation unit for emitted code
  std::string _prefix = "dnnc_"; // prefix applied to generated names
  graph &_graph;                 // the DAG being translated
  std::string _bundleDir;        // directory holding generated artifacts
  std::string _outFile;          // path of the generated C++ file
  std::vector<std::string> _includes; // headers emitted at top of file
  // Helpers that each emit one piece of the generated program.
  std::string initializeData(irTypeData, std::string, std::string fname = "");
  std::string writeIncludes();
  std::string writeUsageFunction();
  std::string writeMainFunction(std::string);
  // Graph input nodes that are fed externally (true model inputs).
  std::vector<ioNode *> modelInputs();
  std::string paramFile(std::string str);
  std::string cppName(std::string str);
  std::string nodeName(node *n);
  std::string shapeStr(std::vector<DIMENSION>);
  // Per-node emitters, selected by node kind / operand arity.
  std::string write(opNode &);
  std::string write(ioNode &, size_t &);
  std::string writeConstantOperator(opNode &computeNode,
                                    std::vector<node *> &outs);
  std::string writeUnaryOperator(opNode &computeNode, std::vector<node *> &ins,
                                 std::vector<node *> &outs);
  std::string writeBinaryOperator(opNode &computeNode, std::vector<node *> &ins,
                                  std::vector<node *> &outs);
  std::string writeTernaryOperator(opNode &computeNode,
                                   std::vector<node *> &ins,
                                   std::vector<node *> &outs);
  std::string writeCustomOperator(opNode &computeNode, std::vector<node *> &ins,
                                  std::vector<node *> &outs);
  std::string write(dnnParameters);
  std::string write(nodeAttribute &, std::string);
public:
  /*! Bind the generator to a graph and its output locations; code is
   *  produced only when write() is called. */
  cppCodeGen(graph &graph, std::string bundleDir, std::string outFile)
      : _graph(graph), _bundleDir(bundleDir), _outFile(outFile) {}
  // Generate the C++ program; returns success/failure.
  bool write();
};
} // namespace dnnc
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#include "operators/BitwiseAnd.h"
using namespace dnnc;
using namespace Eigen;
#ifdef DNNC_BITWISEAND_TEST
#include <iostream>
// Standalone smoke test for the BitwiseAnd operator (built only when
// DNNC_BITWISEAND_TEST is defined): loads 2x3 tensor pairs of several
// element types and prints the result of compute() on each pair.
int main() {
  // Raw payloads; the first elements differ across each pair (1 vs 0,
  // true vs false) so the printed AND results are visibly non-trivial.
  int i1[6] = {1, 2, 3, 4, 5, 6};
  int i2[6] = {0, 2, 3, 4, 5, 6};
  float f1[6] = {1., 2., 3., 4., 5., 6.};
  float f2[6] = {0., 2., 3., 4., 5., 6.};
  double d1[6] = {1., 2., 3., 4., 5., 6.};
  double d2[6] = {0., 2., 3., 4., 5., 6.};
  bool b1[6] = {true, false, true, true, true, false};
  bool b2[6] = {false, true, true, true, false, true};
  // Wrap each payload in a 2x3 tensor.
  tensor<bool> tb1({2, 3});
  tb1.load(b1);
  tensor<bool> tb2({2, 3});
  tb2.load(b2);
  tensor<int> ti1({2, 3});
  ti1.load(i1);
  tensor<int> ti2({2, 3});
  ti2.load(i2);
  tensor<float> tf1({2, 3});
  tf1.load(f1);
  tensor<float> tf2({2, 3});
  tf2.load(f2);
  tensor<double> td1({2, 3});
  td1.load(d1);
  tensor<double> td2({2, 3});
  td2.load(d2);
  // Exercise one instantiation per input type; the first template
  // argument is the output element type.
  BitwiseAnd<bool, bool> BITWISEANDbb("localOpName");
  std::cout << BITWISEANDbb.compute(tb1, tb2) << std::endl;
  BitwiseAnd<int, int> BITWISEANDbi("localOpName");
  std::cout << BITWISEANDbi.compute(ti1, ti2) << std::endl;
  BitwiseAnd<int, float> BITWISEANDbf("localOpName");
  std::cout << BITWISEANDbf.compute(tf1, tf2) << std::endl;
  BitwiseAnd<int, double> BITWISEANDbd("localOpName");
  std::cout << BITWISEANDbd.compute(td1, td2) << std::endl;
  // BitwiseAnd<double, float> BITWISEAND3("localOpName");
  // std::cout << BITWISEAND3.compute(a, b) << std::endl ;
  // BitwiseAnd<bool, double> BITWISEAND3("name4");
  // std::cout << BITWISEAND3.compute(a, b) << std::endl ;
  return 0;
}
#endif
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#include "operators/MatMulInteger.h"
using namespace dnnc;
using namespace Eigen;
#ifdef DNNC_MATMULINTEGER_TEST
#include <iostream>
// Standalone smoke test for MatMulInteger (built only when
// DNNC_MATMULINTEGER_TEST is defined).
int main() {
  // 2-D case: (2x3) x (3x2) with scalar zero-point tensors set to 0.
  int d1[6] = {1, 2, 3, 4, 5, 6};
  int d2[6] = {1, 2, 3, 4, 5, 6};
  int d3[1] = {0};
  int d4[1] = {0};
  tensor<int> a({2, 3});
  a.load(d1);
  tensor<int> b({3, 2});
  b.load(d2);
  tensor<int> c({1});
  c.load(d3);
  tensor<int> d({1});
  d.load(d4);
  MatMulInteger<int> m("localOpName");
  auto result = m.compute(a, b, c, d);
  std::cout << result;
  std::cout << "\n";
// NOTE(review): defined after "MatMulInteger.h" has already been
// included, so it cannot affect that header in this translation unit;
// presumably it was meant to precede the #include — confirm.
#define DNNC_HIGHRANK_SUPPORT
  // 3-D case: (2x2x2) x (2x2x2) with scalar zero-points of 0.
  int i1[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  int i2[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  int i3[1] = {0};
  int i4[1] = {0};
  dnnc::tensor<int> e({2, 2, 2});
  e.load(i1);
  dnnc::tensor<int> f({2, 2, 2});
  f.load(i2);
  dnnc::tensor<int> g({1});
  g.load(i3);
  dnnc::tensor<int> h({1});
  h.load(i4);
  MatMulInteger<int> m1("localint");
  dnnc::tensor<int> iresult = m1.compute(e, f, g, h);
  std::cout << iresult << "\n";
  return 0;
}
#endif
<file_sep># Generic Steps to build Python Dist Wheel
## Compile and Test
```
% cd <dnnCompiler-repo>
% make clean
% make
% make TEST; # make sure tests are clean.
```
## Build
```
% rm -fr dist
% python setup.py bdist_wheel
```
## pip install locally
```
% cd /tmp
% python -m pip install <dnnCompiler-repo>/dist/deepC-0.1-py3-none-any.whl --root .
```
This will install
1. python package in /tmp/usr/local/lib/python3.X/dist-packages/deepC
1. binaries in /tmp/usr/local/bin/
## Test the installation.
```
% setenv PYTHONPATH /tmp/usr/local/lib/python3.6/dist-packages
% python -c 'import deepC.dnnc as dc; print(dc.arange(5))'
> [0.000000 1.000000 2.000000 3.000000 4.000000]
```
## Upload on pip
```
% cd <dnnCompiler-repo>
% python -m twine upload dist/*
```
Steps to build Python Dist Wheel on CentOS with manylinux
===================================================
```
## Build and Run Docker
env PLATFORM=`uname -m` TRAVIS_COMMIT=latest ./build.sh ; # 15 min
docker run -it --name deepC-0.13-pypi quay.io/pypa/manylinux2010_x86_64
mkdir /pypi-deepC && cd /pypi-deepC
## Install required packages
export PATH=/opt/python/cp36-cp36m/bin:${PATH}
yum install wget
pip3 install numpy wheel twine
## Install protobuf (needed by onnx) - 10 mins
yum remove protoc protobuf
git clone https://github.com/google/protobuf.git
cd protobuf
git submodule update --init --recursive
./autogen.sh
./configure --prefix=/usr
make -j8
make check -j8
make install
## Install cmake (needed by onnx) - 5 min
yum remove cmake -y
wget https://cmake.org/files/v3.6/cmake-3.6.2.tar.gz
tar -zxvf cmake-3.6.2.tar.gz
cd cmake-3.6.2
./bootstrap --prefix=/usr/local
make && make install
cd ..
# Install onnx (needed by deepC)
git clone https://github.com/onnx/onnx.git
cd onnx
git submodule update --init --recursive
python setup.py install
# Install swig (needed by deepC)
wget https://downloads.sourceforge.net/swig/swig-3.0.12.tar.gz
tar xvfz swig-3.0.12.tar.gz
cd swig-3.0.12
./configure --prefix=/usr --without-clisp --without-maximum-compile-warnings --without-pcre && make && make install
cd ..
# Prepare deepC
git clone https://github.com/ai-techsystems/deepC.git
cd deepC
# open setup.py and update 'VERSION=0.XX'
python setup.py bdist_wheel
auditwheel repair dist/deepC*whl
# Test deepC
cd /tmp
python -m pip install /pypi-deepC/deepC/dist/deepC-0.13-cp36-cp36m-linux_x86_64.whl --root .
export PYTHONPATH=/tmp/opt/python/cp36-cp36m/lib/python3.6/site-packages
python -c 'import deepC.dnnc as dc; print(dc.arange(5))'
>>> [0.000000 1.000000 2.000000 3.000000 4.000000]
# Upload deepC
cd /pypi-deepC/deepC/
python3 -m twine upload wheelhouse/deepC*whl
```
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class EyeLikeTest(unittest.TestCase):
    """Compare dc.eye_like against numpy.eye for bool/int/float/double
    2-D tensors of several shapes, with a random diagonal offset k."""

    def setUp(self):
        self.len = 48
        # Diagonal offset shared by every case in a single run.
        self.k = np.random.randint(low=-10, high=10)
        # np.bool / np.int aliases were removed in numpy>=1.24; the
        # builtin bool/int are the documented replacements.
        self.np_bool_a = np.random.randn(self.len).astype(bool)
        self.np_bool_b = np.random.randn(self.len).astype(bool)
        self.dc_bool_a = dc.array(list(self.np_bool_a))
        self.dc_bool_b = dc.array(list(self.np_bool_b))
        self.np_int_a = np.random.randn(self.len).astype(int)
        self.np_int_b = np.random.randn(self.len).astype(int)
        self.dc_int_a = dc.array(list(self.np_int_a))
        self.dc_int_b = dc.array(list(self.np_int_b))
        self.np_float_a = np.random.randn(self.len).astype(np.float32)
        self.np_float_b = np.random.randn(self.len).astype(np.float32)
        self.dc_float_a = dc.array(list(self.np_float_a))
        self.dc_float_b = dc.array(list(self.np_float_b))
        self.np_double_a = np.random.randn(self.len).astype(np.float64)
        self.np_double_b = np.random.randn(self.len).astype(np.float64)
        self.dc_double_a = dc.array(list(self.np_double_a))
        self.dc_double_b = dc.array(list(self.np_double_b))

    def _check(self, dc_flat, rows, cols, np_dtype):
        """Reshape dc_flat to (rows, cols), run eye_like with offset k,
        and compare against np.eye(rows, cols, k) cast to np_dtype."""
        dc_a = dc.reshape(dc_flat, (rows, cols))
        npr = np.eye(rows, cols, k=self.k)
        dcr = dc.eye_like(dc_a, self.k)
        np.testing.assert_allclose(npr.flatten(),
                                   np.array(dcr.data()).astype(np_dtype),
                                   rtol=1e-3, atol=1e-3)

    # EyeLike by default takes 2D tensor only
    def test_EyeLike2D_bool_1(self):
        self._check(self.dc_bool_a, 12, 4, bool)

    def test_EyeLike2D_bool_2(self):
        self._check(self.dc_bool_a, 2, 24, bool)

    def test_EyeLike2D_bool_3(self):
        self._check(self.dc_bool_a, 6, 8, bool)

    def test_EyeLike2D_int_1(self):
        self._check(self.dc_int_a, 12, 4, int)

    def test_EyeLike2D_int_2(self):
        self._check(self.dc_int_a, 2, 24, int)

    def test_EyeLike2D_int_3(self):
        self._check(self.dc_int_a, 6, 8, int)

    def test_EyeLike2D_float_1(self):
        self._check(self.dc_float_a, 12, 4, np.float32)

    def test_EyeLike2D_float_2(self):
        self._check(self.dc_float_a, 2, 24, np.float32)

    def test_EyeLike2D_float_3(self):
        self._check(self.dc_float_a, 6, 8, np.float32)

    def test_EyeLike2D_double_1(self):
        self._check(self.dc_double_a, 12, 4, np.float64)

    def test_EyeLike2D_double_2(self):
        self._check(self.dc_double_a, 2, 24, np.float64)

    def test_EyeLike2D_double_3(self):
        self._check(self.dc_double_a, 6, 8, np.float64)

    def tearDown(self):
        return "test finished"
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
<file_sep>import os, sys
import common
import unittest
import importlib
if __name__ == '__main__':
    # Run the unittest suite of each test folder; the process exit code
    # is the number of folders whose suite failed (0 == all green).
    tests_failed = 0
    for folder in ['swig', 'parser', 'compiler']:
        print("\nRunning tests in ===|" + folder + "|===")
        loader = unittest.TestLoader()
        tests = []
        # Each folder is a package exposing load_tests(loader, tests),
        # which appends its suites to `tests`.
        pkg = importlib.import_module(folder)
        pkg.load_tests(loader, tests)
        if len(tests) == 0:
            continue
        suite = unittest.TestSuite(tests)
        runner = unittest.TextTestRunner(verbosity=0)
        result = runner.run(suite)
        tests_failed = tests_failed + int(not result.wasSuccessful())
    # sys.exit is the explicit, always-available form; the builtin exit()
    # is provided by the site module and intended for interactive use.
    sys.exit(tests_failed)
<file_sep>import os,sys
# Bootstrap sys.path so the bundled sub-packages ('deepC', 'compiler')
# are importable regardless of the caller's working directory.
DNNC_ROOT=os.path.abspath(os.path.dirname(__file__))  # package root dir
sys.path.append(DNNC_ROOT)
sys.path.append(DNNC_ROOT+os.path.sep+'deepC')
sys.path.append(DNNC_ROOT+os.path.sep+'compiler')
#from swig import dnnc
#from python import read_onnx
<file_sep>// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 <NAME> <<EMAIL>>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include <main.h>
#include <iostream>
#include <GL/glew.h>
#include <Eigen/OpenGLSupport>
#include <GL/glut.h>
using namespace Eigen;
// Run CODE on a freshly reset modelview matrix, read GL_MODELVIEW_MATRIX
// back into an Eigen matrix, and verify it matches REF (cast to float).
#define VERIFY_MATRIX(CODE,REF) { \
    glLoadIdentity(); \
    CODE; \
    Matrix<float,4,4,ColMajor> m; m.setZero(); \
    glGet(GL_MODELVIEW_MATRIX, m); \
    if(!(REF).cast<float>().isApprox(m)) { \
      std::cerr << "Expected:\n" << ((REF).cast<float>()) << "\n" << "got\n" << m << "\n\n"; \
    } \
    VERIFY_IS_APPROX((REF).cast<float>(), m); \
  }

// Set uniform NAME (of Eigen type TYPE) to a random value via the glUniform
// overload, read it back with glGetUniform<SUFFIX>, and verify the round trip.
// Relies on a GLint prg_id being in scope at the expansion site.
#define VERIFY_UNIFORM(SUFFIX,NAME,TYPE) { \
    TYPE value; value.setRandom(); \
    TYPE data; \
    int loc = glGetUniformLocation(prg_id, #NAME); \
    VERIFY((loc!=-1) && "uniform not found"); \
    glUniform(loc,value); \
    EIGEN_CAT(glGetUniform,SUFFIX)(prg_id,loc,data.data()); \
    if(!value.isApprox(data)) { \
      std::cerr << "Expected:\n" << value << "\n" << "got\n" << data << "\n\n"; \
    } \
    VERIFY_IS_APPROX(value, data); \
  }

// Integer-uniform variant of VERIFY_UNIFORM: reads back with glGetUniformiv.
// The float round trip on the random value keeps it in integer-exact range.
#define VERIFY_UNIFORMi(NAME,TYPE) { \
    TYPE value = TYPE::Random().eval().cast<float>().cast<TYPE::Scalar>(); \
    TYPE data; \
    int loc = glGetUniformLocation(prg_id, #NAME); \
    VERIFY((loc!=-1) && "uniform not found"); \
    glUniform(loc,value); \
    glGetUniformiv(prg_id,loc,(GLint*)data.data()); \
    if(!value.isApprox(data)) { \
      std::cerr << "Expected:\n" << value << "\n" << "got\n" << data << "\n\n"; \
    } \
    VERIFY_IS_APPROX(value, data); \
  }
// Print the info log of a GL program object to stderr, if it is non-empty.
void printInfoLog(GLuint objectID)
{
  int logLength = 0, written = 0;
  glGetProgramiv(objectID, GL_INFO_LOG_LENGTH, &logLength);
  if(logLength <= 0)
    return;
  GLchar* buffer = new GLchar[logLength];
  glGetProgramInfoLog(objectID, logLength, &written, buffer);
  if(written > 0)
    std::cerr << "Shader info : \n" << buffer << std::endl;
  delete[] buffer;
}
// Compile a vertex/fragment shader pair, link them into a program, make the
// program current, and return its id.  Compile and link failures are reported
// on stderr but do not abort, so the caller's uniform checks can still run.
GLint createShader(const char* vtx, const char* frg)
{
  GLint prg_id = glCreateProgram();
  GLint vtx_id = glCreateShader(GL_VERTEX_SHADER);
  GLint frg_id = glCreateShader(GL_FRAGMENT_SHADER);
  GLint ok;

  glShaderSource(vtx_id, 1, &vtx, 0);
  glCompileShader(vtx_id);
  glGetShaderiv(vtx_id,GL_COMPILE_STATUS,&ok);
  if(!ok)
  {
    std::cerr << "vtx compilation failed\n";
  }

  glShaderSource(frg_id, 1, &frg, 0);
  glCompileShader(frg_id);
  glGetShaderiv(frg_id,GL_COMPILE_STATUS,&ok);
  if(!ok)
  {
    std::cerr << "frg compilation failed\n";
  }

  glAttachShader(prg_id, vtx_id);
  glAttachShader(prg_id, frg_id);

  glLinkProgram(prg_id);
  glGetProgramiv(prg_id,GL_LINK_STATUS,&ok);
  if(!ok)
  {
    std::cerr << "linking failed\n";
  }
  printInfoLog(prg_id);

  // The linked program owns the compiled code; flag the shader objects for
  // deletion so the driver can reclaim them (the original leaked them).
  glDeleteShader(vtx_id);
  glDeleteShader(frg_id);

  glUseProgram(prg_id);
  return prg_id;
}
// Smoke-test for Eigen's OpenGL support module: creates a minimal GLUT
// window, then checks the glVertex/glLoadMatrix/glMultMatrix/glRotate/
// glTranslate/glScale overloads for Eigen types against reference matrices,
// and the glUniform overloads against values read back from shader programs.
void test_openglsupport()
{
  int argc = 0;
  glutInit(&argc, 0);
  glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
  glutInitWindowPosition (0,0);
  glutInitWindowSize(10, 10);

  if(glutCreateWindow("Eigen") <= 0)
  {
    std::cerr << "Error: Unable to create GLUT Window.\n";
    exit(1);
  }

  glewExperimental = GL_TRUE;
  if(glewInit() != GLEW_OK)
  {
    std::cerr << "Warning: Failed to initialize GLEW\n";
  }

  // Compile-time checks of the glVertex overloads for vectors and expressions.
  Vector3f v3f;
  Matrix3f rot;
  glBegin(GL_POINTS);

  glVertex(v3f);
  glVertex(2*v3f+v3f);
  glVertex(rot*v3f);

  glEnd();

  // 4x4 matrices
  Matrix4f mf44; mf44.setRandom();
  VERIFY_MATRIX(glLoadMatrix(mf44), mf44);
  VERIFY_MATRIX(glMultMatrix(mf44), mf44);
  Matrix4d md44; md44.setRandom();
  VERIFY_MATRIX(glLoadMatrix(md44), md44);
  VERIFY_MATRIX(glMultMatrix(md44), md44);

  // Quaternion
  Quaterniond qd(AngleAxisd(internal::random<double>(), Vector3d::Random()));
  VERIFY_MATRIX(glRotate(qd), Projective3d(qd).matrix());

  // Fixed: the float quaternion was seeded with internal::random<double>(),
  // silently narrowing the angle to float; use the float generator directly.
  Quaternionf qf(AngleAxisf(internal::random<float>(), Vector3f::Random()));
  VERIFY_MATRIX(glRotate(qf), Projective3f(qf).matrix());

  // 3D Transform
  Transform<float,3,AffineCompact> acf3; acf3.matrix().setRandom();
  VERIFY_MATRIX(glLoadMatrix(acf3), Projective3f(acf3).matrix());
  VERIFY_MATRIX(glMultMatrix(acf3), Projective3f(acf3).matrix());

  Transform<float,3,Affine> af3(acf3);
  VERIFY_MATRIX(glLoadMatrix(af3), Projective3f(af3).matrix());
  VERIFY_MATRIX(glMultMatrix(af3), Projective3f(af3).matrix());

  Transform<float,3,Projective> pf3; pf3.matrix().setRandom();
  VERIFY_MATRIX(glLoadMatrix(pf3), Projective3f(pf3).matrix());
  VERIFY_MATRIX(glMultMatrix(pf3), Projective3f(pf3).matrix());

  Transform<double,3,AffineCompact> acd3; acd3.matrix().setRandom();
  VERIFY_MATRIX(glLoadMatrix(acd3), Projective3d(acd3).matrix());
  VERIFY_MATRIX(glMultMatrix(acd3), Projective3d(acd3).matrix());

  Transform<double,3,Affine> ad3(acd3);
  VERIFY_MATRIX(glLoadMatrix(ad3), Projective3d(ad3).matrix());
  VERIFY_MATRIX(glMultMatrix(ad3), Projective3d(ad3).matrix());

  Transform<double,3,Projective> pd3; pd3.matrix().setRandom();
  VERIFY_MATRIX(glLoadMatrix(pd3), Projective3d(pd3).matrix());
  VERIFY_MATRIX(glMultMatrix(pd3), Projective3d(pd3).matrix());

  // translations (2D and 3D)
  {
    // 2D translations are checked against a 3D translation with z == 0.
    Vector2f vf2; vf2.setRandom(); Vector3f vf23; vf23 << vf2, 0;
    VERIFY_MATRIX(glTranslate(vf2), Projective3f(Translation3f(vf23)).matrix());
    Vector2d vd2; vd2.setRandom(); Vector3d vd23; vd23 << vd2, 0;
    VERIFY_MATRIX(glTranslate(vd2), Projective3d(Translation3d(vd23)).matrix());

    Vector3f vf3; vf3.setRandom();
    VERIFY_MATRIX(glTranslate(vf3), Projective3f(Translation3f(vf3)).matrix());
    Vector3d vd3; vd3.setRandom();
    VERIFY_MATRIX(glTranslate(vd3), Projective3d(Translation3d(vd3)).matrix());

    Translation<float,3> tf3; tf3.vector().setRandom();
    VERIFY_MATRIX(glTranslate(tf3), Projective3f(tf3).matrix());
    Translation<double,3> td3; td3.vector().setRandom();
    VERIFY_MATRIX(glTranslate(td3), Projective3d(td3).matrix());
  }

  // scaling (2D and 3D)
  {
    // 2D scalings are checked against a 3D scaling with z-scale == 1.
    Vector2f vf2; vf2.setRandom(); Vector3f vf23; vf23 << vf2, 1;
    VERIFY_MATRIX(glScale(vf2), Projective3f(Scaling(vf23)).matrix());
    Vector2d vd2; vd2.setRandom(); Vector3d vd23; vd23 << vd2, 1;
    VERIFY_MATRIX(glScale(vd2), Projective3d(Scaling(vd23)).matrix());

    Vector3f vf3; vf3.setRandom();
    VERIFY_MATRIX(glScale(vf3), Projective3f(Scaling(vf3)).matrix());
    Vector3d vd3; vd3.setRandom();
    VERIFY_MATRIX(glScale(vd3), Projective3d(Scaling(vd3)).matrix());

    UniformScaling<float> usf(internal::random<float>());
    VERIFY_MATRIX(glScale(usf), Projective3f(usf).matrix());
    UniformScaling<double> usd(internal::random<double>());
    VERIFY_MATRIX(glScale(usd), Projective3d(usd).matrix());
  }

  // uniform
  {
    const char* vtx = "void main(void) { gl_Position = gl_Vertex; }\n";

    if(GLEW_VERSION_2_0)
    {
      #ifdef GL_VERSION_2_0
      const char* frg = ""
        "uniform vec2 v2f;\n"
        "uniform vec3 v3f;\n"
        "uniform vec4 v4f;\n"
        "uniform ivec2 v2i;\n"
        "uniform ivec3 v3i;\n"
        "uniform ivec4 v4i;\n"
        "uniform mat2 m2f;\n"
        "uniform mat3 m3f;\n"
        "uniform mat4 m4f;\n"
        "void main(void) { gl_FragColor = vec4(v2f[0]+v3f[0]+v4f[0])+vec4(v2i[0]+v3i[0]+v4i[0])+vec4(m2f[0][0]+m3f[0][0]+m4f[0][0]); }\n";

      GLint prg_id = createShader(vtx,frg);

      VERIFY_UNIFORM(fv,v2f, Vector2f);
      VERIFY_UNIFORM(fv,v3f, Vector3f);
      VERIFY_UNIFORM(fv,v4f, Vector4f);
      VERIFY_UNIFORMi(v2i, Vector2i);
      VERIFY_UNIFORMi(v3i, Vector3i);
      VERIFY_UNIFORMi(v4i, Vector4i);
      VERIFY_UNIFORM(fv,m2f, Matrix2f);
      VERIFY_UNIFORM(fv,m3f, Matrix3f);
      VERIFY_UNIFORM(fv,m4f, Matrix4f);
      #endif
    }
    else
      std::cerr << "Warning: opengl 2.0 was not tested\n";

    if(GLEW_VERSION_2_1)
    {
      #ifdef GL_VERSION_2_1
      // Non-square matrix uniforms became available with GLSL 1.20.
      const char* frg = "#version 120\n"
        "uniform mat2x3 m23f;\n"
        "uniform mat3x2 m32f;\n"
        "uniform mat2x4 m24f;\n"
        "uniform mat4x2 m42f;\n"
        "uniform mat3x4 m34f;\n"
        "uniform mat4x3 m43f;\n"
        "void main(void) { gl_FragColor = vec4(m23f[0][0]+m32f[0][0]+m24f[0][0]+m42f[0][0]+m34f[0][0]+m43f[0][0]); }\n";

      GLint prg_id = createShader(vtx,frg);

      typedef Matrix<float,2,3> Matrix23f;
      typedef Matrix<float,3,2> Matrix32f;
      typedef Matrix<float,2,4> Matrix24f;
      typedef Matrix<float,4,2> Matrix42f;
      typedef Matrix<float,3,4> Matrix34f;
      typedef Matrix<float,4,3> Matrix43f;

      VERIFY_UNIFORM(fv,m23f, Matrix23f);
      VERIFY_UNIFORM(fv,m32f, Matrix32f);
      VERIFY_UNIFORM(fv,m24f, Matrix24f);
      VERIFY_UNIFORM(fv,m42f, Matrix42f);
      VERIFY_UNIFORM(fv,m34f, Matrix34f);
      VERIFY_UNIFORM(fv,m43f, Matrix43f);
      #endif
    }
    else
      std::cerr << "Warning: opengl 2.1 was not tested\n";

    if(GLEW_VERSION_3_0)
    {
      #ifdef GL_VERSION_3_0
      // Unsigned integer uniforms became available with GLSL 1.50 / GL 3.0.
      const char* frg = "#version 150\n"
        "uniform uvec2 v2ui;\n"
        "uniform uvec3 v3ui;\n"
        "uniform uvec4 v4ui;\n"
        "out vec4 data;\n"
        "void main(void) { data = vec4(v2ui[0]+v3ui[0]+v4ui[0]); }\n";

      GLint prg_id = createShader(vtx,frg);

      typedef Matrix<unsigned int,2,1> Vector2ui;
      typedef Matrix<unsigned int,3,1> Vector3ui;
      typedef Matrix<unsigned int,4,1> Vector4ui;

      VERIFY_UNIFORMi(v2ui, Vector2ui);
      VERIFY_UNIFORMi(v3ui, Vector3ui);
      VERIFY_UNIFORMi(v4ui, Vector4ui);
      #endif
    }
    else
      std::cerr << "Warning: opengl 3.0 was not tested\n";

    #ifdef GLEW_ARB_gpu_shader_fp64
    if(GLEW_ARB_gpu_shader_fp64)
    {
      #ifdef GL_ARB_gpu_shader_fp64
      // Double-precision uniforms require the ARB_gpu_shader_fp64 extension.
      const char* frg = "#version 150\n"
        "uniform dvec2 v2d;\n"
        "uniform dvec3 v3d;\n"
        "uniform dvec4 v4d;\n"
        "out vec4 data;\n"
        "void main(void) { data = vec4(v2d[0]+v3d[0]+v4d[0]); }\n";

      GLint prg_id = createShader(vtx,frg);

      VERIFY_UNIFORM(dv,v2d, Vector2d);
      VERIFY_UNIFORM(dv,v3d, Vector3d);
      VERIFY_UNIFORM(dv,v4d, Vector4d);
      #endif
    }
    else
      std::cerr << "Warning: GLEW_ARB_gpu_shader_fp64 was not tested\n";
    #else
      std::cerr << "Warning: GLEW_ARB_gpu_shader_fp64 was not tested\n";
    #endif
  }
}
<file_sep>
project(EigenLapack CXX)

include("../cmake/language_support.cmake")

# Probe for a working Fortran compiler (workaround for CMake bug #9220);
# without one, only the C++ translation units are built.
workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS)

if(EIGEN_Fortran_COMPILER_WORKS)
  enable_language(Fortran OPTIONAL)
  if(NOT CMAKE_Fortran_COMPILER)
    set(EIGEN_Fortran_COMPILER_WORKS OFF)
  endif()
endif()

add_custom_target(lapack)
include_directories(../blas)

# C++ sources implementing the LAPACK entry points via Eigen.
set(EigenLapack_SRCS
single.cpp double.cpp complex_single.cpp complex_double.cpp ../blas/xerbla.cpp
)
if(EIGEN_Fortran_COMPILER_WORKS)

# Fortran helper routines that Eigen does not re-implement in C++.
set(EigenLapack_SRCS ${EigenLapack_SRCS}
  slarft.f  dlarft.f  clarft.f  zlarft.f
  slarfb.f  dlarfb.f  clarfb.f  zlarfb.f
  slarfg.f  dlarfg.f  clarfg.f  zlarfg.f
  slarf.f   dlarf.f   clarf.f   zlarf.f
  sladiv.f  dladiv.f  cladiv.f  zladiv.f
  ilaslr.f  iladlr.f  ilaclr.f  ilazlr.f
  ilaslc.f  iladlc.f  ilaclc.f  ilazlc.f
  dlapy2.f  dlapy3.f  slapy2.f  slapy3.f
  clacgv.f  zlacgv.f
  slamch.f  dlamch.f
  second_NONE.f dsecnd_NONE.f
)

# Fixed: option() signature is option(<var> "<help>" [value]); the original
# had help text and value swapped ("Enbale the Lapack unit tests" as value),
# and a non-false string silently defaults the option ON.
option(EIGEN_ENABLE_LAPACK_TESTS "Enable the Lapack unit tests" OFF)

if(EIGEN_ENABLE_LAPACK_TESTS)

  get_filename_component(eigen_full_path_to_reference_lapack "./reference/" ABSOLUTE)
  if(NOT EXISTS ${eigen_full_path_to_reference_lapack})

      # Download lapack and install sources and testing at the right place
      message(STATUS "Download lapack_addons_3.4.1.tgz...")
      file(DOWNLOAD "http://downloads.tuxfamily.org/eigen/lapack_addons_3.4.1.tgz"
                    "${CMAKE_CURRENT_SOURCE_DIR}/lapack_addons_3.4.1.tgz"
                    INACTIVITY_TIMEOUT 15
                    TIMEOUT 240
                    STATUS download_status
                    EXPECTED_MD5 ab5742640617e3221a873aba44bbdc93
                    SHOW_PROGRESS)
      message(STATUS ${download_status})
      list(GET download_status 0 download_status_num)
      # Fixed: removed an unconditional `set(download_status_num 0)` that
      # overwrote the real download status and always took the success branch.
      if(download_status_num EQUAL 0)
        message(STATUS "Setup lapack reference and lapack unit tests")
        execute_process(COMMAND tar xzf "lapack_addons_3.4.1.tgz" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
      else()
        message(STATUS "Download of lapack_addons_3.4.1.tgz failed, LAPACK unit tests wont be enabled")
        set(EIGEN_ENABLE_LAPACK_TESTS false)
      endif()

  endif()

  get_filename_component(eigen_full_path_to_reference_lapack "./reference/" ABSOLUTE)
  if(EXISTS ${eigen_full_path_to_reference_lapack})
    # Routines already exported by Eigen's own implementation.
    set(EigenLapack_funcfilenames
        ssyev.f   dsyev.f   csyev.f   zsyev.f
        spotrf.f  dpotrf.f  cpotrf.f  zpotrf.f
        spotrs.f  dpotrs.f  cpotrs.f  zpotrs.f
        sgetrf.f  dgetrf.f  cgetrf.f  zgetrf.f
        sgetrs.f  dgetrs.f  cgetrs.f  zgetrs.f)

    FILE(GLOB ReferenceLapack_SRCS0 RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "reference/*.f")
    # Pull in each reference routine unless Eigen already provides it.
    # (The ${filename} references below were corrupted to "$(unknown)" in the
    # checked-in copy; reconstructed from the surrounding loop variable.)
    foreach(filename1 IN LISTS ReferenceLapack_SRCS0)
      string(REPLACE "reference/" "" filename ${filename1})
      list(FIND EigenLapack_SRCS ${filename} id1)
      list(FIND EigenLapack_funcfilenames ${filename} id2)
      if((id1 EQUAL -1) AND (id2 EQUAL -1))
        set(ReferenceLapack_SRCS ${ReferenceLapack_SRCS} reference/${filename})
      endif()
    endforeach()
  endif()

endif(EIGEN_ENABLE_LAPACK_TESTS)

endif(EIGEN_Fortran_COMPILER_WORKS)
# Build both static and shared LAPACK libraries; the shared one links
# against Eigen's BLAS.
add_library(eigen_lapack_static ${EigenLapack_SRCS} ${ReferenceLapack_SRCS})
add_library(eigen_lapack SHARED ${EigenLapack_SRCS})

target_link_libraries(eigen_lapack eigen_blas)

if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
  target_link_libraries(eigen_lapack_static ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
  target_link_libraries(eigen_lapack ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif()

add_dependencies(lapack eigen_lapack eigen_lapack_static)

install(TARGETS eigen_lapack eigen_lapack_static
        RUNTIME DESTINATION bin
        LIBRARY DESTINATION lib
        ARCHIVE DESTINATION lib)

get_filename_component(eigen_full_path_to_testing_lapack "./testing/" ABSOLUTE)
if(EXISTS ${eigen_full_path_to_testing_lapack})
# The following comes from lapack/TESTING/CMakeLists.txt
# Get Python
find_package(PythonInterp)
message(STATUS "Looking for Python found - ${PYTHONINTERP_FOUND}")
if (PYTHONINTERP_FOUND)
message(STATUS "Using Python version ${PYTHON_VERSION_STRING}")
endif()
set(LAPACK_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(LAPACK_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(BUILD_SINGLE true)
set(BUILD_DOUBLE true)
set(BUILD_COMPLEX true)
set(BUILD_COMPLEX16E true)
if(MSVC_VERSION)
# string(REPLACE "/STACK:10000000" "/STACK:900000000000000000"
# CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}")
string(REGEX REPLACE "(.*)/STACK:(.*) (.*)" "\\1/STACK:900000000000000000 \\3"
CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}")
endif()
add_subdirectory(testing/MATGEN)
add_subdirectory(testing/LIN)
add_subdirectory(testing/EIG)
  # add_lapack_test(<output> <input> <target>): register a CTest test that
  # runs <target> with stdin redirected from testing/<input>, capturing
  # stdout into <output>, via the runtest.cmake driver script.  The test is
  # only registered when the input file actually exists.
  macro(add_lapack_test output input target)
    set(TEST_INPUT "${LAPACK_SOURCE_DIR}/testing/${input}")
    set(TEST_OUTPUT "${LAPACK_BINARY_DIR}/testing/${output}")
    # NOTE(review): the LOCATION target property is deprecated in newer
    # CMake; kept as-is to preserve behavior — confirm before upgrading.
    get_target_property(TEST_LOC ${target} LOCATION)
    string(REPLACE "." "_" input_name ${input})
    set(testName "${target}_${input_name}")
    if(EXISTS "${TEST_INPUT}")
      add_test(LAPACK-${testName} "${CMAKE_COMMAND}"
        -DTEST=${TEST_LOC}
        -DINPUT=${TEST_INPUT}
        -DOUTPUT=${TEST_OUTPUT}
        -DINTDIR=${CMAKE_CFG_INTDIR}
        -P "${LAPACK_SOURCE_DIR}/testing/runtest.cmake")
    endif()
  endmacro(add_lapack_test)
  # Per-precision LIN and EIG test suites, mirroring LAPACK's own
  # TESTING/CMakeLists.txt: s = single, d = double, c = complex,
  # z = complex16.
  if (BUILD_SINGLE)
    add_lapack_test(stest.out stest.in xlintsts)
    #
    # ======== SINGLE RFP LIN TESTS ========================
    add_lapack_test(stest_rfp.out stest_rfp.in xlintstrfs)
    #
    #
    # ======== SINGLE EIG TESTS ===========================
    #
    add_lapack_test(snep.out nep.in xeigtsts)
    add_lapack_test(ssep.out sep.in xeigtsts)
    add_lapack_test(ssvd.out svd.in xeigtsts)
    add_lapack_test(sec.out sec.in xeigtsts)
    add_lapack_test(sed.out sed.in xeigtsts)
    add_lapack_test(sgg.out sgg.in xeigtsts)
    add_lapack_test(sgd.out sgd.in xeigtsts)
    add_lapack_test(ssb.out ssb.in xeigtsts)
    add_lapack_test(ssg.out ssg.in xeigtsts)
    add_lapack_test(sbal.out sbal.in xeigtsts)
    add_lapack_test(sbak.out sbak.in xeigtsts)
    add_lapack_test(sgbal.out sgbal.in xeigtsts)
    add_lapack_test(sgbak.out sgbak.in xeigtsts)
    add_lapack_test(sbb.out sbb.in xeigtsts)
    add_lapack_test(sglm.out glm.in xeigtsts)
    add_lapack_test(sgqr.out gqr.in xeigtsts)
    add_lapack_test(sgsv.out gsv.in xeigtsts)
    add_lapack_test(scsd.out csd.in xeigtsts)
    add_lapack_test(slse.out lse.in xeigtsts)
  endif()

  if (BUILD_DOUBLE)
    #
    # ======== DOUBLE LIN TESTS ===========================
    add_lapack_test(dtest.out dtest.in xlintstd)
    #
    # ======== DOUBLE RFP LIN TESTS ========================
    add_lapack_test(dtest_rfp.out dtest_rfp.in xlintstrfd)
    #
    # ======== DOUBLE EIG TESTS ===========================
    add_lapack_test(dnep.out nep.in xeigtstd)
    add_lapack_test(dsep.out sep.in xeigtstd)
    add_lapack_test(dsvd.out svd.in xeigtstd)
    add_lapack_test(dec.out dec.in xeigtstd)
    add_lapack_test(ded.out ded.in xeigtstd)
    add_lapack_test(dgg.out dgg.in xeigtstd)
    add_lapack_test(dgd.out dgd.in xeigtstd)
    add_lapack_test(dsb.out dsb.in xeigtstd)
    add_lapack_test(dsg.out dsg.in xeigtstd)
    add_lapack_test(dbal.out dbal.in xeigtstd)
    add_lapack_test(dbak.out dbak.in xeigtstd)
    add_lapack_test(dgbal.out dgbal.in xeigtstd)
    add_lapack_test(dgbak.out dgbak.in xeigtstd)
    add_lapack_test(dbb.out dbb.in xeigtstd)
    add_lapack_test(dglm.out glm.in xeigtstd)
    add_lapack_test(dgqr.out gqr.in xeigtstd)
    add_lapack_test(dgsv.out gsv.in xeigtstd)
    add_lapack_test(dcsd.out csd.in xeigtstd)
    add_lapack_test(dlse.out lse.in xeigtstd)
  endif()

  if (BUILD_COMPLEX)
    add_lapack_test(ctest.out ctest.in xlintstc)
    #
    # ======== COMPLEX RFP LIN TESTS ========================
    add_lapack_test(ctest_rfp.out ctest_rfp.in xlintstrfc)
    #
    # ======== COMPLEX EIG TESTS ===========================
    add_lapack_test(cnep.out nep.in xeigtstc)
    add_lapack_test(csep.out sep.in xeigtstc)
    add_lapack_test(csvd.out svd.in xeigtstc)
    add_lapack_test(cec.out cec.in xeigtstc)
    add_lapack_test(ced.out ced.in xeigtstc)
    add_lapack_test(cgg.out cgg.in xeigtstc)
    add_lapack_test(cgd.out cgd.in xeigtstc)
    add_lapack_test(csb.out csb.in xeigtstc)
    add_lapack_test(csg.out csg.in xeigtstc)
    add_lapack_test(cbal.out cbal.in xeigtstc)
    add_lapack_test(cbak.out cbak.in xeigtstc)
    add_lapack_test(cgbal.out cgbal.in xeigtstc)
    add_lapack_test(cgbak.out cgbak.in xeigtstc)
    add_lapack_test(cbb.out cbb.in xeigtstc)
    add_lapack_test(cglm.out glm.in xeigtstc)
    add_lapack_test(cgqr.out gqr.in xeigtstc)
    add_lapack_test(cgsv.out gsv.in xeigtstc)
    add_lapack_test(ccsd.out csd.in xeigtstc)
    add_lapack_test(clse.out lse.in xeigtstc)
  endif()

  if (BUILD_COMPLEX16)
    #
    # ======== COMPLEX16 LIN TESTS ========================
    add_lapack_test(ztest.out ztest.in xlintstz)
    #
    # ======== COMPLEX16 RFP LIN TESTS ========================
    add_lapack_test(ztest_rfp.out ztest_rfp.in xlintstrfz)
    #
    # ======== COMPLEX16 EIG TESTS ===========================
    add_lapack_test(znep.out nep.in xeigtstz)
    add_lapack_test(zsep.out sep.in xeigtstz)
    add_lapack_test(zsvd.out svd.in xeigtstz)
    add_lapack_test(zec.out zec.in xeigtstz)
    add_lapack_test(zed.out zed.in xeigtstz)
    add_lapack_test(zgg.out zgg.in xeigtstz)
    add_lapack_test(zgd.out zgd.in xeigtstz)
    add_lapack_test(zsb.out zsb.in xeigtstz)
    add_lapack_test(zsg.out zsg.in xeigtstz)
    add_lapack_test(zbal.out zbal.in xeigtstz)
    add_lapack_test(zbak.out zbak.in xeigtstz)
    add_lapack_test(zgbal.out zgbal.in xeigtstz)
    add_lapack_test(zgbak.out zgbak.in xeigtstz)
    add_lapack_test(zbb.out zbb.in xeigtstz)
    add_lapack_test(zglm.out glm.in xeigtstz)
    add_lapack_test(zgqr.out gqr.in xeigtstz)
    add_lapack_test(zgsv.out gsv.in xeigtstz)
    add_lapack_test(zcsd.out csd.in xeigtstz)
    add_lapack_test(zlse.out lse.in xeigtstz)
  endif()
if (BUILD_SIMPLE)
if (BUILD_DOUBLE)
#
# ======== SINGLE-DOUBLE PROTO LIN TESTS ==============
add_lapack_test(dstest.out dstest.in xlintstds)
endif()
endif()
if (BUILD_COMPLEX)
if (BUILD_COMPLEX16)
#
# ======== COMPLEX-COMPLEX16 LIN TESTS ========================
add_lapack_test(zctest.out zctest.in xlintstzc)
endif()
endif()
# ==============================================================================
execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${LAPACK_SOURCE_DIR}/testing/lapack_testing.py ${LAPACK_BINARY_DIR})
add_test(
NAME LAPACK_Test_Summary
WORKING_DIRECTORY ${LAPACK_BINARY_DIR}
COMMAND ${PYTHON_EXECUTABLE} "lapack_testing.py"
)
endif()
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include <string>
namespace dnnc {
// Numeric codes for every operator the compiler recognizes — largely the
// ONNX operator set plus a few extensions (e.g. FloorDiv, NotEqual,
// BitwiseAnd/Or/Xor, SetSlice).  Values start at 1 so 0 never aliases a
// valid op; opInvalid is the sentinel for unrecognized operator names.
enum OPCODE {
  opAbs = 1,
  opAcos,
  opAcosh,
  opAdd,
  opAnd,
  opArgMax,
  opArgMin,
  opAsin,
  opAsinh,
  opAtan,
  opAtanh,
  opAveragePool,
  opBatchNormalization,
  opBitShift,
  opBitwiseAnd,
  opBitwiseOr,
  opBitwiseXor,
  opCast,
  opCeil,
  opClip,
  opCompress,
  opConcat,
  opConstant,
  opConstantOfShape,
  opConv,
  opConvInteger,
  opConvTranspose,
  opCos,
  opCosh,
  opCumSum,
  opDepthToSpace,
  opDequantizeLinear,
  opDiv,
  opDropout,
  opElu,
  opEqual,
  opErf,
  opExp,
  opExpand,
  opEyeLike,
  opFlatten,
  opFloor,
  opFloorDiv,
  opGRU,
  opGather,
  opGemm,
  opGlobalAveragePool,
  opGlobalLpPool,
  opGlobalMaxPool,
  opGreater,
  opGreaterEqual,
  opHardSigmoid,
  opHardmax,
  opIdentity,
  opIf,
  opInstanceNormalization,
  opIsInf,
  opIsNaN,
  opLRN,
  opLSTM,
  opLeakyRelu,
  opLess,
  opLessEqual,
  opLog,
  opLogSoftmax,
  opLoop,
  opLpNormalization,
  opLpPool,
  opMatMul,
  opMatMulInteger,
  opMax,
  opMaxPool,
  opMaxRoiPool,
  opMaxUnpool,
  opMean,
  opMin,
  opMod,
  opMul,
  opMultinomial,
  opNeg,
  opNonMaxSuppression,
  opNonZero,
  opNot,
  opNotEqual,
  opOneHot,
  opOr,
  opPRelu,
  opPad,
  opPow,
  opQLinearConv,
  opQLinearMatMul,
  opQuantizeLinear,
  opRNN,
  opRandomNormal,
  opRandomNormalLike,
  opRandomUniform,
  opRandomUniformLike,
  opReciprocal,
  opReduceL1,
  opReduceL2,
  opReduceLogSum,
  opReduceLogSumExp,
  opReduceMax,
  opReduceMean,
  opReduceMin,
  opReduceProd,
  opReduceSum,
  opReduceSumSquare,
  opRelu,
  opRemainder,
  opReshape,
  opResize,
  opReverseSequence,
  opRoiAlign,
  opRound,
  opScan,
  opScatter,
  opSelu,
  opShape,
  opShrink,
  opSigmoid,
  opSign,
  opSin,
  opSinh,
  opSize,
  opSlice,
  opSetSlice,
  opSoftmax,
  opSoftplus,
  opSoftsign,
  opSpaceToDepth,
  opSplit,
  opSqrt,
  opSqueeze,
  opStringNormalizer,
  opSub,
  opSum,
  opTan,
  opTanh,
  opTfIdfVectorizer,
  opThresholdedRelu,
  opTile,
  opTopK,
  opTranspose,
  opTrueDiv,
  opUnsqueeze,
  opUpsample,
  opWhere,
  opXor,
  opInvalid // sentinel: returned by getOpCode() for unknown operator names
};
// Numeric codes for ONNX node-attribute names.  Values start at 1 so 0
// never aliases a valid attribute; attr_invalid is the sentinel returned
// by getAttrName() for unrecognized attribute strings.
enum OPATTR {
  attr_activation_alpha = 1,
  attr_activation_beta,
  attr_activations,
  attr_alpha,
  attr_auto_pad,
  attr_axes,
  attr_axis,
  attr_batch_axis,
  attr_beta,
  attr_bias,
  attr_blocksize,
  attr_body,
  attr_case_change_action,
  attr_ceil_mode,
  attr_center_point_box,
  attr_clip,
  attr_count_include_pad,
  attr_detect_negative,
  attr_detect_positive,
  attr_dilations,
  attr_direction,
  attr_dtype,
  attr_else_branch,
  attr_epsilon,
  attr_exclusive,
  attr_fmod,
  attr_gamma,
  attr_group,
  attr_hidden_size,
  attr_high,
  attr_input_forget,
  attr_is_case_sensitive,
  attr_k,
  attr_keepdims,
  attr_kernel_shape,
  attr_lambd,
  // NOTE(review): "attr_larges" looks like a typo of ONNX TopK's "largest"
  // attribute; renaming the enumerator would break other translation units,
  // so it is kept as-is — confirm against the string table in the .cpp.
  attr_larges,
  attr_linear_before_reset,
  attr_locale,
  attr_low,
  attr_max_gram_length,
  attr_max_skip_count,
  attr_mean,
  attr_min_gram_length,
  attr_mode,
  attr_momentum,
  attr_ngram_counts,
  attr_ngram_indexes,
  attr_num_scan_inputs,
  attr_output_height,
  attr_output_padding,
  attr_output_shape,
  attr_output_width,
  attr_p,
  attr_pads,
  attr_perm,
  attr_pool_int64s,
  attr_pool_strings,
  attr_pooled_shape,
  attr_ratio,
  attr_reverse,
  attr_sample_size,
  attr_sampling_ratio,
  attr_scale,
  attr_scan_input_axes,
  attr_scan_input_directions,
  attr_scan_output_axes,
  attr_scan_output_directions,
  attr_seed,
  attr_shape,
  attr_size,
  attr_sorted,
  attr_spatial_scale,
  attr_split,
  attr_stopwords,
  attr_storage_order,
  attr_strides,
  attr_then_branch,
  attr_time_axis,
  attr_to,
  attr_transA,
  attr_transB,
  attr_value,
  attr_weights,
  attr_invalid // sentinel: returned by getAttrName() for unknown names
};
// Map an attribute-name string to its OPATTR code (attr_invalid when the
// name is not recognized), and an OPATTR code back to its string form.
OPATTR getAttrName(std::string attrStr);
std::string getAttrNameStr(OPATTR attr);
// Map an operator-name string to its OPCODE (opInvalid when the name is
// not recognized), and an OPCODE back to its string form.
OPCODE getOpCode(std::string opCodeStr);
std::string getOpCodeStr(OPCODE opCode);
} // namespace dnnc
<file_sep># Short coding projects
1. Convert the make-based build to a CMake-based build
# Training and documentation projects
1. Getting started guide
1. How to contribute guide
# Tutorials
1. Speech for microcontrollers.
1. Vision for microcontrollers.
1. [Inference framework for micropython](https://micropython.org/)
# Microcontroller
1. Support
1. [MBED OS](https://www.mbed.com/en/platform/mbed-os/)
1. [Arduino IDE](https://www.arduino.cc/en/main/software)
1. [SparkFun Edge](https://www.sparkfun.com/categories/419)
1. [Other STM32F7 Series microcontrollers based on ARM® Cortex®-M7 core](https://www.st.com/en/evaluation-tools/32f746gdiscovery.html)
# How to use projects
1. [Deploy Docker](https://help.github.com/en/articles/configuring-docker-for-use-with-github-package-registry)
<file_sep># Copyright 2018 The DNNC Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
#
# Serial build: swig/code-gen steps feed later compile steps in order.
MAKEFLAGS += -j1

include ../Makefile.common

LIB=$(LIB_DIR)/libdnnc.so
PYMOD=_dnnc.so

all:$(LIB) VERIFY

# generate extern definitions and apis for swig
API_MODULES=dnnc.api
DNNC_API_CPPS=$(API_MODULES:%.api=%_api.cpp)
$(DNNC_API_CPPS): $(API_MODULES)
	@echo "generating APIs from $<"
	$(PYTHON) $(API_GENERATOR)

dnnc.i:tensor.i graph.i
dnnc_swig.cpp: dnnc.i

SWIG_MODULES=dnnc.i
SWIG_CPPS=$(SWIG_MODULES:%.i=%_swig.cpp)
$(SWIG_CPPS):$(SWIG_MODULES)
	@echo "running swig with $<"
	$(SWIG) $(SWIG_FLAGS) $(DNNC_INCLUDES) -o $@ $<

CPP_SRCS=$(DNNC_API_CPPS) $(SWIG_CPPS) dnnc_pyutils.cpp
OBJECTS=$(CPP_SRCS:%.cpp=$(OBJ_DIR)/%.o)
$(OBJ_DIR)/%.o:%.cpp
	test -d $(OBJ_DIR) || $(MKDIR_P) $(OBJ_DIR)
	@echo "compiling $<"
	$(CC) $(CPP_FLAGS) $(PY_INCLUDES) $(DNNC_INCLUDES) $(EIGEN_INCLUDES) -c $< -o $@

# Core compiler objects built by the ../src makefile.
OTHER_OBJECTS = $(CORE_OBJ)/datatypes.o \
                $(OPER_OBJ)/opTypes.o \
                $(GRPH_OBJ)/node.o \
                $(GRPH_OBJ)/graph.o \
                $(CODE_OBJ)/cppCodeGen.o
$(OTHER_OBJECTS):
	$(MAKE) -C ../src  # use $(MAKE), not make, to inherit flags/jobserver

$(LIB):$(OTHER_OBJECTS) $(OBJECTS)
	test -d $(LIB_DIR) || $(MKDIR_P) $(LIB_DIR)
	$(CC) $(LD_FLAGS) $(OTHER_OBJECTS) $(OBJECTS) -o $@
	$(LN_S) -f $@ $(PYMOD)

# Smoke-test the freshly built python module.
VERIFY:
	$(PYTHON) $(BASIC_TEST)

clean:
	$(RM_F) $(OBJ_DIR) $(LIB_DIR) dnnc_swig.cpp dnnc_swig_externs.h dnnc.py dnnc.pyc $(DNNC_API_CPPS) $(PYMOD) __pycache__

.DEFAULT: all
# all/clean/VERIFY/print_vars never produce files of those names; mark them
# phony so stray files cannot short-circuit the build (was: only print_vars).
.PHONY: all clean VERIFY print_vars
print_vars:
	@echo "DNNC_API : " $(DNNC_API_CPPS)
	@echo "SWIG_CPPS: " $(SWIG_CPPS)
	@echo "CPP_SRCS : " $(CPP_SRCS)
	@echo "OBJECTS  : " $(OBJECTS)
<file_sep>
# The sparse benchmarks link against Eigen's own BLAS/LAPACK builds.
set(BLAS_FOUND TRUE)
set(LAPACK_FOUND TRUE)
set(BLAS_LIBRARIES eigen_blas_static)
set(LAPACK_LIBRARIES eigen_lapack_static)

set(SPARSE_LIBS "")

# find_library(PARDISO_LIBRARIES pardiso412-GNU450-X86-64)
# if(PARDISO_LIBRARIES)
#   add_definitions("-DEIGEN_PARDISO_SUPPORT")
#   set(SPARSE_LIBS ${SPARSE_LIBS} ${PARDISO_LIBRARIES})
# endif(PARDISO_LIBRARIES)

# Optional sparse backends: each one found adds a -DEIGEN_*_SUPPORT define
# and its link libraries.
find_package(Cholmod)
if(CHOLMOD_FOUND AND BLAS_FOUND AND LAPACK_FOUND)
  add_definitions("-DEIGEN_CHOLMOD_SUPPORT")
  include_directories(${CHOLMOD_INCLUDES})
  set(SPARSE_LIBS ${SPARSE_LIBS} ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES})
  set(CHOLMOD_ALL_LIBS ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES})
endif()

find_package(Umfpack)
if(UMFPACK_FOUND AND BLAS_FOUND)
  add_definitions("-DEIGEN_UMFPACK_SUPPORT")
  include_directories(${UMFPACK_INCLUDES})
  set(SPARSE_LIBS ${SPARSE_LIBS} ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES})
  set(UMFPACK_ALL_LIBS ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES})
endif()

find_package(SuperLU 4.0)
if(SUPERLU_FOUND AND BLAS_FOUND)
  add_definitions("-DEIGEN_SUPERLU_SUPPORT")
  include_directories(${SUPERLU_INCLUDES})
  set(SPARSE_LIBS ${SPARSE_LIBS} ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES})
  set(SUPERLU_ALL_LIBS ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES})
endif()

find_package(PASTIX QUIET COMPONENTS METIS SCOTCH)
# check that the PASTIX found is a version without MPI
find_path(PASTIX_pastix_nompi.h_INCLUDE_DIRS
  NAMES pastix_nompi.h
  HINTS ${PASTIX_INCLUDE_DIRS}
)
if (NOT PASTIX_pastix_nompi.h_INCLUDE_DIRS)
  message(STATUS "A version of Pastix has been found but pastix_nompi.h does not exist in the include directory."
                 " Because Eigen tests require a version without MPI, we disable the Pastix backend.")
endif()
if(PASTIX_FOUND AND PASTIX_pastix_nompi.h_INCLUDE_DIRS AND BLAS_FOUND)
  add_definitions("-DEIGEN_PASTIX_SUPPORT")
  include_directories(${PASTIX_INCLUDE_DIRS_DEP})
  # PaStiX needs an ordering library: prefer Scotch, fall back to METIS.
  if(SCOTCH_FOUND)
    include_directories(${SCOTCH_INCLUDE_DIRS})
    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${SCOTCH_LIBRARIES})
  elseif(METIS_FOUND)
    include_directories(${METIS_INCLUDE_DIRS})
    set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${METIS_LIBRARIES})
  endif(SCOTCH_FOUND)
  set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES_DEP} ${ORDERING_LIBRARIES})
  set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES_DEP})
endif()

if(METIS_FOUND)
  include_directories(${METIS_INCLUDE_DIRS})
  set (SPARSE_LIBS ${SPARSE_LIBS} ${METIS_LIBRARIES})
  add_definitions("-DEIGEN_METIS_SUPPORT")
endif(METIS_FOUND)

# librt provides clock_gettime on older glibc; used by the benchmark timers.
find_library(RT_LIBRARY rt)
if(RT_LIBRARY)
  set(SPARSE_LIBS ${SPARSE_LIBS} ${RT_LIBRARY})
endif(RT_LIBRARY)

add_executable(spbenchsolver spbenchsolver.cpp)
target_link_libraries (spbenchsolver ${SPARSE_LIBS})

add_executable(spsolver sp_solver.cpp)
target_link_libraries (spsolver ${SPARSE_LIBS})

add_executable(test_sparseLU test_sparseLU.cpp)
target_link_libraries (test_sparseLU ${SPARSE_LIBS})
<file_sep>
class utils:
    """Stateless assertion helpers for dnnc-style tensors.

    A "tensor" here is any object exposing ``shape()`` and ``data()`` (a
    flat iterable of scalars).  Each helper raises AssertionError on a
    mismatch and returns True on success.
    """
    # Fixed: the original `def __init__():` lacked `self`, so utils() raised
    # TypeError on instantiation; the helpers are now proper staticmethods
    # (still callable as utils.assert_equal(a, b), as before).

    @staticmethod
    def assert_less(small, big):
        """Assert elementwise small < big; shapes must match exactly."""
        assert small.shape() == big.shape()
        for a, d in zip(small.data(), big.data()):
            assert a < d, "ASSERT failed on assert_less"
        return True

    @staticmethod
    def assert_equal(actual, desired):
        """Assert elementwise equality; shapes must match exactly."""
        assert actual.shape() == desired.shape()
        for a, d in zip(actual.data(), desired.data()):
            assert a == d, "ASSERT failed on assert_equal"
        return True

    @staticmethod
    def assert_allclose(actual, desired, rtol=1e-07, atol=0):
        """Assert |actual - desired| <= atol + rtol*|desired| elementwise."""
        assert actual.shape() == desired.shape()
        for a, d in zip(actual.data(), desired.data()):
            assert abs(a - d) <= atol + rtol * abs(d), \
                "ASSERT failed on assert_allclose"
        return True
<file_sep># Guide to find memory leaks
## Using Valgrind
```
% cd dnnCompiler
% make clean
% make DEBUG=Y; # debug symbols.
% valgrind --tool=memcheck --leak-check=yes --show-reachable=yes \
  --num-callers=20 --track-fds=yes <dnnc-exe> |& tee valgrind.log
```
## gcc option lmcheck
You can ask malloc to check the consistency of dynamic memory by using the mcheck function. This function is a GNU extension,
declared in mcheck.h.
```
% cd dnnCompiler
% make clean
% make LMCHECK=Y   # instrumentation
% cd <test-dir>
% <dnnc-exe> test.py
```
## glibc env variable "MALLOC_CHECK_"
Another possibility to check for and guard against bugs in the use of malloc, realloc and free is to set the environment
variable MALLOC_CHECK_.
```
% cd dnnCompiler
% make clean
% make DEBUG=Y     # instrumentation
% cd <test-dir>
% export MALLOC_CHECK_=3
% <dnnc-exe> test.py
```
Read More at [Heap Consistency Checking](https://www.gnu.org/software/libc/manual/html_node/Heap-Consistency-Checking.html)
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class GreaterTest(unittest.TestCase):
    """Cross-checks dc.greater against np.greater on identical random
    integer data, reshaped to a range of 1D/2D/3D/4D shapes.

    The nine original test bodies were byte-for-byte clones differing only
    in the reshape target; they now share the private ``_verify`` helper.
    Public test method names are unchanged so test discovery and any
    external test selection keep working.
    """

    def setUp(self):
        # One flat buffer of 24 random ints per operand; every test
        # reshapes this same data so numpy and dnnc see identical inputs.
        self.len = 24
        self.np_a = np.random.randint(24, size=self.len)
        self.np_b = np.random.randint(24, size=self.len)
        self.dc_a = dc.array(list(self.np_a))
        self.dc_b = dc.array(list(self.np_b))

    def _verify(self, shape=None):
        """Reshape both operand pairs to ``shape`` (None keeps them flat),
        run element-wise greater in numpy and dnnc, and assert equality.

        dcr.data() is flat, so the numpy result is flattened for comparison
        (a no-op in the 1D case).
        """
        if shape is None:
            np_a, np_b = self.np_a, self.np_b
            dc_a, dc_b = self.dc_a, self.dc_b
        else:
            np_a = np.reshape(self.np_a, shape)
            np_b = np.reshape(self.np_b, shape)
            dc_a = dc.reshape(self.dc_a, shape)
            dc_b = dc.reshape(self.dc_b, shape)
        npr = np.greater(np_a, np_b)
        dcr = dc.greater(dc_a, dc_b)
        np.testing.assert_array_equal(npr.flatten(), np.array(dcr.data()))

    def test_Greater1D(self):
        self._verify()

    def test_Greater2D_1(self):
        self._verify((6, 4))

    def test_Greater2D_2(self):
        self._verify((3, 8))

    def test_Greater2D_3(self):
        self._verify((12, 2))

    def test_Greater3D_1(self):
        self._verify((2, 4, 3))

    def test_Greater3D_2(self):
        self._verify((2, 2, 6))

    def test_Greater3D_3(self):
        self._verify((4, 1, 6))

    def test_Greater4D_1(self):
        self._verify((2, 2, 2, 3))

    def test_Greater4D_2(self):
        self._verify((1, 4, 2, 3))

    def tearDown(self):
        # unittest ignores tearDown's return value; kept for compatibility.
        return "test finished"
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
<file_sep># Help for writing Python Tests
## Third party software used
1. [Python unit testing framework](https://docs.python.org/2/library/unittest.html) aka PyUnit
1. [Numpy Testing assertions](https://docs.scipy.org/doc/numpy/reference/routines.testing.html)
1. **[Skip examples and jump straight to the Instructions](#instruction-for-your-implementations)** (Go through the examples at least once)
## DNNC Python APIs
DNNC framework is easy to get started since most of APIs closely resemble numpy.
For the sake of simplicity, team has chosen to go with *Row Major* form of tensors for the beta releases.
DNNC supports up to 4D tensors in the beta release.
### DNNC Tensor APIs
```python
>>> import dnnc as dc
>>> a=dc.arange(5) ; # create a vector of 5 elements
>>> print(a)
[0.000000 1.000000 2.000000 3.000000 4.000000]
>>> a[1] # print second element of the array.
1.0
>>> a[1] = 100.1 # assign second element a new value 100.1
>>> print(a) # print and check new value.
[0.000000 100.099998 2.000000 3.000000 4.000000]
>>> a=dc.array([[10,11,12],[20,21,22]]) # create 2D arrary from python list
>>> a.shape() # check shape.
(2, 3)
>>> print(a) # print 2D arrary.
[[10.000000 11.000000 12.000000]
[12.000000 20.000000 21.000000]]
>>> a.reshape(dc.ivec([3,2])) # reshape 2x3 matrix to 3x2 matrix
>>> a.shape()
(3, 2)
>>> print(a)
[[10.000000 11.000000]
[20.000000 21.000000]
[0.000000 0.000000]]
```
**Other tensor functions**
```python
>>> a.<tab><tab>
a.broadcast( a.empty( a.name( a.this
a.data( a.flatten( a.rank( a.to_proto(
a.dtype( a.length( a.reshape( a.to_string(
a.eigen_to_numpy( a.load( a.shape( a.transpose(
```
### DNNC APIs
**Matrix Multiplication Example**
```python
>>> a=dc.array([[10,11,12],[20,21,22]]) # create 'a' 2x3 matrix
>>> b=dc.array([[10,11,12],[20,21,22]]) # create 'b' 2x3 matrix
>>> dc.reshape(b,(3,2)) # reshape matrix 'b' to 3x2
>>> b.shape()
(3, 2)
>>> y=dc.matmul(a,b) # multiply 'a' and 'b'.
>>> print(y)
[[484.000000 594.000000]
[914.000000 1124.000000]]
```
**Other DNNC APIs**
```python
>>> dc.<tab><tab>
dc.add( dc.matmul(
dc.arange( dc.ones(
dc.array( dc.random(
dc.dTensor( dc.reshape(
dc.empty( dc.zeros(
dc.ivec dc.fvec
dc.iTensor( dc.fTensor(
dc.thresholded_relu(
```
## Writing a unit test for Operator MatMul
```python
import dnnc as dc
import numpy as np
import unittest
class MatMulTest(unittest.TestCase):
def setUp(self):
self.len = 12
self.np_a = np.random.randn(self.len).astype(np.float32)
self.np_b = np.random.randn(self.len).astype(np.float32)
self.dc_a = dc.array(list(self.np_a));
self.dc_b = dc.array(list(self.np_b));
def test_MatMul_1D (self):
npr = np.matmul(self.np_a, self.np_b)
dcr = dc.matmul(self.dc_a, self.dc_b)
np.testing.assert_allclose(npr, np.array(dcr.data()[0]).astype(np.float32),
rtol=1e-3, atol=1e-3)
def test_MatMul_2D (self):
np_a = np.reshape(self.np_a, (3,4))
np_b = np.reshape(self.np_b, (4,3))
dc_a = dc.reshape(self.dc_a, (3,4));
dc_b = dc.reshape(self.dc_b, (4,3));
npr = np.matmul(np_a, np_b);
dcr = dc.matmul(dc_a, dc_b);
np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),
rtol=1e-3, atol=1e-3)
def test_MatMul_3D (self):
np_a = np.reshape(self.np_a, (2,2,3))
np_b = np.reshape(self.np_b, (2,3,2))
dc_a = dc.reshape(self.dc_a, (2,2,3));
dc_b = dc.reshape(self.dc_b, (2,3,2));
npr = np.matmul(np_a, np_b);
dcr = dc.matmul(dc_a, dc_b);
np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),
rtol=1e-3, atol=1e-3)
if __name__ == '__main__':
unittest.main()
```
---
# Instruction for your implementations:
- I have moved your work on the python interface i.e. **dnnc.i** and **dnnc_api.cpp** to a **[temporary folder](https://github.com/ai-techsystems/dnnc-operators/tree/master/temp/swig)**
- **Because not all the operators work with the interface, so be sure to only push the repository when your** `make` **command doesn't fail. This is very important, as this could take your fellow developers' valuable time trying to fix the bug, rather than implementing test cases.**
- So only push when you add the header files in the interface, and that works with python interface.
- So below is the cycle you have to go through, in the next days.
- Add one of your operator in the **dnnc.i** and **dnnc_api.cpp**.
- Go to the **dnnc-operator/swig** folder, and run `make` command to compile the newly added operators in the interface.
- If everything works fine, then you successfully added your operator in the **dnnc python interface**. And you can check it by importing **dnnc** in python bash.
- Now go to the **dnnc-operator/test/swig** folder, here you will see the **MatMul.py** and **Add.py** examples. Add your operators like the above examples.
- After adding your code, run the code from the same directory like this.
```console
python MatMul.py
```
or you can also use
```console
python -m unittest MatMul.MatMulTest
```
- If you want to see **Verbose output** of your execution, add `-v` at the end of your command. Like
```console
python MatMul.py -v
```
- It will show which test cases work and which don't, and shows the reason too.
- To run all of the unittests at once, use this.
```console
python -m unittest *py -v
```
- Then update the **[README](https://github.com/ai-techsystems/dnnc-operators/blob/master/README.md)** to show which test case is not working, or if all are working.
- Now look at the code, here are some things, if you need to know about the lines of the implementation.
---
#### Test case for Add operator is given below
```python
import os,sys
# adding "dnnc-operators/swig" folder to the import directory, we will import "dnnc.py" from here
sys.path.append(os.path.abspath('..'+os.path.sep+'..'+os.path.sep+'swig'));
import dnnc as dc # import dnnc
import numpy as np # import numpy to cross check against dnnc
import unittest # to test our testcases
class AddTest(unittest.TestCase):
# This is like __init__ method of class, but for unittest, it just
# declares the variables everytime before testcases are tested.
def setUp(self):
# declare the total size of the tensor
self.len = 24
# use numpy random to generate random numbers
self.np_a = np.random.randn(self.len).astype(np.float32)
self.np_b = np.random.randn(self.len).astype(np.float32)
#self.np_a = np.arange(self.len).astype(np.float32)
#self.np_b = np.arange(self.len).astype(np.float32)
# set the same numbers as input to dnnc, so cross checking happens on same input
self.dc_a = dc.array(list(self.np_a));
self.dc_b = dc.array(list(self.np_b));
  # First test case for 1D
# remember, for unittest module to know which method should be tested, you have to give
  # the test_ prefix to the method
def test_Add1D (self):
# npr is the tensor which stores the result of numpy add of those two input tensors
npr = np.add(self.np_a, self.np_b)
# dcr is the tensor which stores the result of dnnc add of those same input tensors
dcr = dc.add(self.dc_a, self.dc_b)
# assert_allclose checks the two tensor values, with tolerance of
# atol (Absolute tolerance), rtol (Relative tolerance)
np.testing.assert_allclose(npr, np.array(dcr.data()[0]).astype(np.float32),
rtol=1e-3, atol=1e-3)
# Second test case for 2D
def test_Add2D (self):
#for 2d we have to reshape each of np_a, np_b, dc_a, dc_b
np_a = np.reshape(self.np_a, (6,4))
np_b = np.reshape(self.np_b, (6,4))
dc_a = dc.reshape(self.dc_a, (6,4));
dc_b = dc.reshape(self.dc_b, (6,4));
npr = np.add(np_a, np_b);
dcr = dc.add(dc_a, dc_b);
np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),
rtol=1e-3, atol=1e-3)
def test_Add3D (self):
np_a = np.reshape(self.np_a, (2,4,3))
np_b = np.reshape(self.np_b, (2,4,3))
dc_a = dc.reshape(self.dc_a, (2,4,3));
dc_b = dc.reshape(self.dc_b, (2,4,3));
npr = np.add(np_a, np_b);
dcr = dc.add(dc_a, dc_b);
np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),
rtol=1e-3, atol=1e-3)
def test_Add4D (self):
np_a = np.reshape(self.np_a, (2,2,2,3))
np_b = np.reshape(self.np_b, (2,2,2,3))
dc_a = dc.reshape(self.dc_a, (2,2,2,3));
dc_b = dc.reshape(self.dc_b, (2,2,2,3));
npr = np.add(np_a, np_b);
dcr = dc.add(dc_a, dc_b);
np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),
rtol=1e-3, atol=1e-3)
# when the program gets called
if __name__ == '__main__':
unittest.main()
```
---
##### This should give you the idea of how the code is working.<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "core/broadcast.h"
#include "operators/macros.h"
#include "operators/opTypes.h"
#include <memory>
#include <type_traits>
#include <vector>
// we're forced to include tensor.h here, because of limitation on
// template instantiations to generate complete definition of the
// operator. This breaks principle of modularity along with my heart. :-/
//
// ONNX operator reference:
// https://github.com/onnx/onnx/blob/rel-1.5.0/docs/Operators.md
//
#include "core/tensor.h"
namespace dnnc {
/// Abstract base class for all dnnc operators.
/// Template parameters: To = output element type; Ti1, Ti2 = element
/// types of the first and second input tensors.
template <typename To, typename Ti1, typename Ti2> class baseOperator {
protected:
  OPCODE _op;        // opcode identifying the concrete operator kind
  std::string _name; // per-instance name (e.g. the graph node name)
  // Direct access to a tensor's raw memory buffer; used by subclasses to
  // build Eigen maps over tensor storage without copying.
  template <typename T> T *tensorMem(tensor<T> &t) { return t._mem_layout; }

public:
  baseOperator(OPCODE op, std::string name = "") : _op(op), _name(name) {}
  virtual ~baseOperator() {}
  /*!< return name of the operator */
  virtual inline std::string name() { return _name; }
  /*!< return OPCODE of the operator */
  virtual inline OPCODE symbol() { return _op; }

// SWIG does not understand, throws error and stops.
// Warning 325: Nested class not currently supported (is_one_of ignored)
#ifndef SWIG
  /*!< Constrain data types.*/
  // Compile-time membership test: is_one_of<F, T...>::value is true iff
  // F is the same type as one of T... (base case: empty list -> false).
  template <typename...> struct is_one_of {
    static constexpr bool value = false;
  };
  template <typename F, typename S, typename... T>
  struct is_one_of<F, S, T...> {
    static constexpr bool value =
        std::is_same<F, S>::value || is_one_of<F, T...>::value;
  };
  /*!<
  \return True if T is one of the types specified else False
  */
  template <typename Kind, typename... Kinds> bool type_check() {
    return is_one_of<Kind, Kinds...>::value;
  }
#endif

  // Attribute accessors. Concrete operators override only the overloads
  // whose value type matches the attributes they actually support; these
  // defaults report "attribute not present" by returning false.
  virtual bool getAttribute(OPATTR, float &) { return false; }
  virtual bool getAttribute(OPATTR, int &) { return false; }
  virtual bool getAttribute(OPATTR, std::vector<float> &obj) { return false; }
  virtual bool getAttribute(OPATTR, std::vector<std::string> &obj) {
    return false;
  }
  virtual bool getAttribute(OPATTR, std::string &) { return false; }
  virtual bool getAttribute(OPATTR, std::vector<int> &) { return false; }
  virtual bool getAttribute(OPATTR, tensor<int64_t> &obj) { return false; }
  virtual bool getAttribute(OPATTR, tensor<double> &obj) { return false; }
  virtual bool setAttribute(OPATTR, float) { return false; }
  virtual bool setAttribute(OPATTR, int) { return false; }
  virtual bool setAttribute(OPATTR, std::vector<std::string> obj) {
    return false;
  }
  virtual bool setAttribute(OPATTR, std::vector<float> obj) { return false; }
  virtual bool setAttribute(OPATTR, std::string) { return false; }
  virtual bool setAttribute(OPATTR, std::vector<int>) { return false; }
  virtual bool setAttribute(OPATTR, tensor<int64_t>) { return false; }
  virtual bool setAttribute(OPATTR, tensor<double>) { return false; }

  // Logs an error and yields the null tensor; used by the default compute
  // overloads below so operators only override the arities they support.
  tensor<To> NOT_SUPPORTED() {
    SPDLOG_ERROR("operator not supported.");
    return NULL_TENSOR<To>;
  }
  // compute() overloads for 1-, 2- and 3-input operators; defaults fail.
  virtual tensor<To> compute(tensor<Ti1> in1) { return NOT_SUPPORTED(); }
  virtual tensor<To> compute(tensor<Ti1> &in1) { return NOT_SUPPORTED(); }
  virtual tensor<To> compute(tensor<Ti1> in1, tensor<Ti2> in2) {
    return NOT_SUPPORTED();
  }
  virtual tensor<To> compute(tensor<Ti1> in1, tensor<Ti2> in2,
                             tensor<Ti2> in3) {
    return NOT_SUPPORTED();
  }
};
template <typename To, typename Ti1, typename Ti2> struct opCmp {
bool operator()(const baseOperator<To, Ti1, Ti2> &lhs,
const baseOperator<To, Ti1, Ti2> &rhs) {
return lhs.symbol() == rhs.symbol() ? lhs.name() < rhs.name()
: lhs.symbol() < rhs.symbol();
}
};
} // namespace dnnc
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
// Select the Eigen headers appropriate for the build target: the Arduino
// port ships a single amalgamated header.
#if defined(ARDUINO)
#include "Eigen.h"
#else
#include "Eigen/Core"
#include "unsupported/Eigen/CXX11/Tensor"
#endif

using namespace Eigen;

// These macros are used to create a EigenMap object on
// the top of memory allocated in class tensor.
// They help in keeping the memory footprint small.
// check out Eigen documentation
// "Interfacing with raw buffers: the Map class"
// https://eigen.tuxfamily.org/dox/group__TutorialMapClass.html
// NOTE: These macros are valid only on classes inherited
// from baseOperator.

// Map a tensor's flat buffer as a 1 x length() row-major Eigen matrix,
// regardless of the tensor's rank.
#define DNNC_EIGEN_ARRAY_MAP(var, T, t)                                        \
  Map<Matrix<T, 1, Dynamic, RowMajor>> var(this->tensorMem(t), t.length());

// Owning (non-mapped) Eigen vector / matrix type constructors.
#define DNNC_EIGEN_VECTOR_CTOR(T) Matrix<T, 1, Dynamic, RowMajor>
#define DNNC_EIGEN_MATRIX_CTOR(T) Matrix<T, Dynamic, Dynamic, RowMajor>

// Map a rank-1 tensor's buffer as a 1 x shape()[0] Eigen row vector.
#define DNNC_EIGEN_VECTOR(var, T, t)                                           \
  Map<Matrix<T, 1, Dynamic, RowMajor>> var(this->tensorMem(t), t.shape()[0]);

// Map a rank-2 tensor's buffer as a shape()[0] x shape()[1] Eigen matrix.
#define DNNC_EIGEN_MATRIX(var, T, t)                                           \
  Map<Matrix<T, Dynamic, Dynamic, RowMajor>> var(this->tensorMem(t),           \
                                                 t.shape()[0], t.shape()[1]);

// Rank-3 Eigen tensor type, and a map of a rank-3 dnnc tensor's buffer.
#define DNNC_EIGEN_TENSOR(T) Tensor<T, 3, RowMajor>
#define DNNC_EIGEN_TENSOR_MAP(var, T, t)                                       \
  TensorMap<DNNC_EIGEN_TENSOR(T)> var(this->tensorMem(t), t.shape()[0],        \
                                      t.shape()[1], t.shape()[2]);

// Rank-4 Eigen tensor type, and a map of a rank-4 dnnc tensor's buffer.
#define DNNC_EIGEN_TENSOR4D(T) Tensor<T, 4, RowMajor>
#define DNNC_EIGEN_TENSOR4D_MAP(var, T, t)                                     \
  TensorMap<DNNC_EIGEN_TENSOR4D(T)> var(this->tensorMem(t), t.shape()[0],      \
                                        t.shape()[1], t.shape()[2],            \
                                        t.shape()[3]);
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/// ONNX ArgMax operator: returns the indices of the maximum values of the
/// input tensor along attribute `axis`. Supports tensors up to rank 5.
/// To must be an integer type (index output); Ti is the input element type.
///
/// NOTE(review): with keepdims=1 this implementation keeps the reduced
/// axis at its full size and duplicates the winning index along it; the
/// ONNX spec says the reduced axis should become size 1 — confirm intended.
template <typename To, typename Ti>
class ArgMax : public baseOperator<To, Ti, Ti> {
  int _axis = 0;     // axis to reduce over; may be negative (counts from end)
  int _keepdims = 1; // nonzero: keep reduced axis in the output shape

  // Update (maxIndex, maxValue) if `value` is strictly greater; strict `>`
  // keeps the first occurrence of the maximum.
  void updateMax(To index, Ti value, To &maxIndex, Ti &maxValue) {
    if (value > maxValue) {
      maxValue = value;
      maxIndex = index;
    }
  }

public:
  ArgMax(std::string name = "opArgMax")
      : baseOperator<To, Ti, Ti>(opArgMax, name) {}

  /// Read `axis` or `keepdims`; returns false for any other attribute.
  bool getAttribute(OPATTR attrName, int &obj) override {
    if (attrName == attr_axis) {
      obj = _axis;
      return true;
    } else if (attrName == attr_keepdims) {
      obj = _keepdims;
      return true;
    }
    return false;
  }

  /// Set `axis` or `keepdims`; returns false for any other attribute.
  bool setAttribute(OPATTR attrName, int obj) override {
    if (attrName == attr_axis) {
      _axis = obj;
      return true;
    } else if (attrName == attr_keepdims) {
      _keepdims = obj;
      return true;
    }
    return false;
  }

  /// Compute argmax along `_axis`. Returns NULL_TENSOR on a non-integer
  /// output type or an out-of-range axis.
  tensor<To> compute(tensor<Ti> input) override {
    if (!(this->template type_check<To, short int, int, long int>())) {
      SPDLOG_ERROR("Constrain output tensor type to int type.");
      return NULL_TENSOR<To>;
    }
    int rank = input.rank();
    // Valid axis range is [-rank, rank-1], as in ONNX.
    if (_axis < -rank || _axis > rank - 1) {
      SPDLOG_ERROR("axis " + std::to_string(_axis) +
                   " is out of bounds for tensor.");
      return NULL_TENSOR<To>;
    }
    size_t axis = _axis + (_axis < 0 ? rank : 0); // ascertain positive number.
    std::vector<DIMENSION> axes = input.shape();
    // Per-dimension extents; 0 for dimensions beyond the tensor's rank.
    // The loops below use `i == 0 || i < axisN` so a 0 extent still runs once.
    size_t axis0 = rank > 0 ? axes[0] : 0;
    size_t axis1 = rank > 1 ? axes[1] : 0;
    size_t axis2 = rank > 2 ? axes[2] : 0;
    size_t axis3 = rank > 3 ? axes[3] : 0;
    size_t axis4 = rank > 4 ? axes[4] : 0;
    // Output shape: full input shape when keepdims, otherwise the input
    // shape with `axis` removed (rank-1 input collapses to shape {1}).
    std::vector<DIMENSION> new_shape;
    if (_keepdims) {
      new_shape = axes;
    } else {
      if (input.rank() == 1) {
        new_shape.push_back(1);
      } else {
        for (size_t x = 0; x < axes.size(); x++) {
          if (x != axis) {
            new_shape.push_back(axes[x]);
          }
        }
      }
    }
    tensor<To> result(new_shape);
    // One explicit loop nest per reduction axis: iterate every position of
    // the non-reduced dimensions, scan along `axis` tracking the running
    // max, then store the winning index (replicated along `axis` when
    // keepdims is set).
    if (axis == 0) {
      for (size_t j = 0; j == 0 || j < axis1; j++) {
        for (size_t k = 0; k == 0 || k < axis2; k++) {
          for (size_t l = 0; l == 0 || l < axis3; l++) {
            for (size_t m = 0; m == 0 || m < axis4; m++) {
              Ti maxValue;
              To maxIndex;
              for (size_t i = 0; i == 0 || i < axis0; i++) {
                if (i == 0) {
                  maxValue = input(i, j, k, l, m);
                  maxIndex = 0;
                } else {
                  Ti value =
                      input(i, j, k, l,
                            m); // TODO: use input.operator[] for performance
                  updateMax(i, value, maxIndex, maxValue);
                }
              }
              if (_keepdims) {
                for (size_t i = 0; i < axis0; i++)
                  result.load(maxIndex, i, j, k, l, m);
              } else {
                result.load(maxIndex, j, k, l, m);
              }
            }
          }
        }
      }
      return result;
    } else if (axis == 1) {
      for (size_t i = 0; i == 0 || i < axis0; i++) {
        for (size_t k = 0; k == 0 || k < axis2; k++) {
          for (size_t l = 0; l == 0 || l < axis3; l++) {
            for (size_t m = 0; m == 0 || m < axis4; m++) {
              Ti maxValue;
              To maxIndex;
              for (size_t j = 0; j == 0 || j < axis1; j++) {
                if (j == 0) {
                  maxValue = input(i, j, k, l, m);
                  maxIndex = 0;
                } else {
                  Ti value =
                      input(i, j, k, l,
                            m); // TODO: use input.operator[] for performance
                  updateMax(j, value, maxIndex, maxValue);
                }
              }
              if (_keepdims) {
                for (size_t j = 0; j < axis1; j++)
                  result.load(maxIndex, i, j, k, l, m);
              } else {
                result.load(maxIndex, i, k, l, m);
              }
            }
          }
        }
      }
      return result;
    } else if (axis == 2) {
      for (size_t i = 0; i == 0 || i < axis0; i++) {
        for (size_t j = 0; j == 0 || j < axis1; j++) {
          for (size_t l = 0; l == 0 || l < axis3; l++) {
            for (size_t m = 0; m == 0 || m < axis4; m++) {
              Ti maxValue;
              To maxIndex;
              for (size_t k = 0; k == 0 || k < axis2; k++) {
                if (k == 0) {
                  maxValue = input(i, j, k, l, m);
                  maxIndex = 0;
                } else {
                  Ti value =
                      input(i, j, k, l,
                            m); // TODO: use input.operator[] for performance
                  updateMax(k, value, maxIndex, maxValue);
                }
              }
              if (_keepdims) {
                for (size_t k = 0; k < axis2; k++)
                  result.load(maxIndex, i, j, k, l, m);
              } else {
                result.load(maxIndex, i, j, l, m);
              }
            }
          }
        }
      }
      return result;
    } else if (axis == 3) {
      for (size_t i = 0; i == 0 || i < axis0; i++) {
        for (size_t j = 0; j == 0 || j < axis1; j++) {
          for (size_t k = 0; k == 0 || k < axis2; k++) {
            for (size_t m = 0; m == 0 || m < axis4; m++) {
              Ti maxValue;
              To maxIndex;
              for (size_t l = 0; l == 0 || l < axis3; l++) {
                if (l == 0) {
                  maxValue = input(i, j, k, l, m);
                  maxIndex = 0;
                } else {
                  Ti value =
                      input(i, j, k, l,
                            m); // TODO: use input.operator[] for performance
                  updateMax(l, value, maxIndex, maxValue);
                }
              }
              if (_keepdims) {
                for (size_t l = 0; l < axis3; l++)
                  result.load(maxIndex, i, j, k, l, m);
              } else {
                result.load(maxIndex, i, j, k, m);
              }
            }
          }
        }
      }
      return result;
    } else if (axis == 4) {
      for (size_t i = 0; i == 0 || i < axis0; i++) {
        for (size_t j = 0; j == 0 || j < axis1; j++) {
          for (size_t k = 0; k == 0 || k < axis2; k++) {
            for (size_t l = 0; l == 0 || l < axis3; l++) {
              Ti maxValue;
              To maxIndex;
              for (size_t m = 0; m == 0 || m < axis4; m++) {
                if (m == 0) {
                  maxValue = input(i, j, k, l, m);
                  maxIndex = 0;
                } else {
                  Ti value =
                      input(i, j, k, l,
                            m); // TODO: use input.operator[] for performance
                  updateMax(m, value, maxIndex, maxValue);
                }
              }
              if (_keepdims) {
                for (size_t m = 0; m < axis4; m++)
                  result.load(maxIndex, i, j, k, l, m);
              } else {
                result.load(maxIndex, i, j, k, l);
              }
            }
          }
        }
      }
      return result;
    } else {
      // Unreachable for rank <= 5 inputs (axis was bounds-checked above).
      SPDLOG_ERROR("axis " + std::to_string(_axis) +
                   " more than 5 for ArgMax is not supported.");
    }
    return NULL_TENSOR<To>;
  }
};
} // namespace dnnc
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import numpy as np
import tensorflow as tf
import deepC.dnnc as dc
import time
class NPMatMul():
    """NumPy matmul benchmark fixture: holds two random n*n float32
    operand buffers and multiplies them as NxN matrices on demand."""

    def __init__(self, n):
        self.N = n
        # Flat buffers, reshaped to (N, N) at multiply time, mirroring the
        # deepC benchmark path.
        self.np_a = np.random.randn(n*n).astype(np.float32)
        self.np_b = np.random.randn(n*n).astype(np.float32)

    def MatMul3D(self):
        """Multiply the two operands as NxN matrices and return the product.

        The original discarded the result (dead store); returning it is
        backward compatible and makes the method verifiable.
        """
        np_a = np.reshape(self.np_a, (self.N, self.N))
        np_b = np.reshape(self.np_b, (self.N, self.N))
        return np.matmul(np_a, np_b)
class DCMatMul():
    """deepC matmul benchmark fixture: two random NxN dnnc tensors."""

    def __init__(self, n):
        self.N = n
        self.dc_a = dc.random(n, n)
        self.dc_b = dc.random(n, n)

    def MatMul3D(self):
        # Reshape both operands and multiply; timing callers only care
        # about the work performed, not the product, so it is discarded.
        side = (self.N, self.N)
        lhs = dc.reshape(self.dc_a, side)
        rhs = dc.reshape(self.dc_b, side)
        dc.matmul(lhs, rhs)
class TFMatMul():
    """TensorFlow matmul benchmark fixture: two random NxN variables,
    multiplied eagerly (TF1 eager-execution API)."""

    def __init__(self, n):
        tf.enable_eager_execution()
        self.N = n
        init_a = tf.random_uniform([n, n], dtype=tf.float32)
        init_b = tf.random_uniform([n, n], dtype=tf.float32)
        self.tf_a = tf.get_variable("tf_a", initializer=init_a)
        self.tf_b = tf.get_variable("tf_b", initializer=init_b)

    def MatMul3D(self):
        # Product is discarded; only the elapsed time matters to callers.
        product = tf.matmul(self.tf_a, self.tf_b)
def fmt(n):
    """Render a number with two significant digits (e.g. 200 -> '2e+02')."""
    return format(n, ".2g")
if __name__ == '__main__':
    # Benchmark driver: for each size N, time one NxN matmul per framework
    # and print absolute times plus deepC's time relative to numpy and TF.
    # (Removed a dead `N=200` assignment that was immediately overwritten
    # by the loop variable, and stray trailing semicolons.)
    print ("Matrix(NxN)  DC      NP      TF     DC/NP   DC/TF")
    for N in [20, 50, 200, 500, 2000, 5000]:
        nmpy = NPMatMul(N)
        start = time.time()
        nmpy.MatMul3D()
        np_time = time.time() - start

        tfmm = TFMatMul(N)
        start = time.time()
        tfmm.MatMul3D()
        tf_time = time.time() - start

        dcmm = DCMatMul(N)
        start = time.time()
        dcmm.MatMul3D()
        dc_time = time.time() - start

        print (N, fmt(dc_time), fmt(np_time), fmt(tf_time), fmt(dc_time/np_time), fmt(dc_time/tf_time))
<file_sep>import onnx
import caffe2.python.onnx.backend as backend
import numpy as np
def infer_gesture(input_file):
    """Classify one recorded gesture CSV with asl_model.onnx and print the
    predicted label ('hi' or 'sup') together with the raw model output."""
    model_file = "asl_model.onnx"
    with open(input_file, "r") as fp:
        raw = fp.read().replace('\n', ',')[:-1]
    values = [float(token) for token in raw.split(',')]
    # normalize: per 6-value record, first three channels from [-4, 4] and
    # last three from [-2000, 2000] into [0, 1]
    for base in range(0, 714, 6):
        for offset in (0, 1, 2):
            values[base + offset] = (values[base + offset] + 4.) / 8.
        for offset in (3, 4, 5):
            values[base + offset] = (values[base + offset] + 2000.) / 4000.
    gesture = np.array(values, dtype=np.float32)
    # Load the ONNX model and check that the IR is well formed.
    model = onnx.load(model_file)
    onnx.checker.check_model(model)
    # Run inference on CPU via the caffe2 backend.
    rep = backend.prepare(model, device="CPU")
    outputs = rep.run(gesture)
    print(input_file + ": " + ('hi', 'sup')[np.argmax(outputs[0])], end='')
    print("\t", outputs[0], end='')
    print("")
#"./gesture.data",
# Recorded gesture samples to classify: six 'hi' and six 'sup' captures.
input_files = [
    "gestures/hi00.csv", "gestures/hi01.csv", "gestures/hi02.csv",
    "gestures/hi03.csv", "gestures/hi04.csv", "gestures/hi05.csv",
    "gestures/sup00.csv", "gestures/sup01.csv", "gestures/sup02.csv",
    "gestures/sup03.csv", "gestures/sup04.csv", "gestures/sup05.csv",
]

for sample_path in input_files:
    infer_gesture(sample_path)
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#include "operators/opTypes.h"
namespace dnnc {
/*! Map an ONNX attribute name string to its OPATTR enum value.
 *  Unknown names map to attr_invalid.
 *
 *  A single static table replaces the original chain of ~85 string
 *  comparisons: one O(1)-average hash lookup instead of an O(n) scan,
 *  and the name/enum pairing is data instead of control flow. */
OPATTR getAttrName(std::string attrStr) {
  // Built once on first call; immutable thereafter.
  static const std::unordered_map<std::string, OPATTR> attrMap = {
      {"activation_alpha", attr_activation_alpha},
      {"activation_beta", attr_activation_beta},
      {"activations", attr_activations},
      {"alpha", attr_alpha},
      {"auto_pad", attr_auto_pad},
      {"axes", attr_axes},
      {"axis", attr_axis},
      {"batch_axis", attr_batch_axis},
      {"beta", attr_beta},
      {"bias", attr_bias},
      {"blocksize", attr_blocksize},
      {"body", attr_body},
      {"case_change_action", attr_case_change_action},
      {"ceil_mode", attr_ceil_mode},
      {"center_point_box", attr_center_point_box},
      {"clip", attr_clip},
      {"count_include_pad", attr_count_include_pad},
      {"detect_negative", attr_detect_negative},
      {"detect_positive", attr_detect_positive},
      {"dilations", attr_dilations},
      {"direction", attr_direction},
      {"dtype", attr_dtype},
      {"else_branch", attr_else_branch},
      {"epsilon", attr_epsilon},
      {"exclusive", attr_exclusive},
      {"fmod", attr_fmod},
      {"gamma", attr_gamma},
      {"group", attr_group},
      {"hidden_size", attr_hidden_size},
      {"high", attr_high},
      {"input_forget", attr_input_forget},
      {"is_case_sensitive", attr_is_case_sensitive},
      {"k", attr_k},
      {"keepdims", attr_keepdims},
      {"kernel_shape", attr_kernel_shape},
      {"lambd", attr_lambd},
      // NOTE(review): ONNX TopK spells this attribute "largest"; the
      // "larges" key matches the existing attr_larges enum — confirm
      // against the ONNX spec before renaming anywhere.
      {"larges", attr_larges},
      {"linear_before_reset", attr_linear_before_reset},
      {"locale", attr_locale},
      {"low", attr_low},
      {"max_gram_length", attr_max_gram_length},
      {"max_skip_count", attr_max_skip_count},
      {"mean", attr_mean},
      {"min_gram_length", attr_min_gram_length},
      {"mode", attr_mode},
      {"momentum", attr_momentum},
      {"ngram_counts", attr_ngram_counts},
      {"ngram_indexes", attr_ngram_indexes},
      {"num_scan_inputs", attr_num_scan_inputs},
      {"output_height", attr_output_height},
      {"output_padding", attr_output_padding},
      {"output_shape", attr_output_shape},
      {"output_width", attr_output_width},
      {"p", attr_p},
      {"pads", attr_pads},
      {"perm", attr_perm},
      {"pool_int64s", attr_pool_int64s},
      {"pool_strings", attr_pool_strings},
      {"pooled_shape", attr_pooled_shape},
      {"ratio", attr_ratio},
      {"reverse", attr_reverse},
      {"sample_size", attr_sample_size},
      {"sampling_ratio", attr_sampling_ratio},
      {"scale", attr_scale},
      {"scan_input_axes", attr_scan_input_axes},
      {"scan_input_directions", attr_scan_input_directions},
      {"scan_output_axes", attr_scan_output_axes},
      {"scan_output_directions", attr_scan_output_directions},
      {"seed", attr_seed},
      {"shape", attr_shape},
      {"size", attr_size},
      {"sorted", attr_sorted},
      {"spatial_scale", attr_spatial_scale},
      {"split", attr_split},
      {"stopwords", attr_stopwords},
      {"storage_order", attr_storage_order},
      {"strides", attr_strides},
      {"then_branch", attr_then_branch},
      {"time_axis", attr_time_axis},
      {"to", attr_to},
      {"transA", attr_transA},
      {"transB", attr_transB},
      {"value", attr_value},
      {"weights", attr_weights},
      {"invalid", attr_invalid},
  };
  auto it = attrMap.find(attrStr);
  return it == attrMap.end() ? attr_invalid : it->second;
}
/*! Map an OPATTR enum value back to its ONNX attribute name string.
 *  attr_invalid and unknown values map to "invalid".
 *
 *  The original switch carried an unreachable `break;` after every
 *  `return` (dead code); a static table keeps the mapping as data. */
std::string getAttrNameStr(OPATTR attr) {
  // std::map (ordered) is used because unscoped enums always have
  // operator<; built once on first call.
  static const std::map<OPATTR, std::string> nameMap = {
      {attr_activation_alpha, "activation_alpha"},
      {attr_activation_beta, "activation_beta"},
      {attr_activations, "activations"},
      {attr_alpha, "alpha"},
      {attr_auto_pad, "auto_pad"},
      {attr_axes, "axes"},
      {attr_axis, "axis"},
      {attr_batch_axis, "batch_axis"},
      {attr_beta, "beta"},
      {attr_bias, "bias"},
      {attr_blocksize, "blocksize"},
      {attr_body, "body"},
      {attr_case_change_action, "case_change_action"},
      {attr_ceil_mode, "ceil_mode"},
      {attr_center_point_box, "center_point_box"},
      {attr_clip, "clip"},
      {attr_count_include_pad, "count_include_pad"},
      {attr_detect_negative, "detect_negative"},
      {attr_detect_positive, "detect_positive"},
      {attr_dilations, "dilations"},
      {attr_direction, "direction"},
      {attr_dtype, "dtype"},
      {attr_else_branch, "else_branch"},
      {attr_epsilon, "epsilon"},
      {attr_exclusive, "exclusive"},
      {attr_fmod, "fmod"},
      {attr_gamma, "gamma"},
      {attr_group, "group"},
      {attr_hidden_size, "hidden_size"},
      {attr_high, "high"},
      {attr_input_forget, "input_forget"},
      {attr_is_case_sensitive, "is_case_sensitive"},
      {attr_k, "k"},
      {attr_keepdims, "keepdims"},
      {attr_kernel_shape, "kernel_shape"},
      {attr_lambd, "lambd"},
      {attr_larges, "larges"},
      {attr_linear_before_reset, "linear_before_reset"},
      {attr_locale, "locale"},
      {attr_low, "low"},
      {attr_max_gram_length, "max_gram_length"},
      {attr_max_skip_count, "max_skip_count"},
      {attr_mean, "mean"},
      {attr_min_gram_length, "min_gram_length"},
      {attr_mode, "mode"},
      {attr_momentum, "momentum"},
      {attr_ngram_counts, "ngram_counts"},
      {attr_ngram_indexes, "ngram_indexes"},
      {attr_num_scan_inputs, "num_scan_inputs"},
      {attr_output_height, "output_height"},
      {attr_output_padding, "output_padding"},
      {attr_output_shape, "output_shape"},
      {attr_output_width, "output_width"},
      {attr_p, "p"},
      {attr_pads, "pads"},
      {attr_perm, "perm"},
      {attr_pool_int64s, "pool_int64s"},
      {attr_pool_strings, "pool_strings"},
      {attr_pooled_shape, "pooled_shape"},
      {attr_ratio, "ratio"},
      {attr_reverse, "reverse"},
      {attr_sample_size, "sample_size"},
      {attr_sampling_ratio, "sampling_ratio"},
      {attr_scale, "scale"},
      {attr_scan_input_axes, "scan_input_axes"},
      {attr_scan_input_directions, "scan_input_directions"},
      {attr_scan_output_axes, "scan_output_axes"},
      {attr_scan_output_directions, "scan_output_directions"},
      {attr_seed, "seed"},
      {attr_shape, "shape"},
      {attr_size, "size"},
      {attr_sorted, "sorted"},
      {attr_spatial_scale, "spatial_scale"},
      {attr_split, "split"},
      {attr_stopwords, "stopwords"},
      {attr_storage_order, "storage_order"},
      {attr_strides, "strides"},
      {attr_then_branch, "then_branch"},
      {attr_time_axis, "time_axis"},
      {attr_to, "to"},
      {attr_transA, "transA"},
      {attr_transB, "transB"},
      {attr_value, "value"},
      {attr_weights, "weights"},
  };
  auto it = nameMap.find(attr);
  return it == nameMap.end() ? "invalid" : it->second;
}
/*! Map an ONNX operator name string to its OPCODE enum value.
 *  Unknown names map to opInvalid.
 *
 *  A single static table replaces the original chain of ~150 string
 *  comparisons: one O(1)-average hash lookup instead of an O(n) scan. */
OPCODE getOpCode(std::string opCodeStr) {
  // Built once on first call; immutable thereafter.
  static const std::unordered_map<std::string, OPCODE> opMap = {
      {"Abs", opAbs},
      {"Acos", opAcos},
      {"Acosh", opAcosh},
      {"Add", opAdd},
      {"And", opAnd},
      {"ArgMax", opArgMax},
      {"ArgMin", opArgMin},
      {"Asin", opAsin},
      {"Asinh", opAsinh},
      {"Atan", opAtan},
      {"Atanh", opAtanh},
      {"AveragePool", opAveragePool},
      {"BatchNormalization", opBatchNormalization},
      {"BitShift", opBitShift},
      {"BitwiseAnd", opBitwiseAnd},
      {"BitwiseOr", opBitwiseOr},
      {"BitwiseXor", opBitwiseXor},
      {"Cast", opCast},
      {"Ceil", opCeil},
      {"Clip", opClip},
      {"Compress", opCompress},
      {"Concat", opConcat},
      {"Constant", opConstant},
      {"ConstantOfShape", opConstantOfShape},
      {"Conv", opConv},
      {"ConvInteger", opConvInteger},
      {"ConvTranspose", opConvTranspose},
      {"Cos", opCos},
      {"Cosh", opCosh},
      {"CumSum", opCumSum},
      {"DepthToSpace", opDepthToSpace},
      {"DequantizeLinear", opDequantizeLinear},
      {"Div", opDiv},
      {"Dropout", opDropout},
      {"Elu", opElu},
      {"Equal", opEqual},
      {"Erf", opErf},
      {"Exp", opExp},
      {"Expand", opExpand},
      {"EyeLike", opEyeLike},
      {"Flatten", opFlatten},
      {"Floor", opFloor},
      {"FloorDiv", opFloorDiv},
      {"GRU", opGRU},
      {"Gather", opGather},
      {"Gemm", opGemm},
      {"GlobalAveragePool", opGlobalAveragePool},
      {"GlobalLpPool", opGlobalLpPool},
      {"GlobalMaxPool", opGlobalMaxPool},
      {"Greater", opGreater},
      {"GreaterEqual", opGreaterEqual},
      {"HardSigmoid", opHardSigmoid},
      {"Hardmax", opHardmax},
      {"Identity", opIdentity},
      {"If", opIf},
      {"InstanceNormalization", opInstanceNormalization},
      {"IsInf", opIsInf},
      {"IsNaN", opIsNaN},
      {"LRN", opLRN},
      {"LSTM", opLSTM},
      {"LeakyRelu", opLeakyRelu},
      {"Less", opLess},
      {"LessEqual", opLessEqual},
      {"Log", opLog},
      {"LogSoftmax", opLogSoftmax},
      {"Loop", opLoop},
      {"LpNormalization", opLpNormalization},
      {"LpPool", opLpPool},
      {"MatMul", opMatMul},
      {"MatMulInteger", opMatMulInteger},
      {"Max", opMax},
      {"MaxPool", opMaxPool},
      {"MaxRoiPool", opMaxRoiPool},
      {"MaxUnpool", opMaxUnpool},
      {"Mean", opMean},
      {"Min", opMin},
      {"Mod", opMod},
      {"Mul", opMul},
      {"Multinomial", opMultinomial},
      {"Neg", opNeg},
      {"NonMaxSuppression", opNonMaxSuppression},
      {"NonZero", opNonZero},
      {"Not", opNot},
      {"NotEqual", opNotEqual},
      {"OneHot", opOneHot},
      {"Or", opOr},
      {"PRelu", opPRelu},
      {"Pad", opPad},
      {"Pow", opPow},
      {"QLinearConv", opQLinearConv},
      {"QLinearMatMul", opQLinearMatMul},
      {"QuantizeLinear", opQuantizeLinear},
      {"RNN", opRNN},
      {"RandomNormal", opRandomNormal},
      {"RandomNormalLike", opRandomNormalLike},
      {"RandomUniform", opRandomUniform},
      {"RandomUniformLike", opRandomUniformLike},
      {"Reciprocal", opReciprocal},
      {"ReduceL1", opReduceL1},
      {"ReduceL2", opReduceL2},
      {"ReduceLogSum", opReduceLogSum},
      {"ReduceLogSumExp", opReduceLogSumExp},
      {"ReduceMax", opReduceMax},
      {"ReduceMean", opReduceMean},
      {"ReduceMin", opReduceMin},
      {"ReduceProd", opReduceProd},
      {"ReduceSum", opReduceSum},
      {"ReduceSumSquare", opReduceSumSquare},
      {"Relu", opRelu},
      {"Remainder", opRemainder},
      {"Reshape", opReshape},
      {"Resize", opResize},
      {"ReverseSequence", opReverseSequence},
      {"RoiAlign", opRoiAlign},
      {"Round", opRound},
      {"Scan", opScan},
      {"Scatter", opScatter},
      {"Selu", opSelu},
      {"Shape", opShape},
      {"Shrink", opShrink},
      {"Sigmoid", opSigmoid},
      {"Sign", opSign},
      {"Sin", opSin},
      {"Sinh", opSinh},
      {"Size", opSize},
      {"Slice", opSlice},
      {"SetSlice", opSetSlice},
      {"Softmax", opSoftmax},
      {"Softplus", opSoftplus},
      {"Softsign", opSoftsign},
      {"SpaceToDepth", opSpaceToDepth},
      {"Split", opSplit},
      {"Sqrt", opSqrt},
      {"Squeeze", opSqueeze},
      {"StringNormalizer", opStringNormalizer},
      {"Sub", opSub},
      {"Sum", opSum},
      {"Tan", opTan},
      {"Tanh", opTanh},
      {"TfIdfVectorizer", opTfIdfVectorizer},
      {"ThresholdedRelu", opThresholdedRelu},
      {"Tile", opTile},
      {"TopK", opTopK},
      {"Transpose", opTranspose},
      {"TrueDiv", opTrueDiv},
      {"Unsqueeze", opUnsqueeze},
      {"Upsample", opUpsample},
      {"Where", opWhere},
      {"Xor", opXor},
      {"Invalid", opInvalid},
  };
  auto it = opMap.find(opCodeStr);
  return it == opMap.end() ? opInvalid : it->second;
}
/*! Map an OPCODE enum value back to its ONNX operator name string.
 *  opInvalid and unknown values map to "Invalid".
 *
 *  The original switch carried an unreachable `break;` after every
 *  `return` (dead code); a static table keeps the mapping as data. */
std::string getOpCodeStr(OPCODE opCode) {
  // std::map (ordered) is used because unscoped enums always have
  // operator<; built once on first call.
  static const std::map<OPCODE, std::string> strMap = {
      {opAbs, "Abs"},
      {opAcos, "Acos"},
      {opAcosh, "Acosh"},
      {opAdd, "Add"},
      {opAnd, "And"},
      {opArgMax, "ArgMax"},
      {opArgMin, "ArgMin"},
      {opAsin, "Asin"},
      {opAsinh, "Asinh"},
      {opAtan, "Atan"},
      {opAtanh, "Atanh"},
      {opAveragePool, "AveragePool"},
      {opBatchNormalization, "BatchNormalization"},
      {opBitShift, "BitShift"},
      {opBitwiseAnd, "BitwiseAnd"},
      {opBitwiseOr, "BitwiseOr"},
      {opBitwiseXor, "BitwiseXor"},
      {opCast, "Cast"},
      {opCeil, "Ceil"},
      {opClip, "Clip"},
      {opCompress, "Compress"},
      {opConcat, "Concat"},
      {opConstant, "Constant"},
      {opConstantOfShape, "ConstantOfShape"},
      {opConv, "Conv"},
      {opConvInteger, "ConvInteger"},
      {opConvTranspose, "ConvTranspose"},
      {opCos, "Cos"},
      {opCosh, "Cosh"},
      {opCumSum, "CumSum"},
      {opDepthToSpace, "DepthToSpace"},
      {opDequantizeLinear, "DequantizeLinear"},
      {opDiv, "Div"},
      {opDropout, "Dropout"},
      {opElu, "Elu"},
      {opEqual, "Equal"},
      {opErf, "Erf"},
      {opExp, "Exp"},
      {opExpand, "Expand"},
      {opEyeLike, "EyeLike"},
      {opFlatten, "Flatten"},
      {opFloor, "Floor"},
      {opFloorDiv, "FloorDiv"},
      {opGRU, "GRU"},
      {opGather, "Gather"},
      {opGemm, "Gemm"},
      {opGlobalAveragePool, "GlobalAveragePool"},
      {opGlobalLpPool, "GlobalLpPool"},
      {opGlobalMaxPool, "GlobalMaxPool"},
      {opGreater, "Greater"},
      {opGreaterEqual, "GreaterEqual"},
      {opHardSigmoid, "HardSigmoid"},
      {opHardmax, "Hardmax"},
      {opIdentity, "Identity"},
      {opIf, "If"},
      {opInstanceNormalization, "InstanceNormalization"},
      {opIsInf, "IsInf"},
      {opIsNaN, "IsNaN"},
      {opLRN, "LRN"},
      {opLSTM, "LSTM"},
      {opLeakyRelu, "LeakyRelu"},
      {opLess, "Less"},
      {opLessEqual, "LessEqual"},
      {opLog, "Log"},
      {opLogSoftmax, "LogSoftmax"},
      {opLoop, "Loop"},
      {opLpNormalization, "LpNormalization"},
      {opLpPool, "LpPool"},
      {opMatMul, "MatMul"},
      {opMatMulInteger, "MatMulInteger"},
      {opMax, "Max"},
      {opMaxPool, "MaxPool"},
      {opMaxRoiPool, "MaxRoiPool"},
      {opMaxUnpool, "MaxUnpool"},
      {opMean, "Mean"},
      {opMin, "Min"},
      {opMod, "Mod"},
      {opMul, "Mul"},
      {opMultinomial, "Multinomial"},
      {opNeg, "Neg"},
      {opNonMaxSuppression, "NonMaxSuppression"},
      {opNonZero, "NonZero"},
      {opNot, "Not"},
      {opNotEqual, "NotEqual"},
      {opOneHot, "OneHot"},
      {opOr, "Or"},
      {opPRelu, "PRelu"},
      {opPad, "Pad"},
      {opPow, "Pow"},
      {opQLinearConv, "QLinearConv"},
      {opQLinearMatMul, "QLinearMatMul"},
      {opQuantizeLinear, "QuantizeLinear"},
      {opRNN, "RNN"},
      {opRandomNormal, "RandomNormal"},
      {opRandomNormalLike, "RandomNormalLike"},
      {opRandomUniform, "RandomUniform"},
      {opRandomUniformLike, "RandomUniformLike"},
      {opReciprocal, "Reciprocal"},
      {opReduceL1, "ReduceL1"},
      {opReduceL2, "ReduceL2"},
      {opReduceLogSum, "ReduceLogSum"},
      {opReduceLogSumExp, "ReduceLogSumExp"},
      {opReduceMax, "ReduceMax"},
      {opReduceMean, "ReduceMean"},
      {opReduceMin, "ReduceMin"},
      {opReduceProd, "ReduceProd"},
      {opReduceSum, "ReduceSum"},
      {opReduceSumSquare, "ReduceSumSquare"},
      {opRelu, "Relu"},
      {opRemainder, "Remainder"},
      {opReshape, "Reshape"},
      {opResize, "Resize"},
      {opReverseSequence, "ReverseSequence"},
      {opRoiAlign, "RoiAlign"},
      {opRound, "Round"},
      {opScan, "Scan"},
      {opScatter, "Scatter"},
      {opSelu, "Selu"},
      {opShape, "Shape"},
      {opShrink, "Shrink"},
      {opSigmoid, "Sigmoid"},
      {opSign, "Sign"},
      {opSin, "Sin"},
      {opSinh, "Sinh"},
      {opSize, "Size"},
      {opSlice, "Slice"},
      {opSetSlice, "SetSlice"},
      {opSoftmax, "Softmax"},
      {opSoftplus, "Softplus"},
      {opSoftsign, "Softsign"},
      {opSpaceToDepth, "SpaceToDepth"},
      {opSplit, "Split"},
      {opSqrt, "Sqrt"},
      {opSqueeze, "Squeeze"},
      {opStringNormalizer, "StringNormalizer"},
      {opSub, "Sub"},
      {opSum, "Sum"},
      {opTan, "Tan"},
      {opTanh, "Tanh"},
      {opTfIdfVectorizer, "TfIdfVectorizer"},
      {opThresholdedRelu, "ThresholdedRelu"},
      {opTile, "Tile"},
      {opTopK, "TopK"},
      {opTranspose, "Transpose"},
      {opTrueDiv, "TrueDiv"},
      {opUnsqueeze, "Unsqueeze"},
      {opUpsample, "Upsample"},
      {opWhere, "Where"},
      {opXor, "Xor"},
  };
  auto it = strMap.find(opCode);
  return it == strMap.end() ? "Invalid" : it->second;
}
} // namespace dnnc
#ifdef DNNC_OPTYPES_TEST
#include <iostream>
using namespace dnnc;
// Tiny smoke test: print the string form of one attribute enum.
int main() {
  const std::string attrName = getAttrNameStr(attr_transA);
  std::cout << attrName;
  return 0;
}
#endif
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! \f$ y=scale*\frac{x-mean}{\sqrt{variance+epsilon}} \f$
where mean and variance are computed per instance per channel (C).
The formula for Mean is given by:
\f$ \mu = \frac{1}{n}\sum_{i=1}^{n} x_{i} \f$ */
/*! This can be calculated in a single pass through all the elements.*/
/*! The formula for Variance is given by:
\f$ var(X) = \frac{1}{n}\sum_{i=1}^{n} (x_{i}-\mu)^{2} \f$ */
/*! According to this Mean of the elements in channel is prerequisite for
Variance calculation. A little bit of maths will tell you that mean is not
required for Variance they can be calculated simultaneously.*/
/*! \f$ var(X) = \frac{1}{n}\sum_{i=1}^{n} (x_{i}-\mu)^{2} =
\frac{1}{n}\sum_{i=1}^{n} (x_{i}^{2}-2\mu x_{i}+\mu^{2})=
\frac{1}{n}\sum_{i=1}^{n}x_{i}^{2}-\frac{2\mu}{n}\sum_{i=1}^{n} x_{i}+
\mu\frac{n}{n} = \frac{1}{n}\sum_{i=1}^{n}x_{i}^{2} - \mu^2 \f$ */
/*! And this formulation became part of dnn compiler operator implementation.
* The operator is O(n) where n = Number of elements in the tensor =
* N*C*D1…*Dk.*/
template <typename T>
class InstanceNormalization : public baseOperator<T, T, T> {
protected:
  float epsilon = 1e-05; /*!< In case variance goes to zero and to avoid
                                    division by zero. */

public:
  /*! \param name    operator instance name, forwarded to the base operator.
   *  \param epsilon stabilizer added to the variance before the sqrt. */
  InstanceNormalization(std::string name = "opInstanceNormalization",
                        float epsilon = 1e-05)
      // Fix: `name` was previously accepted but never forwarded, so every
      // instance silently lost its name.
      : baseOperator<T, T, T>(opInstanceNormalization, name) {
    this->epsilon = epsilon;
  }
  /*! Read a float attribute; only attr_epsilon is supported. */
  bool getAttribute(OPATTR attrName, float &obj) override {
    if (attrName == attr_epsilon) {
      obj = epsilon;
      return true;
    }
    return false;
  }
  /*! Write a float attribute; only attr_epsilon is supported. */
  bool setAttribute(OPATTR attrName, float obj) override {
    if (attrName == attr_epsilon) {
      epsilon = obj;
      return true;
    }
    return false;
  }
  /*! Normalize each (instance, channel) slab of the input to zero mean and
   *  unit variance, then apply the per-channel affine transform
   *  scale[c] * x_hat + B[c]. Single pass per channel using the
   *  E[x^2] - E[x]^2 variance identity (see the class-level notes).
   *  Returns NULL_TENSOR on type or shape mismatch. */
  tensor<T>
  compute(tensor<T>
              input /*!< [float,double]: ND tensor of shape ( NxCxD1xD2…Dk ).*/,
          tensor<T> &scale /*!< 1D vector of dimension C.*/,
          tensor<T> &B /*!< : 1D vector of dimension C.*/) {
    if (!(this->template type_check<T, float, double>())) {
      SPDLOG_ERROR("Constrain input and output types to float tensors.");
      return NULL_TENSOR<T>;
    }
    tensor<T> result(input.shape(), input.name());
    std::vector<size_t> original_shape = input.shape();
    // scale and B must carry exactly one value per channel.
    if ((input.shape()[1] != scale.shape()[0]) ||
        (input.shape()[1] != B.shape()[0])) {
      SPDLOG_ERROR("Inappropriate tensor dimenions");
      return NULL_TENSOR<T>;
    }
    // Collapse the spatial dims D1..Dk into one axis of length `size`
    // so each channel can be walked with a single inner loop.
    // NOTE(review): `input` is taken by value and reshaped here; whether
    // that aliases the caller's data depends on tensor's copy semantics —
    // confirm against the tensor class.
    size_t size = 1;
    for (size_t i = 2; i < input.rank(); i++) {
      size *= input.shape()[i];
    }
    std::vector<size_t> shape{input.shape()[0], input.shape()[1], size};
    int channel_size = size * input.shape()[0];
    input.reshape(shape);
    result.reshape(shape);
    T sum = 0;
    T sq_sum = 0;
    T mean;
    T var;
    for (size_t i = 0; i < input.shape()[1]; i++) {
      // Accumulate sum and sum-of-squares over all instances of channel i.
      for (size_t j = 0; j < input.shape()[0]; j++) {
        for (size_t k = 0; k < size; k++) {
          sum += input(j, i, k);
          sq_sum += input(j, i, k) * input(j, i, k);
        }
      }
      mean = sum / channel_size;
      // var = E[x^2] - E[x]^2; may suffer cancellation for large means,
      // accepted trade-off for the single-pass design.
      var = (sq_sum / channel_size - mean * mean);
      for (size_t j = 0; j < input.shape()[0]; j++) {
        for (size_t k = 0; k < size; k++) {
          result(j, i, k) =
              scale[i] * (input(j, i, k) - mean) / sqrt(var + epsilon) + B[i];
        }
      }
      // Reset accumulators for the next channel.
      sum = 0;
      sq_sum = 0;
    }
    result.reshape(original_shape);
    return result;
  }
  /*!<
  \return The output tensor of the same shape as input.
  */
};
} // namespace dnnc
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include <graph/node.h>
#include <set>
namespace dnnc {
/*!< This is a directed graph representing data flow graph
* for deep neural networks. Default graph is singleton by design,
* and light by construction. Default graph lives throughout the
* life of program and dies with it.
*
* One can create subgraphs pointers owned by default graph.
*
* Reference: https://github.com/onnx/onnx/blob/rel-1.5.0/docs/IR.md
*/
class graph {
protected:
std::string _name = "";
size_t _nodeIndex = 0; /*!< index for creating names for nodes without name */
std::vector<node *> _nodes;
std::vector<size_t> _inputs; /*!< indices in _nodes containng input nodes */
std::vector<size_t> _outputs; /*!< indices in _nodes containng output nodes */
std::vector<dnnParameters> _initializers;
/*!< Hierarchical graph mechanism by registry.
* 1. Parent registers every new born in _subgraphs (see subgraph method).
* 2. Before dying, child deregisters itself from parent's _subgraphs (see
* destructor).
* */
graph *_parent = 0x0;
std::vector<graph *> _subgraphs;
graph(graph *parent = 0x0) : _nodeIndex(0), _parent(parent) {}
// prohibited methods for singleton instance
graph(const graph &other) = delete;
graph &operator=(const graph &other) = delete;
size_t nextIndex() { return ++_nodeIndex; }
std::string createName() { return "dnnc___" + std::to_string(nextIndex()); }
ioNode *addIONode(std::string name, DNNC_DataType type,
std::vector<size_t> shape, node::NODE_TYPE ntype) {
assert(ntype == node::INPUT || ntype == node::OUTPUT);
node *newNode = 0x0;
if (findNodeByName(name, newNode)) {
assert((newNode->ntype() == node::INPUT ||
newNode->ntype() == node::OUTPUT) &&
"found operator node with same name as io node");
assert(newNode->symbol() == opInvalid &&
"found operator node with same name as io node.");
return dynamic_cast<ioNode *>(newNode);
}
name = name.empty() ? createName() : name;
ioNode *new_ioNode = new ioNode(name, ntype, type, shape);
_nodes.push_back(new_ioNode);
ntype == node::INPUT ? _inputs.push_back(_nodes.size() - 1)
: _outputs.push_back(_nodes.size() - 1);
return new_ioNode;
}
public:
static graph &singleton() {
static graph
_graph; /*!< only one graph can be created, (singleton by design) */
return _graph;
}
graph &subgraph() {
graph *sg = new graph(this);
// register new born in _subgraphs.
_subgraphs.push_back(sg);
return *sg;
}
void destroy() {
if (_parent) {
// Before dying, deregister itself from parent's _subgraphs.
// Erase-Remove idiom
_parent->_subgraphs.erase(std::remove(_parent->_subgraphs.begin(),
_parent->_subgraphs.end(), this),
_parent->_subgraphs.end());
}
for (auto &sg : _subgraphs)
delete sg;
for (auto &n : _nodes)
delete n;
_name = "";
_nodeIndex = 0;
_nodes.clear();
_inputs.clear();
_outputs.clear();
_initializers.clear();
_subgraphs.clear();
}
~graph() { destroy(); }
void setName(std::string name) { _name = name; }
std::string getName() { return _name; }
size_t nNodes() { return _nodes.size(); }
/*<! reset all existing marks on the graph nodes */
void resetNodeMarks();
/*<! add compute node to the graph */
opNode *addOPNode(std::string name, OPCODE symbol) {
assert(symbol != opInvalid &&
"operator node can not be created with invalid opCode.");
node *newNode = 0x0;
if (false == name.empty() && findNodeByName(name, newNode)) {
assert(newNode->ntype() == node::OPERATOR &&
"found io node with same name as operator node");
assert(newNode->symbol() != symbol &&
"found operator node with same name and difference symbol");
return dynamic_cast<opNode *>(newNode);
}
name = name.empty() ? createName() : name;
opNode *new_opNode = new opNode(symbol, name);
_nodes.push_back(new_opNode);
return new_opNode;
}
/*<! add input term node to the comptue graph */
ioNode *addInput(std::string name, DNNC_DataType type,
std::vector<size_t> shape) {
return addIONode(name, type, shape, node::INPUT);
}
/*<! add output term node to the comptue graph */
ioNode *addOutput(std::string name, DNNC_DataType type,
std::vector<size_t> shape) {
return addIONode(name, type, shape, node::OUTPUT);
}
std::vector<ioNode *> inputs() {
std::vector<ioNode *> vins;
for (size_t &i : _inputs)
vins.push_back(dynamic_cast<ioNode *>(_nodes[i]));
return vins;
}
std::vector<ioNode *> outputs() {
std::vector<ioNode *> vouts;
for (size_t &i : _outputs)
vouts.push_back(dynamic_cast<ioNode *>(_nodes[i]));
return vouts;
}
/*<! true iff `name` names one of this graph's output terminals */
bool isOutput(std::string name) {
  for (size_t idx : _outputs) {
    if (name == _nodes[idx]->name())
      return true;
  }
  return false;
}
/*<! register a trained parameter (initializer) with the graph */
void addParameters(dnnParameters param) { _initializers.push_back(param); }
/*<! all registered parameters. NOTE: returns by value, so callers get a
 * snapshot, not a live view of _initializers. */
std::vector<dnnParameters> parameters() { return _initializers; }
/*<! Search all nodes in the graph. Return a vector of nodes with
 * IO (input or output) same as name passed as argument.
 * With in==true, matches operator nodes that CONSUME `name` (it appears
 * in their input list) plus the graph OUTPUT terminal of that name;
 * with in==false, matches operator nodes that PRODUCE `name` plus the
 * graph INPUT terminal of that name. The inverted checks on the
 * terminal nodes reflect that a graph input acts as a producer and a
 * graph output as a consumer. */
std::vector<node *> findNodesWithIO(std::string name, bool in = true) {
  std::vector<node *> nodes;
  for (node *n : _nodes) {
    if (n->ntype() == node::OPERATOR) {
      if (in) {
        // does this operator consume `name`?
        std::vector<std::string> n_ins = dynamic_cast<opNode *>(n)->inputs();
        if (std::find(n_ins.begin(), n_ins.end(), name) != n_ins.end())
          nodes.push_back(n);
      } else {
        // does this operator produce `name`?
        std::vector<std::string> n_outs =
            dynamic_cast<opNode *>(n)->outputs();
        if (std::find(n_outs.begin(), n_outs.end(), name) != n_outs.end())
          nodes.push_back(n);
      }
    } else if (n->ntype() == node::INPUT && in == false) {
      // graph input terminal produces its own name
      if (n->name() == name)
        nodes.push_back(n);
    } else if (n->ntype() == node::OUTPUT && in == true) {
      // graph output terminal consumes its own name
      if (n->name() == name)
        nodes.push_back(n);
    }
  }
  return nodes;
}
bool findNodeByName(std::string name, node *&n) {
for (node *other : _nodes) { // TODO: use std::find
if (other->name() == name) {
n = other;
return true;
}
}
return false;
}
/*<! verify every node's inputs/outputs are connected inside the graph;
 * implemented in graph.cpp */
bool sanityCheck();
#ifndef SWIGPYTHON
/*<! lightweight index-based cursor over the graph's node list; wired
 * into begin()/end() iterators by the SETUP_ITERATORS macro below.
 * Hidden from the SWIG wrapper. */
struct node_iter {
  int pos; // current index into ref->_nodes
  inline void next(const graph *ref) { ++pos; }
  inline void begin(const graph *ref) { pos = 0; }
  inline void end(const graph *ref) { pos = ref->_nodes.size(); }
  inline node *&get(graph *ref) { return ref->_nodes[pos]; }
  inline const node *get(const graph *ref) { return ref->_nodes[pos]; }
  inline bool cmp(const node_iter &s) const { return pos != s.pos; }
};
SETUP_ITERATORS(graph, node *, node_iter)
#endif
};
/*<! convenience accessor for the process-wide graph singleton */
static graph &Graph() { return graph::singleton(); }
} // namespace dnnc
<file_sep># cmake/modules/language_support.cmake
#
# Temporary additional general language support is contained within this
# file.
# This additional function definition is needed to provide a workaround for
# CMake bug 9220.
# On debian testing (cmake 2.6.2), I get return code zero when calling
# cmake the first time, but cmake crashes when running a second time
# as follows:
#
# -- The Fortran compiler identification is unknown
# CMake Error at /usr/share/cmake-2.6/Modules/CMakeFortranInformation.cmake:7 (GET_FILENAME_COMPONENT):
# get_filename_component called with incorrect number of arguments
# Call Stack (most recent call first):
# CMakeLists.txt:3 (enable_language)
#
# My workaround is to invoke cmake twice. If both return codes are zero,
# it is safe to invoke ENABLE_LANGUAGE(Fortran OPTIONAL)
# Probe whether `language` can really be enabled by configuring a
# minimal throwaway project TWICE in a scratch directory (see the CMake
# bug 9220 note above: the second run is where broken setups crash).
# Sets `language_works` to ON/OFF in the caller's scope.
function(workaround_9220 language language_works)
#message("DEBUG: language = ${language}")
# Minimal CMakeLists content for the probe project; forwards the
# current Fortran and linker flags so the probe matches the real build.
set(text
"project(test NONE)
cmake_minimum_required(VERSION 2.8.0)
set (CMAKE_Fortran_FLAGS \"${CMAKE_Fortran_FLAGS}\")
set (CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS}\")
enable_language(${language})
")
# Start from a clean scratch tree for this language.
file(REMOVE_RECURSE ${CMAKE_BINARY_DIR}/language_tests/${language})
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/language_tests/${language})
file(WRITE ${CMAKE_BINARY_DIR}/language_tests/${language}/CMakeLists.txt
${text})
# First configure run.
execute_process(
COMMAND ${CMAKE_COMMAND} . -G "${CMAKE_GENERATOR}"
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/language_tests/${language}
RESULT_VARIABLE return_code
OUTPUT_QUIET
ERROR_QUIET
)
if(return_code EQUAL 0)
# Second run
execute_process (
COMMAND ${CMAKE_COMMAND} . -G "${CMAKE_GENERATOR}"
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/language_tests/${language}
RESULT_VARIABLE return_code
OUTPUT_QUIET
ERROR_QUIET
)
# Language works only if BOTH configure runs succeeded.
if(return_code EQUAL 0)
set(${language_works} ON PARENT_SCOPE)
else(return_code EQUAL 0)
set(${language_works} OFF PARENT_SCOPE)
endif(return_code EQUAL 0)
else(return_code EQUAL 0)
set(${language_works} OFF PARENT_SCOPE)
endif(return_code EQUAL 0)
endfunction(workaround_9220)
# Temporary tests of the above function.
#workaround_9220(CXX CXX_language_works)
#message("CXX_language_works = ${CXX_language_works}")
#workaround_9220(CXXp CXXp_language_works)
#message("CXXp_language_works = ${CXXp_language_works}")
<file_sep># generate split test header file only if it does not yet exist
# in order to prevent a rebuild everytime cmake is configured
# Generate the split-test helper header only once: it defines
# CALL_SUBTEST_<i> macros 1..999 that expand to CALL_SUBTEST only when
# the matching EIGEN_TEST_PART_<i> is defined.
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
foreach(i RANGE 1 999)
file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h
"#ifdef EIGEN_TEST_PART_${i}\n"
"#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n"
"#else\n"
"#define CALL_SUBTEST_${i}(FUNC)\n"
"#endif\n\n"
)
endforeach()
endif()
set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Unsupported")
add_custom_target(BuildUnsupported)
include_directories(../../test ../../unsupported ../../Eigen
${CMAKE_CURRENT_BINARY_DIR}/../../test)
# Optional backends: each find_package below conditionally registers
# backend-specific tests and records the backend as tested/missing.
find_package (Threads)
find_package(GoogleHash)
if(GOOGLEHASH_FOUND)
add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT")
include_directories(${GOOGLEHASH_INCLUDES})
ei_add_property(EIGEN_TESTED_BACKENDS "GoogleHash, ")
else(GOOGLEHASH_FOUND)
ei_add_property(EIGEN_MISSING_BACKENDS "GoogleHash, ")
endif(GOOGLEHASH_FOUND)
find_package(Adolc)
if(ADOLC_FOUND)
include_directories(${ADOLC_INCLUDES})
ei_add_property(EIGEN_TESTED_BACKENDS "Adolc, ")
if(EIGEN_TEST_CXX11)
ei_add_test(forward_adolc "" ${ADOLC_LIBRARIES})
else()
message(STATUS "Adolc found, but tests require C++11 mode")
endif()
else(ADOLC_FOUND)
ei_add_property(EIGEN_MISSING_BACKENDS "Adolc, ")
endif(ADOLC_FOUND)
# this test seems to never have been successful on x87, so is considered to contain a FP-related bug.
# see thread: "non-linear optimization test summary"
ei_add_test(NonLinearOptimization)
ei_add_test(NumericalDiff)
ei_add_test(autodiff_scalar)
ei_add_test(autodiff)
if (NOT CMAKE_CXX_COMPILER MATCHES "clang\\+\\+$")
ei_add_test(BVH)
endif()
ei_add_test(matrix_exponential)
ei_add_test(matrix_function)
ei_add_test(matrix_power)
ei_add_test(matrix_square_root)
ei_add_test(alignedvector3)
ei_add_test(FFT)
ei_add_test(EulerAngles)
find_package(MPFR 2.3.0)
find_package(GMP)
if(MPFR_FOUND AND EIGEN_COMPILER_SUPPORT_CPP11)
include_directories(${MPFR_INCLUDES} ./mpreal)
ei_add_property(EIGEN_TESTED_BACKENDS "MPFR C++, ")
set(EIGEN_MPFR_TEST_LIBRARIES ${MPFR_LIBRARIES} ${GMP_LIBRARIES})
ei_add_test(mpreal_support "-std=c++11" "${EIGEN_MPFR_TEST_LIBRARIES}" )
else()
ei_add_property(EIGEN_MISSING_BACKENDS "MPFR C++, ")
endif()
ei_add_test(sparse_extra "" "")
find_package(FFTW)
if(FFTW_FOUND)
ei_add_property(EIGEN_TESTED_BACKENDS "fftw, ")
include_directories( ${FFTW_INCLUDES} )
if(FFTWL_LIB)
ei_add_test(FFTW "-DEIGEN_FFTW_DEFAULT -DEIGEN_HAS_FFTWL" "${FFTW_LIBRARIES}" )
else()
ei_add_test(FFTW "-DEIGEN_FFTW_DEFAULT" "${FFTW_LIBRARIES}" )
endif()
else()
ei_add_property(EIGEN_MISSING_BACKENDS "fftw, ")
endif()
# OpenGL-backed tests need OpenGL + GLUT + GLEW simultaneously.
option(EIGEN_TEST_NO_OPENGL "Disable OpenGL support in unit tests" OFF)
if(NOT EIGEN_TEST_NO_OPENGL)
find_package(OpenGL)
find_package(GLUT)
find_package(GLEW)
if(OPENGL_FOUND AND GLUT_FOUND AND GLEW_FOUND)
include_directories(${OPENGL_INCLUDE_DIR} ${GLUT_INCLUDE_DIR} ${GLEW_INCLUDE_DIRS})
ei_add_property(EIGEN_TESTED_BACKENDS "OpenGL, ")
set(EIGEN_GL_LIB ${GLUT_LIBRARIES} ${GLEW_LIBRARIES} ${OPENGL_LIBRARIES})
ei_add_test(openglsupport "" "${EIGEN_GL_LIB}" )
else()
ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ")
endif()
else()
ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ")
endif()
ei_add_test(polynomialsolver)
ei_add_test(polynomialutils)
ei_add_test(splines)
ei_add_test(gmres)
ei_add_test(minres)
ei_add_test(levenberg_marquardt)
ei_add_test(kronecker_product)
ei_add_test(special_functions)
# TODO: The following test names are prefixed with the cxx11 string, since historically
# the tests depended on c++11. This isn't the case anymore so we ought to rename them.
# FIXME: Old versions of MSVC fail to compile this code, so we just disable these tests
# when using visual studio. We should make the check more strict to enable the tests for
# newer versions of MSVC.
if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
ei_add_test(cxx11_tensor_dimension)
ei_add_test(cxx11_tensor_map)
ei_add_test(cxx11_tensor_assign)
ei_add_test(cxx11_tensor_comparisons)
ei_add_test(cxx11_tensor_forced_eval)
ei_add_test(cxx11_tensor_math)
ei_add_test(cxx11_tensor_const)
ei_add_test(cxx11_tensor_intdiv)
ei_add_test(cxx11_tensor_casts)
ei_add_test(cxx11_tensor_empty)
ei_add_test(cxx11_tensor_sugar)
ei_add_test(cxx11_tensor_roundings)
ei_add_test(cxx11_tensor_layout_swap)
ei_add_test(cxx11_tensor_io)
if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
# This test requires __uint128_t which is only available on 64bit systems
ei_add_test(cxx11_tensor_uint128)
endif()
endif()
if(EIGEN_TEST_CXX11)
if(EIGEN_TEST_SYCL)
ei_add_test_sycl(cxx11_tensor_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_forced_eval_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_broadcast_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_device_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_reduction_sycl "-std=c++11")
endif(EIGEN_TEST_SYCL)
# It should be safe to always run these tests as there is some fallback code for
# older compiler that don't support cxx11.
# This is already set if EIGEN_TEST_CXX11 is enabled:
# set(CMAKE_CXX_STANDARD 11)
ei_add_test(cxx11_eventcount "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_runqueue "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_non_blocking_thread_pool "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_meta)
ei_add_test(cxx11_tensor_simple)
# ei_add_test(cxx11_tensor_symmetry)
ei_add_test(cxx11_tensor_index_list)
ei_add_test(cxx11_tensor_mixed_indices)
ei_add_test(cxx11_tensor_contraction)
ei_add_test(cxx11_tensor_convolution)
ei_add_test(cxx11_tensor_expr)
ei_add_test(cxx11_tensor_fixed_size)
ei_add_test(cxx11_tensor_of_const_values)
ei_add_test(cxx11_tensor_of_complex)
ei_add_test(cxx11_tensor_of_strings)
ei_add_test(cxx11_tensor_lvalue)
ei_add_test(cxx11_tensor_broadcasting)
ei_add_test(cxx11_tensor_chipping)
ei_add_test(cxx11_tensor_concatenation)
ei_add_test(cxx11_tensor_inflation)
ei_add_test(cxx11_tensor_morphing)
ei_add_test(cxx11_tensor_padding)
ei_add_test(cxx11_tensor_patch)
ei_add_test(cxx11_tensor_image_patch)
ei_add_test(cxx11_tensor_volume_patch)
ei_add_test(cxx11_tensor_reduction)
ei_add_test(cxx11_tensor_argmax)
ei_add_test(cxx11_tensor_shuffling)
ei_add_test(cxx11_tensor_striding)
ei_add_test(cxx11_tensor_notification "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_tensor_thread_pool "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_tensor_ref)
ei_add_test(cxx11_tensor_random)
ei_add_test(cxx11_tensor_generator)
ei_add_test(cxx11_tensor_custom_op)
ei_add_test(cxx11_tensor_custom_index)
ei_add_test(cxx11_tensor_fft)
ei_add_test(cxx11_tensor_ifft)
ei_add_test(cxx11_tensor_scan)
endif()
# These tests needs nvcc
find_package(CUDA 7.0)
if(CUDA_FOUND AND EIGEN_TEST_CUDA)
# Make sure to compile without the -pedantic, -Wundef, -Wnon-virtual-dtor
# and -fno-check-new flags since they trigger thousands of compilation warnings
# in the CUDA runtime
# Also remove -ansi that is incompatible with std=c++11.
string(REPLACE "-pedantic" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE "-Wundef" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE "-Wnon-virtual-dtor" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE "-fno-check-new" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE "-ansi" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
message(STATUS "Flags used to compile cuda code: " ${CMAKE_CXX_FLAGS})
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
set(CUDA_NVCC_FLAGS "-ccbin ${CMAKE_C_COMPILER}" CACHE STRING "nvcc flags" FORCE)
endif()
if(EIGEN_TEST_CUDA_CLANG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 --cuda-gpu-arch=sm_${EIGEN_CUDA_COMPUTE_ARCH}")
endif()
# CUDA 7.0 spelled the relaxed-constexpr flag differently.
set(EIGEN_CUDA_RELAXED_CONSTEXPR "--expt-relaxed-constexpr")
if (${CUDA_VERSION} STREQUAL "7.0")
set(EIGEN_CUDA_RELAXED_CONSTEXPR "--relaxed-constexpr")
endif()
if( (NOT EIGEN_TEST_CXX11) OR (CMAKE_VERSION VERSION_LESS 3.3))
set(EIGEN_CUDA_CXX11_FLAG "-std=c++11")
else()
# otherwise the flag has already been added because of the above set(CMAKE_CXX_STANDARD 11)
set(EIGEN_CUDA_CXX11_FLAG "")
endif()
set(CUDA_NVCC_FLAGS "${EIGEN_CUDA_CXX11_FLAG} ${EIGEN_CUDA_RELAXED_CONSTEXPR} -arch compute_${EIGEN_CUDA_COMPUTE_ARCH} -Xcudafe \"--display_error_number\" ${CUDA_NVCC_FLAGS}")
cuda_include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CUDA_TOOLKIT_ROOT_DIR}/include")
set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
ei_add_test(cxx11_tensor_complex_cuda)
ei_add_test(cxx11_tensor_complex_cwise_ops_cuda)
ei_add_test(cxx11_tensor_reduction_cuda)
ei_add_test(cxx11_tensor_argmax_cuda)
ei_add_test(cxx11_tensor_cast_float16_cuda)
ei_add_test(cxx11_tensor_scan_cuda)
# Contractions require arch 3.0 or higher
if (${EIGEN_CUDA_COMPUTE_ARCH} GREATER 29)
ei_add_test(cxx11_tensor_device)
ei_add_test(cxx11_tensor_cuda)
ei_add_test(cxx11_tensor_contract_cuda)
ei_add_test(cxx11_tensor_of_float16_cuda)
endif()
# The random number generation code requires arch 3.5 or greater.
if (${EIGEN_CUDA_COMPUTE_ARCH} GREATER 34)
ei_add_test(cxx11_tensor_random_cuda)
endif()
unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
endif()
<file_sep># What is DNNC Tensor
Tensor variable is a fixed-length multi-dimensional array.
It is a mutable object whose transformations are constrained by algebraic rules.
Similar to NumPy ndarray objects, dnnc.tensor objects have a data type and a shape.
Additionally, dnnc.tensors can reside in accelerator memory (like a GPU).
DNNC offers a rich library of operations (dnnc.add, dnnc.matmul, dnnc.transpose etc.) that consume and produce dnnc.tensors.
Here is a partial list.
# Tensor Operations
## Assignment Operators:
```
Operator Method
+= object.__iadd__(self, other)
-= object.__isub__(self, other)
*= object.__imul__(self, other)
/= object.__itruediv__(self, other)
//= object.__ifloordiv__(self, other)
%= object.__imod__(self, other)
**= object.__ipow__(self, other[, modulo])
<<= object.__ilshift__(self, other)
>>= object.__irshift__(self, other)
&= object.__iand__(self, other)
^= object.__ixor__(self, other)
|= object.__ior__(self, other)
```
## Comparison Operators
```
Operator Method
< object.__lt__(self, other)
<= object.__le__(self, other)
== object.__eq__(self, other)
!= object.__ne__(self, other)
>= object.__ge__(self, other)
> object.__gt__(self, other)
```
## Unary Operators:
```
Operator Method
- object.__neg__(self)
+ object.__pos__(self)
abs() object.__abs__(self)
~ object.__invert__(self)
complex() object.__complex__(self)
int() object.__int__(self)
long() object.__long__(self)
float() object.__float__(self)
oct() object.__oct__(self)
hex() object.__hex__(self)
```
## Binary Operators
```
Operator Method
+ object.__add__(self, other)
- object.__sub__(self, other)
* object.__mul__(self, other)
// object.__floordiv__(self, other)
/ object.__truediv__(self, other)
% object.__mod__(self, other)
** object.__pow__(self, other[, modulo])
<< object.__lshift__(self, other)
>> object.__rshift__(self, other)
& object.__and__(self, other)
^ object.__xor__(self, other)
| object.__or__(self, other)
```
**Reference:**
1. [Python Data model](https://docs.python.org/3/reference/datamodel.html)
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
template <typename To, typename Ti, typename Tind>
/*! ONNX-style Slice operator: extracts a strided sub-tensor of the
 *  input along the axes listed in `axes`, between `start` (inclusive)
 *  and `end` (exclusive), advancing by `steps`. Negative indices and
 *  negative steps follow numpy/python slicing semantics. Supports
 *  inputs of rank 1 through 4. */
class Slice : public baseOperator<To, Ti, Tind> {
  // Slice attributes
public:
  Slice(std::string name = "opSlice")
      : baseOperator<To, Ti, Tind>(opSlice, name) {}

  // bool getAttribute<int>(OPATTR attrName, int& obj) ;

  /*! Validate the slice specification, normalize negative indices, and
   *  copy the selected elements into a freshly allocated result tensor.
   *  Returns NULL_TENSOR on any validation failure (after logging) or
   *  when the requested slice is empty. */
  tensor<To>
  compute(tensor<Ti> a, // N-D Tensor of data to extract slices from
          tensor<Tind> start, // 1-D tensor of starting indices of
                              // corresponding axis in `axes`
          tensor<Tind> end, // 1-D tensor of ending indices (exclusive) of
                            // corresponding axis in `axes`
          tensor<Tind> axes = NULL_TENSOR<Tind>,
          // 1-D tensor of axes that `starts` and `ends` apply to.
          // Negative value means counting dimensions from the back.
          tensor<Tind> steps = NULL_TENSOR<Tind>)
  // 1-D tensor of slice step of corresponding
  // axis in `axes`. Default to 1.
  {
    // Process and check the arguments
    std::stringstream errMsg;
    DIMENSION num_axes = start.shape()[0];
    Tind rank = a.rank();
    if (start.rank() != 1) {
      errMsg << "start tensor is " << start.rank()
             << "dimensional (should be 1 dimensional)" << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
      return NULL_TENSOR<To>;
    }
    if (end.rank() != 1) {
      errMsg << "end tensor is " << end.rank()
             << "dimensional (should be 1 dimensional)" << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
      return NULL_TENSOR<To>;
    }
    if (start.shape() != end.shape()) {
      errMsg << "start and end tensor sizes don't match (";
      errMsg << "start tensor size = " << start.shape()[0] << ", ";
      // FIX: close the parenthesis opened above in the message
      errMsg << "end tensor size = " << end.shape()[0] << ")" << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
      return NULL_TENSOR<To>;
    }
    // FIX: compare/construct with the Tind template parameter, not the
    // hard-coded `int`/`DIMENSION` types, so instantiations with
    // Tind != int work too.
    if (axes == NULL_TENSOR<Tind>) {
      // default axes = [0, 1, ..., num_axes-1]
      std::vector<DIMENSION> shape{num_axes};
      tensor<Tind> default_axis(shape);
      axes = default_axis;
      for (size_t i = 0; i < num_axes; i++) {
        axes(i) = i;
      }
    }
    if (steps == NULL_TENSOR<Tind>) {
      // default step = 1 along every sliced axis
      std::vector<DIMENSION> shape{num_axes};
      tensor<Tind> default_steps(shape);
      steps = default_steps;
      for (size_t i = 0; i < num_axes; i++) {
        steps(i) = 1;
      }
    }
    if (axes.rank() != 1) {
      errMsg << "axes tensor is " << axes.rank()
             << "dimensional (should be 1 dimensional)" << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
      return NULL_TENSOR<To>;
    }
    if (steps.rank() != 1) {
      errMsg << "steps tensor is " << steps.rank()
             << "dimensional (should be 1 dimensional)" << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
      return NULL_TENSOR<To>;
    }
    if (start.shape() != axes.shape()) {
      errMsg << "start and axes tensor sizes don't match (";
      errMsg << "start tensor size = " << start.shape()[0] << ", ";
      errMsg << "axes tensor size = " << axes.shape()[0] << ")" << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
      return NULL_TENSOR<To>;
    }
    if (start.shape() != steps.shape()) {
      // FIX: message previously said "axes" while comparing steps
      errMsg << "start and steps tensor sizes don't match (";
      errMsg << "start tensor size = " << start.shape()[0] << ", ";
      errMsg << "steps tensor size = " << steps.shape()[0] << ")" << std::endl;
      SPDLOG_ERROR(errMsg.str().c_str());
      return NULL_TENSOR<To>;
    }
    for (size_t i = 0; i < num_axes; i++) {
      // change values from negative to positive
      if (start(i) < 0) {
        start(i) += a.shape()[i];
      }
      if (end(i) < 0) {
        // when step is negative and end is -1, store -1
        // this is required by python_slice for negative steps
        if ((steps(i) < 0) && (end(i) == -1)) {
          end(i) = -1;
        } else {
          end(i) += a.shape()[i];
        }
      }
      // Numpy like checks and counter measures for corner cases
      // step cannot be zero
      if (steps(i) == 0) {
        errMsg << "slice step cannot be zero" << std::endl;
        SPDLOG_ERROR(errMsg.str().c_str());
        return NULL_TENSOR<To>;
      }
      // if step is positive
      else if (steps(i) > 0) {
        // return NULL tensor if start is greater equal to
        // shape[axis] or start is greater equal to end
        if ((Tind)start(i) >= (Tind)a.shape()[i] || (end(i) - 1 < start(i))) {
          return NULL_TENSOR<To>;
        }
        // if end is greater than shape[axis], limit end to shape[axis]
        if ((Tind)end(i) > (Tind)(a.shape()[i])) {
          end(i) = a.shape()[i];
        }
      }
      // if step is negative
      else if (steps(i) < 0) {
        // if start is greater equal to shape[axis], limit start to
        // shape[axis]-1
        if ((Tind)start(i) >= (Tind)a.shape()[i]) {
          start(i) = a.shape()[i] - 1;
        }
        // return NULL tensor if end is greater equal to
        // shape[axis] or end is greater equal to start
        if ((Tind)end(i) >= (Tind)(a.shape()[i]) || (start(i) - 1 < end(i))) {
          return NULL_TENSOR<To>;
        }
      }
      // axes: normalize negative axis values and validate range
      if (axes(i) < 0) {
        if ((axes(i) + rank) < 0) {
          errMsg << "axes value (" << axes(i) << ") along axis (" << i
                 << ") is beyond the input tensor dimension" << std::endl;
          SPDLOG_ERROR(errMsg.str().c_str());
          return NULL_TENSOR<To>;
        }
        axes(i) = rank + axes(i);
      }
      if (axes(i) > rank - 1) {
        errMsg << "axes value (" << axes(i) << ") along axis (" << i
               << ") is large than the number of dimensions of input tensor"
               << std::endl;
        SPDLOG_ERROR(errMsg.str().c_str());
        return NULL_TENSOR<To>;
      }
      // reject duplicate axes
      for (size_t j = i + 1; j < num_axes; j++) {
        if (axes(i) == axes(j)) {
          errMsg << "repeated axis value (" << axes(i) << ") at indices " << i
                 << " and " << j << " of axes input" << std::endl;
          SPDLOG_ERROR(errMsg.str().c_str());
          return NULL_TENSOR<To>;
        }
      }
    }
    // Determine the shape of the result tensor. Axes not listed in
    // `axes` are copied whole (start 0, end shape-1, step 1).
    std::vector<size_t> resultShape(rank);
    std::vector<Tind> start_index(rank);
    std::vector<Tind> end_index(rank);
    std::vector<Tind> step(rank);
    for (Tind axis = 0; axis < rank; axis++) {
      // determine slicing along the axis-th dimension
      for (size_t i = 0; i < num_axes; i++) {
        if (axes(i) == axis) {
          if (steps[i] > 0) {
            // inclusive end index for the copy loops below
            start_index[axis] = start(i);
            end_index[axis] = end(i) - 1;
            step[axis] = steps[i];
          } else {
            start_index[axis] = start(i);
            end_index[axis] = end(i) + 1;
            step[axis] = steps[i];
          }
          break;
        } else {
          start_index[axis] = 0;
          end_index[axis] = a.shape()[axis] - 1;
          step[axis] = 1;
        }
      }
      resultShape[axis] =
          (end_index[axis] - start_index[axis]) / step[axis] + 1;
    }
    // Slice now: explicit nested copy loops per supported rank, walking
    // each axis from start_index to the inclusive end_index by step.
    tensor<To> result(resultShape);
    if (rank == 1) {
      Tind i0 = 0;
      for (Tind _i0 = start_index[0];
           (step[0] > 0) ? (_i0 <= end_index[0]) : (_i0 >= end_index[0]);
           _i0 += step[0]) {
        result(i0++) = a(_i0);
      }
    } else if (rank == 2) {
      Tind i0 = 0;
      for (Tind _i0 = start_index[0];
           (step[0] > 0) ? (_i0 <= end_index[0]) : (_i0 >= end_index[0]);
           _i0 += step[0]) {
        Tind i1 = 0;
        for (Tind _i1 = start_index[1];
             (step[1] > 0) ? (_i1 <= end_index[1]) : (_i1 >= end_index[1]);
             _i1 += step[1]) {
          result(i0, i1++) = a(_i0, _i1);
        }
        i0++;
      }
    } else if (rank == 3) {
      Tind i0 = 0;
      for (Tind _i0 = start_index[0];
           (step[0] > 0) ? (_i0 <= end_index[0]) : (_i0 >= end_index[0]);
           _i0 += step[0]) {
        Tind i1 = 0;
        for (Tind _i1 = start_index[1];
             (step[1] > 0) ? (_i1 <= end_index[1]) : (_i1 >= end_index[1]);
             _i1 += step[1]) {
          Tind i2 = 0;
          for (Tind _i2 = start_index[2];
               (step[2] > 0) ? (_i2 <= end_index[2]) : (_i2 >= end_index[2]);
               _i2 += step[2]) {
            result(i0, i1, i2++) = a(_i0, _i1, _i2);
          }
          i1++;
        }
        i0++;
      }
    } else if (rank == 4) {
      Tind i0 = 0;
      for (Tind _i0 = start_index[0];
           (step[0] > 0) ? (_i0 <= end_index[0]) : (_i0 >= end_index[0]);
           _i0 += step[0]) {
        Tind i1 = 0;
        for (Tind _i1 = start_index[1];
             (step[1] > 0) ? (_i1 <= end_index[1]) : (_i1 >= end_index[1]);
             _i1 += step[1]) {
          Tind i2 = 0;
          for (Tind _i2 = start_index[2];
               (step[2] > 0) ? (_i2 <= end_index[2]) : (_i2 >= end_index[2]);
               _i2 += step[2]) {
            Tind i3 = 0;
            for (Tind _i3 = start_index[3];
                 (step[3] > 0) ? (_i3 <= end_index[3]) : (_i3 >= end_index[3]);
                 _i3 += step[3]) {
              result(i0, i1, i2, i3++) = a(_i0, _i1, _i2, _i3);
            }
            i2++;
          }
          i1++;
        }
        i0++;
      }
    } else {
      // only ranks 1-4 are implemented
      SPDLOG_ERROR("Not supported");
      return NULL_TENSOR<To>;
    }
    return result;
  }
};
} // namespace dnnc
<file_sep>import unittest
import os, sys
import common
import importlib
if __name__ == '__main__':
    # Run the named test file under each of the three test packages.
    # The test name comes from argv[1]; failing that, it is requested
    # interactively (once per package, as before).
    for package_name in ('swig', 'parser', 'compiler'):
        if len(sys.argv) > 1:
            test_name = sys.argv[1]
        else:
            test_name = input("Enter test name (ex. 'MatMul.py'): ")
        loader = unittest.TestLoader()
        collected = []
        # each package exposes load_test(loader, name, out_list)
        package = importlib.import_module(package_name)
        package.load_test(loader, test_name, collected)
        if not collected:
            continue
        runner = unittest.TextTestRunner(verbosity=0)
        runner.run(unittest.TestSuite(collected))
    exit(0)
<file_sep>import mnist
from subprocess import PIPE, run
import random
import numpy as np
# download images and labels.
# NOTE(review): presumably fetches the MNIST test split via the `mnist`
# package (may hit the network on first use) — confirm.
images = mnist.test_images()
labels = mnist.test_labels()
# display text image
def display(image):
    """Render a 2-D image to stdout: '*' for nonzero pixels, ' ' for
    zero, one text row per image row, followed by a blank line."""
    rows, cols = image.shape
    for r in range(rows):
        line = ''.join('*' if image[r, c] else ' ' for c in range(cols))
        print(line)
    print('')
# Write image tensor
def write_image(index):
    """Dump test image #index to ./image.data as whitespace-separated
    floats normalized to [0, 1] (numpy array_str with brackets stripped)."""
    normalized = images[index].flatten() / 255.0
    with open("image.data", "w") as out:
        out.write(np.array_str(normalized).strip("[]"))
def run_model(command):
    """Run `command` in a shell; when stdout announces 'writing file X',
    return the contents of X, otherwise return ''.

    NOTE(review): uses shell=True on a caller-supplied string — only
    safe for trusted commands.
    """
    proc = run(command, stdout=PIPE, stderr=PIPE,
               universal_newlines=True, shell=True)
    for out_line in proc.stdout.split("\n"):
        if not out_line.startswith("writing file "):
            continue
        # keep the original (quirky) filename extraction verbatim:
        # find('.', -1) inspects only the last character of the line
        result_path = out_line[13:out_line.find('.', -1)].split()[0]
        with open(result_path, 'r') as fh:
            return fh.read()
    return ""
# Run model in the loop
import deepC.dnnc as dc

# Pick 5 random test images, run the compiled model binary on each, and
# show the model's prediction next to the true MNIST label.
for i in range(5):
    index = random.randint(0, len(images) - 1)
    write_image(index)
    # mnist.exe prints "writing file <name>"; run_model returns that
    # file's contents — a bracketed vector of log-softmax scores
    model_result = run_model("./mnist.exe ./image.data").strip("[]")
    # Convert log softmax output to probability
    log_probs = dc.array([float(f) for f in model_result.strip("[]").split()])
    probabilities = dc.exp(log_probs)
    trueLabel = labels[index]
    prediction = dc.argmax(probabilities)[0]
    display(images[index])
    print("True label = ", labels[index])
    print("Model Prediction: ", dc.argmax(probabilities))
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#include "graph/graph.h"
using namespace dnnc;
void dnnc::graph::resetNodeMarks() {
for (node *n : _nodes)
n->resetMarks();
}
/*<! Verify graph connectivity: every node must have producers for its
 * inputs and consumers for its outputs, except graph INPUT terminals /
 * Constants (no producers) and OUTPUT terminals (no consumers).
 * Prints a diagnostic for each violation and returns false if any was
 * found. NOTE(review): the two messages look swapped relative to the
 * checks (the inputNodes failure reports "outputs are not connected")
 * — confirm intended wording. */
bool dnnc::graph::sanityCheck() {
  bool result = true;
  for (node *n : _nodes) {
    std::vector<node *> next_level_nodes;
    // no nodes feed n's inputs
    if (false == n->inputNodes(*this, next_level_nodes)) {
      if (n->ntype() != node::INPUT && n->symbol() != opConstant) {
        std::cerr << "ERROR (GRAPH): some of graph " + _name + "'s node " +
                         n->name() + "'s\n";
        std::cerr << "               outputs are not connected to other nodes "
                     "in the graph.\n";
        result = false;
      }
    }
    // no nodes consume n's outputs
    if (false == n->outputNodes(*this, next_level_nodes)) {
      if (n->ntype() != node::OUTPUT) {
        std::cerr << "ERROR (GRAPH): some of graph " + _name + "'s node " +
                         n->name() + "'s\n";
        std::cerr << "               inputs are not connected to other nodes "
                     "in the graph.\n";
        result = false;
      }
    }
  }
  return result;
}
#ifdef DNNC_GRAPH_TEST
using namespace dnnc;
/*! Tiny smoke test for the graph singleton; compiled only when
 *  DNNC_GRAPH_TEST is defined. */
int main() {
  dnnc::graph &g = dnnc::Graph();
  // BUG FIX: the graph's iterator yields `node *`, so the original
  // `for (node &n : g)` combined with `n->name()` could not compile.
  for (node *n : g) {
    std::cout << n->name() << "\n";
  }
  g.setName("CNTK");
  return 0;
}
#endif
<file_sep>import os
def generate_onnx_runner(op_name, inputs):
    """Generate and immediately execute a caffe2-based runner script for
    the ONNX testcase `op_name`.

    The script is written to ../testcases/<op_name>/<op_name>_onnx_runner.py;
    `inputs` must be the source-code text of the input dict literal that
    gets pasted verbatim into the generated file.
    NOTE(review): runs the result via os.system with string
    concatenation — only safe for trusted op_name values.
    """
    # license header for the generated file (kept as a literal so the
    # output carries the project's standard preamble)
    py_file = '''
# Copyright 2018 The DNNC Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
\n\n'''
    # body: load the testcase's .onnx model and run it through caffe2
    py_file += "import onnx\n"
    py_file += "import caffe2.python.onnx.backend\n"
    py_file += "from caffe2.python import core, workspace\n"
    py_file += "import numpy as np\n\n"
    py_file += "onnx_path = '../testcases/" + op_name + "/" + op_name + ".onnx'\n"
    py_file += "modelFile = onnx.load(onnx_path)\n"
    py_file += "inputs = " + inputs + "\n"
    py_file += "output = caffe2.python.onnx.backend.run_model(modelFile, inputs)\n"
    py_file += "print(output)\n\n"
    # write next to the testcase, creating the directory if needed
    path_name = '../testcases/' + op_name
    file_name = path_name + "/" + op_name + "_onnx_runner.py"
    if not os.path.isdir(path_name):
        os.system("mkdir -p " + path_name)
    with open(file_name, 'w') as f:
        f.write(py_file)
os.system('python ' + file_name)<file_sep># add PYTHONPATH etc here.
import os
import sys

# Repository root: parent of the directory that holds this file.
DNNC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

# 1. The PYTHONPATH environment variable is needed so spawned compiler
#    commands inherit the module search path.
if "PYTHONPATH" in os.environ:
    os.environ["PYTHONPATH"] = os.environ["PYTHONPATH"] + os.pathsep + DNNC_ROOT
else:
    os.environ["PYTHONPATH"] = DNNC_ROOT

# 2. sys.path is needed so this process itself can import deepC.
sys.path.append(DNNC_ROOT)
<file_sep># deepC
[](https://travis-ci.org/github/ai-techsystems/deepC)
[](https://badge.fury.io/py/deepC)
[](https://pepy.tech/project/deepc)
[](LICENSE)
[](https://opencollective.com/dnnc)
[](https://gitter.im/ai-techsystems/community)
deepC is a **vendor-independent deep learning library, compiler and inference framework** designed for small form-factor devices, including **μControllers, IoT and Edge devices**.
## 🏃♂️ Using deepC
Here are few of many ways.
1. Try deepC with [Colab Noteboook](https://colab.research.google.com/drive/1EKgQcMCHr-0OsG9qJ4wXv7J4JFlPY7CK)
1. Install it on Ubuntu, raspbian (or any other debian derivatives) using ```pip install deepC```
1. Compile onnx model- [read this article](test/compiler/mnist/README.md) or [watch this video](https://youtu.be/BpFs83MU3HM)
1. Use deepC with a [Docker File](Dockerfile)
See more examples in [tutorial](tutorials/README.md) dir.
## 📛 what is deepC?
deepC library, compiler and inference framework is designed to **enable and perform** deep learning neural networks by focussing on features of small form-factor devices like micro-controllers, eFPGAs, cpus and other embedded devices like [raspberry-pi](https://www.raspberrypi.org/), [odroid](https://www.hardkernel.com/), [arduino](https://www.arduino.cc/), [SparkFun Edge](https://www.sparkfun.com/products/15170), [risc-V](https://www.amazon.com/Seeed-Studio-Sipeed-Maixduino-RISC-V/dp/B07SW9ZWQQ), mobile phones, x86 and arm laptops among others.

deepC also offers ahead of time compiler producing optimized executable based on [LLVM compiler tool chain](https://llvm.org/) specialized for deep neural networks with [ONNX](https://onnx.ai/) as front end.
## 📝 Design
Main components of **deepC** have been designed to represent and optimize the common deep learning networks in high level graph IR and to transform the computation graph to minimize memory utilization, optimize data layout and fuse computation patterns for different hardware backends.
<img width="600" alt="Architecture" src="https://github.com/ai-techsystems/deepC/blob/master/misc/dnnCompilerArch.jpg">
Read more at [high level design document](docs/highLevelDesign.md)
## 💧 PreRequisites
* [ONNX 1.5](https://github.com/onnx/onnx/tree/rel-1.5.0#installation)
* [LLVM 8.0](http://releases.llvm.org/8.0.0/docs/GettingStarted.html#getting-started-quickly-a-summary)
* [Python 3.6](https://www.python.org/downloads/release/python-360/)
* [SWIG 3.0](https://sourceforge.net/projects/swig/files/swig/swig-3.0.12/)
## 💻 Development
Build and start modifying deepC locally from source code with following steps
### ⭕ Ubuntu 18.04
Follow the steps to install pre-requisites
```bash
sudo apt-get update
sudo apt-get install build-essential python3.6-dev python3-pip swig doxygen clang-format clang clang-8 llvm-8 llvm-8-dev protobuf-compiler libprotoc-dev
sudo pip3 install numpy==1.15.0 onnx==1.5.0
```
Once you are done, build deepC
```bash
git clone https://github.com/ai-techsystems/deepC.git
cd deepC
make
```
### ⭕ Mac OS / Windows 10
Make sure you have the below pre-requisites
#### Mac OS:
- [Python for Mac](https://www.python.org/downloads/mac-osx/)
- [Docker for Mac](https://docs.docker.com/v17.09/docker-for-mac/install/#download-docker-for-mac)
#### Windows 10:
- [Python for Windows](https://www.python.org/downloads/windows/)
- [Docker for Windows](https://docs.docker.com/v17.09/docker-for-windows/install/#download-docker-for-windows)
Once you are done, build deepC inside docker container
```bash
git clone https://github.com/ai-techsystems/deepC.git
cd deepC
python buildDocker.py
```
#### 📜 Output
```bash
find include src swig -name \*.h -print0 -o -name \*.cpp -print0 | xargs -0 -P8 -n1 clang-format -i
make -C src
make[1]: Entering directory 'deepC/src'
make -C core
make[2]: Entering directory 'deepC/src/core'
compiling broadcast.cpp
/usr/bin/g++ -O3 -Wall -std=c++14 -fPIC -march=native -msse2 \
-isystem ./packages/eigen-eigen-323c052e1731 -I./include \
-c broadcast.cpp -o obj/broadcast.o
compiling tensor.cpp
...
...
/usr/bin/g++ -shared ./obj/dnnc_swig.o ./obj/dnnc_pyutils.o ./obj/dnnc_api.o -o lib/libdnnc.so
ln -s -f lib/libdnnc.so _dnnc.so
/usr/bin/python3 ../test/swig/basic.py
```
## Current Support
| Supported Architectures | Status |
|------------------------- |----------|
| Arm | ✔️ |
| Armv7 | ✔️ |
| Arm64 | ✔️ |
| AMD64 | ✔️ |
| ppc64le | ✔️ |
| Supported OS | Distributions | Status |
|-------------- |---------------- |-----------|
| Linux | Ubuntu 18.04 | ✔️ |
| Linux | CentOS 6 | ✔️ |
| Linux | Arch Linux | ✔️ |
| Linux | Manjaro | ✔️ |
| Windows | 1803 and above | ✔️ |
| Mac OS | Sierra and above | ✔️ |
## ➕ Contribute
dnn Compiler adopts the Apache committer model; we aim to create an open source project that is maintained and owned by the community. Check out the Contributor Guide.
## 🙏 Acknowledgement
We acknowledge the efforts predecessor projects like [LLVM](https://llvm.org/), [ONNX](https://onnx.ai/) etc. to make this project a reality.
---
## 🕵️♂️ Why compiler❔
deepC is targeted towards devices with small formfactor like microcontrollers, which are part of all sorts of household devices: think appliances, cars, and toys. In fact, there are around 30 billion microcontroller-powered devices produced each year. They're cheap, require very little energy, and are very reliable.
By bringing deep learning models to tiny microcontrollers, we can boost the intelligence of billions of devices that we use in our lives, without relying on expensive hardware or reliable internet connections. Imagine smart appliances that can adapt to your daily routine, intelligent industrial sensors that understand the difference between problems and normal operation, and magical toys that can help kids learn in fun and delightful ways.
<a href="https://opencollective.com/dnnc"><img src="https://opencollective.com/dnnc/individuals.svg?width=890"></a>
#### Organizations
Support this project with your organization. Your logo will show up here with a link to your website. [[Contribute](https://opencollective.com/dnnc/contribute)]
---
## Built on/with deepC
### Products
1. **[No code TinyML platform](http://cainvas.ai-tech.systems/)**, built with deepC technology.
2. **[No code TinyML Book](http://thetinymlbook.com/)**, with a chapter on deepC.
### Papers
- Paper: [Deep Neural Network Operators](docs/pubs/DNNC-operators-paper.pdf), appeared in [Proceedings of AITS Summit, 2019](https://www.amazon.com/Proceedings-AITS-Summit-2019-www-ai-techsystems-com-ebook/dp/B083ZJWFGT)
- Letter: [Gesture Recognition with deepC](docs/pubs/IJCRT%20-%20Gesture%20Recognition%20with%20deepC.pdf), appeared in [INTERNATIONAL JOURNAL OF CREATIVE RESEARCH THOUGHTS](https://ijcrt.org/papers/IJCRT2111106.pdf)
- Poster: [Deep Neural Network Compiler and Inference Framework for microcontrollers and microcomputers](docs/pubs/AITS%20poster.pdf), appeared in [IRISS 2020
14th Inter-Research-Institute Student Seminar in Computer Science](https://events.iitgn.ac.in/2020/IRISS/)
### Paper Citations
- Title: [Artificial Intelligence in the IoT Era: A Review of Edge AI Hardware and Software](https://ieeexplore.ieee.org/abstract/document/9770931/)
- [Download pdf](https://fruct.org/publications/fruct31/files/Sip.pdf)
- Title: [Tiny transformers for environmental sound classification at the edge](https://arxiv.org/abs/2103.12157)
- [Download pdf](https://arxiv.org/pdf/2103.12157)
- Title: [Efficient Edge Analytics: Addressing Cyber-Physical MASINT with Machine Learning on Audio at the Edge](https://repository.lib.fit.edu/handle/11141/3223)
- [Download pdf](https://repository.lib.fit.edu/bitstream/handle/11141/3223/ELLIOTT-DISSERTATION-2020.pdf?sequence=1&isAllowed=y)
### Book Chapter
1. deepC Chapter in book [Introduction to TinyML](http://thetinymlbook.com/), available on [Amazon](https://www.amazon.com/dp/B0B662D7ZW/) and other retailers
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
// OneHot operator: given an 'indices' tensor, a scalar 'depth', and a
// two-element 'values' tensor {off_value, on_value}, produce a tensor of
// rank(indices)+1 where a new axis of size 'depth' is inserted at '_axis'
// and, for each index entry, the position it selects along that new axis
// holds on_value while every other position holds off_value.
template <typename To, typename Ti1, typename Ti2>
class OneHot : public baseOperator<To, Ti1, Ti2> {
  // OneHot attributes
  int _axis; // axis at which the one-hot dimension is inserted; -1 = innermost
public:
  OneHot(std::string name = "opOneHot", int xis = -1)
      : baseOperator<To, Ti1, Ti2>(opOneHot, name), _axis(xis) {}
  // Read the 'axis' attribute into xis; returns false for any other attribute.
  bool getAttribute(OPATTR attrName, int &xis) override {
    if (attrName == attr_axis) {
      xis = _axis;
      return true;
    }
    return false;
  }
  // Set the 'axis' attribute from xis; returns false for any other attribute.
  bool setAttribute(OPATTR attrName, int xis) override {
    if (attrName == attr_axis) {
      _axis = xis;
      return true;
    }
    return false;
  }
  // Compute the one-hot expansion.
  //   indices: entries selecting the "hot" position along the new axis;
  //            entries outside [-depth, depth-1] leave their slice all-off.
  //   depth:   size of the inserted one-hot axis.
  //   values:  must be rank 1, length 2: {off_value, on_value}.
  // Returns NULL_TENSOR on invalid inputs.
  tensor<To> compute(tensor<Ti1> indices, Ti2 depth, tensor<To> values) {
    if (values.rank() != 1 || values.length() != 2) {
      SPDLOG_ERROR("invalid values rank or length.");
      return NULL_TENSOR<To>;
    }
    To off_value = values[0];
    To on_value = values[1];
    if (!(this->template type_check<Ti1, int, float, double>())) {
      SPDLOG_ERROR("Constrain input tensor indices to numeric tensors.");
      return NULL_TENSOR<To>;
    }
    if (!(this->template type_check<Ti2, int, float, double>())) {
      SPDLOG_ERROR("Constrain scalar depth to numeric values.");
      return NULL_TENSOR<To>;
    }
    // Negative or out-of-range axis values are clamped to the innermost
    // position (rank of 'indices').
    size_t axis = _axis < 0 || static_cast<size_t>(_axis) > indices.rank()
                      ? indices.rank()
                      : _axis;
    // Result shape = indices shape with 'depth' inserted at 'axis';
    // initialize every element to off_value, then set the hot entries.
    std::vector<size_t> rshape = indices.shape();
    rshape.insert(rshape.begin() + axis, static_cast<size_t>(depth));
    tensor<To> result(rshape, indices.name() + "_1hot", INIT_VALUE, off_value);
    // TODO: Now add on_value selectively.
    // NOTE(review): the rank-1 branch below is empty, so 1-D indices
    // currently return an all-off_value tensor -- confirm this is intended
    // (likely an unimplemented case of the TODO above).
    if (indices.rank() == 1) {
    } else if (indices.rank() == 2) {
      size_t i0 = 0, sz0 = indices.shape()[0];
      size_t i1 = 0, sz1 = indices.shape()[1];
      for (i0 = 0; i0 < sz0; i0++) {
        for (i1 = 0; i1 < sz1; i1++) {
          std::vector<size_t> new_index{i0, i1};
          Ti1 idx = indices(new_index);
          // Any entries in the 'indices' input tensor
          // with values outside the range [-depth, depth-1]
          // will result in one-hot representation with all
          // 'off_value' values in the output tensor.
          if (idx < -depth || idx > depth - 1)
            continue;
          // Negative indices count back from 'depth' (ONNX semantics).
          int idepth = idx < 0 ? depth + idx : idx;
          new_index.insert(new_index.begin() + axis,
                           static_cast<size_t>(idepth));
          result.load(on_value, new_index);
        }
      }
    } else if (indices.rank() == 3) {
      size_t i0 = 0, sz0 = indices.shape()[0];
      size_t i1 = 0, sz1 = indices.shape()[1];
      size_t i2 = 0, sz2 = indices.shape()[2];
      for (i0 = 0; i0 < sz0; i0++) {
        for (i1 = 0; i1 < sz1; i1++) {
          for (i2 = 0; i2 < sz2; i2++) {
            std::vector<size_t> new_index{i0, i1, i2};
            Ti1 idx = indices(new_index);
            // Any entries in the 'indices' input tensor
            // with values outside the range [-depth, depth-1]
            // will result in one-hot representation with all
            // 'off_value' values in the output tensor.
            if (idx < -depth || idx > depth - 1)
              continue;
            int idepth = idx < 0 ? depth + idx : idx;
            new_index.insert(new_index.begin() + axis,
                             static_cast<size_t>(idepth));
            result.load(on_value, new_index);
          }
        }
      }
    } else if (indices.rank() == 4) {
      size_t i0 = 0, sz0 = indices.shape()[0];
      size_t i1 = 0, sz1 = indices.shape()[1];
      size_t i2 = 0, sz2 = indices.shape()[2];
      size_t i3 = 0, sz3 = indices.shape()[3];
      for (i0 = 0; i0 < sz0; i0++) {
        for (i1 = 0; i1 < sz1; i1++) {
          for (i2 = 0; i2 < sz2; i2++) {
            for (i3 = 0; i3 < sz3; i3++) {
              std::vector<size_t> new_index{i0, i1, i2, i3};
              Ti1 idx = indices(new_index);
              // Any entries in the 'indices' input tensor
              // with values outside the range [-depth, depth-1]
              // will result in one-hot representation with all
              // 'off_value' values in the output tensor.
              if (idx < -depth || idx > depth - 1)
                continue;
              int idepth = idx < 0 ? depth + idx : idx;
              new_index.insert(new_index.begin() + axis,
                               static_cast<size_t>(idepth));
              result.load(on_value, new_index);
            }
          }
        }
      }
    }
    return result;
  }
};
} // namespace dnnc
<file_sep>#https://pytorch.org/docs/stable/torchvision/models.html
import os
import torch
import torchvision.models as models

# Per-model configuration: the source expression for the dummy input that
# torch.onnx.export traces with.
# NOTE: this dict is named 'model_configs' so it does not shadow the
# 'torchvision.models' import above (the original rebound the name 'models').
model_configs = {}
model_configs['resnet18'] = {'dummy_input': 'torch.randn(1, 3, 224, 224)'}

for model_name in model_configs:
    dummy_input = model_configs[model_name]['dummy_input']
    # License header written at the top of every generated script.
    py_file = '''
# Copyright 2018 The DNNC Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
\n\n'''
    # Generated script: export the torchvision model to ONNX, dump its
    # graph as text, then run the project's onnx parser on it.
    py_file += 'import sys\n'
    py_file += 'import onnx\n'
    py_file += 'import torch\n'
    py_file += 'import torch.nn as nn\n'
    py_file += 'import torchvision.models as models\n\n'
    py_file += 'sys.path.append("../../../python/parser")\n'
    py_file += 'from onnx_parser import *\n\n'
    py_file += 'dummy_input = ' + dummy_input + '\n'
    py_file += 'model = models.' + model_name + '()\n\n'
    py_file += 'onnx_filename = "./testcases/' + model_name + '/' + model_name + '.onnx"\n'
    py_file += 'torch.onnx.export(model, dummy_input, onnx_filename)\n'
    py_file += 'sym_filename = "./gold_files/' + model_name + '.sym"\n'
    py_file += 'text_filename = "./testcases/' + model_name + '/' + model_name + '.txt"\n'
    py_file += 'with open(text_filename, "w") as f:\n'
    py_file += '\tmodel = onnx.load(onnx_filename)\n'
    py_file += '\tf.write(str(model.graph))\n'
    py_file += 'parse(onnx_filename, sym_filename, onnx_output_file=text_filename)'
    # Portable, idempotent directory creation (was: os.system("mkdir ..."),
    # which fails when the directory already exists).
    os.makedirs(os.path.join("testcases", model_name), exist_ok=True)
    filename = 'testcases/' + model_name + '/' + model_name + '_generator.py'
    with open(filename, 'w') as f:
        f.write(py_file)
    os.system("python3 " + filename)
<file_sep># base image is ubuntu 18.04
# build environment for dnnCompiler, based on Ubuntu 18.04
FROM ubuntu:18.04
# update the package-list and install dependencies:
# compilers, python toolchain, swig, doxygen and LLVM-8/protobuf for the build
RUN apt-get update && apt-get install -y \
    build-essential \
    python3.6-dev \
    python3-pip \
    swig \
    doxygen \
    clang-format \
    clang \
    clang-8 \
    llvm-8 \
    llvm-8-dev \
    protobuf-compiler \
    libprotoc-dev \
    vim
# install numpy onnx with pip
# (versions pinned to the ones the build is validated against)
# RUN pip3 install numpy onnx
RUN pip3 install numpy==1.15.0 onnx==1.5.0
# copy everything from current directory to container directory
COPY . /dnnCompiler
# below code is commented as we are using bash script to run this
# process because we need to run this differently from root directory and
# from swig directory, but docker doesn't support adding files from
# up a directory. For more information see this link:
# https://stackoverflow.com/questions/27068596/how-to-include-files-outside-of-dockers-build-context
# WORKDIR /dnnCompiler
# CMD make clean \
# && make<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
def temp_dropout(x, ratio):
    """Reference dropout in inference mode: the identity map.

    The ``ratio`` argument is accepted only for signature parity with
    ``dc.dropout``; it is not used.
    """
    return x
class DropoutTest(unittest.TestCase):
    """Compare dc.dropout against a numpy reference (identity in inference
    mode) for 1- to 4-dimensional float and double tensors."""

    def setUp(self):
        self.len = 24
        self.ratio = 2.0
        self.np_float_a = np.random.randn(self.len).astype(np.float32)
        self.dc_float_a = dc.array(list(self.np_float_a))
        self.np_double_a = np.random.randn(self.len).astype(np.float64)
        self.dc_double_a = dc.array(list(self.np_double_a))

    def _check(self, np_a, dc_a, dtype):
        # Run both implementations and compare elementwise; flatten()
        # normalizes the numpy side to the flat layout dc.data() returns.
        npr = temp_dropout(np_a, self.ratio)
        dcr = dc.dropout(dc_a, self.ratio)
        np.testing.assert_allclose(npr.flatten(),
                                   np.array(dcr.data()).astype(dtype),
                                   rtol=1e-3, atol=1e-3)

    def test_Dropout1D_float(self):
        self._check(self.np_float_a, self.dc_float_a, np.float32)

    def test_Dropout1D_double(self):
        self._check(self.np_double_a, self.dc_double_a, np.float64)

    def test_Dropout2D_float(self):
        self._check(np.reshape(self.np_float_a, (6, 4)),
                    dc.reshape(self.dc_float_a, (6, 4)), np.float32)

    def test_Dropout2D_double(self):
        # FIX: the original cast the double result to float32 (copy-paste
        # from the float variant); compare in float64 like the 1D case.
        self._check(np.reshape(self.np_double_a, (6, 4)),
                    dc.reshape(self.dc_double_a, (6, 4)), np.float64)

    def test_Dropout3D_float(self):
        self._check(np.reshape(self.np_float_a, (2, 4, 3)),
                    dc.reshape(self.dc_float_a, (2, 4, 3)), np.float32)

    def test_Dropout3D_double(self):
        # FIX: was float32 (see test_Dropout2D_double).
        self._check(np.reshape(self.np_double_a, (2, 4, 3)),
                    dc.reshape(self.dc_double_a, (2, 4, 3)), np.float64)

    def test_Dropout4D_float(self):
        self._check(np.reshape(self.np_float_a, (2, 2, 2, 3)),
                    dc.reshape(self.dc_float_a, (2, 2, 2, 3)), np.float32)

    def test_Dropout4D_double(self):
        # FIX: was float32 (see test_Dropout2D_double).
        self._check(np.reshape(self.np_double_a, (2, 2, 2, 3)),
                    dc.reshape(self.dc_double_a, (2, 2, 2, 3)), np.float64)

    def tearDown(self):
        return "test finished"
if __name__ == '__main__':
unittest.main()<file_sep>## Tutorials
Tutorial | Github Notebook | Colab Notebook
---------------|----------------|---------------
Getting Started with deepC | [Read in repo](Getting_Started_With_deepC.ipynb) | [Run in Colab](https://colab.research.google.com/drive/1rWeZxyww1SksYi-kPJ44CSoFlOZxI5xm)
Gradient Descent with deepC | [Read in repo](Gradient_Descent_With_DeepC.ipynb) | [Run in Colab](https://colab.research.google.com/drive/1a6kmnGs8McbRs4W4nOR_x-N-HKlr2rkc)
k nearest neighbor with deepC | [Read in repo](KNN_with_DeepC.ipynb) | [Run in Colab](https://colab.research.google.com/drive/1aDY1js6m1-_WqC0KAWxEbO3eae_RzUhQ)
k means color quantization with deepC | [Read in repo](KMeansImageColorQuantization.ipynb) | [Run in Colab](https://colab.research.google.com/drive/1VSVmCMj-HPAMRo4t11bVqcn-I4DE9ecg)
## Examples
1. [Intermediate codegen and generate binary/bundle for your model](../../master/test/compiler/mnist/README.md)
1. Profiling your deep learning model
1. Debugging deep learning model with intermediate code generation
# deepC on MicroControllers
1. [running your ml model on Arduino](deepC_on_Arduino.md)
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
# This file is required by ../dnnCompiler/swig/op_gen.py
def slicing_indexing():
s = '''
def __getitem__(self, index):
"""
getitem method for tensor.
"""
def get_item_helper_int(item, axis):
flag = 0
start = item
if (start < 0):
start += self.shape()[axis]
stop = start+1
step = 1
if(start >= self.shape()[axis]):
errorMsg = "index value " + str(start) + " along axis " + str(axis) + " is beyond the size " + str(self.shape()[axis]) + " of input tensor along that axis"
raise ValueError(errorMsg)
flag = 1
return start, stop, step, flag
def get_item_helper_slice(item, axis):
flag = 0 # to check if all index items are supported or not
start = 0
stop = self.shape()[axis]
step = 1
if type(item.step) == int:
step = item.step
if step == 0:
errorMsg = "slice step cannot be zero"
raise TypeError(errorMsg)
flag = 1
elif step < 0:
start = self.shape()[axis] -1
stop = -1
elif str(type(item.step)).split("'")[1] == "NoneType":
pass
else:
errorMsg = "step of " + str(type(item.step)) + " not supported!"
raise TypeError(errorMsg)
flag = 1
if type(item.start) == int:
start = item.start
if (start < 0):
start += self.shape()[axis]
elif str(type(item.start)).split("'")[1] == "NoneType":
pass
else:
errorMsg = "start of " + str(type(item.start)) + " not supported!"
raise TypeError(errorMsg)
flag = 1
if type(item.stop) == int:
stop = item.stop
if (stop < 0):
stop += self.shape()[axis]
elif str(type(item.stop)).split("'")[1] == "NoneType":
pass
else:
errorMsg = "stop of " + str(type(item.stop)) + " not supported!"
raise TypeError(errorMsg)
flag = 1
# if(start > self.shape()[axis]):
# errorMsg = "index value " + str(start) + " along axis " + str(axis) + " is beyond the size " + str(self.shape()[axis]) + " of input tensor along that axis"
# raise IndexError(errorMsg)
# flag = 1
# if(stop > self.shape()[axis]):
# errorMsg = "index value " + str(stop) + " along axis " + str(axis) + " is beyond the size " + str(self.shape()[axis]) + " of input tensor along that axis"
# raise IndexError(errorMsg)
# flag = 1
# if (step < 0) and not (start > stop):
# errorMsg = "stop index " + str(stop) + " along axis " + str(axis) + " is greater than start index " + str(start) + " while step is negative"
# raise IndexError(errorMsg)
# flag = 1
# elif (step > 0) and not (start < stop):
# errorMsg = "stop index " + str(stop) + " along axis " + str(axis) + " is smaller than start index " + str(start) + " while step is positive"
# raise IndexError(errorMsg)
# flag = 1
return start, stop, step, flag
if str(type(index)).split("'")[1] == "int":
if self.rank() < 1:
return self.copy()
axis = 0
start, stop, step, flag = get_item_helper_int(index, axis)
if flag:
return
start = array([start]).asTypeInt()
stop = array([stop]).asTypeInt()
axis = array([axis]).asTypeInt()
step = array([step]).asTypeInt()
if (self.rank() == 1):
return self.data()[index]
return slice(self, start, stop, axis, step).reshape(self.shape()[1:])
elif str(type(index)).split("'")[1] == "slice":
if self.rank() < 1:
return self.copy()
axis = 0
start, stop, step, flag = get_item_helper_slice(index, axis)
if flag:
return
start = array([start]).asTypeInt()
stop = array([stop]).asTypeInt()
axis = array([axis]).asTypeInt()
step = array([step]).asTypeInt()
return slice(self, start, stop, axis, step)
elif str(type(index)).split("'")[1] == "ellipsis":
return self.copy()
elif str(type(index)).split("'")[1] == "tuple":
if (len(index) > self.rank()):
errorMsg = "Takes maximum " + str(self.rank()) + " arguments, " + str(len(index)) + " were given!"
raise IndexError(errorMsg)
return
# elif Ellipsis not in index and (len(index) < self.rank()):
# errorMsg = "Takes minimum " + str(self.rank()) + " arguments, " + str(len(index)) + " were given!"
# raise IndexError(errorMsg)
# return
# checks if any float or bool or complex is not present
if any(isinstance(x,(bool,float,complex)) for x in index):
errorMsg = "Restrict to only integers as a slicing argument!"
raise ValueError(errorMsg)
return
start_list = []
stop_list = []
step_list = []
axis_list = []
axis = -1 # -1 for starting axis as 0 in the next loops
reshape_list = [] # reshape list to reshape
replace_start = replace_stop = 0 # replace ellipsis with slice methods by index
if Ellipsis in index:
if (index.count(Ellipsis) > 1):
errorMsg = str(index.count(Ellipsis)) + " 'Ellipsis' found, maximum 1 is supported!"
raise IndexError(errorMsg)
return
elif (index.count(Ellipsis) == 1):
non_ellipsis_count = 0
for item in index:
if str(type(item)).split("'")[1] == "int" or str(type(item)).split("'")[1] == "slice":
non_ellipsis_count += 1
# replace holds start and stop index which will be replaced by slice method in place of ellipsis
replace_start = index.index(Ellipsis)
replace_stop = replace_start + self.rank() - non_ellipsis_count
else:
errorMsg = "Error occured while handling ellipsis!"
raise ValueError(errorMsg)
return
for item in index:
axis += 1
if str(type(item)).split("'")[1] == "ellipsis":
while (axis >= replace_start and axis < replace_stop):
start = 0
stop = self.shape()[axis]
step = 1
start_list.append(start)
stop_list.append(stop)
step_list.append(step)
axis_list.append(axis)
reshape_list.append(1) # This shape will be taken
axis += 1
axis -= 1 # recovering from last axis increment
elif str(type(item)).split("'")[1] == "int":
start, stop, step, flag = get_item_helper_int(item, axis)
if flag:
return
start_list.append(start)
stop_list.append(stop)
step_list.append(step)
axis_list.append(axis)
reshape_list.append(0) # This shape will not be taken
elif str(type(item)).split("'")[1] == "slice":
start, stop, step, flag = get_item_helper_slice(item, axis)
if flag:
return
start_list.append(start)
stop_list.append(stop)
step_list.append(step)
axis_list.append(axis)
reshape_list.append(1) # This shape will be taken
else:
errorMsg = "Doesn't support " + str(item) + " of " + str(type(item)) + " as a slicing argument!"
raise TypeError(errorMsg)
return
while (axis < self.rank()-1):
axis += 1
start = 0
stop = self.shape()[axis]
step = 1
start_list.append(start)
stop_list.append(stop)
step_list.append(step)
axis_list.append(axis)
reshape_list.append(1) # This shape will be taken
start_list = array(start_list).asTypeInt()
stop_list = array(stop_list).asTypeInt()
axis_list = array(axis_list).asTypeInt()
step_list = array(step_list).asTypeInt()
# print("test start list : ", start_list)
# print("test stop list : ", stop_list)
# print("test axis list : ", axis_list)
# print("test step list : ", step_list)
result = slice(self, start_list, stop_list, axis_list, step_list)
if (result.len() != 0) and (0 in reshape_list):
if not 1 in reshape_list:
return result.data()[0]
return (result.reshape([x for x, y in zip(result.shape(), reshape_list) if y == 1]))
return result
else :
errorMsg = "Doesn't support " + str(index) + " of " + str(type(index)) + " as a slicing argument!"
raise TypeError(errorMsg)
return intTensor()
def __setitem__(self, index, input_tensor):
"""
setitem method for tensor.
"""
value_tensor = ""
if "Tensor" not in str(type(input_tensor)).split("'")[1]:
if str(type(input_tensor)).split("'")[1] in ("int", "float", "bool"):
value_tensor = array([input_tensor]) # passing single number as tensor of length 1
elif str(type(input_tensor)).split("'")[1] in ("list", "tuple"):
value_tensor = array(input_tensor) # passing python lists or tuples as tensor
else:
errorMsg = "could not convert " + str(input_tensor) + " of type " + str(type(input_tensor)) + " to dnnc tensor"
raise ValueError(errorMsg)
return
else:
value_tensor = input_tensor
def set_item_helper_int(item, axis):
flag = 0
start = item
if (start < 0):
start += self.shape()[axis]
stop = start+1
step = 1
if(start >= self.shape()[axis]):
errorMsg = "index value " + str(start) + " along axis " + str(axis) + " is beyond the size " + str(self.shape()[axis]) + " of input tensor along that axis"
raise ValueError(errorMsg)
flag = 1
return start, stop, step, flag
def set_item_helper_slice(item, axis):
flag = 0 # to check if all index items are supported or not
start = 0
stop = self.shape()[axis]
step = 1
if type(item.step) == int:
step = item.step
if step == 0:
errorMsg = "slice step cannot be zero"
raise TypeError(errorMsg)
flag = 1
elif step < 0:
start = self.shape()[axis] -1
stop = -1
elif str(type(item.step)).split("'")[1] == "NoneType":
pass
else:
errorMsg = "step of " + str(type(item.step)) + " not supported!"
raise TypeError(errorMsg)
flag = 1
if type(item.start) == int:
start = item.start
if (start < 0):
start += self.shape()[axis]
elif str(type(item.start)).split("'")[1] == "NoneType":
pass
else:
errorMsg = "start of " + str(type(item.start)) + " not supported!"
raise TypeError(errorMsg)
flag = 1
if type(item.stop) == int:
stop = item.stop
if (stop < 0):
stop += self.shape()[axis]
elif str(type(item.stop)).split("'")[1] == "NoneType":
pass
else:
errorMsg = "stop of " + str(type(item.stop)) + " not supported!"
raise TypeError(errorMsg)
flag = 1
# if(start > self.shape()[axis]):
# errorMsg = "index value " + str(start) + " along axis " + str(axis) + " is beyond the size " + str(self.shape()[axis]) + " of input tensor along that axis"
# raise IndexError(errorMsg)
# flag = 1
# if(stop > self.shape()[axis]):
# errorMsg = "index value " + str(stop) + " along axis " + str(axis) + " is beyond the size " + str(self.shape()[axis]) + " of input tensor along that axis"
# raise IndexError(errorMsg)
# flag = 1
# if (step < 0) and not (start > stop):
# errorMsg = "stop index " + str(stop) + " along axis " + str(axis) + " is greater than start index " + str(start) + " while step is negative"
# raise IndexError(errorMsg)
# flag = 1
# elif (step > 0) and not (start < stop):
# errorMsg = "stop index " + str(stop) + " along axis " + str(axis) + " is smaller than start index " + str(start) + " while step is positive"
# raise IndexError(errorMsg)
# flag = 1
return start, stop, step, flag
if str(type(index)).split("'")[1] == "int":
if self.rank() < 1:
errorMsg = "cannot set items to a null tensor"
raise TypeError(errorMsg)
return
axis = 0
start, stop, step, flag = set_item_helper_int(index, axis)
if flag:
return
start = array([start]).asTypeInt()
stop = array([stop]).asTypeInt()
axis = array([axis]).asTypeInt()
step = array([step]).asTypeInt()
set_slice(self, value_tensor, start, stop, axis, step)
return
elif str(type(index)).split("'")[1] == "slice":
if self.rank() < 1:
self = value_tensor
return
axis = 0
start, stop, step, flag = set_item_helper_slice(index, axis)
if flag:
return
start = array([start]).asTypeInt()
stop = array([stop]).asTypeInt()
axis = array([axis]).asTypeInt()
step = array([step]).asTypeInt()
set_slice(self, value_tensor, start, stop, axis, step)
return
elif str(type(index)).split("'")[1] == "ellipsis":
if self.shape() != value_tensor.shape():
errorMsg = "could not broadcast input array from shape "+str(value_tensor.shape())+" into shape "+str(self.shape())
raise ValueError(errorMsg)
return
elif (self.rank() < 1) or (self.shape() == value_tensor.shape()):
self = value_tensor
return
elif str(type(index)).split("'")[1] == "tuple":
if (len(index) > self.rank()):
errorMsg = "Takes maximum " + str(self.rank()) + " arguments, " + str(len(index)) + " were given!"
raise IndexError(errorMsg)
return
# elif Ellipsis not in index and (len(index) < self.rank()):
# errorMsg = "Takes minimum " + str(self.rank()) + " arguments, " + str(len(index)) + " were given!"
# raise IndexError(errorMsg)
# return
# checks if any float or bool or complex is not present
if any(isinstance(x,(bool,float,complex)) for x in index):
errorMsg = "Restrict to only integers as a slicing argument!"
raise ValueError(errorMsg)
return
start_list = []
stop_list = []
step_list = []
axis_list = []
axis = -1 # -1 for starting axis as 0 in the next loops
replace_start = replace_stop = 0 # replace ellipsis with slice methods by index
if Ellipsis in index:
if (index.count(Ellipsis) > 1):
errorMsg = str(index.count(Ellipsis)) + " 'Ellipsis' found, maximum 1 is supported!"
raise IndexError(errorMsg)
return
elif (index.count(Ellipsis) == 1):
non_ellipsis_count = 0
for item in index:
if str(type(item)).split("'")[1] == "int" or str(type(item)).split("'")[1] == "slice":
non_ellipsis_count += 1
# replace holds start and stop index which will be replaced by slice method in place of ellipsis
replace_start = index.index(Ellipsis)
replace_stop = replace_start + self.rank() - non_ellipsis_count
else:
errorMsg = "Error occured while handling ellipsis!"
raise ValueError(errorMsg)
return
for item in index:
axis += 1
if str(type(item)).split("'")[1] == "ellipsis":
while (axis >= replace_start and axis < replace_stop):
start = 0
stop = self.shape()[axis]
step = 1
start_list.append(start)
stop_list.append(stop)
step_list.append(step)
axis_list.append(axis)
axis += 1
axis -= 1 # recovering from last axis increment
elif str(type(item)).split("'")[1] == "int":
start, stop, step, flag = set_item_helper_int(item, axis)
if flag:
return
start_list.append(start)
stop_list.append(stop)
step_list.append(step)
axis_list.append(axis)
elif str(type(item)).split("'")[1] == "slice":
start, stop, step, flag = set_item_helper_slice(item, axis)
if flag:
return
start_list.append(start)
stop_list.append(stop)
step_list.append(step)
axis_list.append(axis)
else:
errorMsg = "Doesn't support " + str(item) + " of " + str(type(item)) + " as a slicing argument!"
raise TypeError(errorMsg)
return
while (axis < self.rank()-1):
axis += 1
start = 0
stop = self.shape()[axis]
step = 1
start_list.append(start)
stop_list.append(stop)
step_list.append(step)
axis_list.append(axis)
start_list = array(start_list).asTypeInt()
stop_list = array(stop_list).asTypeInt()
axis_list = array(axis_list).asTypeInt()
step_list = array(step_list).asTypeInt()
# print("test start list : ", start_list)
# print("test stop list : ", stop_list)
# print("test axis list : ", axis_list)
# print("test step list : ", step_list)
set_slice(self, value_tensor, start_list, stop_list, axis_list, step_list)
return
else :
errorMsg = "Doesn't support " + str(index) + " of " + str(type(index)) + " as a slicing argument!"
raise TypeError(errorMsg)
return
'''
return s
def overload_python_operator(dc_operator, operator_python, dtype_precedence_dict, flag):
    """Generate python source that overloads an operator via a dnnc call.

    Parameters:
        dc_operator: name of the dnnc function the generated methods call;
            substituted for every ``<operator>`` placeholder.
        operator_python: dunder stem, e.g. ``"add"`` produces ``__add__``;
            substituted for every ``<operand>`` placeholder.
        dtype_precedence_dict: dtype-name -> precedence rank mapping; its
            ``str()`` form is spliced verbatim into the generated in-place
            method so it can refuse to widen the left operand's dtype.
        flag: ``"logical"`` or ``"binary"`` emit forward, reflected and
            in-place methods; ``"comparison"`` emits only the forward
            method; any other value yields an empty string.

    Returns:
        The generated method definitions as one python source string.
    """
    s = ""
    if flag == "logical" or flag == "binary":
        # Forward and reflected methods plus the in-place method; the
        # in-place variant guards against implicitly changing the left
        # operand's datatype.
        s = '''
  def __<operand>__(self, other):
    return <operator>(self, other)

  def __r<operand>__(self, other):
    return <operator>(other, self)

  def __i<operand>__(self, other):
    """
      making sure left hand operand is immutable
    """
    dtype_precedence_dict = '''
        # Splice the precedence table into the generated source as a
        # literal dict, then continue the method body.
        s += str(dtype_precedence_dict) + '''
    left_operand_dtype = right_operand_dtype = ""
    if "Tensor" in str(type(self)):
      left_operand_dtype = str(type(self)).split(".")[-1].split("Tensor")[0]
    else:
      left_operand_dtype = str(type(self)).split("'")[1]
    if "Tensor" in str(type(other)):
      right_operand_dtype = str(type(other)).split(".")[-1].split("Tensor")[0]
    else:
      right_operand_dtype = str(type(other)).split("'")[1]
    if (dtype_precedence_dict[left_operand_dtype] < dtype_precedence_dict[right_operand_dtype]):
      errorMsg = "cannot modify left hand operand datatype."
      raise TypeError(errorMsg)
    return <operator>(self, other)
'''
    elif flag == "comparison":
        # Comparisons get only the forward method; no reflected or
        # in-place counterparts are generated here.
        s = '''
  def __<operand>__(self, other):
    return <operator>(self, other)
'''
    # Resolve both placeholders across the whole generated snippet.
    s = s.replace("<operator>",dc_operator).replace("<operand>",operator_python)
    return s
<file_sep>from onnx_runner_generator import generate_onnx_runner
# Registry of ONNX operators for which runner scripts are generated.
# Each entry's 'inputs' value is a *python-source string* that the
# generator pastes verbatim into the emitted runner, so it must be
# valid numpy code when executed there.
operators = {}

operators['Sigmoid'] = {
    'inputs': '[np.array([1, 2, 3]).astype(np.float32)]'
}

operators['BitShift'] = {
    # BUG FIX: np.random.randn takes dimensions as separate arguments,
    # not a tuple -- randn((2,3,4)) raises TypeError in the generated
    # runner.
    # NOTE(review): ONNX BitShift is defined on integer tensors and
    # takes a `direction` attribute -- confirm whether these float
    # inputs should be cast (e.g. .astype(np.uint8)).
    'inputs': '[np.random.randn(2,3,4),np.random.randn(2,3,4)]'
}

for operator, operator_info in operators.items():
    generate_onnx_runner(operator, operator_info['inputs'])
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
template <typename To, typename Ti>
/*! ONNX ReduceSumSquare operator: computes the sum of the squares of the
 *  input tensor's elements along the axes listed in attribute `axis`.
 *  Inputs up to rank 5 are handled explicitly; `keepdims` defaults to 1
 *  as in the ONNX spec. */
template <typename To, typename Ti>
class ReduceSumSquare : public baseOperator<To, Ti, Ti> {
  std::vector<int> _axis = {};  // axes to reduce over; empty => reduce all
  int _keepdims = 1;            // non-zero: reduced dimensions are retained

public:
  ReduceSumSquare(std::string name = "opReduceSumSquare")
      : baseOperator<To, Ti, Ti>(opReduceSumSquare, name) {}

  /*! Fetch the `keepdims` attribute; returns true iff attrName matched. */
  bool getAttribute(OPATTR attrName, int &obj) override {
    if (attrName == attr_keepdims) {
      obj = _keepdims;
      return true;
    }
    return false;
  }
  /*! Set the `keepdims` attribute; returns true iff attrName matched. */
  bool setAttribute(OPATTR attrName, int obj) override {
    if (attrName == attr_keepdims) {
      _keepdims = obj;
      return true;
    }
    return false;
  }
  /*! Fetch the `axis` attribute; returns true iff attrName matched. */
  bool getAttribute(OPATTR attrName, std::vector<int> &obj) override {
    if (attrName == attr_axis) {
      obj = _axis;
      return true;
    }
    return false;
  }
  /*! Set the `axis` attribute; returns true iff attrName matched. */
  bool setAttribute(OPATTR attrName, std::vector<int> obj) override {
    if (attrName == attr_axis) {
      _axis = obj;
      return true;
    }
    return false;
  }

  /*! Compute the reduction. Returns NULL_TENSOR on invalid axes.
   *  NOTE(review): when `_keepdims` is set, the code below writes the
   *  reduced values back into every element of `input` and returns a
   *  tensor of the FULL input shape -- ONNX keepdims semantics keep the
   *  reduced dimensions with size 1 instead; confirm this is intended.
   *  Also note `input` is modified in place on that path. */
  tensor<To> compute(tensor<Ti> input) override {
    int rank = input.rank();
    // check that values in _axis are less than or equal to the rank
    for (int axis : _axis) {
      if (axis > rank - 1) {
        SPDLOG_ERROR("Axis is larger than input tensor rank.");
        return NULL_TENSOR<To>;
      }
      if (axis < 0) {
        SPDLOG_ERROR("Axis value is negative.");
        return NULL_TENSOR<To>;
      }
    }
    // scalar: nothing to reduce, just cast to the output type
    if (rank == 0) {
      return input.template asType<To>();
    }
    // rank-1 input, or no axes given: reduce over every element.
    // NOTE(review): _keepdims is ignored on this path -- the result is
    // always a length-1 tensor; confirm intended.
    if (rank == 1 || _axis.empty()) {
      tensor<To> result({1});
      To in_sum = 0;
      for (size_t l0 = 0; l0 < input.length(); l0++) {
        in_sum += static_cast<To>(input[l0] * input[l0]);
      }
      result.load({in_sum});
      return result;
    }
    std::vector<DIMENSION> shape = input.shape();
    std::vector<DIMENSION> new_shape = input.shape();
    // sort axes from greatest to least so the erase() calls below do not
    // invalidate the remaining axis indices
    sort(_axis.begin(), _axis.end(), std::greater<int>());
    // new shape of the tensor will be composed of the original tensor shape
    // with values in _axis removed
    for (int axis : _axis) {
      new_shape.erase(new_shape.begin() + axis);
    }
    // resulting tensor has the shape new_shape, zero-initialized so the
    // += accumulation below starts from 0
    tensor<To> result(new_shape, "", INIT_ZERO);
    if (rank == 2) {
      // accumulate squares into the reduced-coordinate cell: k is the
      // full index with the reduced axes erased
      for (size_t l0 = 0; l0 < shape[0]; l0++) {
        for (size_t l1 = 0; l1 < shape[1]; l1++) {
          std::vector<size_t> k;
          k.push_back(l0);
          k.push_back(l1);
          for (int axis : _axis) {
            k.erase(k.begin() + axis);
          }
          To in_data = static_cast<To>(input(l0, l1));
          result(k) += in_data * in_data;
        }
      }
      if (_keepdims) {
        // broadcast the reduced values back over the original shape
        for (size_t l0 = 0; l0 < shape[0]; l0++) {
          for (size_t l1 = 0; l1 < shape[1]; l1++) {
            std::vector<size_t> k;
            k.push_back(l0);
            k.push_back(l1);
            for (int axis : _axis) {
              k.erase(k.begin() + axis);
            }
            input(l0, l1) = result(k);
          }
        }
        result = input.template asType<To>();
      }
    }
    if (rank == 3) {
      // same scheme as rank 2, one more nested loop level
      for (size_t l0 = 0; l0 < shape[0]; l0++) {
        for (size_t l1 = 0; l1 < shape[1]; l1++) {
          for (size_t l2 = 0; l2 < shape[2]; l2++) {
            std::vector<size_t> k;
            k.push_back(l0);
            k.push_back(l1);
            k.push_back(l2);
            for (int axis : _axis) {
              k.erase(k.begin() + axis);
            }
            To in_data = static_cast<To>(input(l0, l1, l2));
            result(k) += in_data * in_data;
          }
        }
      }
      if (_keepdims) {
        for (size_t l0 = 0; l0 < shape[0]; l0++) {
          for (size_t l1 = 0; l1 < shape[1]; l1++) {
            for (size_t l2 = 0; l2 < shape[2]; l2++) {
              std::vector<size_t> k;
              k.push_back(l0);
              k.push_back(l1);
              k.push_back(l2);
              for (int axis : _axis) {
                k.erase(k.begin() + axis);
              }
              input(l0, l1, l2) = result(k);
            }
          }
        }
        result = input.template asType<To>();
      }
    }
    if (rank == 4) {
      for (size_t l0 = 0; l0 < shape[0]; l0++) {
        for (size_t l1 = 0; l1 < shape[1]; l1++) {
          for (size_t l2 = 0; l2 < shape[2]; l2++) {
            for (size_t l3 = 0; l3 < shape[3]; l3++) {
              std::vector<size_t> k;
              k.push_back(l0);
              k.push_back(l1);
              k.push_back(l2);
              k.push_back(l3);
              for (int axis : _axis) {
                k.erase(k.begin() + axis);
              }
              To in_data = static_cast<To>(input(l0, l1, l2, l3));
              result(k) += in_data * in_data;
            }
          }
        }
      }
      if (_keepdims) {
        for (size_t l0 = 0; l0 < shape[0]; l0++) {
          for (size_t l1 = 0; l1 < shape[1]; l1++) {
            for (size_t l2 = 0; l2 < shape[2]; l2++) {
              for (size_t l3 = 0; l3 < shape[3]; l3++) {
                std::vector<size_t> k;
                k.push_back(l0);
                k.push_back(l1);
                k.push_back(l2);
                k.push_back(l3);
                for (int axis : _axis) {
                  k.erase(k.begin() + axis);
                }
                input(l0, l1, l2, l3) = result(k);
              }
            }
          }
        }
        result = input.template asType<To>();
      }
    }
    if (rank == 5) {
      for (size_t l0 = 0; l0 < shape[0]; l0++) {
        for (size_t l1 = 0; l1 < shape[1]; l1++) {
          for (size_t l2 = 0; l2 < shape[2]; l2++) {
            for (size_t l3 = 0; l3 < shape[3]; l3++) {
              for (size_t l4 = 0; l4 < shape[4]; l4++) {
                std::vector<size_t> k;
                k.push_back(l0);
                k.push_back(l1);
                k.push_back(l2);
                k.push_back(l3);
                k.push_back(l4);
                for (int axis : _axis) {
                  k.erase(k.begin() + axis);
                }
                To in_data = static_cast<To>(input(l0, l1, l2, l3, l4));
                result(k) += in_data * in_data;
              }
            }
          }
        }
      }
      if (_keepdims) {
        for (size_t l0 = 0; l0 < shape[0]; l0++) {
          for (size_t l1 = 0; l1 < shape[1]; l1++) {
            for (size_t l2 = 0; l2 < shape[2]; l2++) {
              for (size_t l3 = 0; l3 < shape[3]; l3++) {
                for (size_t l4 = 0; l4 < shape[4]; l4++) {
                  std::vector<size_t> k;
                  k.push_back(l0);
                  k.push_back(l1);
                  k.push_back(l2);
                  k.push_back(l3);
                  k.push_back(l4);
                  for (int axis : _axis) {
                    k.erase(k.begin() + axis);
                  }
                  input(l0, l1, l2, l3, l4) = result(k);
                }
              }
            }
          }
        }
        result = input.template asType<To>();
      }
    }
    /*
    if keepdims
    for every element in the orignal matrix, set it equal to the corresponding
    element in the reduced return input.template asType<To>();
    */
    // NOTE(review): ranks > 5 fall through every branch and return the
    // zero-initialized `result` -- confirm whether that should error out.
    return result;
  }
};
} // namespace dnnc
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class tensorSanityTest(unittest.TestCase):
    """Sanity checks for dnnc tensor creation, dtypes, shapes and slicing."""

    def setUp(self):
        return

    # compare two tensors element by element.
    def isEqual(self, name, brnz, gold):
        """Fail the running test (and dump both tensors) if brnz != gold.

        Returns the number of mismatching elements (0 when equal).
        """
        noEqual = (brnz != gold).sum()
        if noEqual:
            print(name)
            print("\t\tgold :\n", gold)
            print("\t\tbronz:\n", brnz)
            # BUG FIX: was `assert(name + "Failed,")`, which asserts a
            # non-empty string (always truthy) and so never failed.
            self.fail(name + " Failed")
        return noEqual

    # how to create tensors in different ways
    def test_create(self):
        # null tensor test
        a = dc.array(0)
        assert a.isnull() == True
        assert a.empty() == True

        # test assignment is shallow copy of memory
        b = a
        assert a.sameas(b) == True
        assert a.identifier() == b.identifier()

        # tensor without initialization
        a = dc.array(2, 3, 4, 5)
        assert a.length() == 120

        # tensor random initialization
        a = dc.random(2, 3, 4, 5)
        assert a.length() == 120

        # tensor without initialization
        a = dc.empty(2, 3, 4, 5)
        assert a.length() == 120

        # zero tensor
        # BUG FIX: np.int was deprecated in numpy 1.20 and removed in
        # 1.24; use the explicit np.int64 instead.
        a = dc.zeros(2, 3, 4, 5)
        assert np.array(list(a.data())).sum().astype(np.int64) == 0

        # one tensor
        a = dc.ones(2, 3, 4, 5)
        assert np.array(list(a.data())).sum().astype(np.int64) == 120

        # tensor from python list
        l1D = [1, 3, 5]
        a = dc.array(l1D).asTypeInt()
        np.testing.assert_equal(np.array(l1D), np.array(list(a.data())))

        # tensor from python list of lists
        l2D = [[1, 3, 5], [2, 4, 6]]
        a = dc.array(l2D).asTypeInt()
        assert a.rank() == 2
        assert a.shape() == (2, 3)
        np.testing.assert_equal(np.array(l2D).flatten(),
                                np.array(list(a.data())))

        # copy tensor (deep copy: distinct identifier)
        b = a.copy()
        assert a.sameas(b) == False
        assert a.identifier() != b.identifier()

        # arange
        a = dc.arange(10)
        assert a.length() == 10

        # add start and step
        a = dc.arange(10, 5, 3).asTypeInt()
        assert a.data() == (5, 8)

        # swap start and stop.
        a = dc.arange(5, 10, 3).asTypeInt()
        assert a.data() == (5, 8)

    # this test is now in SliceAndIndex.py
    '''
    # test data loading and index
    def test_data(self):

        # confirm type as class tuple.
        a=dc.zeros(2,3).asTypeInt()
        adata = a.data()
        assert type(adata) == type((1,))

        # load new data
        new_data_list = [10,11,12,13,14,15]
        a.load(dc.vectorInt(new_data_list))
        assert a[0] == 10

        # load one element with flat index
        a[0] = 777
        assert a[0] == 777

        # reshape, fetch and load with multi indices
        a=dc.arange(12).asTypeInt()
        a.reshape(dc.vectorSizeT([2,2,3]))
        assert a[0,1,1] == 4
        a[1,1,1] = 200
        assert a[1,1,1] == 200

        # negative test
        try :
            # This throws ValueError
            print(a[0,0,9,9,9])
        except ValueError as e:
            assert e
    '''

    # test data types
    def test_dtypes(self):
        a = dc.random(2, 3)
        assert a.dtype() == 'float'

        # transform datatype to int.
        aint = a.asTypeInt()
        assert aint.dtype() == 'int32_t'

        # transform datatype to double.
        adbl = a.asTypeDouble()
        assert adbl.dtype() == 'double'

        # transform datatype to bool.
        abool = a.asTypeBool()
        assert abool.dtype() == 'bool'

    # test shapes
    def test_shapes(self):
        # test shape tuple
        shape1 = dc.vectorSizeT([2, 3, 4, 5])
        shape2 = dc.vectorSizeT([5, 4, 3, 2])
        a = dc.random(2, 3, 4, 5).asTypeInt()
        assert a.rank() == 4
        assert a.shape() == (2, 3, 4, 5)

        # reshape to new dimensions
        a.reshape(shape2)
        assert a.shape() == (5, 4, 3, 2)

        # return a new tensor with flattened dimensions.
        b = a.flatten()
        assert a.shape() == (5, 4, 3, 2)
        assert b.shape() == (120,)

        # flatten the same tensor
        a.flatteninplace()
        assert a.shape() == (120,)

        shape3 = dc.vectorSizeT([8, 15, 1, 1])
        # new shape
        a.reshape(shape3)
        # confirm new shape
        assert a.shape() == (8, 15, 1, 1)

        # dnnc method to reshape.
        a = dc.random(2, 3, 4, 5)
        dc.reshape(a, (8, 15, 1, 1))
        assert a.shape() == (8, 15, 1, 1)
        dc.reshape(a, (120,))
        assert a.shape() == (120,)

    def test_Slices(self):
        # 2D: slice(axis, start[, step, stop]) against hand-computed golds
        a = dc.arange(12).reshape([3, 4]).asTypeInt()
        self.isEqual("0, 0:\n", a.slice(0, 0), a)
        self.isEqual("0, 1:\n", a.slice(0, 1), dc.array([[4, 5, 6, 7], [8, 9, 10, 11]]).asTypeInt())
        self.isEqual("0, 2:\n", a.slice(0, 2), dc.array([[8, 9, 10, 11]]).asTypeInt())
        self.isEqual("1, 0:\n", a.slice(1, 0), a)
        self.isEqual("1, 1:\n", a.slice(1, 1), dc.array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]).asTypeInt())
        self.isEqual("1, 2:\n", a.slice(1, 2), dc.array([[2, 3], [6, 7], [10, 11]]).asTypeInt())
        self.isEqual("1, 3:\n", a.slice(1, 3), dc.array([[3], [7], [11]]).asTypeInt())
        self.isEqual("1, 1, -1, 2:\n", a.slice(1, 1, -1, 2), dc.array([[1, 3], [5, 7], [9, 11]]).asTypeInt())
        self.isEqual("1, 0, -1, 3:\n", a.slice(1, 0, -1, 3), dc.array([[0, 3], [4, 7], [8, 11]]).asTypeInt())

        # 3D: per-outer-index comparison of each sliced plane
        b = dc.arange(24).reshape([2, 3, 4]).asTypeInt()
        self.isEqual("0, 0:\n", b.slice(0, 0), b)
        self.isEqual("0, 1:\n", b.slice(0, 1), dc.array([[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]).asTypeInt())
        self.isEqual("1, 0:\n", b.slice(1, 0), b)
        self.isEqual("1, 1:\n", b.slice(1, 1)[0], dc.array([[4, 5, 6, 7], [8, 9, 10, 11]]).asTypeInt())
        self.isEqual("1, 1:\n", b.slice(1, 1)[1], dc.array([[16, 17, 18, 19], [20, 21, 22, 23]]).asTypeInt())
        self.isEqual("2, 0:\n", b.slice(2, 0), b)
        self.isEqual("2, 1:\n", b.slice(2, 1)[0], dc.array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]).asTypeInt())
        self.isEqual("2, 1:\n", b.slice(2, 1)[1], dc.array([[13, 14, 15], [17, 18, 19], [21, 22, 23]]).asTypeInt())
        self.isEqual("2, 2:\n", b.slice(2, 2)[0], dc.array([[2, 3], [6, 7], [10, 11]]).asTypeInt())
        self.isEqual("2, 2:\n", b.slice(2, 2)[1], dc.array([[14, 15], [18, 19], [22, 23]]).asTypeInt())
        self.isEqual("2, 3:\n", b.slice(2, 3)[0], dc.array([[3], [7], [11]]).asTypeInt())
        self.isEqual("2, 3:\n", b.slice(2, 3)[1], dc.array([[15], [19], [23]]).asTypeInt())
        self.isEqual("1, 0, -1, 2:\n", b.slice(1, 0, -1, 2)[0], dc.array([[0, 1, 2, 3], [8, 9, 10, 11]]).asTypeInt())
        self.isEqual("1, 0, -1, 2:\n", b.slice(1, 0, -1, 2)[1], dc.array([[12, 13, 14, 15], [20, 21, 22, 23]]).asTypeInt())
        self.isEqual("1, 0, -1, 3:\n", b.slice(1, 0, -1, 3)[0], dc.array([[0, 1, 2, 3]]).asTypeInt())
        self.isEqual("1, 0, -1, 3:\n", b.slice(1, 0, -1, 3)[1], dc.array([[12, 13, 14, 15]]).asTypeInt())

    def tearDown(self):
        return "test finished"
# Run the sanity suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
<file_sep># Copyright 2018 The DNNC Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
#
# CORE must be first, all others depend on CORE
# Build with up to 8 parallel jobs by default.
MAKEFLAGS += -j8

# Sub-directories are built in dependency order: CORE first, then
# OPERATORS, GRAPH and finally CODEGEN.
all: CORE OPERATORS GRAPH CODEGEN

# Empty target used only to force the sub-directory targets to run.
FORCE:

CORE: FORCE
	$(MAKE) -C core

OPERATORS: CORE
	$(MAKE) -C operators

GRAPH: OPERATORS
	$(MAKE) -C graph

CODEGEN: GRAPH
	$(MAKE) -C codegen

# Remove build artifacts from every sub-directory.
clean:
	$(MAKE) -C core clean
	$(MAKE) -C operators clean
	$(MAKE) -C graph clean
	$(MAKE) -C codegen clean

.PHONY: print_vars
print_vars:
	@echo
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class SliceTest(unittest.TestCase):
    """Cross-check dc.slice against numpy basic slicing for ranks 1-4.

    The four rank-specific tests were near-identical copies; they now
    share the private helpers below. The former local `len` variables
    shadowed the builtin and have been renamed.
    """

    def setUp(self):
        self.len = 4096
        self.np_a = np.random.randn(self.len).astype(np.float32)
        self.dc_a = dc.array(list(self.np_a))

    def _random_slice(self, n):
        """Return random (start, end, step) guaranteed valid for axis size n."""
        start = np.random.randint(n // 2)
        end = np.random.randint(n // 2 + 1, n)
        step = np.random.randint(1, n - 1)
        return start, end, step

    def _check_slice(self, rank, axis_size, dc_shape):
        """Slice a rank-`rank` view of the fixture with random per-axis
        start/end/step and assert dc.slice matches numpy.

        dc_shape is the hard-coded shape workaround for the dc.reshape
        bug with computed shapes (see bug.py); None means no reshape
        (rank-1 case).
        """
        if dc_shape is None:
            np_a = self.np_a
            dc_a = self.dc_a
        else:
            np_a = np.reshape(self.np_a, (axis_size,) * rank)
            # dc.reshape(self.dc_a, (axis_size,)*rank) -- BUG (reported
            # in bug.py); for now pass the literal shape as a workaround.
            dc_a = dc.reshape(self.dc_a, dc_shape)

        params = [self._random_slice(axis_size) for _ in range(rank)]

        # pack per-axis parameters into dnnc int tensors
        dc_start = dc.array(rank).asTypeInt()
        dc_end = dc.array(rank).asTypeInt()
        dc_axes = dc.array(rank).asTypeInt()
        dc_step = dc.array(rank).asTypeInt()
        for axis, (start, end, step) in enumerate(params):
            dc_start[axis] = start
            dc_end[axis] = end
            dc_axes[axis] = axis
            dc_step[axis] = step

        npr = np_a[tuple(slice(s, e, st) for (s, e, st) in params)]
        dcr = dc.slice(dc_a, dc_start, dc_end, dc_axes, dc_step)
        np.testing.assert_allclose(npr.flatten(),
                                   np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3, atol=1e-3)

    def test_Slice1D(self):
        self._check_slice(1, self.len, None)

    def test_Slice2D(self):
        self._check_slice(2, int(np.sqrt(self.len)), (64, 64))

    def test_Slice3D(self):
        self._check_slice(3, int(np.cbrt(self.len)), (16, 16, 16))

    def test_Slice4D(self):
        self._check_slice(4, int(np.sqrt(np.sqrt(self.len))), (8, 8, 8, 8))

    def tearDown(self):
        return "test finished"
# Run the slice tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "core/flag.h"
#include "core/iterator.h"
#include "graph/irData.h"
#include "operators/baseOperator.h"
#include <vector>
namespace dnnc {
// Forward declaration
class graph;
/*! Named parameter (constant/weight) of the network: a name, a shape and
 *  its type-tagged literal data. Immutable after construction. */
class dnnParameters {
protected:
  std::string _name;             // parameter (tensor) name
  std::vector<DIMENSION> _shape; // tensor dimensions
  irTypeData _value;             // type-tagged payload

public:
  dnnParameters(std::string n, std::vector<DIMENSION> shape, irTypeData &v)
      : _name(n), _shape(shape), _value(v) {}
  std::string name() { return _name; }
  std::vector<DIMENSION> shape() { return _shape; }
  irTypeData data() { return _value; }
};
/*! A single (name, value) attribute attached to a graph node; values are
 *  static, i.e. they do not flow along graph edges. */
class nodeAttribute {
protected:
  OPATTR _name = attr_invalid; // attribute identifier
  irTypeData _value;           // type-tagged attribute payload

public:
  nodeAttribute(OPATTR n, irTypeData &v) : _name(n), _value(v) {}
  OPATTR name() { return _name; }
  irTypeData data() { return _value; }
};
/*! Graph node
* */
/*! Abstract graph node: shared name and traversal-mark bookkeeping for
 *  every node kind (INPUT, OUTPUT, OPERATOR). */
class node {
protected:
  std::string _name;
  flag _properties; // used in other algorithms like DFS, TopoSort etc.

public:
  enum NODE_TYPE { NONE = 0, INPUT, OUTPUT, OPERATOR };
  enum NODE_PROP { NOT_VISITED = 0, VISITING, VISITED };

  // properties methods: set/clear/test traversal marks on this node.
  void mark(short prop) { _properties.set(prop); }
  void unmark(short prop) { _properties.reset(prop); }
  bool isMarked(short prop) const { return _properties.get(prop); }
  void resetMarks() { _properties = 0; }

  node(std::string n = "") : _name(n) {}
  void setName(std::string n) { _name = n; }
  std::string name() { return _name; }

  // defaults for the base class; subclasses override with real values
  virtual OPCODE symbol() { return opInvalid; }
  virtual NODE_TYPE ntype() { return NONE; }
  virtual DNNC_DataType dtype() { return NOTYPE; }
  // collect the neighbouring nodes feeding into / fed by this node
  virtual bool inputNodes(graph &g, std::vector<node *> &nodes) = 0;
  virtual bool outputNodes(graph &g, std::vector<node *> &nodes) = 0;
  virtual ~node() {}
};
/*! Compute Graph IO Node.
* It represents place holder unit (for inputs and outputs)
* represented as memory buffer in underlying hardware.
* */
/*! Compute-graph IO node: a placeholder (graph input or output) with a
 *  fixed data type and shape, backed by a memory buffer in hardware. */
class ioNode : public node {
protected:
  NODE_TYPE _ntype;           // INPUT or OUTPUT
  DNNC_DataType _dtype;       // element data type of the buffer
  std::vector<size_t> _shape; // buffer dimensions
  ioNode() = delete;
  // shared neighbour lookup; `input` selects the edge direction
  bool getNodes(graph &, std::vector<node *> &, bool input = true);

public:
  ioNode(std::string n, NODE_TYPE nt, DNNC_DataType dt, std::vector<size_t> shp)
      : node(n), _ntype(nt), _dtype(dt), _shape(shp) {}
  DNNC_DataType dtype() override { return _dtype; }
  NODE_TYPE ntype() override { return _ntype; }
  std::vector<size_t> shape() { return _shape; }
  // NOTE(review): the boolean passed to getNodes here is the opposite of
  // opNode's usage (outputNodes passes true, inputNodes false) -- confirm
  // the flag's sense is intentional for IO nodes.
  bool outputNodes(graph &g, std::vector<node *> &nodes) override {
    return getNodes(g, nodes, true);
  };
  bool inputNodes(graph &g, std::vector<node *> &nodes) override {
    return getNodes(g, nodes, false);
  }
};
/*! Compute Graph operator Node.
* It represents basic computational unit (like adder/multiplier)
* available in underlying hardware.
* */
/*! Compute-graph operator node: a basic computational unit (like an
 *  adder/multiplier) available in the underlying hardware. */
class opNode : public node {
protected:
  OPCODE _symbol = opInvalid;    /*!< operator aka symbol */
  DNNC_DataType _dtype = NOTYPE; /*<! inferred data type for outputs */
  std::vector<std::string>
      _inputs; /*!< inputs, i.e. tensors coming to this node */
  std::vector<std::string>
      _outputs; /*!< outputs, i.e tensor going from this node */
  std::vector<nodeAttribute> _attributes; /*!< attributes of the node, i.e.
                                      values that don't flow in and out */
  // shared neighbour lookup; `input` selects the edge direction
  bool getNodes(graph &, std::vector<node *> &, bool input = true);
  opNode() = delete; /*!< default constructor not allowed */

public:
  opNode(OPCODE sym, std::string n = "") : node(n), _symbol(sym) {}
  ~opNode() {}
  void addInput(std::string n) { _inputs.push_back(n); }
  void addOutput(std::string n) { _outputs.push_back(n); }
  /*! Attach an attribute. For Constant nodes, the `value` attribute also
   *  fixes the node's output dtype; tensor-int widens to INT64 and
   *  tensor-float to DOUBLE (presumably the widest compatible types --
   *  TODO confirm the promotion rationale). */
  void addAttribute(nodeAttribute &attr) {
    _attributes.push_back(attr);
    if (_symbol == opConstant && attr.name() == attr_value) {
      IR_DataType data_type = attr.data().type();
      if (data_type == IR_DataType::TENSOR_BOOL)
        _dtype = BOOL;
      else if (data_type == IR_DataType::TENSOR_INT)
        _dtype = INT64;
      else if (data_type == IR_DataType::TENSOR_FLOAT)
        _dtype = DOUBLE;
      else
        _dtype = static_cast<dnnc::DNNC_DataType>(data_type);
    }
  }
  OPCODE symbol() override { return _symbol; }
  NODE_TYPE ntype() override { return OPERATOR; }
  /*!< inferred dtype. */
  void dtype(DNNC_DataType dtype) { _dtype = dtype; }
  DNNC_DataType dtype() override { return _dtype; }
  std::vector<std::string> inputs() { return _inputs; }
  std::vector<std::string> outputs() { return _outputs; }
  bool inputNodes(graph &g, std::vector<node *> &nodes) override {
    return getNodes(g, nodes, true);
  };
  bool outputNodes(graph &g, std::vector<node *> &nodes) override {
    return getNodes(g, nodes, false);
  }
#ifndef SWIGPYTHON
  // Position-based iterator over the node's attributes (C++ only; hidden
  // from the SWIG python bindings).
  struct attr_iter {
    int pos;
    inline void next(const opNode *ref) { ++pos; }
    inline void begin(const opNode *ref) { pos = 0; }
    inline void end(const opNode *ref) { pos = ref->_attributes.size(); }
    inline nodeAttribute &get(opNode *ref) { return ref->_attributes[pos]; }
    inline const nodeAttribute &get(const opNode *ref) {
      return ref->_attributes[pos];
    }
    inline bool cmp(const attr_iter &s) const { return pos != s.pos; }
  };
  SETUP_ITERATORS(opNode, nodeAttribute &, attr_iter)
#endif
};
} // namespace dnnc
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissions and limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "core/datatypes.h"
#include "core/iterator.h"
#include "core/macros.h"
#include "core/placeHolder.h"
#ifndef SWIGPYTHON
#include <fstream>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <stdlib.h> // malloc, free
#endif
#include <assert.h>
#include <random>
namespace dnnc {
/*! Fill strategy used by tensor::init() when allocating storage. */
enum INIT_TYPE { INIT_NONE = 0, INIT_RANDOM, INIT_ZERO, INIT_ONE, INIT_VALUE };
// Forward declarations so tensor and baseOperator can reference each other.
template <class To, class Ti1, class Ti2> class baseOperator;
template <typename T> class tensor;
// Sentinel returned by operations that cannot produce a result.
// NOTE(review): 'static' at namespace scope in a header gives each
// translation unit its own NULL_TENSOR instance — presumably intended; verify.
template <typename T> static tensor<T> NULL_TENSOR;
// Tensor with arbitrary rank.
/*! Dense tensor of arbitrary rank with shared, reference-counted storage.
 *
 * Copy construction and assignment are SHALLOW: copies share the same flat
 * buffer (_mem_layout) and bump the shared counter (*_ref); the last owner
 * frees both. Use copy() for a deep copy.
 *
 * Fixes vs. previous revision (behavior otherwise unchanged):
 *  - operator= now releases the left-hand side's buffer before adopting the
 *    right-hand side's (previously both the buffer and its ref counter leaked
 *    on every assignment to a non-empty tensor).
 *  - the destructor now frees _ref even when _mem_layout is null (rank-0 /
 *    null tensors allocate a counter in init_ref() but no buffer, so the
 *    counter leaked).
 */
template <typename T> class tensor : public placeHolder<T> {
  template <class To, class Ti1, class Ti2> friend class baseOperator;

protected:
  //////////// protected members /////////////////
  size_t *_ref;   //! reference count, shared by all shallow copies
  T *_mem_layout; //! flat memory layout of the tensor. TODO: add tiling.

  //////////// protected methods /////////////////
  /// \brief Allocate a flat buffer of sz elements; logs on failure.
  T *getMemory(size_t sz) {
    _mem_layout = sz ? static_cast<T *>(malloc(sizeof(T) * sz)) : 0x0;
    if ((sz && !_mem_layout))
      SPDLOG_ERROR("Could not allocate memory for tensor.");
    return _mem_layout;
  }
  /// \brief initialize reference count of the tensor to 1
  void init_ref() {
    _ref = static_cast<size_t *>(malloc(sizeof(size_t)));
    if (!_ref) {
      SPDLOG_ERROR("Could not allocate memory for tensor ref.");
    } else {
      *_ref = 1; // init reference count.
    }
  }
  /// \brief Drop this object's claim on the shared state; free counter and
  /// buffer when the last owner lets go. Used by ~tensor() and operator=.
  void release_ref() {
    if (_ref && --(*_ref) == 0) {
      free(_ref);
      if (_mem_layout)
        free(_mem_layout);
    }
    _ref = 0x0;
    _mem_layout = 0x0;
  }
  /// \brief only constructors call init method. Argument type
  /// INIT_TYPE initializes _mem_layout to 0, 1, random or uninitialized.
  void init(INIT_TYPE fill = INIT_NONE, T val = 0) {
    init_ref();
    DIMENSION msize = this->length(); // flat array length
    if (this->rank() == 0 || msize == 0)
      return; // null tensor: counter exists, no buffer.
    _mem_layout = getMemory(msize);
    // initilize with normal distribution.
    if (fill == INIT_NONE) {
      ; // no fill
    } else if (fill == INIT_RANDOM) {
      std::default_random_engine generator;
      std::normal_distribution<double> distribution(127.5, 20.0);
      for (size_t i = 0; i < msize; i++)
        _mem_layout[i] = static_cast<T>(distribution(generator));
    } else if (fill == INIT_ZERO) {
      for (size_t i = 0; i < msize; i++)
        _mem_layout[i] = static_cast<T>(0);
    } else if (fill == INIT_ONE) {
      for (size_t i = 0; i < msize; i++)
        _mem_layout[i] = static_cast<T>(1);
    } else if (fill == INIT_VALUE) {
      for (size_t i = 0; i < msize; i++)
        _mem_layout[i] = val;
    }
  }

public:
  /// tensor constructor with arbitrary dimension up to 4.
  /// CTOR 1: Use this contructor with shape vector and to initialize
  /// with zero, one, or random numbers.
  tensor(std::vector<DIMENSION> dimn = std::vector<DIMENSION>(),
         std::string n = "", INIT_TYPE fill = INIT_NONE, T init_val = 0)
      : placeHolder<T>(n, dimn), _ref(0x0), _mem_layout(0x0) {
    init(fill, init_val);
  }
  /// USE WITH CAUTION.
  /// CTOR 2: Use this contructor to handover the externally allocated and
  /// initialized memory to tensor.
  /// This object will own the memory passed to it and free it in the
  /// destructor. This exists solely for performance reasons.
  tensor(T *data, std::vector<DIMENSION> dimn, std::string n = "")
      : placeHolder<T>(n, dimn), _ref(0x0), _mem_layout(data) {
    init_ref();
  }
  /*
  #ifdef SWIGPYTHON
    // CTOR 3: This constructs a tensor of one element and shape(1)
    //         created for convenience for implicit conversion of
    //         numbers to tensor in python interface only.
    tensor(T num, std::string n = "")
        : placeHolder<T>({1}, n), _ref(0x0), _mem_layout(0x0) {
      init(INIT_VALUE, num);
    }
  #endif
  */
  /// \brief Copy Constructor (shallow: shares buffer, bumps ref count).
  tensor(tensor const &other) : placeHolder<T>(other) {
    _ref = other._ref;
    _mem_layout = other._mem_layout;
    (*_ref)++;
  }
  /// \brief Assignment Operator (shallow: shares other's buffer).
  tensor &operator=(tensor const &other) {
    // Gracefully handle self assignment
    if (this == &other)
      return *this;
    // Release the buffer currently held; previously this was skipped and the
    // old buffer plus its ref counter leaked on every assignment.
    release_ref();
    _ref = other._ref;
    _mem_layout = other._mem_layout;
    (*_ref)++;
    placeHolder<T>::operator=(other);
    return *this;
  }
  /// \brief Comparison Operator: element-wise equality; shared-buffer
  /// tensors compare by shape alone.
  bool operator==(const tensor &other) {
    if (_mem_layout == other._mem_layout) {
      return this->shape() == other.shape() ? true : false;
    }
    if (this->shape() != other.shape())
      return false;
    for (size_t i = 0; i < this->length(); i++) {
      if (!(_mem_layout[i] == other._mem_layout[i]))
        return false;
    }
    return true;
  }
  /// \brief Decrement the shared count; the last owner frees counter AND
  /// buffer (counter is freed even for null tensors without a buffer).
  ~tensor() { release_ref(); }
  /// \brief A tensor is truthy iff it has a non-zero rank.
  operator bool() const { return this->rank() != 0; }
  /*! Description: creates a deep copy of the tensor
   *  Returns: new tensor*/
  tensor<T> copy() const {
    if (isnull())
      return NULL_TENSOR<T>;
    tensor<T> result(this->shape(), this->name());
    result.load(_mem_layout);
    return result;
  }
  /// \brief invert the sign of each element of the tensor
  tensor<T> negate() const {
    tensor<T> result = copy();
    DIMENSION msize = result.length(); // flat array length
    for (size_t i = 0; i < msize; i++)
      result._mem_layout[i] = -_mem_layout[i];
    return result;
  }
  /// \brief absolute value of each element of the tensor
  tensor<T> absolute() const {
    tensor<T> result = copy();
    DIMENSION msize = result.length(); // flat array length
    for (size_t i = 0; i < msize; i++)
      result._mem_layout[i] =
          _mem_layout[i] < static_cast<T>(0) ? -_mem_layout[i] : _mem_layout[i];
    return result;
  }
  /// \brief identifier of the tensor, derived from the buffer address so all
  /// shallow copies share one id.
  /// NOTE(review): the 0xfff offset's purpose is not evident here — confirm.
  size_t identifier() const {
    return reinterpret_cast<size_t>(_mem_layout - 0xfff);
  }
  /// \brief check if this tensor has same id as other.
  bool sameas(const tensor<T> &other) const {
    return identifier() == other.identifier();
  }
  /// \brief Return copy of the tensor, cast to a specified type.
  template <typename newT> tensor<newT> asType() {
    // if (typeid(T) == typeid(newT))
    //  return *dynamic_cast<tensor<newT*>>(this);
    tensor<newT> result(this->shape(), this->name());
    DIMENSION msize = this->length(); // flat array length
    for (size_t i = 0; i < msize; i++)
      result[i] = _mem_layout[i];
    return result;
  }
  /// \brief return a copy of the tensor, cast to double
  tensor<double> asTypeDouble() { return asType<double>(); }
  /// \brief return a copy of the tensor, cast to float
  tensor<float> asTypeFloat() { return asType<float>(); }
  /// \brief return a copy of the tensor, cast to int
  tensor<int> asTypeInt() { return asType<int>(); }
  /// \brief return a copy of the tensor, cast to uint8
  tensor<uint8_t> asTypeUint8() { return asType<uint8_t>(); }
  /// \brief return a copy of the tensor, cast to long
  tensor<long> asTypeLong() { return asType<long>(); }
  /// \brief return a copy of the tensor, cast to unsigned long
  tensor<size_t> asTypeULong() { return asType<size_t>(); }
  /// \brief return a copy of the tensor, cast to bool
  tensor<bool> asTypeBool() { return asType<bool>(); }
  /// \brief load single data into tensor.
  inline void load(const T &data, std::vector<size_t> indices) {
    this->operator()(indices) = data;
  }
  inline void load(const T &data, size_t i, size_t j = 0, size_t k = 0,
                   size_t l = 0, size_t m = 0) {
    this->operator()(i, j, k, l, m) = data;
  }
  /// \brief load 1D vector into the tensor (copies up to min(len, size)).
  void load(std::vector<T> data) {
    size_t sz = this->length();
    for (size_t i = 0; i < data.size() && i < sz; i++)
      _mem_layout[i] = data[i];
  }
  /// \brief UNSAFE METHOD. Load flat array into the tensor.
  /// UNSAFE because data size MUST be at least as large as tensor length,
  /// otherwise, it'll lead to crash.
  /// USE WITH CAUTION.
  void load(const T *data) {
    if (!data || isnull())
      return;
    for (size_t i = 0; i < this->length(); i++)
      _mem_layout[i] = data[i];
  }
#ifdef TENSOR_CONVERSION
  /*<! implicit conversion operators are sometimes
   * needed for compiler. They are a last resort to
   * complete compilation without error.
   * They'll eventually be removed, since they cause
   * performance overhead.
   * */
  operator tensor<float>() { return asType<float>(); }
  operator tensor<uint8_t>() { return asType<uint8_t>(); }
  operator tensor<int8_t>() { return asType<int8_t>(); }
  operator tensor<uint16_t>() { return asType<uint16_t>(); }
  operator tensor<int16_t>() { return asType<int16_t>(); }
  operator tensor<int32_t>() { return asType<int32_t>(); }
  operator tensor<int64_t>() { return asType<int64_t>(); }
  operator tensor<bool>() { return asType<bool>(); }
  operator tensor<double>() { return asType<double>(); }
  operator tensor<uint32_t>() { return asType<uint32_t>(); }
  operator tensor<uint64_t>() { return asType<uint64_t>(); }
#endif
#ifndef SWIGPYTHON
  /*<! convert tensor to a vector */
  operator std::vector<T>() const {
    std::vector<T> vec;
    for (size_t i = 0; i < this->length(); i++)
      vec.push_back(_mem_layout[i]);
    return vec;
  }
  friend std::ostream &operator<<(std::ostream &os, const tensor<T> &t) {
    if (t.name().size())
      os << t.name() << "=";
    os << t.to_string();
    return os;
  }
  /*! Stateful cursor over the flat buffer, consumed by SETUP_ITERATORS. */
  struct it_state {
    size_t pos; // current flat index
    inline void next(const tensor<T> *ref) { ++pos; }
    inline void begin(const tensor<T> *ref) { pos = 0; }
    inline void end(const tensor<T> *ref) { pos = ref->length(); }
    inline T &get(tensor<T> *ref) { return ref->_mem_layout[pos]; }
    inline const float &get(const tensor<T> *ref) {
      return ref->_mem_layout[pos];
    }
    inline bool cmp(const it_state &s) const { return pos != s.pos; }
  };
  SETUP_ITERATORS(tensor<T>, T &, it_state);
#endif
  /// \brief Render the tensor (up to rank 4) as a bracketed string,
  /// truncating each axis after max_el elements with "...".
  std::string to_string(const size_t max_el = DNNC_TENSOR_MAX_EL) const {
    std::string str = this->name().size() ? this->name() + "=" : "";
    if (this->rank() == 0) {
      str += "null tensor";
    } else if (this->rank() == 1 && this->length() == 1) {
      str += std::to_string(_mem_layout[0]);
    } else if (this->rank() == 1) {
      str += "[";
      size_t i = 0;
      for (i = 0; i < this->length() && i < max_el; i++)
        str += (i ? " " : "") + std::to_string(_mem_layout[i]);
      str += i == max_el ? "...]" : "]";
    } else if (this->rank() == 2) {
      str += "[";
      size_t i = 0;
      for (i = 0; i < this->shape()[0] && i < max_el; i++) {
        str += i ? "\n [" : "[";
        size_t j = 0;
        for (j = 0; j < this->shape()[1] && j < max_el; j++) {
          size_t index = i * this->shape()[1] + j;
          str += (j ? " " : "") + std::to_string(_mem_layout[index]);
        }
        str += (j == max_el ? "...]" : "]");
      }
      str += i == max_el ? "...]" : "]";
    } else if (this->rank() == 3) {
      str += "[";
      size_t i = 0;
      for (i = 0; i < this->shape()[0] && i < max_el; i++) {
        str += i ? "\n [" : "[";
        size_t j = 0;
        for (j = 0; j < this->shape()[1] && j < max_el; j++) {
          str += j ? "\n  [" : "[";
          size_t k = 0;
          for (k = 0; k < this->shape()[2] && k < max_el; k++) {
            size_t index = i * this->shape()[1] * this->shape()[2] +
                           j * this->shape()[2] + k;
            str += (k ? " " : "") + std::to_string(_mem_layout[index]);
          }
          str += k == max_el ? "...]" : "]";
        }
        str += j == max_el ? "...]" : "]";
      }
      str += i == max_el ? "...]" : "]";
    } else if (this->rank() == 4) {
      str += "[";
      size_t i = 0;
      for (i = 0; i < this->shape()[0] && i < max_el; i++) {
        str += i ? "\n [" : "[";
        size_t j = 0;
        for (j = 0; j < this->shape()[1] && j < max_el; j++) {
          str += j ? "\n  [" : "[";
          size_t k = 0;
          for (k = 0; k < this->shape()[2] && k < max_el; k++) {
            str += k ? "\n   [" : "[";
            size_t l = 0;
            for (l = 0; l < this->shape()[3] && l < max_el; l++) {
              size_t index =
                  i * this->shape()[1] * this->shape()[2] * this->shape()[3] +
                  j * this->shape()[2] * this->shape()[3] +
                  k * this->shape()[3] + l;
              str += (l ? " " : "") + std::to_string(_mem_layout[index]);
            }
            str += l == max_el ? "...]" : "]";
          }
          str += k == max_el ? "...]" : "]";
        }
        str += j == max_el ? "...]" : "]";
      }
      str += i == max_el ? "...]" : "]";
    }
    return str;
  }
  /// \brief return 1D flat array
  const std::vector<T> data() const {
    return isnull() ? std::vector<T>()
                    : std::vector<T>(_mem_layout, _mem_layout + this->length());
  }
  // public methods
  /// \brief Change the shape in place (length must be preserved);
  /// returns a shallow copy of this tensor either way.
  tensor<T> reshape(std::vector<size_t> &new_shape) {
    DIMENSION newLength = new_shape.size() ? 1 : 0;
    for (size_t i = 0; i < new_shape.size(); i++)
      newLength = newLength * new_shape[i];
    // ensure new_shape is same length as original length
    if (newLength == 0)
      SPDLOG_ERROR("new reshape length can't be zero.");
    if (newLength != this->length()) {
      std::string msg = "new reshape length " + std::to_string(newLength) +
                        " does not match tensor\'s original length " +
                        std::to_string(this->length()) + ".\n";
      SPDLOG_ERROR(msg.c_str());
    } else {
      this->_shape = new_shape;
    }
    return *this;
  }
  /// \brief Deep copy with shape collapsed to rank 1.
  tensor<T> flatten() {
    if (isnull())
      return NULL_TENSOR<T>;
    std::vector<size_t> new_shape;
    new_shape.push_back(this->length());
    tensor<T> result(new_shape, this->name());
    result.load(_mem_layout);
    return result;
  }
  bool isnull() const { return _mem_layout == 0x0; }
  // flat index, unsafe method
  T &operator[](const INDEX &index) const {
    if (isnull() || index >= this->length()) {
      std::string msg = "illegal tensor index " + std::to_string(index);
      msg += isnull() ? "on null tensor." : ".";
      throw std::out_of_range(msg.c_str());
      assert(msg.c_str()); // crash and burn.
    }
    return _mem_layout[index];
  }
  /// \brief Row-major multi-dimensional indexing (fast paths for rank 1-5,
  /// generic stride computation otherwise).
  T &operator()(std::vector<INDEX> &indices) const {
    INDEX index = 0;
    if (this->rank() == 5 && indices.size() == 5) {
      index =
          indices[0] * this->shape()[1] * this->shape()[2] * this->shape()[3] *
              this->shape()[4] +
          indices[1] * this->shape()[2] * this->shape()[3] * this->shape()[4] +
          indices[2] * this->shape()[3] * this->shape()[4] +
          indices[3] * this->shape()[4] + indices[4];
    } else if (this->rank() == 4 && indices.size() == 4) {
      index =
          indices[0] * this->shape()[1] * this->shape()[2] * this->shape()[3] +
          indices[1] * this->shape()[2] * this->shape()[3] +
          indices[2] * this->shape()[3] + indices[3];
    } else if (this->rank() == 3 && indices.size() == 3) {
      index = indices[0] * this->shape()[1] * this->shape()[2] +
              indices[1] * this->shape()[2] + indices[2];
    } else if (this->rank() == 2 && indices.size() == 2) {
      index = indices[0] * this->shape()[1] + indices[1];
    } else if (this->rank() == 1 && indices.size() == 1) {
      index = indices[0];
    } else {
      if (indices.size() > this->rank()) {
        std::string msg = "number of supplied indices " +
                          std::to_string(indices.size()) +
                          " is more than rank of the tensor " +
                          std::to_string(this->rank()) + ".\n";
        SPDLOG_ERROR(msg.c_str());
      }
      // generic case: accumulate row-major strides for the indices given.
      for (size_t i = 0; i < indices.size() && i < this->rank(); i++) {
        DIMENSION dsz = 1;
        for (size_t j = i + 1; j < this->rank(); j++)
          dsz *= this->shape()[j];
        index += indices[i] * dsz;
      }
    }
    return this->operator[](index);
  }
  T &operator()(const INDEX x = 0, const INDEX y = 0, const INDEX z = 0,
                const INDEX w = 0, const INDEX u = 0) const {
    std::vector<INDEX> indices;
    indices.push_back(x);
    if (this->rank() > 1)
      indices.push_back(y);
    if (this->rank() > 2)
      indices.push_back(z);
    if (this->rank() > 3)
      indices.push_back(w);
    if (this->rank() > 4)
      indices.push_back(u);
    return this->operator()(indices);
  }
  // slice the tensor across a dimension 'dimn' starting with index 'start' by
  // strides of 'incr' ending at 'end'.
  // NOTE(review): only ranks 1-3 are copied below; for rank >= 4 the returned
  // tensor's memory is never filled — confirm callers never slice rank-4+.
  tensor<T> slice(DIMENSION dimn = 0, INDEX start = 0, int end = -1,
                  int incr = 1) {
    std::vector<DIMENSION> shape = this->shape();
    DIMENSION rank = this->rank();
    if (dimn >= rank)
      return NULL_TENSOR<T>;
    std::vector<DIMENSION> new_shape = shape;
    end = end == -1 ? shape[dimn] - 1 : end;
    if (start > end)
      return NULL_TENSOR<T>;
    new_shape[dimn] = std::floor((end - start) / incr) + 1;
    tensor<T> new_tensor(new_shape);
    INDEX ti = 0;
    if (rank == 1) {
      for (size_t i = start; i <= end; i = i + incr)
        new_tensor._mem_layout[ti++] = _mem_layout[i];
    } else if (rank == 2) {
      for (size_t i = (dimn == 0 ? start : 0);
           i <= (dimn == 0 ? end : shape[0] - 1);
           i = i + (dimn == 0 ? incr : 1)) {
        size_t row_start = i * shape[1];
        for (size_t j = (dimn == 1 ? start : 0);
             j <= (dimn == 1 ? end : shape[1] - 1);
             j = j + (dimn == 1 ? incr : 1)) {
          new_tensor._mem_layout[ti++] = _mem_layout[row_start + j];
        }
      }
    } else if (rank == 3) {
      for (size_t i = (dimn == 0 ? start : 0);
           i <= (dimn == 0 ? end : shape[0] - 1);
           i = i + (dimn == 0 ? incr : 1)) {
        size_t i_start = i * shape[1] * shape[2];
        for (size_t j = (dimn == 1 ? start : 0);
             j <= (dimn == 1 ? end : shape[1] - 1);
             j = j + (dimn == 1 ? incr : 1)) {
          size_t j_start = j * shape[2];
          for (size_t k = (dimn == 2 ? start : 0);
               k <= (dimn == 2 ? end : shape[2] - 1);
               k = k + (dimn == 2 ? incr : 1)) {
            new_tensor._mem_layout[ti++] = _mem_layout[i_start + j_start + k];
          }
        }
      }
    }
    return new_tensor;
  }
  bool empty() { return this->length() == 0; }
  /// \brief name of the element type (delegates to dTypeName).
  std::string dtype() {
    T dummy = 0;
    return dTypeName(dummy);
  }
  std::string to_proto() // return proto string
  {
    // TODO: serialization is not implemented yet; returns an empty string.
    std::string tensor_proto = "";
    return tensor_proto;
  }
  /// \brief smallest element (asserts on a null tensor).
  T min() const {
    assert(_mem_layout);
    T result = _mem_layout[0];
    for (size_t i = 1; i < this->length(); i++)
      result = result > _mem_layout[i] ? _mem_layout[i] : result;
    return result;
  }
  /// \brief largest element (asserts on a null tensor).
  T max() const {
    assert(_mem_layout);
    T result = _mem_layout[0];
    for (size_t i = 1; i < this->length(); i++)
      result = result < _mem_layout[i] ? _mem_layout[i] : result;
    return result;
  }
  /// \brief sum of all elements.
  T sum() const {
    T result = 0;
    for (size_t i = 0; i < this->length(); i++)
      result += _mem_layout[i];
    return result;
  }
#ifndef SWIGPYTHON
  // \brief load tensor data from file.
  // returns true on success, false for failure.
  // tokens equal to the number of tensor-length
  // are read, rest are discarded.
  // if numbers are less, it returns false.
  // if tokes aren't numbers, it will fail.
  bool read(std::string fileName) {
    std::fstream fs;
    fs.open(fileName, std::ios::in);
    // parameter file could not be opened.
    if (!fs.is_open() || fs.fail()) {
      throw std::runtime_error("Could not open file " + fileName + ".");
      return false;
    }
    std::cout << "reading file " << fileName
              << (this->name().size() ? " for tensor " + this->name() : "")
              << ".\n";
    size_t len = this->length();
    std::string typedStr;
    T fNum;
    size_t index = 0;
    while (std::getline(fs, typedStr)) {
      std::stringstream linestream(typedStr);
      while (linestream >> fNum) {
        if (index >= len) {
          break;
        }
        _mem_layout[index++] = fNum;
      }
      if (index >= len) {
        break;
      }
    }
    fs.close();
    // parameter file did not have parametres equal to tensor length.
    if (index < len) {
      return false;
    }
    return true;
  }
  /// \brief write the flat contents to a file, space separated.
  bool write(std::string fileName) {
    std::fstream fs;
    fs.open(fileName, std::ios::out);
    // parameter file could not be opened.
    if (!fs.is_open() || fs.fail()) {
      throw std::runtime_error("Could not open file " + fileName +
                               " to write.");
      return false;
    }
    std::cout << "writing file " << fileName
              << (this->name().size() ? " for tensor " + this->name() : "")
              << ".\n";
    for (size_t i = 0; i < this->length(); i++) {
      fs << (i ? " " : "") << _mem_layout[i];
    }
    return true;
  }
#endif
}; // class tensor
#ifndef SWIGPYTHON
// \brief return a tensor of type T from a text file with
// name _name present in bundle directory
// full of T-type elements.
template <typename T>
tensor<T> readTensor(placeHolder<T> ph, std::string bundleDir = "") {
  // Fix: the original ternary was inverted — it joined the separator only
  // when bundleDir was EMPTY. Prepend the directory when one is supplied.
  std::string fileName =
      bundleDir.empty() ? ph.name() : bundleDir + FS_PATH_SEPARATOR + ph.name();
  std::fstream fs;
  fs.open(fileName, std::ios::in);
  // parameter file could not be opened.
  if (!fs.is_open() || fs.fail()) {
    return NULL_TENSOR<T>;
  }
  std::string typedStr;
  T fNum;
  T *data = new T[ph.length()];
  size_t index = 0;
  // Read whitespace-separated numbers, at most ph.length() of them.
  while (std::getline(fs, typedStr)) {
    std::stringstream linestream(typedStr);
    while (linestream >> fNum) {
      if (index >= ph.length()) {
        break;
      }
      data[index++] = fNum;
    }
    if (index >= ph.length()) {
      break;
    }
  }
  fs.close();
  // parameter file did not have parametres equal to tensor length.
  if (index < ph.length()) {
    delete[] data;
    return NULL_TENSOR<T>;
  }
  tensor<T> newTensor(ph.shape(), ph.name());
  newTensor.load(data); // load() copies element-wise into the tensor's buffer
  delete[] data;        // fix: the success path previously leaked this buffer
  return newTensor;
}
/*! Strict weak ordering on tensors by buffer identity; intended as the
 * Compare type of ordered containers (std::map / std::set of tensors). */
template <typename T> struct tensorCmp {
  // const-qualified so the comparator can be invoked through a const
  // container member, as the standard's Compare requirements expect.
  bool operator()(const tensor<T> &lhs, const tensor<T> &rhs) const {
    return lhs.identifier() < rhs.identifier();
  }
};
#endif
} // namespace dnnc
<file_sep># How to create parser testcases
This video covers the procedure for making a testcase for the parser. The parser is used to convert from .onnx to .sym, a custom representation of the graph.
[<img src="https://img.youtube.com/vi/yZ2kue4s_b0/maxresdefault.jpg" width="100%">](https://youtu.be/yZ2kue4s_b0)
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
def temp_gemm(np_a, np_b, np_c, alpha, beta, transA, transB):
    """NumPy reference GEMM: ``alpha * op(A) @ op(B) + beta * C``.

    ``op(X)`` is ``X.T`` when the matching trans flag equals 1, else ``X``.
    """
    lhs = np_a.T if transA == 1 else np_a
    rhs = np_b.T if transB == 1 else np_b
    return alpha * np.dot(lhs, rhs) + beta * np_c
class GemmTest(unittest.TestCase):
    """Compare dc.gemm against the NumPy reference (temp_gemm) on 2-D
    float32/double inputs, over all four (transA, transB) combinations.

    The eight original test methods shared an identical body; it now lives
    in the _run_case helper so each case is a single, readable call.
    """

    def setUp(self):
        # 48 elements reshape to 8x6 or 6x8; 64 elements reshape to 8x8.
        self.len_a_b = 48
        self.len_c = 64
        self.alpha = 0.5
        self.beta = 0.5
        self.np_float_a = np.random.randn(self.len_a_b).astype(np.float32)
        self.np_float_b = np.random.randn(self.len_a_b).astype(np.float32)
        self.np_float_c = np.random.randn(self.len_c).astype(np.float32)
        self.dc_float_a = dc.array(list(self.np_float_a))
        self.dc_float_b = dc.array(list(self.np_float_b))
        self.dc_float_c = dc.array(list(self.np_float_c))
        self.np_double_a = np.random.randn(self.len_a_b).astype(np.double)
        self.np_double_b = np.random.randn(self.len_a_b).astype(np.double)
        self.np_double_c = np.random.randn(self.len_c).astype(np.double)
        self.dc_double_a = dc.array(list(self.np_double_a))
        self.dc_double_b = dc.array(list(self.np_double_b))
        self.dc_double_c = dc.array(list(self.np_double_c))

    def _run_case(self, prefix, np_dtype, shape_a, shape_b, transA, transB):
        """Reshape the fixtures, run reference and dc.gemm, assert closeness.

        prefix selects the fixture family ('float' or 'double'); C is always
        (8, 8) because op(A) (8x6) @ op(B) (6x8) yields an 8x8 result.
        """
        shape_c = (8, 8)
        np_a = np.reshape(getattr(self, "np_%s_a" % prefix), shape_a)
        np_b = np.reshape(getattr(self, "np_%s_b" % prefix), shape_b)
        np_c = np.reshape(getattr(self, "np_%s_c" % prefix), shape_c)
        dc_a = dc.reshape(getattr(self, "dc_%s_a" % prefix), shape_a)
        dc_b = dc.reshape(getattr(self, "dc_%s_b" % prefix), shape_b)
        dc_c = dc.reshape(getattr(self, "dc_%s_c" % prefix), shape_c)
        npr = temp_gemm(np_a, np_b, np_c, self.alpha, self.beta, transA, transB)
        dcr = dc.gemm(dc_a, dc_b, dc_c, self.alpha, self.beta, transA, transB)
        np.testing.assert_allclose(npr.flatten(),
                                   np.array(dcr.data()).astype(np_dtype),
                                   rtol=1e-3, atol=1e-3)

    # Gemm by default takes 2D tensor only
    def test_Gemm2D_float_1(self):
        self._run_case("float", np.float32, (8, 6), (6, 8), 0, 0)

    def test_Gemm2D_float_2(self):
        self._run_case("float", np.float32, (8, 6), (8, 6), 0, 1)

    def test_Gemm2D_float_3(self):
        self._run_case("float", np.float32, (6, 8), (6, 8), 1, 0)

    def test_Gemm2D_float_4(self):
        self._run_case("float", np.float32, (6, 8), (8, 6), 1, 1)

    def test_Gemm2D_double_1(self):
        self._run_case("double", np.double, (8, 6), (6, 8), 0, 0)

    def test_Gemm2D_double_2(self):
        self._run_case("double", np.double, (8, 6), (8, 6), 0, 1)

    def test_Gemm2D_double_3(self):
        self._run_case("double", np.double, (6, 8), (6, 8), 1, 0)

    def test_Gemm2D_double_4(self):
        self._run_case("double", np.double, (6, 8), (8, 6), 1, 1)

    def tearDown(self):
        # Return value is ignored by unittest; kept for parity with the
        # original suite.
        return "test finished"
# Run the suite when this file is executed directly (e.g. `python <file>.py`).
if __name__ == '__main__':
    unittest.main()
<file_sep>// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2017 <NAME> <<EMAIL>>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
// Verify numext::abs for scalar type T: fixed points at -1/0/1, then
// randomized checks that abs is even, non-negative, and abs2-consistent.
template<typename T>
void check_abs() {
  typedef typename NumTraits<T>::Real Real;

  if (NumTraits<T>::IsSigned)
    VERIFY_IS_EQUAL(numext::abs(-T(1)), T(1));
  VERIFY_IS_EQUAL(numext::abs(T(0)), T(0));
  VERIFY_IS_EQUAL(numext::abs(T(1)), T(1));

  const int trials = g_repeat * 100;
  for (int trial = 0; trial < trials; ++trial) {
    T sample = internal::random<T>();
    if (!internal::is_same<T, bool>::value)
      sample = sample / Real(2); // shrink to stay clear of overflow on negate/square
    if (NumTraits<T>::IsSigned) {
      VERIFY_IS_EQUAL(numext::abs(sample), numext::abs(-sample));
      VERIFY(numext::abs(-sample) >= Real(0));
    }
    VERIFY(numext::abs(sample) >= Real(0));
    VERIFY_IS_APPROX(numext::abs2(sample), numext::abs2(numext::abs(sample)));
  }
}
// Entry point: run check_abs for every scalar type exercised here, including
// half, long double and complex.
void test_numext() {
  CALL_SUBTEST( check_abs<bool>() );
  CALL_SUBTEST( check_abs<signed char>() );
  CALL_SUBTEST( check_abs<unsigned char>() );
  CALL_SUBTEST( check_abs<short>() );
  CALL_SUBTEST( check_abs<unsigned short>() );
  CALL_SUBTEST( check_abs<int>() );
  CALL_SUBTEST( check_abs<unsigned int>() );
  CALL_SUBTEST( check_abs<long>() );
  CALL_SUBTEST( check_abs<unsigned long>() );
  CALL_SUBTEST( check_abs<half>() );
  CALL_SUBTEST( check_abs<float>() );
  CALL_SUBTEST( check_abs<double>() );
  CALL_SUBTEST( check_abs<long double>() );
  CALL_SUBTEST( check_abs<std::complex<float> >() );
  CALL_SUBTEST( check_abs<std::complex<double> >() );
}
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
from tensor_interface_helper import *
import ast, argparse
def check_comments(s):
    """Report any unmatched C-style comment delimiter left in *s*.

    Checks for a stray "*/" first, then a stray "/*".  On the first hit it
    prints roughly 100 characters of context around the offender and
    returns 1; returns 0 when the text is clean.
    """
    for marker in ("*/", "/*"):
        pos = s.find(marker)
        if pos != -1:
            # Fix: clamp the slice start at 0.  The original pos-100 could go
            # negative when the marker sits near the start of the string,
            # which silently printed context from the END of the string.
            start = max(0, pos - 100)
            print("\nUnmatched '" + marker + "' comment syntax at:\n\n" + s[start:pos + 100])
            return 1
    return 0
def remove_comments(s):
    """Strip "/* ... */" comment blocks from *s* via string replacement.

    The iteration count is fixed up-front from the number of "/*" openers;
    each pass removes the first remaining block.  The slice end is
    find("*/\\n") + 4, which also consumes the character that follows the
    closing newline (kept to match the established output).
    """
    opener_count = s.count("/*")
    for _ in range(opener_count):
        start = s.find("/*")
        end = s.find("*/\n") + 4  # +4 drops the "\n" after "*/" as well
        block = s[start:end]
        s = s.replace(block, "")
    return s
def get_dtype_dictionary(s):
    """Extract and evaluate the dict literal that follows "dtype=" in *s*.

    Whitespace is removed first so "dtype =" matches too; the literal is
    cut at the first "}" and re-terminated before ast.literal_eval.
    """
    compact = s.replace(" ", "")
    literal = compact.split("dtype=")[1].split("}")[0]
    literal = literal.replace("\n", "").replace("\t", "") + "}"
    return ast.literal_eval(literal)
def remove_dtype(s):
    """Delete the first "dtype = {...}" block from *s*.

    The block is located via its leading "\\tdtype" / " dtype" / "dtype"
    prefix (checked in that order) and extends through the "}" plus one
    trailing character.  Also collapses "} " to "}" first, as the
    original pipeline expects.
    """
    # End of the block: the "}" after the first "dtype", plus one char.
    close = s.find("}", s.find("dtype")) + 2
    if "\tdtype" in s:
        block = s[s.find("\tdtype"):close]
    elif " dtype" in s:
        block = s[s.find(" dtype"):close]
    else:
        block = s[s.find("dtype"):close]
    return s.replace("} ", "}").replace(block, "")
def get_swig_extern(dc_operator, s):
    """Turn a C++ definition into a SWIG "extern" declaration line.

    Takes everything before the first "{", breaks the line before the
    first occurrence of the operator name, and terminates with ";".
    """
    signature = s.split("{")[0]
    signature = signature.replace(dc_operator, "\\\n\t\t" + dc_operator, 1)
    return "\textern " + signature + ";\n"
def change_dtype(output, i):
    """Emit a C++ line that casts operand a (i == 1) or b (i == 2) to
    *output* via asType<>(); any other *i* yields the empty string."""
    if i == 1:
        return "\ttensor<" + output + "> " + output + "_a = a.asType<" + output + ">();\n"
    if i == 2:
        return "\ttensor<" + output + "> " + output + "_b = b.asType<" + output + ">();\n"
    return ""
def change_compute(s):
    """Rewrite "return op.compute(a, b);" in *s* to use the asType<>()-cast
    temporaries (<dtype>_a / <dtype>_b) inserted earlier.

    Raises if the number of asType<> casts is neither 1 nor 2.
    """
    dtype = s.split(".asType<")[1].split(">")[0]
    cast_count = s.count("asType<")
    old_call = "return op.compute(a, b);"
    if cast_count == 1:
        # Which operand was cast?  Look at the char before ".asType".
        operand = s[s.find(".asType") - 1:s.find(".asType")]
        if operand == "a":
            s = s.replace(old_call, "return op.compute(" + dtype + "_a, b);")
        elif operand == "b":
            s = s.replace(old_call, "return op.compute(a, " + dtype + "_b);")
    elif cast_count == 2:
        s = s.replace(old_call, "return op.compute(" + dtype + "_a, " + dtype + "_b);")
    else:
        raise Exception("asType() count is wrong, try again!")
    return s
def get_scalar(dc_operator, i):
s = ""
if i == 1:
s = '''tensor<output> dc_operator(tensor<input1> &a, input2 b) {
tensor<input2> tensor_b(std::vector<size_t>(1,1));
tensor_b.load(&b);
return dc_operator(a, tensor_b);
}
'''
if i == 2:
s = '''tensor<output> dc_operator(input1 a, tensor<input2> &b) {
tensor<input1> tensor_a(std::vector<size_t>(1,1));
tensor_a.load(&a);
return dc_operator(tensor_a, b);
}
'''
if i == 3:
s = '''output dc_operator(input1 a, input2 b) {
tensor<input1> tensor_a(std::vector<size_t>(1,1));
tensor<input2> tensor_b(std::vector<size_t>(1,1));
tensor_a.load(&a);
tensor_b.load(&b);
return dc_operator(tensor_a, tensor_b)[0];
}
'''
s = s.replace("dc_operator", dc_operator)
return s
def binary_operators(s, dtype_precedence_dict):
    """Expand the binary-operator template section of dnnc.api into C++
    source, SWIG extern declarations and Python operator glue.

    *s* holds three blank-line-separated parts:
      0: an ``operator_list = {...}`` literal mapping operator name ->
         (C++ header name, python operator name),
      1: a ``dtype = {...}`` literal mapping output type -> list of
         (input1, input2) type pairs,
      2: the C++ body template using placeholder tokens dc_operator /
         operator_header / input1 / input2 / input / output.

    Returns the tuple (cpp_file, swig_extern_file, py_file).
    """
    cpp_file = swig_extern_file = py_file = ""
    operator_list = ast.literal_eval(s.split("\n\n")[0].split("operator_list = ")[1])
    dtype = ast.literal_eval(s.split("\n\n")[1].split("dtype = ")[1])
    temp_content = s.split("\n\n")[2]
    for dc_operator, dc_operator_values in operator_list.items():
        # i == 0 generates the tensor-tensor overload; i == 1..3 generate
        # the (tensor, scalar), (scalar, tensor) and (scalar, scalar) ones.
        for i in range (4):
            if i==0:
                content = temp_content[:]
                operator_header, operator_python = dc_operator_values
                content = content.replace("dc_operator", dc_operator).replace("operator_header", operator_header)
                if "dtype" in content:
                    raise Exception("dtype block could not be removed, try again!")
                # overload_python_operator is provided by tensor_interface_helper.
                py_file += overload_python_operator(dc_operator, operator_python, dtype_precedence_dict, "binary")
                for output, input_2d in dtype.items():
                    # true_div only outputs in float
                    if (dc_operator == "true_div"):
                        output = "float"
                    # floor_div only outputs in int
                    if (dc_operator == "floor_div"):
                        output = "int"
                    # bitwise operators only outputs in int or bool
                    if ("bitwise" in dc_operator):
                        if (output != "bool"):
                            output = "int"
                    for input_1d in input_2d:
                        input1, input2 = input_1d
                        # Insert asType<>() casts right after the opening
                        # brace for operands whose type differs from output.
                        temp_typecast = ") {\n"
                        if (input1 != output):
                            temp_typecast += change_dtype(output,1)
                        if (input2 != output):
                            temp_typecast += change_dtype(output,2)
                        temp = content.replace("input1",input1).replace("input2",input2).replace("input",output).replace("output",output) + "\n\n"
                        temp = temp.replace(") {\n",temp_typecast)
                        if "asType" in temp:
                            temp = change_compute(temp)
                        cpp_file += temp.replace("\n","\n\t")
                        temp = get_swig_extern(dc_operator, temp)
                        swig_extern_file += temp
            if i>0 and i<4:
                content = get_scalar(dc_operator, i)
                for output, input_2d in dtype.items():
                    # true_div only outputs in float
                    if (dc_operator == "true_div"):
                        output = "float"
                    # floor_div only outputs in int
                    if (dc_operator == "floor_div"):
                        output = "int"
                    # bitwise operators only outputs in int or bool
                    if ("bitwise" in dc_operator):
                        if (output != "bool"):
                            output = "int"
                    for input_1d in input_2d:
                        input1, input2 = input_1d
                        temp = content.replace("input1",input1).replace("input2",input2).replace("output",output) + "\n"
                        cpp_file += temp.replace("\n","\n\t")
                        temp = get_swig_extern(dc_operator, temp)
                        swig_extern_file += temp
    return cpp_file, swig_extern_file, py_file
def logical_operators(s, dtype_precedence_dict):
    """Expand the logical-operator section of dnnc.api into C++ source and
    SWIG extern declarations.

    Same blank-line-separated layout as binary_operators(), except the
    operator table lives under operator_list['logical'].  py_file stays
    empty while the python-overload call below remains commented out.

    Returns the tuple (cpp_file, swig_extern_file, py_file).
    """
    cpp_file = swig_extern_file = py_file = ""
    operator_list = ast.literal_eval(s.split("\n\n")[0].split("operator_list = ")[1])
    dtype = ast.literal_eval(s.split("\n\n")[1].split("dtype = ")[1])
    temp_content = s.split("\n\n")[2]
    for dc_operator, dc_operator_values in operator_list['logical'].items():
        # i == 0: tensor-tensor overload; i == 1..3: scalar wrappers.
        for i in range (4):
            if i==0:
                content = temp_content[:]
                operator_header, operator_python = dc_operator_values
                content = content.replace("dc_operator", dc_operator).replace("operator_header", operator_header)
                if "dtype" in content:
                    raise Exception("dtype block could not be removed, try again!")
                # py_file += overload_python_operator(dc_operator, operator_python, dtype_precedence_dict, "logical")
                for output, input_2d in dtype.items():
                    for input_1d in input_2d:
                        input1, input2 = input_1d
                        # Cast operands whose type differs from the output.
                        temp_typecast = ") {\n"
                        if (input1 != output):
                            temp_typecast += change_dtype(output,1)
                        if (input2 != output):
                            temp_typecast += change_dtype(output,2)
                        temp = content.replace("input1",input1).replace("input2",input2).replace("input",output).replace("output",output) + "\n\n"
                        temp = temp.replace(") {\n",temp_typecast)
                        if "asType" in temp:
                            temp = change_compute(temp)
                        cpp_file += temp.replace("\n","\n\t")
                        temp = get_swig_extern(dc_operator, temp)
                        swig_extern_file += temp
            if i>0 and i<4:
                content = get_scalar(dc_operator, i)
                for output, input_2d in dtype.items():
                    for input_1d in input_2d:
                        input1, input2 = input_1d
                        temp = content.replace("input1",input1).replace("input2",input2).replace("output",output) + "\n"
                        cpp_file += temp.replace("\n","\n\t")
                        temp = get_swig_extern(dc_operator, temp)
                        swig_extern_file += temp
    return cpp_file, swig_extern_file, py_file
def comparison_operators(s, dtype_precedence_dict):
    """Expand the comparison-operator section of dnnc.api (eq, lt, gt, ...)
    into C++ source, SWIG externs and Python operator glue.

    Same blank-line-separated layout as binary_operators(), with the
    operator table under operator_list['comparison'].  Unlike the binary
    case, operands are cast up to the higher-precedence INPUT type (per
    dtype_precedence_dict) rather than to the output type.

    Returns the tuple (cpp_file, swig_extern_file, py_file).
    """
    cpp_file = swig_extern_file = py_file = ""
    operator_list = ast.literal_eval(s.split("\n\n")[0].split("operator_list = ")[1])
    dtype = ast.literal_eval(s.split("\n\n")[1].split("dtype = ")[1])
    temp_content = s.split("\n\n")[2]
    for dc_operator, dc_operator_values in operator_list['comparison'].items():
        # i == 0: tensor-tensor overload; i == 1..3: scalar wrappers.
        for i in range (4):
            if i==0:
                content = temp_content[:]
                operator_header, operator_python = dc_operator_values
                content = content.replace("dc_operator", dc_operator).replace("operator_header", operator_header)
                if "dtype" in content:
                    raise Exception("dtype block could not be removed, try again!")
                py_file += overload_python_operator(dc_operator, operator_python, dtype_precedence_dict, "comparison")
                for output, input_2d in dtype.items():
                    for input_1d in input_2d:
                        input1, input2 = input_1d
                        temp_typecast = ") {\n"
                        # NOTE(review): "input" shadows the builtin; kept as-is.
                        input = ""
                        if (input1 != input2):
                            # Cast the lower-precedence operand up.
                            if (dtype_precedence_dict[input1] > dtype_precedence_dict[input2]):
                                input = input1
                                temp_typecast += change_dtype(input,2)
                            elif (dtype_precedence_dict[input1] < dtype_precedence_dict[input2]):
                                input = input2
                                temp_typecast += change_dtype(input,1)
                            else:
                                raise Exception("different datatypes can't have same precedence, try again!")
                        else:
                            input = input1
                        temp = content.replace("input1",input1).replace("input2",input2).replace("input",input).replace("output",output) + "\n\n"
                        temp = temp.replace(") {\n",temp_typecast)
                        if "asType" in temp:
                            temp = change_compute(temp)
                        cpp_file += temp.replace("\n","\n\t")
                        temp = get_swig_extern(dc_operator, temp)
                        swig_extern_file += temp
            if i>0 and i<4:
                content = get_scalar(dc_operator, i)
                for output, input_2d in dtype.items():
                    for input_1d in input_2d:
                        input1, input2 = input_1d
                        temp = content.replace("input1",input1).replace("input2",input2).replace("output",output) + "\n"
                        cpp_file += temp.replace("\n","\n\t")
                        temp = get_swig_extern(dc_operator, temp)
                        swig_extern_file += temp
    return cpp_file, swig_extern_file, py_file
def normal_operators(s):
    """Expand the remaining (unary / miscellaneous) operator templates.

    Each blank-line-separated chunk of *s* is one C++ function template.
    Chunks with no <output>/<input> placeholders are emitted verbatim;
    otherwise the chunk's "dtype = {...}" table drives the expansion.
    Each dict value may be a str (single "input" token), a list (numbered
    input1/input2/... tokens) or a tuple of either of those.

    Returns the tuple (cpp_file, swig_extern_file).
    """
    cpp_file = swig_extern_file = ""
    for content in s.split("\n\n"):
        # The function name sits between "> " (end of return type) and "(".
        dc_operator = content.split("> ")[1].split("(")[0]
        if "<output>" not in content and "<input>" not in content:
            if "dtype" in content:
                raise Exception("input output not mentioned, try again!")
            temp = content + "\n\n"
            cpp_file += temp.replace("\n","\n\t")
            temp = get_swig_extern(dc_operator, temp)
            swig_extern_file += temp
            continue
        if "dtype" not in content:
            raise Exception("dtype not mentioned, try again!")
        dtype = get_dtype_dictionary(content)
        content = remove_dtype(content)
        if "dtype" in content:
            raise Exception("dtype block could not be removed, try again!")
        # NOTE(review): "input" shadows the builtin; kept as-is here.
        for output, input in dtype.items():
            temp = ""
            if type(input) is tuple:
                for input_items in input:
                    if type(input_items) is list:
                        temp = content.replace("output",output) + "\n\n"
                        for i, input_item in enumerate(input_items):
                            temp = temp.replace("input"+str(i+1), input_item)
                    elif type(input_items) is str:
                        temp = content.replace("input",input_items).replace("output",output) + "\n\n"
                    cpp_file += temp.replace("\n","\n\t")
                    temp = get_swig_extern(dc_operator, temp)
                    swig_extern_file += temp
            else:
                if type(input) is list:
                    temp = content.replace("output",output) + "\n\n"
                    for i, input_item in enumerate(input):
                        temp = temp.replace("input"+str(i+1), input_item)
                elif type(input) is str:
                    temp = content.replace("input",input).replace("output",output) + "\n\n"
                cpp_file += temp.replace("\n","\n\t")
                temp = get_swig_extern(dc_operator, temp)
                swig_extern_file += temp
    return cpp_file, swig_extern_file
def main():
    """Drive the generator: read dnnc.api, expand all operator templates,
    write dnnc_api.cpp and dnnc_swig_externs.h, and splice the generated
    python block into tensor.i between its marker comments.

    Returns 1 on any I/O or format error, None on success.
    """
    try:
        with open ( "dnnc.api" , "r") as f:
            print("Reading 'dnnc.api'")
            contents = f.read()
    except:
        # NOTE(review): bare except hides unexpected errors, not just a
        # missing file.
        print("'dnnc.api' not found !")
        return 1
    else:
        # Sections of dnnc.api are separated by a marker line.
        split_string = "\n<\\/>\n\n"
        split_position = contents.find(split_string,1)
        # Text before the first marker is the shared C++ prolog.
        cpp_file = contents[:split_position] + "\nnamespace dnnc {\n\n\t"
        swig_extern_file = contents.split("#include")[0] + "namespace dnnc {\n"
        py_file = "\n%pythoncode %{\n"
        contents = remove_comments(contents)
        if check_comments(contents):
            return 1
        parser = argparse.ArgumentParser(description="generate api for swig")
        parser.add_argument("-dev", "--developer", action="store_true", help="skip generating cpps for binary operators for faster development purposes")
        args = parser.parse_args()
        # slicing_indexing() is provided by tensor_interface_helper.
        py_file += slicing_indexing()
        if not args.developer:
            # Section 1 carries the dtype precedence table for implicit casts.
            dtype_precedence_dict = ast.literal_eval(contents[split_position:].split(split_string)[1].split("dtype_precedence_dict = ")[1])
            temp_cpp_file, temp_swig_extern_file, temp_py_file = binary_operators(contents[split_position:].split(split_string)[2][:-1], dtype_precedence_dict)
            cpp_file += temp_cpp_file
            swig_extern_file += temp_swig_extern_file
            py_file += temp_py_file
            # NOTE(review): logical_operators and comparison_operators both
            # read section index [3] -- confirm this sharing is intentional.
            temp_cpp_file, temp_swig_extern_file, temp_py_file = logical_operators(contents[split_position:].split(split_string)[3][:-1], dtype_precedence_dict)
            cpp_file += temp_cpp_file
            swig_extern_file += temp_swig_extern_file
            py_file += temp_py_file
            temp_cpp_file, temp_swig_extern_file, temp_py_file = comparison_operators(contents[split_position:].split(split_string)[3][:-1], dtype_precedence_dict)
            cpp_file += temp_cpp_file
            swig_extern_file += temp_swig_extern_file
            py_file += temp_py_file
            temp_cpp_file, temp_swig_extern_file = normal_operators(contents[split_position:].split(split_string)[4])
            cpp_file += temp_cpp_file
            swig_extern_file += temp_swig_extern_file
        cpp_file += "\n}\n"
        swig_extern_file += "}\n"
        py_file += "\n%}"
        with open ("dnnc_api.cpp" ,"w") as f:
            print("Saving 'dnnc_api.cpp'")
            f.write(cpp_file)
        with open ("dnnc_swig_externs.h" ,"w") as f:
            print("Saving 'dnnc_swig_externs.h'")
            f.write(swig_extern_file)
        try:
            with open ("tensor.i", "r") as f:
                s = f.read()
        except:
            print("'tensor.i' not found !")
            return 1
        else:
            # Splice the generated python block between the two marker
            # comments already present in tensor.i.
            comment = "// <\\/>"
            try:
                s = s.split(comment)[0] + comment + py_file + comment + s.split(comment)[2]
            except:
                print("'"+comment+"' not found 'tensor.i'!")
                return 1
            else:
                with open ("tensor.i" ,"w") as f:
                    print("Saving 'tensor.i'")
                    f.write(s)
# Run the generator only when executed as a script.
if __name__=="__main__":
    main()
<file_sep># High level architecture
<img width="600" alt="Architecture" src="../misc/dnnCompilerArch.jpg">
## Front End
This part of the design produces [LLVM 8.0 IR](https://releases.llvm.org/8.0.0/docs/LangRef.html) (Internal Representation)
without regard to accelerator specific optimization, which are handled in the back-end support for each device individually.
## ONNX support
ONNX has two official variants:
1. the neural-network-only **ONNX**, and
2. its classical Machine Learning extension, **ONNX-ML**.
**DNNC** supports neural-network-only ONNX with support for tensors as input and output types *(no support for sequences and maps)*
### References
1. [ONNX support](https://github.com/onnx/onnx/blob/master/docs/IR.md)
2. [LLVM docs](https://releases.llvm.org/8.0.0/docs)
<file_sep># Parser: x to symbol table
## Overview
The first module of the DNN Compiler encompasses conversion of the neural network graph in various formats (.caffe, .pb, .onnx, etc.) to a universal symbol table representation. This symbol table will be used to construct the LLVM IR graph, optimization, and compilation into various backend platforms.
In this section, we discuss parsing of neural network graphs into the symbol table. Thus far, we have added functionality for an ONNX parser, enabling conversion from an ONNX graph (.onnx) to our symbol table (.sym).
## Symbol Table Syntax
This section covers the syntax for our symbol table representation.
### Nodes
```
op_type
<name>
[ [<input-1> <input-1_attr>] [<input-2> <input-2_attr>] ... [<input-n> <input-n_attr>] ]
[ [<output-1> <output-1_attr>] [<output-1> <output-1_attr>] ... [<output-n> <output-n_attr>] ]
[ parameters ]
```
### Input/Output (ValueInfoProto)
```
<Input/Output>
<name>
[ <dimensions> ]
```
### Initializers
```
<Initializer>
<name>
[ <dims> ]
[ <data> ]
```
## Sample Symbol Table
This section provides a sample symbol table for a neural network involving the convolution operation.
```
Conv
""
[ ["0"] ["weight"] ["bias"] ]
[ ["3"] ]
Attr
"/dilations"
[ [1, 1] ]
Attr
"/group"
[ 1 ]
Attr
"/kernel_shape"
[ [3, 3] ]
Attr
"/pads"
[ [0, 0, 0, 0] ]
Attr
"/strides"
[ [1, 1] ]
Input
"0"
[ 1, 4, 5, 6 ]
Input
"weight"
[ 5, 4, 3, 3 ]
Input
"bias"
[ 5 ]
Output
"3"
[ 1, 5, 3, 4 ]
Initializer
"bias"
[ 5 ]
[ b"\x1e'\x9f=\xd0\xf67<'C\x04\xbe\xb2\x9e\xc7=gG&>" ]
Initializer
"weight"
[ 5, 4, 3, 3 ]
[ b'c9\x9f\xbd...\x86\x81\x94\xbd' ]
```
## ONNX Parser
### ONNX Operators
ONNX operators for which testcases have been generated: \
`add, sub, mul, div, cat, mm, addmm, neg, sqrt, tanh, sigmoid, mean, sum, prod, t, expand, transpose, view, split, squeeze, prelu, threshold, leaky_relu, glu, softmax, avg_pool2d, log_softmax, unfold, elu, concat, abs, index_select, pow, clamp, max, min, eq, gt, lt, ge, le, exp, sin, cos, tan, asin, acos, atan, permute, Conv, BatchNorm, MaxPool1d, MaxPool2d, MaxPool3d, Embedding, RNN, ConstantPad1d, ConstantPad2d, ConstantPad3d`
Testing incomplete:
```
FeatureDropout, Index, Expand, Unfold
```
## Parser Testing
### Testing Phase 1: Unit ONNX operator testcases
* For each operator in the list below (common PyTorch operators supported in ONNX): created a .onnx binary file and .txt file representing the onnx in human-readable format.
In order to test the ONNX parser and to ensure compatibility of the output symbol table with that of other parsers, we created sample .onnx files with graphs involving common unit operations. For example, we created a file `sigmoid.onnx` for the unit operation of element-wise sigmoid. To assist in the creation of the .onnx file, we first created the graph in PyTorch, then exported the graph to ONNX using `torch.onnx.export`. However, one drawback of this approach is that PyTorch's exporter supports only 62 of the 139 ONNX operators in the most recent version. [Link](https://pytorch.org/docs/stable/onnx.html#supported-operators) for supported ONNX operators in PyTorch.
Because there was significant redundancy in the creation of PyTorch graphs for various operators, we automated the process such that only a few characteristics of the operator need to be specified, such as operator name, output operation, etc. The output of the automated function includes the python file to create the binary .onnx and readable .txt file for the given operator. [Link](https://github.com/ai-techsystems/dnnCompiler/blob/master/test/pytorch-to-onnx/onnx_generator.py) for automated python file generator.
* Wrote a generator to create outputs for each individual operator; generator requires only a short string specifying details for the particular test case, and creates a python file that produces .onnx and .txt file.
Link to ONNX Unit Operator Testcases: https://github.com/ai-techsystems/dnnCompiler/tree/master/test/pytorch-to-onnx/testcases
Example py file, for the max operator:
```
import torch.onnx
import torch.nn as nn
import numpy as np
import onnx
onnx_filename = "./max/max.onnx"
text_filename = "./max/max.txt"
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
def forward(self, x):
output = torch.max(x)
return output
model = Net()
test_input = torch.full([3, 3], 4)
torch.onnx.export(model, test_input, onnx_filename)
with open(text_filename, 'w') as f:
model = onnx.load(onnx_filename)
f.write(str(model.graph))
operators = {}
operators['acos'] = {'output_string':'torch.acos(x)', 'test_input_string':'torch.randn(1, 5)'}
// ...
operators['Dropout'] = {'output_string': 'torch.nn.Dropout(p=0.3)', 'test_input_string': 'torch.full([4, 4], 3)', 'is_module': True}
//generate .py file
//run .py file → .onnx file, .txt file
```
### Testing Phase 2: Converting test cases to .sym.gold files
Using the [gold generator script](https://github.com/ai-techsystems/dnnCompiler/blob/master/test/gold_generator.py), we converted the testcases for unit operators to gold standard files (.sym.gold) for future testing.
Output generated by parser: https://github.com/ai-techsystems/dnnCompiler/tree/master/test/pytorch-to-onnx/gold_files
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
import math
class LRNTest(unittest.TestCase):
    """Compare dc.lrn against a NumPy reference implementation of LRN
    (local response normalization) on 3-D and 4-D inputs."""

    def setUp(self):
        self.len = 24
        self.np_a = np.random.randn(self.len).astype(np.float32)
        self.dc_a = dc.array(list(self.np_a))
        # Default LRN hyper-parameters; individual tests may override them.
        self.alpha = 0.0001
        self.beta = 0.75
        self.bias = 1.0
        self.size = 2

    def _reference_lrn(self, np_a):
        """NumPy reference: windowed sum of squares along axis 1 (channels),
        then the LRN normalization formula with the current parameters."""
        square_sum = np.zeros(np_a.shape).astype(np.float32)
        low = int(math.floor((self.size - 1) / 2))
        high = int(math.ceil((self.size - 1) / 2))
        for idx in np.ndindex(np_a.shape):
            c = idx[1]
            # NOTE(review): the upper clamp of 5 is hard-coded (it looks like
            # it should be the channel count); kept to match the original.
            window = idx[:1] + (slice(max(0, c - low), min(5, c + high + 1)),) + idx[2:]
            square_sum[idx] = sum(np_a[window] ** 2)
        return np_a / ((self.bias + (self.alpha / self.size) * square_sum) ** self.beta)

    def _check(self, npr, dcr):
        """Assert dc result matches the NumPy reference within tolerance."""
        np.testing.assert_allclose(npr.flatten(),
                                   np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3, atol=1e-3)

    def test_LRN3D_1(self):
        np_a = np.reshape(self.np_a, (2, 4, 3))
        dc_a = dc.reshape(self.dc_a, (2, 4, 3))
        self._check(self._reference_lrn(np_a),
                    dc.lrn(dc_a, self.size, self.alpha, self.beta, self.bias))

    def test_LRN3D_2(self):
        np_a = np.reshape(self.np_a, (2, 2, 6))
        dc_a = dc.reshape(self.dc_a, (2, 2, 6))
        self._check(self._reference_lrn(np_a),
                    dc.lrn(dc_a, self.size, self.alpha, self.beta, self.bias))

    def test_LRN3D_3(self):
        self.size = 3
        np_a = np.reshape(self.np_a, (2, 4, 3))
        dc_a = dc.reshape(self.dc_a, (2, 4, 3))
        self._check(self._reference_lrn(np_a),
                    dc.lrn(dc_a, self.size, self.alpha, self.beta, self.bias))

    def test_LRN3D_4(self):
        self.alpha = 0.0002
        self.beta = 0.5
        self.bias = 2.0
        self.size = 3
        np_a = np.reshape(self.np_a, (2, 2, 6))
        dc_a = dc.reshape(self.dc_a, (2, 2, 6))
        self._check(self._reference_lrn(np_a),
                    dc.lrn(dc_a, self.size, self.alpha, self.beta, self.bias))

    def test_LRN4D_1(self):
        np_a = np.reshape(self.np_a, (2, 2, 2, 3))
        dc_a = dc.reshape(self.dc_a, (2, 2, 2, 3))
        # Exercises dc.lrn's default alpha/beta/bias arguments.
        self._check(self._reference_lrn(np_a), dc.lrn(dc_a, self.size))

    def test_LRN4D_2(self):
        np_a = np.reshape(self.np_a, (2, 2, 1, 6))
        dc_a = dc.reshape(self.dc_a, (2, 2, 1, 6))
        self._check(self._reference_lrn(np_a), dc.lrn(dc_a, self.size))

    def test_LRN4D_3(self):
        self.alpha = 0.0002
        self.beta = 0.5
        self.bias = 2.0
        self.size = 3
        np_a = np.reshape(self.np_a, (2, 2, 2, 3))
        dc_a = dc.reshape(self.dc_a, (2, 2, 2, 3))
        self._check(self._reference_lrn(np_a),
                    dc.lrn(dc_a, self.size, self.alpha, self.beta, self.bias))

    def test_LRN4D_4(self):
        self.alpha = 0.0002
        self.beta = 0.5
        self.bias = 2.0
        self.size = 3
        np_a = np.reshape(self.np_a, (2, 2, 1, 6))
        dc_a = dc.reshape(self.dc_a, (2, 2, 1, 6))
        self._check(self._reference_lrn(np_a),
                    dc.lrn(dc_a, self.size, self.alpha, self.beta, self.bias))

    def tearDown(self):
        return "test finished"
# Run the LRN test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! Gather entries of the data tensor along an axis, addressed by an index
 *  tensor -- a duplicate of numpy.take(). */
template <typename To, typename Ti>
class Gather : public baseOperator<To, To, Ti> {
protected:
  int axis = 0; /*!< Which axis to gather on. Negative value means
                   counting dimensions from the back. Accepted
                   range is [-r, r-1] where r = rank(data). */
  // Gather attributes
public:
  Gather(std::string name = "opGather", int axis = 0)
      : baseOperator<To, To, Ti>(opGather, name) {
    this->axis = axis;
  }

  // Reads attr_axis into obj (widened from int to float by the
  // baseOperator attribute interface).
  bool getAttribute(OPATTR attrName, float &obj) override {
    if (attrName == attr_axis) {
      obj = axis;
      return true;
    }
    return false;
  }

  // Stores attr_axis (float truncated to int).
  bool setAttribute(OPATTR attrName, float obj) override {
    if (attrName == attr_axis) {
      axis = obj;
      return true;
    }
    return false;
  }

  // duplicate of numpy.take(), link:
  // "https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html"
  // NOTE(review): compute() is an unfinished stub -- the gather loops below
  // have an empty body, so after argument validation it currently returns
  // an unmodified copy of `a`.
  tensor<To> compute(tensor<To> &a /*!<[float,double]: ND tensor*/,
                     tensor<Ti> &indices /*!<[int]: ND tensor*/) {
    // Indices must be an integer tensor.
    if (!(this->template type_check<Ti, int>())) {
      SPDLOG_ERROR("Constrain axis tensor to integer type.");
      return NULL_TENSOR<To>;
    }
    if (a.rank() < 1) {
      SPDLOG_ERROR("Constrain input tensor rank greater than 0.");
      return NULL_TENSOR<To>;
    }
    if (axis < -a.rank() || axis > a.rank() - 1) {
      SPDLOG_ERROR("Constrain axis in range [-r,r-1] where r = rank(data)");
      return NULL_TENSOR<To>;
    }
    // Ni: dims before the axis, Nk: dims from the axis on, Nj: index shape.
    std::vector<size_t> Ni, Nj, Nk;
    tensor<To> result(a);
    for (int i = 0; i < axis; i++) {
      Ni.push_back(a.shape()[i]);
    }
    for (int i = axis; i < a.rank(); i++) {
      Nk.push_back(a.shape()[i]);
    }
    Nj = indices.shape();
    // NOTE(review): size(Ni) counts dimensions, not elements -- presumably a
    // placeholder for the real gather iteration; confirm before relying on it.
    for (int i = 0; i < size(Ni); i++) {
      for (int j = 0; j < size(Nj); j++) {
        for (int k = 0; k < size(Nk); k++) {
          // out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
          // result[Ni[i] + Nj[j] + Nk[k]] = a[Ni[i] + ]
        }
      }
    }
    return result;
  }
};
} // namespace dnnc
<file_sep>
# [Docker](https://www.docker.com/)
Read the official documentation **[here](https://docs.docker.com/)**
## Downloads
#### Windows 10
* **[Docker for Windows 10](https://docs.docker.com/v17.09/docker-for-windows/install/#download-docker-for-windows)**
#### Mac
* **[Docker for Mac](https://docs.docker.com/v17.09/docker-for-mac/install/#download-docker-for-mac)**
#### Ubuntu
```bash
sudo apt-get update && sudo apt-get install docker.io
```
#### Arch Linux/ Manjaro
```bash
sudo pacman -Syu docker
```
## Usage
```bash
git clone "https://github.com/ai-techsystems/dnnCompiler/"
```
#### Then depending upon your workflow, if you want to do a top level `make`
```bash
cd dnnCompiler
python buildDocker.py
```
#### If you want to run `make` from swig directory
```bash
cd dnnCompiler/swig
python buildDocker.py
```
## Explicit Usage
#### If you want to know the workflow:
* Go to the base directory
```bash
cd dnnCompiler
```
* You should be able to see the **[Dockerfile](../Dockerfile)**. It has the instructions to create an Ubuntu 18.04 image and install the required dependencies on top of that.
* Next you have to start docker service if not running already.
```bash
systemctl start docker
```
* Now to create the image from the **[Dockerfile](../Dockerfile)** run
```bash
sudo docker build -t dnnc .
```
* Now the image is created. But need to run the image as container that will execute your code. For that
```bash
sudo docker run -it dnnc /bin/bash -c "cd /dnnCompiler && make clean && make"
```
What this does is, runs the image as container, goes inside dnnCompiler directory, and runs `make` as normally you would.<file_sep>import tensorflow as tf
import keras
from tensorflow.keras.models import Model
import keras.backend as K
# Put Keras in inference mode before freezing (disables dropout etc.).
K.set_learning_phase(0)
def keras_to_pb(model, output_filename, output_node_names):
    """Freeze a Keras model into a TensorFlow GraphDef .pb file.

    Args:
        model: the Keras model to freeze.
        output_filename: path of the .pb file to write.
        output_node_names: list of output node names, or None to derive the
            output node from the model's last layer.

    Returns:
        (input_node_name, output_node_names) as recorded in the graph.
    """
    # Node names are tensor names with the ":0" output index stripped.
    input_name = model.layers[0].get_output_at(0).name.split(':')[0]
    if output_node_names is None:
        last_layer = model.layers[-1].get_output_at(0).name.split(':')[0]
        output_node_names = [last_layer]
    session = keras.backend.get_session()
    # Bake the current variable values into constants so the exported
    # graph is self-contained.
    frozen_graph = tf.graph_util.convert_variables_to_constants(
        session,
        session.graph_def,
        output_node_names)
    session.close()
    tf.train.write_graph(frozen_graph, '', output_filename, as_text=False)
    return input_name, output_node_names
# Load a pretrained model: ResNet-50 trained on ImageNet.
model = keras.applications.resnet.ResNet50(include_top=True, weights='imagenet', input_tensor=None, \
    input_shape=None, pooling=None, classes=1000)
# Convert the Keras model to a .pb file (output nodes default to the last layer).
in_tensor_name, out_tensor_names = keras_to_pb(model, "models/tf_model.pb", None)
# Next Steps
# 1. Once the model has been converted to pb format, use tf2onnx to convert it to onnx format and use deepC
#    % python -m tf2onnx.convert --input /Path/to/tf_model.pb --inputs input_1:0 --outputs probs/Softmax:0 --output tf_model.onnx
# 2. Use deepC to compile onnx model to deepC
#    % python deepC/scripts/onnx2exe.py tf_model.onnx
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#include "operators/Min.h"
using namespace dnnc;
using namespace Eigen;
#ifdef DNNC_MIN_TEST
#include <iostream>
// Smoke test for the element-wise Min operator on two fixed 1x12 operands.
int main() {
  float data1[12] = {0.521434, 0.57921106, -2.0741816, -0.34119776,
                     -1.0926818, -0.11949139, 0.47781935, -0.32272545,
                     -2.029931, -0.59571075, 0.00333933, -1.2404536};
  float data2[12] = {-0.72546995, -0.66292864, 0.03466121, 1.1469446,
                     0.62084216, 1.8464565, 2.229277, 0.6344861,
                     0.28158414, -0.9978712, 0.10784209, -0.4692914};
  tensor<float> fTensor1({1, 12});
  fTensor1.load(data1);
  tensor<float> fTensor2({1, 12});
  fTensor2.load(data2);
  // std::cout << fTensor1 << std::endl;
  // std::cout << fTensor2 << std::endl;
  // Min::compute takes a vector of inputs (the operator is variadic).
  std::vector<tensor<float>> vt;
  vt.push_back(fTensor1);
  vt.push_back(fTensor2);
  Min<float> m("localOpName");
  auto result = m.compute(vt);
  // Prints the element-wise minimum of the two operands.
  std::cout << result << "\n";
}
#endif
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "core/broadcast.h"
#include "operators/baseOperator.h"
#include <string>
using namespace Eigen;
namespace dnnc {
/*! This does element wise binary left shift and right shift operation of two
    given N D tensors of same size. This operator supports multidirectional
    (i.e., Numpy-style) broadcasting.*/
template <typename T> class BitShift : public baseOperator<T, T, T> {
protected:
  std::string direction = ""; /*!< Direction of BitShift: "LEFT" or "RIGHT". */

  // Eigen does not support bitshift
  // So binaryExpr is needed to work around that limitation.
  // https://stackoverflow.com/questions/29127497/bitwise-operations-in-eigen

  /*! Element wise Left-Shift-Function*/
  static T left_shift_func(T x, T y) { return (x << y); }
  /*! Element wise Right-Shift-Function*/
  static T right_shift_func(T x, T y) { return (x >> y); }

  // NOTE(review): the two eigenArray*Shift helpers below are not called by
  // compute(), which applies binaryExpr directly — they appear unused here.
  template <typename Scalar>
  inline DNNC_EIGEN_VECTOR_CTOR(Scalar)
      eigenArrayLeftShift(Map<DNNC_EIGEN_VECTOR_CTOR(Scalar)> &a,
                          Map<DNNC_EIGEN_VECTOR_CTOR(Scalar)> &b) {
    return (a.array().binaryExpr(b.array(), &left_shift_func));
  }
  template <typename Scalar>
  inline DNNC_EIGEN_VECTOR_CTOR(Scalar)
      eigenArrayRightShift(Map<DNNC_EIGEN_VECTOR_CTOR(Scalar)> &a,
                           Map<DNNC_EIGEN_VECTOR_CTOR(Scalar)> &b) {
    return (a.array().binaryExpr(b.array(), &right_shift_func));
  }

public:
  /*! \param name      operator instance name
      \param direction shift direction, must be "LEFT" or "RIGHT"
                       (validated in compute, not here) */
  BitShift(std::string name = "opBitShift", std::string direction = "")
      : baseOperator<T, T, T>(opBitShift, name) {
    this->direction = direction;
  }

  /*! Fetch the "direction" attribute; returns false for any other name. */
  bool getAttribute(OPATTR attrName, std::string &obj) override {
    if (attrName == attr_direction) {
      obj = direction;
      return true;
    }
    return false;
  }
  /*! Set the "direction" attribute; returns false for any other name. */
  bool setAttribute(OPATTR attrName, std::string obj) override {
    if (attrName == attr_direction) {
      direction = obj;
      return true;
    }
    return false;
  }

  tensor<T> compute(tensor<T> a /*!<[int]: ND tensor*/,
                    tensor<T> b /*!<[int]: ND tensor*/) override {
    // binaryBroadcastReShape reshapes a and b in place to a common shape;
    // if shapes still differ afterwards, broadcasting failed.
    std::vector<DIMENSION> resultShape = binaryBroadcastReShape(a, b);
    tensor<T> result(resultShape);

    if (a.shape() != b.shape()) {
      SPDLOG_ERROR("tensor dimenions not appropriate for BitShift operator.");
      return NULL_TENSOR<T>;
    }
    if ((direction != "LEFT") && (direction != "RIGHT")) {
      SPDLOG_ERROR("Specify direction to 'LEFT' or 'RIGHT'");
      return NULL_TENSOR<T>;
    }
    // ONNX constrains BitShift to integer tensors.
    if (!(this->template type_check<T, int>())) {
      SPDLOG_ERROR("Constrain input and output types to int tensors.");
      return NULL_TENSOR<T>;
    }

    DNNC_EIGEN_ARRAY_MAP(eigenVectorA, T, a);
    DNNC_EIGEN_ARRAY_MAP(eigenVectorB, T, b);

    DNNC_EIGEN_VECTOR_CTOR(T) eResult;

    // Element-wise shift via binaryExpr (Eigen has no native shift op).
    if (direction == "LEFT")
      eResult.array() = eigenVectorA.array().binaryExpr(eigenVectorB.array(),
                                                        &left_shift_func);
    else if (direction == "RIGHT")
      eResult.array() = eigenVectorA.array().binaryExpr(eigenVectorB.array(),
                                                        &right_shift_func);

    result.load(eResult.data());

    return result;
  }
  /*!<
  \return The output tensor of the same shape and type as input.
  */
};
} // namespace dnnc
<file_sep># Guide to profile your code using gprof
```
% cd dnnCompiler
% make PROF=Y; # build with profiling enabled so a run emits profiler data (gmon.out)
% cd <test>
% <dnnc-exe> test.py
% gprof `which <dnnc-exe>` gmon.out |& tee gprof.rpt
```
<file_sep>
Compile onnx model for your target machine
=======
Checkout [mnist.ir](mnist.ir)
## Step 1:
Generate intermediate code
```% onnx2cpp mnist.onnx```
## Step 2:
Optimize and compile
```% /usr/bin/clang++-8 -O3 mnist.cpp -I ../../../include/ -isystem ../../../packages/eigen-eigen-323c052e1731/ -o mnist.exe```
## Step 3:
Test run
```% ./mnist.exe```
## Step 4:
Run it in the loop
```% python demo.mnist.py```
More Info
=======
1. batch size 1
1. pytorch version 1.2
1. ONNX IR version 1.4
1. [Training colab notebook](https://colab.research.google.com/drive/1JTcR5A0dQ8y_TKy_DbqpMv1caWjaO_jL)
1. [Watch this video](https://youtu.be/BpFs83MU3HM)
[](https://youtu.be/BpFs83MU3HM)
<file_sep>import os, argparse
def main():
    """Build the dnnc Docker image and run a container on Linux/Mac or Windows.

    With ``-dev``/``--developer`` the container opens an interactive shell
    (no make); otherwise it runs the full ``make clean && make all`` build.
    """
    parser = argparse.ArgumentParser(description="run docker in any os")
    parser.add_argument("-dev", "--developer", action="store_true",
                        help="ssh inside docker container without running make")
    args = parser.parse_args()

    # Command the container runs in normal (non-developer) mode.
    # don't use single quotes inside command, always use duble quotes, similar problem listed below
    # https://stackoverflow.com/questions/24673698/unexpected-eof-while-looking-for-matching-while-using-sed
    build_cmd = '/bin/bash -c "cd /dnnCompiler && make clean && make all"'
    # Developer mode: just an interactive shell, no make.
    container_cmd = '/bin/bash' if args.developer else build_cmd

    # Mac and Linux
    if os.name == "posix":
        # Make sure the docker daemon is up.
        # BUGFIX: os.system() returns an exit status instead of raising on a
        # failing command, so the old try/except fallback could never fire;
        # check the status explicitly and unmask/start the service on failure.
        if os.system('systemctl start docker') != 0:
            os.system('systemctl unmask docker.service && systemctl unmask docker.socket && systemctl start docker.service')
        os.system('sudo docker build -t dnnc .')
        os.system('sudo docker run -it dnnc ' + container_cmd)
    # Windows
    elif os.name == "nt":
        os.system('docker build -t dnnc .')
        # BUGFIX: developer mode previously ran the full make here as well
        # (copy-paste of the normal branch); it now opens an interactive
        # shell, matching the posix developer behavior and the --help text.
        os.system('docker run -it dnnc ' + container_cmd)
if __name__ == "__main__":
main()<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
namespace dnnc {
/*!< A compact set of boolean flags packed into the bits of a short.
 * Bit i of _info holds flag i, so up to 15 independent flags fit into a
 * single 16-bit word — cheap to store, copy and compare. */
class flag {
protected:
  short _info; // one flag per bit; index 0 is the least-significant bit

public:
  flag() : _info(0){};
  flag(short info) : _info(info) {}
  /// True when bit `index` is currently set.
  bool get(short index) const { return (_info >> index) & 1; }
  /// Turn bit `index` on.
  void set(short index) { _info |= (1 << index); }
  /// Turn bit `index` off.
  void reset(short index) { _info &= ~(1 << index); }
  /// Union of two flag sets (bitwise OR of their words).
  flag operator|(const flag &rhs) const { return flag(_info | rhs._info); }
  /// Orders flag sets by their raw word value (e.g. for std::map keys).
  bool operator<(const flag &rhs) const { return _info < rhs._info; }
};
} // namespace dnnc
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class NegTest(unittest.TestCase):
    """Check dc.neg against numpy.negative on the same random data,
    reshaped to ranks 1 through 4."""

    def setUp(self):
        # 24 factors as 6*4, 2*4*3 and 2*2*2*3, so one flat buffer can be
        # reshaped to every rank exercised below.
        self.len = 24
        self.np_a = np.random.randn(self.len).astype(np.float32)
        self.dc_a = dc.array(list(self.np_a))

    def _check_neg(self, shape=None):
        """Negate the fixture with numpy and dnnc (optionally reshaped to
        `shape`) and compare the flattened results element-wise.

        Extracted from four previously copy-pasted test bodies that
        differed only in the reshape target.
        """
        np_a = self.np_a if shape is None else np.reshape(self.np_a, shape)
        dc_a = self.dc_a if shape is None else dc.reshape(self.dc_a, shape)
        npr = np.negative(np_a)
        dcr = dc.neg(dc_a)
        np.testing.assert_allclose(npr.flatten(),
                                   np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3, atol=1e-3)

    def test_Neg1D (self):
        self._check_neg()

    def test_Neg2D (self):
        self._check_neg((6, 4))

    def test_Neg3D (self):
        self._check_neg((2, 4, 3))

    def test_Neg4D (self):
        self._check_neg((2, 2, 2, 3))

    def tearDown(self):
        return "test finished"
if __name__ == '__main__':
unittest.main()
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "tensor.h"
#include <sstream>
#include <string>
// reference: https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
namespace dnnc {
/*! Compute the Numpy-style multidirectional broadcast shape of two tensors.
 * Returns the common shape both operands can be broadcast to, or an empty
 * vector (after logging an error) when the shapes are incompatible. */
template <typename T>
std::vector<DIMENSION> getTargetShape(const tensor<T> a, const tensor<T> b) {
  bool mismatch = false;
  std::vector<DIMENSION> targetShape;
  DIMENSION aNumDims = a.shape().size();
  DIMENSION bNumDims = b.shape().size();
  if (a.shape() == b.shape()) {
    // identical shapes: nothing to compute
    targetShape = a.shape();
  } else if (bNumDims >= aNumDims) {
    // b has at least as many dims: prepend b's leading dims, then match
    // the trailing dims pairwise (a dim of 1 broadcasts to the other).
    DIMENSION i;
    DIMENSION offset = bNumDims - aNumDims;
    for (i = 0; i < offset; i++) {
      targetShape.push_back(b.shape()[i]);
    }
    for (; i < bNumDims; i++) {
      if (a.shape()[i - offset] == b.shape()[i]) {
        targetShape.push_back(a.shape()[i - offset]);
      } else if (b.shape()[i] == 1) {
        targetShape.push_back(a.shape()[i - offset]);
      } else if (a.shape()[i - offset] == 1) {
        targetShape.push_back(b.shape()[i]);
      } else {
        mismatch = true;
        break;
      }
    }
  } else {
    // mirror case: a has more dims than b
    DIMENSION i;
    DIMENSION offset = aNumDims - bNumDims;
    for (i = 0; i < offset; i++) {
      targetShape.push_back(a.shape()[i]);
    }
    for (; i < aNumDims; i++) {
      if (a.shape()[i] == b.shape()[i - offset]) {
        targetShape.push_back(b.shape()[i - offset]);
      } else if (b.shape()[i - offset] == 1) {
        targetShape.push_back(a.shape()[i]);
      } else if (a.shape()[i] == 1) {
        targetShape.push_back(b.shape()[i - offset]);
      } else {
        mismatch = true;
        break;
      }
    }
  }
  if (mismatch) {
    // Build a numpy-style error message, e.g. "(3,4) (2,4)".
    // NOTE(review): a.rank() - 1 underflows here for a rank-0 tensor —
    // assumes both operands have rank >= 1; confirm with callers.
    std::stringstream errMsg;
    errMsg << "operands could not be broadcast together with shapes "
           << "(";
    for (size_t i = 0; i < a.rank() - 1; i++) {
      errMsg << a.shape()[i] << ",";
    }
    errMsg << a.shape()[a.rank() - 1] << ") (";
    for (size_t i = 0; i < b.rank() - 1; i++) {
      errMsg << b.shape()[i] << ",";
    }
    errMsg << b.shape()[b.rank() - 1] << ")" << std::endl;
    SPDLOG_ERROR(errMsg.str().c_str());
    // empty shape signals failure to the caller
    targetShape.clear();
  }
  return targetShape;
}
/*! Broadcast tensor `a` to `targetShape` (Numpy multidirectional rules).
 * \param a           source tensor (unmodified)
 * \param targetShape shape to expand to; must be obtainable from a's shape
 *                    by replicating size-1 dimensions and/or prepending dims
 * \return a new tensor of shape targetShape, `a` itself when the shape
 *         already matches, or NULL_TENSOR on an impossible broadcast. */
template <typename T>
tensor<T> broadcast(const tensor<T> a,
                    const std::vector<DIMENSION> targetShape) {
  DIMENSION aNumDims = a.shape().size();
  DIMENSION targetNumDims = targetShape.size();
  // multi-directional broadcasting
  if (aNumDims > targetNumDims) {
    // Can't broadcast to fewer dimensions!
    return dnnc::NULL_TENSOR<T>;
  }
  if (a.shape() == targetShape) {
    // nothing to do
    return a;
  } else if ((a.rank() == 1) && (a.shape()[0] == 1)) {
    // a is a scalar: replicate its single value over the whole target shape
    size_t num_elems = std::accumulate(begin(targetShape), end(targetShape), 1,
                                       std::multiplies<>());
    T *mem_data = (T *)malloc(num_elems * sizeof(T));
    for (size_t i = 0; i < num_elems; i++) {
      mem_data[i] = a.data()[0];
    }
    // presumably tensor takes ownership of mem_data — TODO confirm
    tensor<T> result(mem_data, targetShape);
    return result;
  }
  if (aNumDims == targetNumDims) {
    std::vector<size_t> resultShape(targetNumDims);
    // Determine broadcast result shape
    for (size_t i = 0; i < targetNumDims; i++) {
      if ((a.shape()[i] == targetShape[i]) || (a.shape()[i] == 1)) {
        resultShape[i] = targetShape[i];
      } else {
        // Can't broadcast unless a's dimensions is 1
        return dnnc::NULL_TENSOR<T>;
      }
    }
    tensor<T> result(resultShape);
    // Determine broadcast result values.
    // BUGFIX: d1..d3 were previously read from targetShape unconditionally,
    // which indexed past the end of the vector (undefined behavior) for
    // rank-2 and rank-3 targets. Each dimension is now read only inside
    // the branch that guarantees it exists.
    if (targetNumDims == 4) {
      const DIMENSION d0 = targetShape[0];
      const DIMENSION d1 = targetShape[1];
      const DIMENSION d2 = targetShape[2];
      const DIMENSION d3 = targetShape[3];
      for (size_t i = 0; i < d0; i++) {
        for (size_t j = 0; j < d1; j++) {
          for (size_t k = 0; k < d2; k++) {
            for (size_t l = 0; l < d3; l++) {
              // any axis where a is size-1 reads index 0 (replication)
              size_t i1 = i, j1 = j, k1 = k, l1 = l;
              if (a.shape()[0] != d0) {
                i1 = 0;
              }
              if (a.shape()[1] != d1) {
                j1 = 0;
              }
              if (a.shape()[2] != d2) {
                k1 = 0;
              }
              if (a.shape()[3] != d3) {
                l1 = 0;
              }
              result(i, j, k, l) = a(i1, j1, k1, l1);
            }
          }
        }
      }
    } else if (targetNumDims == 3) {
      const DIMENSION d0 = targetShape[0];
      const DIMENSION d1 = targetShape[1];
      const DIMENSION d2 = targetShape[2];
      for (size_t i = 0; i < d0; i++) {
        for (size_t j = 0; j < d1; j++) {
          for (size_t k = 0; k < d2; k++) {
            size_t i1 = i, j1 = j, k1 = k;
            if (a.shape()[0] != d0) {
              i1 = 0;
            }
            if (a.shape()[1] != d1) {
              j1 = 0;
            }
            if (a.shape()[2] != d2) {
              k1 = 0;
            }
            result(i, j, k) = a(i1, j1, k1);
          }
        }
      }
    } else if (targetNumDims == 2) {
      const DIMENSION d0 = targetShape[0];
      const DIMENSION d1 = targetShape[1];
      for (size_t i = 0; i < d0; i++) {
        for (size_t j = 0; j < d1; j++) {
          size_t i1 = i, j1 = j;
          if (a.shape()[0] != d0) {
            i1 = 0;
          }
          if (a.shape()[1] != d1) {
            j1 = 0;
          }
          result(i, j) = a(i1, j1);
        }
      }
    } else {
      SPDLOG_ERROR("Unsupported!");
    }
    return result;
  } else if (aNumDims < targetNumDims) {
    // Fewer dims: left-pad a's shape with 1s, then recurse into the
    // equal-rank case above.
    std::vector<size_t> aReShape(targetNumDims);
    size_t diffNumDims = targetNumDims - aNumDims;
    for (size_t i = 0; i < targetNumDims; i++) {
      if (i < diffNumDims) {
        aReShape[i] = 1;
      } else {
        aReShape[i] = a.shape()[i - diffNumDims];
      }
    }
    tensor<T> aReShaped(aReShape);
    aReShaped.load(a.data());
    return broadcast<T>(aReShaped, targetShape);
  } else {
    // unreachable: aNumDims > targetNumDims already returned above
    SPDLOG_ERROR("Not supported");
  }
  return dnnc::NULL_TENSOR<T>;
}
/*! Reshape (in place) two tensors to their common Numpy-style broadcast
 * shape and return that shape. When the shapes are incompatible the
 * returned vector is empty and neither input is modified. */
template <typename T>
std::vector<DIMENSION> binaryBroadcastReShape(tensor<T> &a, tensor<T> &b) {
  std::vector<DIMENSION> commonShape = getTargetShape(a, b);
  if (!commonShape.empty()) {
    a = broadcast<T>(a, commonShape);
    b = broadcast<T>(b, commonShape);
  }
  return commonShape;
}
/*! Reshape (in place) every tensor in `inputs` to the common broadcast
 * shape of the whole vector.
 * \return the common shape, or an empty vector when the inputs hold fewer
 *         than two tensors or any pair is incompatible. */
template <typename T>
std::vector<DIMENSION> vecBroadcastReShape(std::vector<tensor<T>> &inputs) {
  std::vector<DIMENSION> targetShape;
  // BUGFIX: the old loop bound `inputs.size() - 1` underflowed for an
  // empty vector (size_t wrap-around), walking far past the end —
  // undefined behavior. With fewer than two tensors there is no pair to
  // broadcast, so return the empty shape directly (matching the old
  // behavior for a single input).
  if (inputs.size() < 2)
    return targetShape;
  for (size_t i = 0; i + 1 < inputs.size(); i++) {
    targetShape = binaryBroadcastReShape(inputs[i], inputs[i + 1]);
    if (targetShape.size() == 0) {
      // one incompatible shape breaks the operation, no point in going further
      return targetShape;
    }
  }
  // BUGFIX: the pairwise sweep leaves the *final* common shape only on the
  // last pair; tensors earlier in the vector may still carry a smaller
  // intermediate shape. Bring every earlier tensor up to the final shape
  // (the last two already match it).
  for (size_t i = 0; i + 2 < inputs.size(); i++) {
    inputs[i] = broadcast<T>(inputs[i], targetShape);
  }
  return targetShape;
}
} // namespace dnnc
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
import sys
class ConvTest(unittest.TestCase):
    """Validate dc.conv (2D convolution) against precomputed ONNX example
    outputs for the various auto_pad modes and stride settings."""

    def setUp(self):
        ## random testcase
        # Single image, single channel, single feature map; only the spatial
        # sizes and strides are randomized.
        self.channels = 1
        self.featuremaps = 1
        self.batchsize = 1
        #self.oneK = 1024
        self.oneK = 50
        # input spatial dims: between oneK and 4*oneK
        self.X_h = self.oneK + np.random.randint(self.oneK*3)
        self.X_w = self.oneK + np.random.randint(self.oneK*3)
        # kernel spatial dims: between 3 and 99
        self.K_h = 3 + np.random.randint(97)
        self.K_w = 3 + np.random.randint(97)
        # strides kept strictly smaller than the kernel size
        self.np_strides = np.zeros(2).astype(np.float32)
        self.np_strides[0] = 1 + np.random.randint(self.K_w - 1)
        self.np_strides[1] = 1 + np.random.randint(self.K_h - 1)
        self.np_B = np.zeros(self.featuremaps).astype(np.float32)
        self.np_X_data = np.random.randn(self.X_w * self.X_h).astype(np.float32)
        self.np_K_data = np.random.randn(self.K_w * self.K_h).astype(np.float32)
        self.np_X = np.reshape(self.np_X_data, (self.X_h, self.X_w))
        self.np_K = np.reshape(self.np_K_data, (self.K_h, self.K_w))
        # dnnc tensors in (batch, channel, H, W) / (featuremap, channel, H, W) layout
        self.dc_X = dc.reshape(dc.array(list(self.np_X_data)), (self.batchsize, self.channels, self.X_h, self.X_w)).asTypeFloat()
        self.dc_K = dc.reshape(dc.array(list(self.np_K_data)), (self.featuremaps, self.channels, self.K_h, self.K_w)).asTypeFloat()
        self.dc_B = dc.zeros(self.featuremaps).asTypeFloat()
        self.dc_strides = dc.reshape(dc.array(list(self.np_strides)), (2)).asTypeInt()
        self.dc_nullT = dc.array(0)
        ## onnx conv example testcase
        # 5x5 and 7x5 inputs with an all-ones 3x3 kernel, plus the expected
        # outputs for each padding/stride combination tested below.
        self.onnx_dc_X = dc.reshape(dc.array([0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,21.,22.,23.,24.]),(1,1,5,5))
        self.onnx_dc_X2 = dc.reshape(dc.array([0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,31.,32.,33.,34.]),(1,1,7,5))
        self.onnx_dc_W = dc.reshape(dc.array([1.,1.,1.,1.,1.,1.,1.,1.,1.]),(1,1,3,3))
        self.onnx_npr_su = np.array([12.,21.,27., 33., 24., 33., 54., 63., 72., 51., 63.,99.,108.,117., 81., 93.,144.,153.,162.,111., 72.,111.,117.,123., 84.])
        self.onnx_npr_vl = np.array([54.,63.,72.,99.,108.,117.,144.,153.,162.])
        self.onnx_npr_vl_s2 = np.array([54.,72.,144.,162.,234.,252.])
        self.onnx_npr_sp_s2 = np.array([12.,27.,24.,63.,108.,81.,123.,198.,141.,112.,177.,124.])
        self.onnx_npr_ap_s2 = np.array([21.,33.,99.,117.,189.,207.,171.,183.])
        self.onnx_dc_BIGW = dc.reshape(dc.array(list(np.ones(900))),(1,1,30,30))

    # def test_conv_nopad (self):
    #     npr = signal.convolve2d(self.np_X, self.np_K)
    #     dcr = dc.conv(self.dc_X,           # input
    #                   self.dc_K,           # filter
    #                   self.dc_B )          # bias
    #                   "VALID",             # autopad
    #                   self.dc_nullT,       # dilations
    #                   1,                   # group
    #                   self.dc_K.shape(),   # kernel shape
    #                   self.dc_nullT,       # pads
    #                   self.dc_nullT        # strides
    #                   )
    #     npr and cr don't match - just a sanity test for now
    #     np.testing.assert_allclose(npr, np.array(dcr.data()).astype(np.float32),rtol=1e-3, atol=1e-3)

    # Ref: https://github.com/onnx/onnx/blob/master/onnx/backend/test/case/node/conv.py

    # padding for same size
    def test_onnx_conv_same_upper (self):
        dcr = dc.conv(self.onnx_dc_X, self.onnx_dc_W, self.dc_B, "SAME_UPPER")
        np.testing.assert_allclose(self.onnx_npr_su.astype(np.float32), np.array(dcr.data()).astype(np.float32),rtol=1e-3, atol=1e-3)

    # padding for same size
    def test_onnx_conv_same_lower (self):
        dcr = dc.conv(self.onnx_dc_X, self.onnx_dc_W, self.dc_B, "SAME_LOWER")
        np.testing.assert_allclose(self.onnx_npr_su.astype(np.float32), np.array(dcr.data()).astype(np.float32),rtol=1e-3, atol=1e-3)

    # no padding
    def test_onnx_conv_valid (self):
        dcr = dc.conv(self.onnx_dc_X, self.onnx_dc_W, self.dc_B, "VALID")
        np.testing.assert_allclose(self.onnx_npr_vl.astype(np.float32), np.array(dcr.data()).astype(np.float32),rtol=1e-3, atol=1e-3)

    # stride 2, no padding
    def test_onnx_conv_valid_s2 (self):
        # positional args: autopad, dilations, group, kernel shape, pads, strides
        dcr = dc.conv(self.onnx_dc_X2, self.onnx_dc_W, self.dc_B,
                      "VALID",
                      dc.vectorInt([]),
                      1,
                      dc.vectorInt([]),
                      dc.vectorInt([]),
                      dc.vectorInt([2,2]))
        np.testing.assert_allclose(self.onnx_npr_vl_s2.astype(np.float32), np.array(dcr.data()).astype(np.float32),rtol=1e-3, atol=1e-3)

    # stride 2, explicit symmetrical padding
    def test_onnx_conv_ns_s2 (self):
        dcr = dc.conv(self.onnx_dc_X2, self.onnx_dc_W, self.dc_B,
                      "NOTSET",
                      dc.vectorInt([]),
                      1,
                      dc.vectorInt([]),
                      dc.vectorInt([1,1,1,1]),
                      dc.vectorInt([2,2]))
        np.testing.assert_allclose(self.onnx_npr_sp_s2.astype(np.float32), np.array(dcr.data()).astype(np.float32),rtol=1e-3, atol=1e-3)

    # stride 2, explicit asymmetrical padding
    def test_onnx_conv_ns_ap_s2 (self):
        dcr = dc.conv(self.onnx_dc_X2, self.onnx_dc_W, self.dc_B,
                      "NOTSET",
                      dc.vectorInt([]),
                      1,
                      dc.vectorInt([]),
                      dc.vectorInt([1,0,1,0]),
                      dc.vectorInt([2,2]))
        np.testing.assert_allclose(self.onnx_npr_ap_s2.astype(np.float32), np.array(dcr.data()).astype(np.float32),rtol=1e-3, atol=1e-3)

    # negative, kernel too big
    # def test_onnx_conv_same_upper (self):
    #     dcr = dc.conv(self.onnx_dc_X, self.onnx_dc_BIGW, self.dc_B, "VALID")

    def tearDown(self):
        return "test finished"
if __name__ == '__main__':
unittest.main()
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#ifndef SWIGPYTHON
#include <string>
#include <vector>
#endif
namespace dnnc {
template <typename T> class placeHolder {
protected:
std::string _name; //! placeHolder name
std::vector<DIMENSION> _shape; //! placeHolder shape
void init(std::vector<DIMENSION> &dimn) {
for (auto num : dimn) {
if (num == 0)
break;
_shape.push_back(num);
}
}
// class tensor uses this constructor to allow for
// _shape with no dimension, i.e. rank 0.
placeHolder(std::string n, std::vector<DIMENSION> dimn) : _name(n) {
init(dimn);
}
public:
placeHolder(std::vector<DIMENSION> dimn, std::string n = "") : _name(n) {
init(dimn);
}
void name(std::string n) { _name = n; }
std::string name() const { return _name; }
const DIMENSION rank() const { return _shape.size(); }
const std::vector<DIMENSION> shape() const { return _shape; }
/// \brief Return number of elements in the tensor.
const DIMENSION length() const {
DIMENSION sz = rank() ? 1 : 0;
for (size_t i = 0; i < rank(); i++)
sz = sz * _shape[i];
return sz;
}
/*<! It flattens tensor in place, reducing the tensor's rank to
* 1 as in flat 1D array */
void flatteninplace() {
DIMENSION sz = length();
_shape.clear();
_shape.push_back(sz);
return;
}
};
} // namespace dnnc
<file_sep># DNNC tensor operators, NN operators and methods (similar to numpy)
---
## Philosophy
DNNC tensor variable is a **fixed-length multi-dimensional array**. It is a mutable object with ability to transform constrained by algebraic rules. Similar to numpy ndarray objects, dnnc.tensor objects have a data type and a shape. Unlike numpy, tensor, once created is kept fixed in size. This is because, DNNC is a compiler designed to produce efficient executables for smaller devices.
**Legends** ✔️: ToBeDone, ✅:Done, 🚫:Rejected
```python
✅a = numpy.array([1,2,3])
✅numpy.array([1,2]) #1D
✅numpy.array([[1,2],[10,20]]) #2D
# For complex types
🚫numpy.array([1,2], dtype=complex) #1D complex
# For randomized 3d array
🚫Array3d = numpy.random.randint(10, size=(3, 4, 5))
# generate uniformly distributed numbers
✅a = numpy.random.rand(3,2) #(3 rows, 2 cols)
# Create empty 2D array (2 rows, 3 columns)
✅a_empty = numpy.empty(2,3)
# Create 0 initiallized 2D array (3 rows, 2 columns)
✅numpy.zeros(3,2)
# Create 1 initiallized 2D array (3 rows, 2 columns)
✅numpy.ones(3,2)
# Create a range of elements
✅array = numpy.arange(3) # array will contain 0,1,2
# Create a Numpy array from Python sequence of elements
✅a = numpy.asarray([1,2])
# Create an array with values that are evenly spaced
✔️a = numpy.arange(0,6,2) # create 0-5, 2 apart, returns [0,2,4]
# Create an array where the values are linearly spaced over an interval: numpy.linspace(first, last, number)
✔️a = numpy.linspace(0,10,5) # returns [0,2.5,5,7.5,10]
```
---
### Some advanced features (Array manipulation)
**NOTE** DNNC tensor allows manipulation of its shape/contents, but prohibits operations that lead to a change in the size/length of its memory.
```python
# Add
a = [3,4,5]
🚫a = numpy.append(a, [1,2]) #returns [3,4,5,1,2]
#Join
🚫numpy.concatenate(a,b)
✔️numpy.stack(a,b)
✔️numpy.hstack(a,b)
✔️numpy.vstack(a,b)
# Delete
🚫a = numpy.delete(array,2) # 2 is going to be deleted from the array
# Sort
✔️numpy.sort(array1, axis=1, kind = 'quicksort', order ='column name')
# Deep copy
✔️new_array = numpy.copy(array)
```
---
### Array functions and attributes
##### Shape and Dimension
```python
# Shape
✅array = numpy.array([[..],[..]])
✅array.shape
# Reshape by setting shape property
✅array.shape = (1,2) # (1 row, 2 columns)
# resize(x,y) can also be used to resize an array
# Dimensions of an array:
array.dim
# Find length of each element of an array:
array.itemsize
```
##### Slicing
```python
✅array = numpy.arange(100)
# Get 3rd element:
✅array[2]
# Get items within indexes
✔️array[3:5] #3 is start, 5 is end
# Get 3-10 element, step size 4 increments:
✔️array[2:9:4]
# Get all elements from 2nd element onwards
✔️array[1:]
# Can also pass in N-Dimensional Index
✔️array[[0,1],[1,2]]
# Get all NAN elements
✔️array[numpy.isnan(array)]
# Using where()
✔️numpy.where(array > 5) # will return all elements that meet the criteria
```
##### Broadcasting
```python
# 5 rows, 3 columns array
✅bigger_array = arange(5,3)
# 5 rows, 1 column array
✅smaller_array = arange(5)
✔️final_array = bigger_array + smaller_array
```
---
### Mathematical functions
---
#### Can be done through DNNC opearators:
* **:heavy_check_mark: numpy.sin()**
* **:heavy_check_mark: numpy.cos()**
* **:heavy_check_mark: numpy.tan()**
* **:heavy_check_mark: numpy.sinh()**
* **:heavy_check_mark: numpy.cosh()**
* **:heavy_check_mark: numpy.tanh()**
* **:heavy_check_mark: numpy.arcsin()**
* **:heavy_check_mark: numpy.arccos()**
* **:heavy_check_mark: numpy.arctan()**
* **:heavy_check_mark: numpy.arcsinh()**
* **:heavy_check_mark: numpy.arccosh()**
* **:heavy_check_mark: numpy.arctanh()**
* **:heavy_check_mark: numpy.add()**
* **:heavy_check_mark: numpy.subtract()**
* **:heavy_check_mark: numpy.cross()**
* **:heavy_check_mark: numpy.divide()**
* **:x: numpy.power()**
* **:heavy_check_mark: numpy.round()**
* **:heavy_check_mark: numpy.floor()**
* **:heavy_check_mark: numpy.ceil()**
* **:heavy_check_mark: numpy.exp()**
* **:heavy_check_mark: numpy.log()**
* **:heavy_check_mark: numpy.sqrt()**
* **:heavy_check_mark: numpy.absolute()**
* **:x: numpy.clip()**
* **:x: numpy.convolve()**
---
#### Have to be implemented:
* **numpy.dot()**
> dot product of 2 arrays
* **numpy.inner()**
> inner product of 2 arrays
* **numpy.determinant()**
> determinant of an array
* **numpy.transpose()**
> permute the dimensions of matrix
* **numpy.inverse()**
> inverse of a matrix
* **numpy.solve()**
> solves matrix equation
* **numpy.multiply()**
> element wise multiplication of 2 arrays (not to be confused with matrix multiplication)
* **numpy.true_divide()**
> element wise division of 2 arrays (uses `/` in python)
* **numpy.floor_divide()**
> element wise division of 2 arrays (uses `//` in python)
* **numpy.degrees() / numpy.rad2deg()**
> radian to degree converter
* **numpy.radians() / numpy.deg2rad()**
> degree to radian converter
* **numpy.median()**
> Finds the median
* **numpy.average()**
> Finds average
* **numpy.mean()**
> Finds mean
* **numpy.var()**
> Finds variance
* **numpy.rint()**
> round elements of the array to the nearest integer
* **numpy.fix()**
> round elements of the array to the nearest integer towards zero
* **numpy.trunc()**
> returns the truncated value of the elements of array
* **numpy.log10()**
> return the base 10 logarithm of the input array, element-wise
* **numpy.log2()**
> return the base 10 logarithm of the input array, element-wise
* **numpy.expm1()**
> calculate exp(x) – 1 for all elements in the array
* **numpy.exp2()**
> calculate (2^p) for all p in the input array
* **numpy.logaddexp()**
> logarithm of the sum of exponentiations of the inputs
* **numpy.logaddexp2()**
> logarithm of the sum of exponentiations of the inputs in base-2
* **numpy.reciprocal()**
> calculate (1/x) for all x in the input array
* **numpy.positive()**
> make every element positive
* **numpy.negative()**
> make every element negative
* **numpy.remainder()**
> return element wise remainder of division
* **numpy.divmod()**
> return element-wise quotient and remainder simultaneously
* **numpy.isreal()**
> test element-wise whether it is a real number or not(not infinity or not Not a Number) and return the result as a boolean array
* **numpy.conj()**
> The conjugate of a complex number is obtained by changing the sign of its imaginary part. If the complex number is (2+5j) then its conjugate is (2-5j)
* **numpy.cbrt()**
> mathematical function helps user to calculate cube root of x for all x being the array elements
* **numpy.square()**
> return the non-negative square-root of an array, element-wise
* **numpy.maximum()**
> find the element-wise maximum of array elements
* **numpy.minimum()**
> find the element-wise minimum of array elements
* **numpy.interp()**
> returns the one-dimensional piecewise linear interpolant to a function with given discrete data points (xp, fp), evaluated at x
* **numpy.nan_to_num()**
> replace NaN with zero and infinity with large finite numbers
* **numpy.real_if_close()**
> if complex input returns a real array if complex parts are close to zero
* **numpy.heaviside()**
> heaviside(x1, x2) = {0 if x1 < 0}, {x2 if x1 == 0}. {1 if x1 > 0}
---
## Resource:
* Basic Functions are taken from **[Medium](https://medium.com/fintechexplained/why-should-we-use-numpy-c14a4fb03ee9)**
* Mathematical functions are taken from **[GeeksForGeeks](https://www.geeksforgeeks.org/numpy-mathematical-function/)**
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
# This file is auto generated by tensor_op_gen.py
import common
import deepC.dnnc as dc
import numpy as np
import unittest
class tensorOperatorsGeneratedTest(unittest.TestCase):
def setUp(self):
self.np_bool_0_4 = np.arange(5).astype(np.bool)
self.np_bool_5_9 = np.arange(5,10).astype(np.bool)
self.np_int_0_4 = np.arange(5).astype(np.int)
self.np_int_5_9 = np.arange(5,10).astype(np.int)
self.np_float_0_4 = np.arange(5).astype(np.float)
self.np_float_5_9 = np.arange(5,10).astype(np.float)
self.np_double_0_4 = np.arange(5).astype(np.double)
self.np_double_5_9 = np.arange(5,10).astype(np.double)
self.dc_bool_0_4 = dc.arange(5).asTypeBool()
self.dc_bool_5_9 = dc.arange(5,10).asTypeBool()
self.dc_int_0_4 = dc.arange(5).asTypeInt()
self.dc_int_5_9 = dc.arange(5,10).asTypeInt()
self.dc_float_0_4 = dc.arange(5).asTypeFloat()
self.dc_float_5_9 = dc.arange(5,10).asTypeFloat()
self.dc_double_0_4 = dc.arange(5).asTypeDouble()
self.dc_double_5_9 = dc.arange(5,10).asTypeDouble()
# Assignment Add
# bool_tensor_1 += bool_scalar
def test_Assignment_Add_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4.copy()
temp_np += True
temp_dc = self.dc_bool_0_4.copy()
temp_dc += True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 += bool_tensor_2
def test_Assignment_Add_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4.copy()
temp_np += self.np_bool_5_9
temp_dc = self.dc_bool_0_4.copy()
temp_dc += self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 += bool_scalar
def test_Assignment_Add_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np += True
temp_dc = self.dc_int_0_4.copy()
temp_dc += True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 += bool_tensor_2
def test_Assignment_Add_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np += self.np_bool_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc += self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 += int_scalar
def test_Assignment_Add_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np += 5
temp_dc = self.dc_int_0_4.copy()
temp_dc += 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 += int_tensor_2
def test_Assignment_Add_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np += self.np_int_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc += self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 += bool_scalar
def test_Assignment_Add_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np += True
temp_dc = self.dc_float_0_4.copy()
temp_dc += True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 += bool_tensor_2
def test_Assignment_Add_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np += self.np_bool_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc += self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 += float_scalar
def test_Assignment_Add_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np += 5.0
temp_dc = self.dc_float_0_4.copy()
temp_dc += 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 += float_tensor_2
def test_Assignment_Add_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np += self.np_float_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc += self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 += int_scalar
def test_Assignment_Add_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np += 5
temp_dc = self.dc_float_0_4.copy()
temp_dc += 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 += int_tensor_2
def test_Assignment_Add_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np += self.np_int_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc += self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Assignment Sub
# int_tensor_1 -= bool_scalar
def test_Assignment_Sub_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np -= True
temp_dc = self.dc_int_0_4.copy()
temp_dc -= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 -= bool_tensor_2
def test_Assignment_Sub_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np -= self.np_bool_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc -= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 -= int_scalar
def test_Assignment_Sub_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np -= 5
temp_dc = self.dc_int_0_4.copy()
temp_dc -= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 -= int_tensor_2
def test_Assignment_Sub_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np -= self.np_int_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc -= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 -= bool_scalar
def test_Assignment_Sub_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np -= True
temp_dc = self.dc_float_0_4.copy()
temp_dc -= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 -= bool_tensor_2
def test_Assignment_Sub_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np -= self.np_bool_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc -= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 -= float_scalar
def test_Assignment_Sub_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np -= 5.0
temp_dc = self.dc_float_0_4.copy()
temp_dc -= 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 -= float_tensor_2
def test_Assignment_Sub_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np -= self.np_float_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc -= self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 -= int_scalar
def test_Assignment_Sub_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np -= 5
temp_dc = self.dc_float_0_4.copy()
temp_dc -= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 -= int_tensor_2
def test_Assignment_Sub_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np -= self.np_int_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc -= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Assignment Mul
# bool_tensor_1 *= bool_scalar
def test_Assignment_Mul_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4.copy()
temp_np *= True
temp_dc = self.dc_bool_0_4.copy()
temp_dc *= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 *= bool_tensor_2
def test_Assignment_Mul_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4.copy()
temp_np *= self.np_bool_5_9
temp_dc = self.dc_bool_0_4.copy()
temp_dc *= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 *= bool_scalar
def test_Assignment_Mul_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np *= True
temp_dc = self.dc_int_0_4.copy()
temp_dc *= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 *= bool_tensor_2
def test_Assignment_Mul_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np *= self.np_bool_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc *= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 *= int_scalar
def test_Assignment_Mul_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np *= 5
temp_dc = self.dc_int_0_4.copy()
temp_dc *= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 *= int_tensor_2
def test_Assignment_Mul_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np *= self.np_int_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc *= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 *= bool_scalar
def test_Assignment_Mul_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np *= True
temp_dc = self.dc_float_0_4.copy()
temp_dc *= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 *= bool_tensor_2
def test_Assignment_Mul_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np *= self.np_bool_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc *= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 *= float_scalar
def test_Assignment_Mul_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np *= 5.0
temp_dc = self.dc_float_0_4.copy()
temp_dc *= 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 *= float_tensor_2
def test_Assignment_Mul_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np *= self.np_float_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc *= self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 *= int_scalar
def test_Assignment_Mul_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np *= 5
temp_dc = self.dc_float_0_4.copy()
temp_dc *= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 *= int_tensor_2
def test_Assignment_Mul_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np *= self.np_int_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc *= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Assignment True_Div
# float_tensor_1 /= bool_scalar
def test_Assignment_True_Div_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np /= True
temp_dc = self.dc_float_0_4.copy()
temp_dc /= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 /= bool_tensor_2
def test_Assignment_True_Div_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np /= self.np_bool_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc /= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Assignment Floor_Div
# int_tensor_1 //= bool_scalar
def test_Assignment_Floor_Div_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np //= True
temp_dc = self.dc_int_0_4.copy()
temp_dc //= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 //= bool_tensor_2
def test_Assignment_Floor_Div_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np //= self.np_bool_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc //= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 //= int_scalar
def test_Assignment_Floor_Div_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np //= 5
temp_dc = self.dc_int_0_4.copy()
temp_dc //= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 //= int_tensor_2
def test_Assignment_Floor_Div_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np //= self.np_int_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc //= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 //= bool_scalar
def test_Assignment_Floor_Div_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np //= True
temp_dc = self.dc_float_0_4.copy()
temp_dc //= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 //= bool_tensor_2
def test_Assignment_Floor_Div_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np //= self.np_bool_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc //= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 //= float_scalar
def test_Assignment_Floor_Div_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np //= 5.0
temp_dc = self.dc_float_0_4.copy()
temp_dc //= 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 //= float_tensor_2
def test_Assignment_Floor_Div_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np //= self.np_float_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc //= self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 //= int_scalar
def test_Assignment_Floor_Div_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np //= 5
temp_dc = self.dc_float_0_4.copy()
temp_dc //= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 //= int_tensor_2
def test_Assignment_Floor_Div_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np //= self.np_int_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc //= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Assignment Lshift
# int_tensor_1 <<= bool_scalar
def test_Assignment_Lshift_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np <<= True
temp_dc = self.dc_int_0_4.copy()
temp_dc <<= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 <<= int_scalar
def test_Assignment_Lshift_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np <<= 5
temp_dc = self.dc_int_0_4.copy()
temp_dc <<= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 <<= int_tensor_2
def test_Assignment_Lshift_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np <<= self.np_int_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc <<= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Assignment Rshift
# int_tensor_1 >>= bool_scalar
def test_Assignment_Rshift_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np >>= True
temp_dc = self.dc_int_0_4.copy()
temp_dc >>= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 >>= int_scalar
def test_Assignment_Rshift_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np >>= 5
temp_dc = self.dc_int_0_4.copy()
temp_dc >>= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 >>= int_tensor_2
def test_Assignment_Rshift_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np >>= self.np_int_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc >>= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Assignment Power
# int_tensor_1 **= bool_scalar
def test_Assignment_Power_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np **= True
temp_dc = self.dc_int_0_4.copy()
temp_dc **= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 **= bool_tensor_2
def test_Assignment_Power_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np **= self.np_bool_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc **= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 **= int_scalar
def test_Assignment_Power_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np **= 5
temp_dc = self.dc_int_0_4.copy()
temp_dc **= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 **= int_tensor_2
def test_Assignment_Power_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np **= self.np_int_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc **= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 **= bool_scalar
def test_Assignment_Power_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np **= True
temp_dc = self.dc_float_0_4.copy()
temp_dc **= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 **= bool_tensor_2
def test_Assignment_Power_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np **= self.np_bool_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc **= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 **= float_scalar
def test_Assignment_Power_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np **= 5.0
temp_dc = self.dc_float_0_4.copy()
temp_dc **= 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 **= float_tensor_2
def test_Assignment_Power_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np **= self.np_float_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc **= self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 **= int_scalar
def test_Assignment_Power_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4.copy()
temp_np **= 5
temp_dc = self.dc_float_0_4.copy()
temp_dc **= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 **= int_tensor_2
def test_Assignment_Power_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4.copy()
temp_np **= self.np_int_5_9
temp_dc = self.dc_float_0_4.copy()
temp_dc **= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Assignment And
# bool_tensor_1 &= bool_scalar
def test_Assignment_And_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4.copy()
temp_np &= True
temp_dc = self.dc_bool_0_4.copy()
temp_dc &= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 &= bool_tensor_2
def test_Assignment_And_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4.copy()
temp_np &= self.np_bool_5_9
temp_dc = self.dc_bool_0_4.copy()
temp_dc &= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 &= bool_scalar
def test_Assignment_And_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np &= True
temp_dc = self.dc_int_0_4.copy()
temp_dc &= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 &= bool_tensor_2
def test_Assignment_And_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np &= self.np_bool_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc &= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 &= int_scalar
def test_Assignment_And_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np &= 5
temp_dc = self.dc_int_0_4.copy()
temp_dc &= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 &= int_tensor_2
def test_Assignment_And_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np &= self.np_int_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc &= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Assignment Or
# bool_tensor_1 |= bool_scalar
def test_Assignment_Or_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4.copy()
temp_np |= True
temp_dc = self.dc_bool_0_4.copy()
temp_dc |= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 |= bool_tensor_2
def test_Assignment_Or_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4.copy()
temp_np |= self.np_bool_5_9
temp_dc = self.dc_bool_0_4.copy()
temp_dc |= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 |= bool_scalar
def test_Assignment_Or_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np |= True
temp_dc = self.dc_int_0_4.copy()
temp_dc |= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 |= bool_tensor_2
def test_Assignment_Or_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np |= self.np_bool_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc |= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 |= int_scalar
def test_Assignment_Or_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np |= 5
temp_dc = self.dc_int_0_4.copy()
temp_dc |= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 |= int_tensor_2
def test_Assignment_Or_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np |= self.np_int_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc |= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Assignment Xor
# bool_tensor_1 ^= bool_scalar
def test_Assignment_Xor_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4.copy()
temp_np ^= True
temp_dc = self.dc_bool_0_4.copy()
temp_dc ^= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 ^= bool_tensor_2
def test_Assignment_Xor_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4.copy()
temp_np ^= self.np_bool_5_9
temp_dc = self.dc_bool_0_4.copy()
temp_dc ^= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ^= bool_scalar
def test_Assignment_Xor_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np ^= True
temp_dc = self.dc_int_0_4.copy()
temp_dc ^= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ^= bool_tensor_2
def test_Assignment_Xor_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np ^= self.np_bool_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc ^= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ^= int_scalar
def test_Assignment_Xor_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4.copy()
temp_np ^= 5
temp_dc = self.dc_int_0_4.copy()
temp_dc ^= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ^= int_tensor_2
def test_Assignment_Xor_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4.copy()
temp_np ^= self.np_int_5_9
temp_dc = self.dc_int_0_4.copy()
temp_dc ^= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary Add
# bool_tensor_1 + bool_scalar
def test_Binary_Add_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 + True
temp_dc = self.dc_bool_0_4 + True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 + bool_tensor_2
def test_Binary_Add_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4 + self.np_bool_5_9
temp_dc = self.dc_bool_0_4 + self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 + float_scalar
def test_Binary_Add_bool_tensor_1_float_scalar (self):
temp_np = self.np_bool_0_4 + 5.0
temp_dc = self.dc_bool_0_4 + 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 + float_tensor_2
def test_Binary_Add_bool_tensor_1_float_tensor_2 (self):
temp_np = self.np_bool_0_4 + self.np_float_5_9
temp_dc = self.dc_bool_0_4 + self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 + int_scalar
def test_Binary_Add_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 + 5
temp_dc = self.dc_bool_0_4 + 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 + int_tensor_2
def test_Binary_Add_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 + self.np_int_5_9
temp_dc = self.dc_bool_0_4 + self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 + bool_scalar
def test_Binary_Add_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 + True
temp_dc = self.dc_int_0_4 + True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 + bool_tensor_2
def test_Binary_Add_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4 + self.np_bool_5_9
temp_dc = self.dc_int_0_4 + self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 + float_scalar
def test_Binary_Add_int_tensor_1_float_scalar (self):
temp_np = self.np_int_0_4 + 5.0
temp_dc = self.dc_int_0_4 + 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 + float_tensor_2
def test_Binary_Add_int_tensor_1_float_tensor_2 (self):
temp_np = self.np_int_0_4 + self.np_float_5_9
temp_dc = self.dc_int_0_4 + self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 + int_scalar
def test_Binary_Add_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 + 5
temp_dc = self.dc_int_0_4 + 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 + int_tensor_2
def test_Binary_Add_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 + self.np_int_5_9
temp_dc = self.dc_int_0_4 + self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 + bool_scalar
def test_Binary_Add_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4 + True
temp_dc = self.dc_float_0_4 + True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 + bool_tensor_2
def test_Binary_Add_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4 + self.np_bool_5_9
temp_dc = self.dc_float_0_4 + self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 + float_scalar
def test_Binary_Add_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4 + 5.0
temp_dc = self.dc_float_0_4 + 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 + float_tensor_2
def test_Binary_Add_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4 + self.np_float_5_9
temp_dc = self.dc_float_0_4 + self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 + int_scalar
def test_Binary_Add_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4 + 5
temp_dc = self.dc_float_0_4 + 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 + int_tensor_2
def test_Binary_Add_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4 + self.np_int_5_9
temp_dc = self.dc_float_0_4 + self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary Sub
# bool_tensor_1 - float_scalar
def test_Binary_Sub_bool_tensor_1_float_scalar (self):
temp_np = self.np_bool_0_4 - 5.0
temp_dc = self.dc_bool_0_4 - 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 - float_tensor_2
def test_Binary_Sub_bool_tensor_1_float_tensor_2 (self):
temp_np = self.np_bool_0_4 - self.np_float_5_9
temp_dc = self.dc_bool_0_4 - self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 - int_scalar
def test_Binary_Sub_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 - 5
temp_dc = self.dc_bool_0_4 - 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 - int_tensor_2
def test_Binary_Sub_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 - self.np_int_5_9
temp_dc = self.dc_bool_0_4 - self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 - bool_scalar
def test_Binary_Sub_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 - True
temp_dc = self.dc_int_0_4 - True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 - bool_tensor_2
def test_Binary_Sub_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4 - self.np_bool_5_9
temp_dc = self.dc_int_0_4 - self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 - float_scalar
def test_Binary_Sub_int_tensor_1_float_scalar (self):
temp_np = self.np_int_0_4 - 5.0
temp_dc = self.dc_int_0_4 - 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 - float_tensor_2
def test_Binary_Sub_int_tensor_1_float_tensor_2 (self):
temp_np = self.np_int_0_4 - self.np_float_5_9
temp_dc = self.dc_int_0_4 - self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 - int_scalar
def test_Binary_Sub_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 - 5
temp_dc = self.dc_int_0_4 - 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 - int_tensor_2
def test_Binary_Sub_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 - self.np_int_5_9
temp_dc = self.dc_int_0_4 - self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 - bool_scalar
def test_Binary_Sub_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4 - True
temp_dc = self.dc_float_0_4 - True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 - bool_tensor_2
def test_Binary_Sub_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4 - self.np_bool_5_9
temp_dc = self.dc_float_0_4 - self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 - float_scalar
def test_Binary_Sub_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4 - 5.0
temp_dc = self.dc_float_0_4 - 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 - float_tensor_2
def test_Binary_Sub_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4 - self.np_float_5_9
temp_dc = self.dc_float_0_4 - self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 - int_scalar
def test_Binary_Sub_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4 - 5
temp_dc = self.dc_float_0_4 - 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 - int_tensor_2
def test_Binary_Sub_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4 - self.np_int_5_9
temp_dc = self.dc_float_0_4 - self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary Mul
# bool_tensor_1 * bool_scalar
def test_Binary_Mul_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 * True
temp_dc = self.dc_bool_0_4 * True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 * bool_tensor_2
def test_Binary_Mul_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4 * self.np_bool_5_9
temp_dc = self.dc_bool_0_4 * self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 * float_scalar
def test_Binary_Mul_bool_tensor_1_float_scalar (self):
temp_np = self.np_bool_0_4 * 5.0
temp_dc = self.dc_bool_0_4 * 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 * float_tensor_2
def test_Binary_Mul_bool_tensor_1_float_tensor_2 (self):
temp_np = self.np_bool_0_4 * self.np_float_5_9
temp_dc = self.dc_bool_0_4 * self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 * int_scalar
def test_Binary_Mul_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 * 5
temp_dc = self.dc_bool_0_4 * 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 * int_tensor_2
def test_Binary_Mul_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 * self.np_int_5_9
temp_dc = self.dc_bool_0_4 * self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 * bool_scalar
def test_Binary_Mul_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 * True
temp_dc = self.dc_int_0_4 * True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 * bool_tensor_2
def test_Binary_Mul_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4 * self.np_bool_5_9
temp_dc = self.dc_int_0_4 * self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 * float_scalar
def test_Binary_Mul_int_tensor_1_float_scalar (self):
temp_np = self.np_int_0_4 * 5.0
temp_dc = self.dc_int_0_4 * 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 * float_tensor_2
def test_Binary_Mul_int_tensor_1_float_tensor_2 (self):
temp_np = self.np_int_0_4 * self.np_float_5_9
temp_dc = self.dc_int_0_4 * self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 * int_scalar
def test_Binary_Mul_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 * 5
temp_dc = self.dc_int_0_4 * 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 * int_tensor_2
def test_Binary_Mul_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 * self.np_int_5_9
temp_dc = self.dc_int_0_4 * self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 * bool_scalar
def test_Binary_Mul_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4 * True
temp_dc = self.dc_float_0_4 * True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 * bool_tensor_2
def test_Binary_Mul_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4 * self.np_bool_5_9
temp_dc = self.dc_float_0_4 * self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 * float_scalar
def test_Binary_Mul_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4 * 5.0
temp_dc = self.dc_float_0_4 * 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 * float_tensor_2
def test_Binary_Mul_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4 * self.np_float_5_9
temp_dc = self.dc_float_0_4 * self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 * int_scalar
def test_Binary_Mul_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4 * 5
temp_dc = self.dc_float_0_4 * 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 * int_tensor_2
def test_Binary_Mul_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4 * self.np_int_5_9
temp_dc = self.dc_float_0_4 * self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary True_Div
# bool_tensor_1 / bool_scalar
def test_Binary_True_Div_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 / True
temp_dc = self.dc_bool_0_4 / True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 / bool_tensor_2
def test_Binary_True_Div_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4 / self.np_bool_5_9
temp_dc = self.dc_bool_0_4 / self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 / bool_scalar
def test_Binary_True_Div_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 / True
temp_dc = self.dc_int_0_4 / True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 / bool_tensor_2
def test_Binary_True_Div_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4 / self.np_bool_5_9
temp_dc = self.dc_int_0_4 / self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 / bool_scalar
def test_Binary_True_Div_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4 / True
temp_dc = self.dc_float_0_4 / True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 / bool_tensor_2
def test_Binary_True_Div_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4 / self.np_bool_5_9
temp_dc = self.dc_float_0_4 / self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary Floor_Div
# bool_tensor_1 // bool_scalar
def test_Binary_Floor_Div_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 // True
temp_dc = self.dc_bool_0_4 // True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 // bool_tensor_2
def test_Binary_Floor_Div_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4 // self.np_bool_5_9
temp_dc = self.dc_bool_0_4 // self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 // float_scalar
def test_Binary_Floor_Div_bool_tensor_1_float_scalar (self):
temp_np = self.np_bool_0_4 // 5.0
temp_dc = self.dc_bool_0_4 // 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 // float_tensor_2
def test_Binary_Floor_Div_bool_tensor_1_float_tensor_2 (self):
temp_np = self.np_bool_0_4 // self.np_float_5_9
temp_dc = self.dc_bool_0_4 // self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 // int_scalar
def test_Binary_Floor_Div_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 // 5
temp_dc = self.dc_bool_0_4 // 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 // int_tensor_2
def test_Binary_Floor_Div_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 // self.np_int_5_9
temp_dc = self.dc_bool_0_4 // self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 // bool_scalar
def test_Binary_Floor_Div_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 // True
temp_dc = self.dc_int_0_4 // True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 // bool_tensor_2
def test_Binary_Floor_Div_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4 // self.np_bool_5_9
temp_dc = self.dc_int_0_4 // self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 // float_scalar
def test_Binary_Floor_Div_int_tensor_1_float_scalar (self):
temp_np = self.np_int_0_4 // 5.0
temp_dc = self.dc_int_0_4 // 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 // float_tensor_2
def test_Binary_Floor_Div_int_tensor_1_float_tensor_2 (self):
temp_np = self.np_int_0_4 // self.np_float_5_9
temp_dc = self.dc_int_0_4 // self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 // int_scalar
def test_Binary_Floor_Div_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 // 5
temp_dc = self.dc_int_0_4 // 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 // int_tensor_2
def test_Binary_Floor_Div_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 // self.np_int_5_9
temp_dc = self.dc_int_0_4 // self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 // bool_scalar
def test_Binary_Floor_Div_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4 // True
temp_dc = self.dc_float_0_4 // True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 // bool_tensor_2
def test_Binary_Floor_Div_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4 // self.np_bool_5_9
temp_dc = self.dc_float_0_4 // self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 // float_scalar
def test_Binary_Floor_Div_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4 // 5.0
temp_dc = self.dc_float_0_4 // 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 // float_tensor_2
def test_Binary_Floor_Div_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4 // self.np_float_5_9
temp_dc = self.dc_float_0_4 // self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 // int_scalar
def test_Binary_Floor_Div_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4 // 5
temp_dc = self.dc_float_0_4 // 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 // int_tensor_2
def test_Binary_Floor_Div_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4 // self.np_int_5_9
temp_dc = self.dc_float_0_4 // self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary Mod
# bool_tensor_1 % bool_scalar
def test_Binary_Mod_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 % True
temp_dc = self.dc_bool_0_4 % True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 % bool_tensor_2
def test_Binary_Mod_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4 % self.np_bool_5_9
temp_dc = self.dc_bool_0_4 % self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 % float_scalar
def test_Binary_Mod_bool_tensor_1_float_scalar (self):
temp_np = self.np_bool_0_4 % 5.0
temp_dc = self.dc_bool_0_4 % 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 % float_tensor_2
def test_Binary_Mod_bool_tensor_1_float_tensor_2 (self):
temp_np = self.np_bool_0_4 % self.np_float_5_9
temp_dc = self.dc_bool_0_4 % self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 % int_scalar
def test_Binary_Mod_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 % 5
temp_dc = self.dc_bool_0_4 % 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 % int_tensor_2
def test_Binary_Mod_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 % self.np_int_5_9
temp_dc = self.dc_bool_0_4 % self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 % bool_scalar
def test_Binary_Mod_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 % True
temp_dc = self.dc_int_0_4 % True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 % bool_tensor_2
def test_Binary_Mod_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4 % self.np_bool_5_9
temp_dc = self.dc_int_0_4 % self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 % float_scalar
def test_Binary_Mod_int_tensor_1_float_scalar (self):
temp_np = self.np_int_0_4 % 5.0
temp_dc = self.dc_int_0_4 % 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 % float_tensor_2
def test_Binary_Mod_int_tensor_1_float_tensor_2 (self):
temp_np = self.np_int_0_4 % self.np_float_5_9
temp_dc = self.dc_int_0_4 % self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 % int_scalar
def test_Binary_Mod_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 % 5
temp_dc = self.dc_int_0_4 % 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 % int_tensor_2
def test_Binary_Mod_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 % self.np_int_5_9
temp_dc = self.dc_int_0_4 % self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 % bool_scalar
def test_Binary_Mod_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4 % True
temp_dc = self.dc_float_0_4 % True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 % bool_tensor_2
def test_Binary_Mod_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4 % self.np_bool_5_9
temp_dc = self.dc_float_0_4 % self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 % float_scalar
def test_Binary_Mod_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4 % 5.0
temp_dc = self.dc_float_0_4 % 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 % float_tensor_2
def test_Binary_Mod_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4 % self.np_float_5_9
temp_dc = self.dc_float_0_4 % self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 % int_scalar
def test_Binary_Mod_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4 % 5
temp_dc = self.dc_float_0_4 % 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 % int_tensor_2
def test_Binary_Mod_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4 % self.np_int_5_9
temp_dc = self.dc_float_0_4 % self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary Lshift
# bool_tensor_1 << bool_scalar
def test_Binary_Lshift_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 << True
temp_dc = self.dc_bool_0_4 << True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 << int_scalar
def test_Binary_Lshift_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 << 5
temp_dc = self.dc_bool_0_4 << 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 << int_tensor_2
def test_Binary_Lshift_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 << self.np_int_5_9
temp_dc = self.dc_bool_0_4 << self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 << bool_scalar
def test_Binary_Lshift_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 << True
temp_dc = self.dc_int_0_4 << True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 << int_scalar
def test_Binary_Lshift_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 << 5
temp_dc = self.dc_int_0_4 << 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 << int_tensor_2
def test_Binary_Lshift_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 << self.np_int_5_9
temp_dc = self.dc_int_0_4 << self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary Rshift
# bool_tensor_1 >> bool_scalar
def test_Binary_Rshift_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 >> True
temp_dc = self.dc_bool_0_4 >> True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 >> int_scalar
def test_Binary_Rshift_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 >> 5
temp_dc = self.dc_bool_0_4 >> 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 >> int_tensor_2
def test_Binary_Rshift_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 >> self.np_int_5_9
temp_dc = self.dc_bool_0_4 >> self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 >> bool_scalar
def test_Binary_Rshift_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 >> True
temp_dc = self.dc_int_0_4 >> True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 >> int_scalar
def test_Binary_Rshift_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 >> 5
temp_dc = self.dc_int_0_4 >> 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 >> int_tensor_2
def test_Binary_Rshift_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 >> self.np_int_5_9
temp_dc = self.dc_int_0_4 >> self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary Power
# bool_tensor_1 ** float_scalar
def test_Binary_Power_bool_tensor_1_float_scalar (self):
temp_np = self.np_bool_0_4 ** 5.0
temp_dc = self.dc_bool_0_4 ** 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 ** float_tensor_2
def test_Binary_Power_bool_tensor_1_float_tensor_2 (self):
temp_np = self.np_bool_0_4 ** self.np_float_5_9
temp_dc = self.dc_bool_0_4 ** self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 ** int_scalar
def test_Binary_Power_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 ** 5
temp_dc = self.dc_bool_0_4 ** 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 ** int_tensor_2
def test_Binary_Power_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 ** self.np_int_5_9
temp_dc = self.dc_bool_0_4 ** self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ** bool_scalar
def test_Binary_Power_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 ** True
temp_dc = self.dc_int_0_4 ** True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ** bool_tensor_2
def test_Binary_Power_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4 ** self.np_bool_5_9
temp_dc = self.dc_int_0_4 ** self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ** float_scalar
def test_Binary_Power_int_tensor_1_float_scalar (self):
temp_np = self.np_int_0_4 ** 5.0
temp_dc = self.dc_int_0_4 ** 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ** float_tensor_2
def test_Binary_Power_int_tensor_1_float_tensor_2 (self):
temp_np = self.np_int_0_4 ** self.np_float_5_9
temp_dc = self.dc_int_0_4 ** self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ** int_scalar
def test_Binary_Power_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 ** 5
temp_dc = self.dc_int_0_4 ** 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ** int_tensor_2
def test_Binary_Power_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 ** self.np_int_5_9
temp_dc = self.dc_int_0_4 ** self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 ** bool_scalar
def test_Binary_Power_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4 ** True
temp_dc = self.dc_float_0_4 ** True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 ** bool_tensor_2
def test_Binary_Power_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4 ** self.np_bool_5_9
temp_dc = self.dc_float_0_4 ** self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 ** float_scalar
def test_Binary_Power_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4 ** 5.0
temp_dc = self.dc_float_0_4 ** 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 ** float_tensor_2
def test_Binary_Power_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4 ** self.np_float_5_9
temp_dc = self.dc_float_0_4 ** self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 ** int_scalar
def test_Binary_Power_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4 ** 5
temp_dc = self.dc_float_0_4 ** 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 ** int_tensor_2
def test_Binary_Power_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4 ** self.np_int_5_9
temp_dc = self.dc_float_0_4 ** self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary And
# bool_tensor_1 & bool_scalar
def test_Binary_And_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 & True
temp_dc = self.dc_bool_0_4 & True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 & bool_tensor_2
def test_Binary_And_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4 & self.np_bool_5_9
temp_dc = self.dc_bool_0_4 & self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 & int_scalar
def test_Binary_And_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 & 5
temp_dc = self.dc_bool_0_4 & 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 & int_tensor_2
def test_Binary_And_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 & self.np_int_5_9
temp_dc = self.dc_bool_0_4 & self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 & bool_scalar
def test_Binary_And_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 & True
temp_dc = self.dc_int_0_4 & True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 & bool_tensor_2
def test_Binary_And_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4 & self.np_bool_5_9
temp_dc = self.dc_int_0_4 & self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 & int_scalar
def test_Binary_And_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 & 5
temp_dc = self.dc_int_0_4 & 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 & int_tensor_2
def test_Binary_And_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 & self.np_int_5_9
temp_dc = self.dc_int_0_4 & self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary Or
# bool_tensor_1 | bool_scalar
def test_Binary_Or_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 | True
temp_dc = self.dc_bool_0_4 | True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 | bool_tensor_2
def test_Binary_Or_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4 | self.np_bool_5_9
temp_dc = self.dc_bool_0_4 | self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 | int_scalar
def test_Binary_Or_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 | 5
temp_dc = self.dc_bool_0_4 | 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 | int_tensor_2
def test_Binary_Or_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 | self.np_int_5_9
temp_dc = self.dc_bool_0_4 | self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 | bool_scalar
def test_Binary_Or_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 | True
temp_dc = self.dc_int_0_4 | True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 | bool_tensor_2
def test_Binary_Or_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4 | self.np_bool_5_9
temp_dc = self.dc_int_0_4 | self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 | int_scalar
def test_Binary_Or_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 | 5
temp_dc = self.dc_int_0_4 | 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 | int_tensor_2
def test_Binary_Or_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 | self.np_int_5_9
temp_dc = self.dc_int_0_4 | self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Binary Xor
# bool_tensor_1 ^ bool_scalar
def test_Binary_Xor_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 ^ True
temp_dc = self.dc_bool_0_4 ^ True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 ^ bool_tensor_2
def test_Binary_Xor_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4 ^ self.np_bool_5_9
temp_dc = self.dc_bool_0_4 ^ self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 ^ int_scalar
def test_Binary_Xor_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 ^ 5
temp_dc = self.dc_bool_0_4 ^ 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 ^ int_tensor_2
def test_Binary_Xor_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 ^ self.np_int_5_9
temp_dc = self.dc_bool_0_4 ^ self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ^ bool_scalar
def test_Binary_Xor_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 ^ True
temp_dc = self.dc_int_0_4 ^ True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ^ bool_tensor_2
def test_Binary_Xor_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4 ^ self.np_bool_5_9
temp_dc = self.dc_int_0_4 ^ self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ^ int_scalar
def test_Binary_Xor_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 ^ 5
temp_dc = self.dc_int_0_4 ^ 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 ^ int_tensor_2
def test_Binary_Xor_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 ^ self.np_int_5_9
temp_dc = self.dc_int_0_4 ^ self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Unary Pos
# + int_tensor_1
def test_Unary_Pos_int_tensor_1 (self):
temp_np = + self.np_int_0_4
temp_dc = + self.dc_int_0_4
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# + float_tensor_1
def test_Unary_Pos_float_tensor_1 (self):
temp_np = + self.np_float_0_4
temp_dc = + self.dc_float_0_4
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Unary Neg
# - int_tensor_1
def test_Unary_Neg_int_tensor_1 (self):
temp_np = - self.np_int_0_4
temp_dc = - self.dc_int_0_4
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# - float_tensor_1
def test_Unary_Neg_float_tensor_1 (self):
temp_np = - self.np_float_0_4
temp_dc = - self.dc_float_0_4
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Comparison Greater_Than
# bool_tensor_1 > bool_scalar
def test_Comparison_Greater_Than_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 > True
temp_dc = self.dc_bool_0_4 > True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 > bool_tensor_2
def test_Comparison_Greater_Than_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4 > self.np_bool_5_9
temp_dc = self.dc_bool_0_4 > self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 > float_scalar
def test_Comparison_Greater_Than_bool_tensor_1_float_scalar (self):
temp_np = self.np_bool_0_4 > 5.0
temp_dc = self.dc_bool_0_4 > 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 > float_tensor_2
def test_Comparison_Greater_Than_bool_tensor_1_float_tensor_2 (self):
temp_np = self.np_bool_0_4 > self.np_float_5_9
temp_dc = self.dc_bool_0_4 > self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 > int_scalar
def test_Comparison_Greater_Than_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 > 5
temp_dc = self.dc_bool_0_4 > 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 > int_tensor_2
def test_Comparison_Greater_Than_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 > self.np_int_5_9
temp_dc = self.dc_bool_0_4 > self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 > bool_scalar
def test_Comparison_Greater_Than_int_tensor_1_bool_scalar (self):
temp_np = self.np_int_0_4 > True
temp_dc = self.dc_int_0_4 > True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 > bool_tensor_2
def test_Comparison_Greater_Than_int_tensor_1_bool_tensor_2 (self):
temp_np = self.np_int_0_4 > self.np_bool_5_9
temp_dc = self.dc_int_0_4 > self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 > float_scalar
def test_Comparison_Greater_Than_int_tensor_1_float_scalar (self):
temp_np = self.np_int_0_4 > 5.0
temp_dc = self.dc_int_0_4 > 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 > float_tensor_2
def test_Comparison_Greater_Than_int_tensor_1_float_tensor_2 (self):
temp_np = self.np_int_0_4 > self.np_float_5_9
temp_dc = self.dc_int_0_4 > self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 > int_scalar
def test_Comparison_Greater_Than_int_tensor_1_int_scalar (self):
temp_np = self.np_int_0_4 > 5
temp_dc = self.dc_int_0_4 > 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# int_tensor_1 > int_tensor_2
def test_Comparison_Greater_Than_int_tensor_1_int_tensor_2 (self):
temp_np = self.np_int_0_4 > self.np_int_5_9
temp_dc = self.dc_int_0_4 > self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 > bool_scalar
def test_Comparison_Greater_Than_float_tensor_1_bool_scalar (self):
temp_np = self.np_float_0_4 > True
temp_dc = self.dc_float_0_4 > True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 > bool_tensor_2
def test_Comparison_Greater_Than_float_tensor_1_bool_tensor_2 (self):
temp_np = self.np_float_0_4 > self.np_bool_5_9
temp_dc = self.dc_float_0_4 > self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 > float_scalar
def test_Comparison_Greater_Than_float_tensor_1_float_scalar (self):
temp_np = self.np_float_0_4 > 5.0
temp_dc = self.dc_float_0_4 > 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 > float_tensor_2
def test_Comparison_Greater_Than_float_tensor_1_float_tensor_2 (self):
temp_np = self.np_float_0_4 > self.np_float_5_9
temp_dc = self.dc_float_0_4 > self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 > int_scalar
def test_Comparison_Greater_Than_float_tensor_1_int_scalar (self):
temp_np = self.np_float_0_4 > 5
temp_dc = self.dc_float_0_4 > 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# float_tensor_1 > int_tensor_2
def test_Comparison_Greater_Than_float_tensor_1_int_tensor_2 (self):
temp_np = self.np_float_0_4 > self.np_int_5_9
temp_dc = self.dc_float_0_4 > self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# Comparison Greater_Equal
# bool_tensor_1 >= bool_scalar
def test_Comparison_Greater_Equal_bool_tensor_1_bool_scalar (self):
temp_np = self.np_bool_0_4 >= True
temp_dc = self.dc_bool_0_4 >= True
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 >= bool_tensor_2
def test_Comparison_Greater_Equal_bool_tensor_1_bool_tensor_2 (self):
temp_np = self.np_bool_0_4 >= self.np_bool_5_9
temp_dc = self.dc_bool_0_4 >= self.dc_bool_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 >= float_scalar
def test_Comparison_Greater_Equal_bool_tensor_1_float_scalar (self):
temp_np = self.np_bool_0_4 >= 5.0
temp_dc = self.dc_bool_0_4 >= 5.0
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 >= float_tensor_2
def test_Comparison_Greater_Equal_bool_tensor_1_float_tensor_2 (self):
temp_np = self.np_bool_0_4 >= self.np_float_5_9
temp_dc = self.dc_bool_0_4 >= self.dc_float_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 >= int_scalar
def test_Comparison_Greater_Equal_bool_tensor_1_int_scalar (self):
temp_np = self.np_bool_0_4 >= 5
temp_dc = self.dc_bool_0_4 >= 5
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
# bool_tensor_1 >= int_tensor_2
def test_Comparison_Greater_Equal_bool_tensor_1_int_tensor_2 (self):
temp_np = self.np_bool_0_4 >= self.np_int_5_9
temp_dc = self.dc_bool_0_4 >= self.dc_int_5_9
np.testing.assert_array_equal(temp_np, np.array(temp_dc.data()))
def _assert_ge_int(self, np_lhs, dc_lhs, np_rhs, dc_rhs):
    """Assert deepC's elementwise >= matches numpy's for an int lhs tensor."""
    expected = np_lhs >= np_rhs
    actual = dc_lhs >= dc_rhs
    np.testing.assert_array_equal(expected, np.array(actual.data()))

# int_tensor_1 >= bool_scalar
def test_Comparison_Greater_Equal_int_tensor_1_bool_scalar(self):
    self._assert_ge_int(self.np_int_0_4, self.dc_int_0_4, True, True)

# int_tensor_1 >= bool_tensor_2
def test_Comparison_Greater_Equal_int_tensor_1_bool_tensor_2(self):
    self._assert_ge_int(self.np_int_0_4, self.dc_int_0_4,
                        self.np_bool_5_9, self.dc_bool_5_9)

# int_tensor_1 >= float_scalar
def test_Comparison_Greater_Equal_int_tensor_1_float_scalar(self):
    self._assert_ge_int(self.np_int_0_4, self.dc_int_0_4, 5.0, 5.0)

# int_tensor_1 >= float_tensor_2
def test_Comparison_Greater_Equal_int_tensor_1_float_tensor_2(self):
    self._assert_ge_int(self.np_int_0_4, self.dc_int_0_4,
                        self.np_float_5_9, self.dc_float_5_9)

# int_tensor_1 >= int_scalar
def test_Comparison_Greater_Equal_int_tensor_1_int_scalar(self):
    self._assert_ge_int(self.np_int_0_4, self.dc_int_0_4, 5, 5)

# int_tensor_1 >= int_tensor_2
def test_Comparison_Greater_Equal_int_tensor_1_int_tensor_2(self):
    self._assert_ge_int(self.np_int_0_4, self.dc_int_0_4,
                        self.np_int_5_9, self.dc_int_5_9)
def _assert_ge_float(self, np_lhs, dc_lhs, np_rhs, dc_rhs):
    """Assert deepC's elementwise >= matches numpy's for a float lhs tensor."""
    expected = np_lhs >= np_rhs
    actual = dc_lhs >= dc_rhs
    np.testing.assert_array_equal(expected, np.array(actual.data()))

# float_tensor_1 >= bool_scalar
def test_Comparison_Greater_Equal_float_tensor_1_bool_scalar(self):
    self._assert_ge_float(self.np_float_0_4, self.dc_float_0_4, True, True)

# float_tensor_1 >= bool_tensor_2
def test_Comparison_Greater_Equal_float_tensor_1_bool_tensor_2(self):
    self._assert_ge_float(self.np_float_0_4, self.dc_float_0_4,
                          self.np_bool_5_9, self.dc_bool_5_9)

# float_tensor_1 >= float_scalar
def test_Comparison_Greater_Equal_float_tensor_1_float_scalar(self):
    self._assert_ge_float(self.np_float_0_4, self.dc_float_0_4, 5.0, 5.0)

# float_tensor_1 >= float_tensor_2
def test_Comparison_Greater_Equal_float_tensor_1_float_tensor_2(self):
    self._assert_ge_float(self.np_float_0_4, self.dc_float_0_4,
                          self.np_float_5_9, self.dc_float_5_9)

# float_tensor_1 >= int_scalar
def test_Comparison_Greater_Equal_float_tensor_1_int_scalar(self):
    self._assert_ge_float(self.np_float_0_4, self.dc_float_0_4, 5, 5)

# float_tensor_1 >= int_tensor_2
def test_Comparison_Greater_Equal_float_tensor_1_int_tensor_2(self):
    self._assert_ge_float(self.np_float_0_4, self.dc_float_0_4,
                          self.np_int_5_9, self.dc_int_5_9)
# Comparison Less_Than
def _assert_lt(self, np_lhs, dc_lhs, np_rhs, dc_rhs):
    """Assert deepC's elementwise < matches numpy's reference result."""
    expected = np_lhs < np_rhs
    actual = dc_lhs < dc_rhs
    np.testing.assert_array_equal(expected, np.array(actual.data()))

# bool_tensor_1 < bool_scalar
def test_Comparison_Less_Than_bool_tensor_1_bool_scalar(self):
    self._assert_lt(self.np_bool_0_4, self.dc_bool_0_4, True, True)

# bool_tensor_1 < bool_tensor_2
def test_Comparison_Less_Than_bool_tensor_1_bool_tensor_2(self):
    self._assert_lt(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# bool_tensor_1 < float_scalar
def test_Comparison_Less_Than_bool_tensor_1_float_scalar(self):
    self._assert_lt(self.np_bool_0_4, self.dc_bool_0_4, 5.0, 5.0)

# bool_tensor_1 < float_tensor_2
def test_Comparison_Less_Than_bool_tensor_1_float_tensor_2(self):
    self._assert_lt(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# bool_tensor_1 < int_scalar
def test_Comparison_Less_Than_bool_tensor_1_int_scalar(self):
    self._assert_lt(self.np_bool_0_4, self.dc_bool_0_4, 5, 5)

# bool_tensor_1 < int_tensor_2
def test_Comparison_Less_Than_bool_tensor_1_int_tensor_2(self):
    self._assert_lt(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_int_5_9, self.dc_int_5_9)

# int_tensor_1 < bool_scalar
def test_Comparison_Less_Than_int_tensor_1_bool_scalar(self):
    self._assert_lt(self.np_int_0_4, self.dc_int_0_4, True, True)

# int_tensor_1 < bool_tensor_2
def test_Comparison_Less_Than_int_tensor_1_bool_tensor_2(self):
    self._assert_lt(self.np_int_0_4, self.dc_int_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# int_tensor_1 < float_scalar
def test_Comparison_Less_Than_int_tensor_1_float_scalar(self):
    self._assert_lt(self.np_int_0_4, self.dc_int_0_4, 5.0, 5.0)

# int_tensor_1 < float_tensor_2
def test_Comparison_Less_Than_int_tensor_1_float_tensor_2(self):
    self._assert_lt(self.np_int_0_4, self.dc_int_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# int_tensor_1 < int_scalar
def test_Comparison_Less_Than_int_tensor_1_int_scalar(self):
    self._assert_lt(self.np_int_0_4, self.dc_int_0_4, 5, 5)

# int_tensor_1 < int_tensor_2
def test_Comparison_Less_Than_int_tensor_1_int_tensor_2(self):
    self._assert_lt(self.np_int_0_4, self.dc_int_0_4,
                    self.np_int_5_9, self.dc_int_5_9)

# float_tensor_1 < bool_scalar
def test_Comparison_Less_Than_float_tensor_1_bool_scalar(self):
    self._assert_lt(self.np_float_0_4, self.dc_float_0_4, True, True)

# float_tensor_1 < bool_tensor_2
def test_Comparison_Less_Than_float_tensor_1_bool_tensor_2(self):
    self._assert_lt(self.np_float_0_4, self.dc_float_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# float_tensor_1 < float_scalar
def test_Comparison_Less_Than_float_tensor_1_float_scalar(self):
    self._assert_lt(self.np_float_0_4, self.dc_float_0_4, 5.0, 5.0)

# float_tensor_1 < float_tensor_2
def test_Comparison_Less_Than_float_tensor_1_float_tensor_2(self):
    self._assert_lt(self.np_float_0_4, self.dc_float_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# float_tensor_1 < int_scalar
def test_Comparison_Less_Than_float_tensor_1_int_scalar(self):
    self._assert_lt(self.np_float_0_4, self.dc_float_0_4, 5, 5)

# float_tensor_1 < int_tensor_2
def test_Comparison_Less_Than_float_tensor_1_int_tensor_2(self):
    self._assert_lt(self.np_float_0_4, self.dc_float_0_4,
                    self.np_int_5_9, self.dc_int_5_9)
# Comparison Less_Equal
def _assert_le(self, np_lhs, dc_lhs, np_rhs, dc_rhs):
    """Assert deepC's elementwise <= matches numpy's reference result."""
    expected = np_lhs <= np_rhs
    actual = dc_lhs <= dc_rhs
    np.testing.assert_array_equal(expected, np.array(actual.data()))

# bool_tensor_1 <= bool_scalar
def test_Comparison_Less_Equal_bool_tensor_1_bool_scalar(self):
    self._assert_le(self.np_bool_0_4, self.dc_bool_0_4, True, True)

# bool_tensor_1 <= bool_tensor_2
def test_Comparison_Less_Equal_bool_tensor_1_bool_tensor_2(self):
    self._assert_le(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# bool_tensor_1 <= float_scalar
def test_Comparison_Less_Equal_bool_tensor_1_float_scalar(self):
    self._assert_le(self.np_bool_0_4, self.dc_bool_0_4, 5.0, 5.0)

# bool_tensor_1 <= float_tensor_2
def test_Comparison_Less_Equal_bool_tensor_1_float_tensor_2(self):
    self._assert_le(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# bool_tensor_1 <= int_scalar
def test_Comparison_Less_Equal_bool_tensor_1_int_scalar(self):
    self._assert_le(self.np_bool_0_4, self.dc_bool_0_4, 5, 5)

# bool_tensor_1 <= int_tensor_2
def test_Comparison_Less_Equal_bool_tensor_1_int_tensor_2(self):
    self._assert_le(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_int_5_9, self.dc_int_5_9)

# int_tensor_1 <= bool_scalar
def test_Comparison_Less_Equal_int_tensor_1_bool_scalar(self):
    self._assert_le(self.np_int_0_4, self.dc_int_0_4, True, True)

# int_tensor_1 <= bool_tensor_2
def test_Comparison_Less_Equal_int_tensor_1_bool_tensor_2(self):
    self._assert_le(self.np_int_0_4, self.dc_int_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# int_tensor_1 <= float_scalar
def test_Comparison_Less_Equal_int_tensor_1_float_scalar(self):
    self._assert_le(self.np_int_0_4, self.dc_int_0_4, 5.0, 5.0)

# int_tensor_1 <= float_tensor_2
def test_Comparison_Less_Equal_int_tensor_1_float_tensor_2(self):
    self._assert_le(self.np_int_0_4, self.dc_int_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# int_tensor_1 <= int_scalar
def test_Comparison_Less_Equal_int_tensor_1_int_scalar(self):
    self._assert_le(self.np_int_0_4, self.dc_int_0_4, 5, 5)

# int_tensor_1 <= int_tensor_2
def test_Comparison_Less_Equal_int_tensor_1_int_tensor_2(self):
    self._assert_le(self.np_int_0_4, self.dc_int_0_4,
                    self.np_int_5_9, self.dc_int_5_9)

# float_tensor_1 <= bool_scalar
def test_Comparison_Less_Equal_float_tensor_1_bool_scalar(self):
    self._assert_le(self.np_float_0_4, self.dc_float_0_4, True, True)

# float_tensor_1 <= bool_tensor_2
def test_Comparison_Less_Equal_float_tensor_1_bool_tensor_2(self):
    self._assert_le(self.np_float_0_4, self.dc_float_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# float_tensor_1 <= float_scalar
def test_Comparison_Less_Equal_float_tensor_1_float_scalar(self):
    self._assert_le(self.np_float_0_4, self.dc_float_0_4, 5.0, 5.0)

# float_tensor_1 <= float_tensor_2
def test_Comparison_Less_Equal_float_tensor_1_float_tensor_2(self):
    self._assert_le(self.np_float_0_4, self.dc_float_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# float_tensor_1 <= int_scalar
def test_Comparison_Less_Equal_float_tensor_1_int_scalar(self):
    self._assert_le(self.np_float_0_4, self.dc_float_0_4, 5, 5)

# float_tensor_1 <= int_tensor_2
def test_Comparison_Less_Equal_float_tensor_1_int_tensor_2(self):
    self._assert_le(self.np_float_0_4, self.dc_float_0_4,
                    self.np_int_5_9, self.dc_int_5_9)
# Comparison Equal
def _assert_eq(self, np_lhs, dc_lhs, np_rhs, dc_rhs):
    """Assert deepC's elementwise == matches numpy's reference result."""
    # NOTE: '== True' etc. is intentional — it is an elementwise tensor
    # comparison, not a truthiness test.
    expected = np_lhs == np_rhs
    actual = dc_lhs == dc_rhs
    np.testing.assert_array_equal(expected, np.array(actual.data()))

# bool_tensor_1 == bool_scalar
def test_Comparison_Equal_bool_tensor_1_bool_scalar(self):
    self._assert_eq(self.np_bool_0_4, self.dc_bool_0_4, True, True)

# bool_tensor_1 == bool_tensor_2
def test_Comparison_Equal_bool_tensor_1_bool_tensor_2(self):
    self._assert_eq(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# bool_tensor_1 == float_scalar
def test_Comparison_Equal_bool_tensor_1_float_scalar(self):
    self._assert_eq(self.np_bool_0_4, self.dc_bool_0_4, 5.0, 5.0)

# bool_tensor_1 == float_tensor_2
def test_Comparison_Equal_bool_tensor_1_float_tensor_2(self):
    self._assert_eq(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# bool_tensor_1 == int_scalar
def test_Comparison_Equal_bool_tensor_1_int_scalar(self):
    self._assert_eq(self.np_bool_0_4, self.dc_bool_0_4, 5, 5)

# bool_tensor_1 == int_tensor_2
def test_Comparison_Equal_bool_tensor_1_int_tensor_2(self):
    self._assert_eq(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_int_5_9, self.dc_int_5_9)

# int_tensor_1 == bool_scalar
def test_Comparison_Equal_int_tensor_1_bool_scalar(self):
    self._assert_eq(self.np_int_0_4, self.dc_int_0_4, True, True)

# int_tensor_1 == bool_tensor_2
def test_Comparison_Equal_int_tensor_1_bool_tensor_2(self):
    self._assert_eq(self.np_int_0_4, self.dc_int_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# int_tensor_1 == float_scalar
def test_Comparison_Equal_int_tensor_1_float_scalar(self):
    self._assert_eq(self.np_int_0_4, self.dc_int_0_4, 5.0, 5.0)

# int_tensor_1 == float_tensor_2
def test_Comparison_Equal_int_tensor_1_float_tensor_2(self):
    self._assert_eq(self.np_int_0_4, self.dc_int_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# int_tensor_1 == int_scalar
def test_Comparison_Equal_int_tensor_1_int_scalar(self):
    self._assert_eq(self.np_int_0_4, self.dc_int_0_4, 5, 5)

# int_tensor_1 == int_tensor_2
def test_Comparison_Equal_int_tensor_1_int_tensor_2(self):
    self._assert_eq(self.np_int_0_4, self.dc_int_0_4,
                    self.np_int_5_9, self.dc_int_5_9)

# float_tensor_1 == bool_scalar
def test_Comparison_Equal_float_tensor_1_bool_scalar(self):
    self._assert_eq(self.np_float_0_4, self.dc_float_0_4, True, True)

# float_tensor_1 == bool_tensor_2
def test_Comparison_Equal_float_tensor_1_bool_tensor_2(self):
    self._assert_eq(self.np_float_0_4, self.dc_float_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# float_tensor_1 == float_scalar
def test_Comparison_Equal_float_tensor_1_float_scalar(self):
    self._assert_eq(self.np_float_0_4, self.dc_float_0_4, 5.0, 5.0)

# float_tensor_1 == float_tensor_2
def test_Comparison_Equal_float_tensor_1_float_tensor_2(self):
    self._assert_eq(self.np_float_0_4, self.dc_float_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# float_tensor_1 == int_scalar
def test_Comparison_Equal_float_tensor_1_int_scalar(self):
    self._assert_eq(self.np_float_0_4, self.dc_float_0_4, 5, 5)

# float_tensor_1 == int_tensor_2
def test_Comparison_Equal_float_tensor_1_int_tensor_2(self):
    self._assert_eq(self.np_float_0_4, self.dc_float_0_4,
                    self.np_int_5_9, self.dc_int_5_9)
# Comparison Not_Equal
def _assert_ne(self, np_lhs, dc_lhs, np_rhs, dc_rhs):
    """Assert deepC's elementwise != matches numpy's reference result."""
    expected = np_lhs != np_rhs
    actual = dc_lhs != dc_rhs
    np.testing.assert_array_equal(expected, np.array(actual.data()))

# bool_tensor_1 != bool_scalar
def test_Comparison_Not_Equal_bool_tensor_1_bool_scalar(self):
    self._assert_ne(self.np_bool_0_4, self.dc_bool_0_4, True, True)

# bool_tensor_1 != bool_tensor_2
def test_Comparison_Not_Equal_bool_tensor_1_bool_tensor_2(self):
    self._assert_ne(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# bool_tensor_1 != float_scalar
def test_Comparison_Not_Equal_bool_tensor_1_float_scalar(self):
    self._assert_ne(self.np_bool_0_4, self.dc_bool_0_4, 5.0, 5.0)

# bool_tensor_1 != float_tensor_2
def test_Comparison_Not_Equal_bool_tensor_1_float_tensor_2(self):
    self._assert_ne(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# bool_tensor_1 != int_scalar
def test_Comparison_Not_Equal_bool_tensor_1_int_scalar(self):
    self._assert_ne(self.np_bool_0_4, self.dc_bool_0_4, 5, 5)

# bool_tensor_1 != int_tensor_2
def test_Comparison_Not_Equal_bool_tensor_1_int_tensor_2(self):
    self._assert_ne(self.np_bool_0_4, self.dc_bool_0_4,
                    self.np_int_5_9, self.dc_int_5_9)

# int_tensor_1 != bool_scalar
def test_Comparison_Not_Equal_int_tensor_1_bool_scalar(self):
    self._assert_ne(self.np_int_0_4, self.dc_int_0_4, True, True)

# int_tensor_1 != bool_tensor_2
def test_Comparison_Not_Equal_int_tensor_1_bool_tensor_2(self):
    self._assert_ne(self.np_int_0_4, self.dc_int_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# int_tensor_1 != float_scalar
def test_Comparison_Not_Equal_int_tensor_1_float_scalar(self):
    self._assert_ne(self.np_int_0_4, self.dc_int_0_4, 5.0, 5.0)

# int_tensor_1 != float_tensor_2
def test_Comparison_Not_Equal_int_tensor_1_float_tensor_2(self):
    self._assert_ne(self.np_int_0_4, self.dc_int_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# int_tensor_1 != int_scalar
def test_Comparison_Not_Equal_int_tensor_1_int_scalar(self):
    self._assert_ne(self.np_int_0_4, self.dc_int_0_4, 5, 5)

# int_tensor_1 != int_tensor_2
def test_Comparison_Not_Equal_int_tensor_1_int_tensor_2(self):
    self._assert_ne(self.np_int_0_4, self.dc_int_0_4,
                    self.np_int_5_9, self.dc_int_5_9)

# float_tensor_1 != bool_scalar
def test_Comparison_Not_Equal_float_tensor_1_bool_scalar(self):
    self._assert_ne(self.np_float_0_4, self.dc_float_0_4, True, True)

# float_tensor_1 != bool_tensor_2
def test_Comparison_Not_Equal_float_tensor_1_bool_tensor_2(self):
    self._assert_ne(self.np_float_0_4, self.dc_float_0_4,
                    self.np_bool_5_9, self.dc_bool_5_9)

# float_tensor_1 != float_scalar
def test_Comparison_Not_Equal_float_tensor_1_float_scalar(self):
    self._assert_ne(self.np_float_0_4, self.dc_float_0_4, 5.0, 5.0)

# float_tensor_1 != float_tensor_2
def test_Comparison_Not_Equal_float_tensor_1_float_tensor_2(self):
    self._assert_ne(self.np_float_0_4, self.dc_float_0_4,
                    self.np_float_5_9, self.dc_float_5_9)

# float_tensor_1 != int_scalar
def test_Comparison_Not_Equal_float_tensor_1_int_scalar(self):
    self._assert_ne(self.np_float_0_4, self.dc_float_0_4, 5, 5)

# float_tensor_1 != int_tensor_2
def test_Comparison_Not_Equal_float_tensor_1_int_tensor_2(self):
    self._assert_ne(self.np_float_0_4, self.dc_float_0_4,
                    self.np_int_5_9, self.dc_int_5_9)
def tearDown(self):
    """No per-test cleanup is needed; kept for unittest symmetry."""
    return "test finished"
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
<file_sep># This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2012 <NAME> <<EMAIL>>
#
# This Source Code Form is subject to the terms of the Mozilla
# Public License v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: <EMAIL> (<NAME>)
#
# Make the long-awaited conversion to MPL.
# The obsolete LGPL3/GPL2 dual-license banner that must be stripped.
# NOTE: this literal is matched verbatim against file contents — do not edit.
lgpl3_header = '''
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
'''

# The MPL2 banner that replaces it.
mpl2_header = """
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""

import os
import sys

# Files this script must never rewrite.
exclusions = set(['relicense.py'])


def update(text):
    """Swap the LGPL banner for the MPL one.

    Returns a (new_text, changed) pair; `changed` is False when the
    legacy banner does not occur in `text`.
    """
    if lgpl3_header not in text:
        return text, False
    return text.replace(lgpl3_header, mpl2_header), True
# Walk the tree rooted at the first CLI argument and rewrite every file's
# license banner in place.
rootdir = sys.argv[1]
for root, sub_folders, files in os.walk(rootdir):
    for basename in files:
        # BUG FIX: `filename` was previously computed *after* the exclusion
        # check, so `print('SKIPPED', filename)` raised a NameError on the
        # first excluded file (and printed a stale path afterwards).
        filename = os.path.join(root, basename)
        if basename in exclusions:
            print('SKIPPED', filename)
            continue
        # BUG FIX: `file(...)` is a Python-2-only builtin; the script already
        # uses Python-3 print() calls, so use open() with context managers.
        with open(filename) as fo:
            text = fo.read()
        text, updated = update(text)
        if updated:
            with open(filename, "w") as fo:
                fo.write(text)
            print('UPDATED', filename)
        else:
            print('       ', filename)
<file_sep>#!/bin/bash
# base name of the bench
# it reads $1.out
# and generates $1.pdf
WHAT=$1
bench=$2

# Build the gnuplot column-header line from the bench settings file.
header="rev "
while read line
do
  # BUG FIX: '$line' (single quotes) was the literal two-character string
  # "$line" — always non-empty — so blank settings lines were never skipped.
  # Double quotes expand the variable so the -z test works as intended.
  if [ ! -z "$line" ]; then
    header="$header \"$line\""
  fi
done < $bench"_settings.txt"
echo $header > $WHAT.out.header
cat $WHAT.out >> $WHAT.out.header

# Emit the gnuplot script and render the PDF.
echo "set title '$WHAT'" > $WHAT.gnuplot
echo "set key autotitle columnhead outside " >> $WHAT.gnuplot
echo "set xtics rotate 1" >> $WHAT.gnuplot
echo "set term pdf color rounded enhanced fontscale 0.35 size 7in,5in" >> $WHAT.gnuplot
echo set output "'"$WHAT.pdf"'" >> $WHAT.gnuplot
col=`cat $bench"_settings.txt" | wc -l`
echo "plot for [col=2:$col+1] '$WHAT.out.header' using 0:col:xticlabels(1) with lines" >> $WHAT.gnuplot
echo " " >> $WHAT.gnuplot
gnuplot -persist < $WHAT.gnuplot

# generate a png file
# convert -background white -density 120 -rotate 90 -resize 800 +dither -colors 256 -quality 0 $WHAT.ps -background white -flatten .$WHAT.png

# clean
rm $WHAT.out.header $WHAT.gnuplot
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
//
#pragma once
#include "core/broadcast.h"
#include "operators/baseOperator.h"
#include <math.h>
#include <string>
using namespace Eigen;
namespace dnnc {
/*! Element-wise binary modulus operator (ONNX "Mod").
 *
 *  The `fmod` attribute selects the semantics: 0 uses integer remainder
 *  (operands are truncated to int), 1 uses C's fmod() (required for
 *  floating-point tensors).
 */
template <typename T> class Mod : public baseOperator<T, T, T> {
  // Mod attributes
protected:
  // 0 => integer '%' semantics; 1 => fmod() semantics.
  int fmod = 0;

public:
  Mod(std::string name = "opMod", int fmod = 0)
      : baseOperator<T, T, T>(opMod, name) {
    this->fmod = fmod;
    // Check for fmod or not
    // NOTE(review): float/double with fmod==0 only logs an error here —
    // the object is still constructed, and compute() would then truncate
    // operands to int in mod_function. Confirm callers treat the log as fatal.
    if ((fmod == 0) && ((this->template type_check<T, float, double>()))) {
      SPDLOG_ERROR("Set fmod to 1 to pass float values.");
      return;
    }
  }

  // Read the fmod attribute. Returns true only for the attr_mode key
  // (presumably mapped to the ONNX "fmod" attribute — confirm).
  bool getAttribute(OPATTR attrName, int &obj) override {
    if (attrName == attr_mode) {
      obj = fmod;
      return true;
    }
    return false;
  }
  // Write the fmod attribute; same attr_mode key as getAttribute.
  bool setAttribute(OPATTR attrName, int obj) override {
    if (attrName == attr_mode) {
      fmod = obj;
      return true;
    }
    return false;
  }

  // Integer remainder: both operands are truncated to int first.
  static T mod_function(T x, T y) { return (T)((int)x % (int)y); }
  // Floating-point remainder via the C library.
  static T fmod_function(T x, T y) { return ::fmod(x, y); }

  /*! Element-wise a mod b. Shapes are first broadcast via
   *  binaryBroadcastReShape; if they still differ afterwards the operator
   *  logs an error and returns NULL_TENSOR. */
  tensor<T> compute(tensor<T> &a /*!< : N D tensor input*/,
                    tensor<T> &b /*!< : N D tensor input*/) {
    std::vector<DIMENSION> resultShape = binaryBroadcastReShape(a, b);
    tensor<T> result(resultShape);
    if (a.shape() != b.shape()) {
      SPDLOG_ERROR("tensor dimenions not appropriate for Mod operator.");
      return NULL_TENSOR<T>;
    }
    // Map both tensors into Eigen arrays (project macros; exact expansion
    // defined in core headers).
    DNNC_EIGEN_ARRAY_MAP(eigenVectorA, T, a);
    DNNC_EIGEN_ARRAY_MAP(eigenVectorB, T, b);
    DNNC_EIGEN_VECTOR_CTOR(T) eigen_result;
    // Apply the selected remainder function element-wise.
    if (fmod) {
      eigen_result.array() =
          eigenVectorA.array().binaryExpr(eigenVectorB.array(), &fmod_function);
    } else {
      eigen_result.array() =
          eigenVectorA.array().binaryExpr(eigenVectorB.array(), &mod_function);
    }
    result.load(eigen_result.data());
    return result;
  }
};
} // namespace dnnc
<file_sep># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import deepC.dnnc as dc
import numpy as np
import unittest
import sys
import onnx
class LSTMTest(unittest.TestCase):
    """Smoke test for the deepC LSTM operator against a reference ONNX model.

    setUp builds randomly initialised LSTM inputs — X, W, R, B, initial
    hidden/cell states and peephole weights P — as matching numpy / deepC
    tensor pairs sharing the same underlying values.
    """

    def _rand_pair(self, count, shape):
        # Draw `count` float32 normals once, hand the identical values to
        # both numpy and deepC, and reshape each copy to `shape`.
        flat = np.random.randn(count).astype(np.float32)
        return np.reshape(flat, shape), dc.reshape(dc.array(list(flat)), shape)

    def setUp(self):
        self.num_directions = 1
        self.seq_length = 3
        self.batch_size = 3
        self.input_size = 4
        self.hidden_size = 3
        nd = self.num_directions
        bs = self.batch_size
        hs = self.hidden_size
        self.np_x, self.dc_x = self._rand_pair(
            self.seq_length * bs * self.input_size,
            (self.seq_length, bs, self.input_size))
        self.np_w, self.dc_w = self._rand_pair(
            nd * 4 * hs * self.input_size, (nd, 4 * hs, self.input_size))
        self.np_r, self.dc_r = self._rand_pair(
            nd * 4 * hs * hs, (nd, 4 * hs, hs))
        self.np_b, self.dc_b = self._rand_pair(nd * 8 * hs, (nd, 8 * hs))
        self.np_h, self.dc_h = self._rand_pair(nd * bs * hs, (nd, bs, hs))
        self.np_c, self.dc_c = self._rand_pair(nd * bs * hs, (nd, bs, hs))
        self.np_p, self.dc_p = self._rand_pair(nd * 3 * hs, (nd, 3 * hs))

    def testAll(self):
        # For now this only exercises ONNX model loading; the numeric
        # comparison against dc.lstm(...) is still work in progress (the
        # original carried a large commented-out driver that read reference
        # weights/outputs from swig/outputs_XWRB and called
        # dc.lstm(x, w, r, b, s, h, c, p)).
        model = onnx.load('./parser/unit_operators/testcases/LSTM/LSTM.onnx')

    def tearDown(self):
        """No per-test cleanup is needed."""
        return "test finished"
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
<file_sep># dnnc.api
## A brief overview of the working principle
## Example 1
#### Structure
```python
dtype = {
"output" : "input",
}
```
#### Code
```python
tensor<output> example(tensor<input> &a, float alpha = 1.0) {
Example<output, input> op("localOpName", alpha);
return op.compute(a, b);
dtype = {
"double" : "double",
"float" : "int",
"int" : "bool",
}
}
```
#### Output
```cpp
tensor<double> example(tensor<double> &a, float alpha = 1.0) {
Example<double, double> op("localOpName", alpha);
return op.compute(a, b);
}
tensor<float> example(tensor<int> &a, float alpha = 1.0) {
Example<float, int> op("localOpName", alpha);
return op.compute(a, b);
}
tensor<int> example(tensor<bool> &a, float alpha = 1.0) {
Example<int, bool> op("localOpName", alpha);
return op.compute(a, b);
}
```
## Example 2
#### Structure
```python
dtype = {
"output" : ("input1","input2","input3"),
}
```
#### Code
```python
tensor<output> example(tensor<input> &a, float alpha = 1.0) {
Example<output, input> op("localOpName", alpha);
return op.compute(a, b);
dtype = {
"double" : ("double","float","int"),
}
}
```
#### Output
```cpp
tensor<double> example(tensor<double> &a, float alpha = 1.0) {
Example<double, double> op("localOpName", alpha);
return op.compute(a, b);
}
tensor<double> example(tensor<float> &a, float alpha = 1.0) {
Example<double, float> op("localOpName", alpha);
return op.compute(a, b);
}
tensor<double> example(tensor<int> &a, float alpha = 1.0) {
Example<double, int> op("localOpName", alpha);
return op.compute(a, b);
}
```
## Example 3
#### Structure
```python
dtype = {
"output" : ["input1","input2","input3"],
}
```
#### Code
```python
tensor<output> example(tensor<input1> &a, tensor<input2> &b, tensor<input3> &c, float alpha = 1.0) {
Example<output, input1, input2> op("localOpName", alpha);
return op.compute(a, b, c);
dtype = {
"double" : ["double","float","double"],
"float" : ["float","float","double"],
}
}
```
#### Output
```cpp
tensor<double> example(tensor<double> &a, tensor<float> &b, tensor<double> &c, float alpha = 1.0) {
Example<double, double, float> op("localOpName", alpha);
return op.compute(a, b, c);
}
tensor<float> example(tensor<float> &a, tensor<float> &b, tensor<double> &c, float alpha = 1.0) {
Example<float, float, float> op("localOpName", alpha);
return op.compute(a, b, c);
}
```
## Example 4
#### Structure
```python
dtype = {
"output" :(["input1","input2","input3"],["input1","input2","input3"]),
}
```
#### Code
```python
tensor<output> example(tensor<input1> &a, tensor<input2> &b, tensor<input3> &c, float alpha = 1.0) {
Example<output, input1, input2, input3> op("localOpName", alpha);
return op.compute(a, b, c);
dtype = {
"double" : (["double","float","double"], ["float","float","double"]),
"float" : ["float","double","float"],
}
}
```
#### Output
```cpp
tensor<double> example(tensor<double> &a, tensor<float> &b, tensor<double> &c, float alpha = 1.0) {
Example<double, double, float, double> op("localOpName", alpha);
return op.compute(a, b, c);
}
tensor<double> example(tensor<float> &a, tensor<float> &b, tensor<double> &c, float alpha = 1.0) {
Example<double, float, float, double> op("localOpName", alpha);
return op.compute(a, b, c);
}
tensor<float> example(tensor<float> &a, tensor<double> &b, tensor<float> &c, float alpha = 1.0) {
Example<float, float, double, float> op("localOpName", alpha);
return op.compute(a, b, c);
}
```<file_sep>// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 <NAME> <<EMAIL>>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_NO_DEBUG_SMALL_PRODUCT_BLOCKS
#include "sparse_solver.h"
#include <Eigen/CholmodSupport>
// Exercise every CholmodSupport solver wrapper for scalar type T: the
// mode-switchable CholmodDecomposition front-end plus the three dedicated
// classes, each with both Lower and Upper self-adjoint storage.
template<typename T> void test_cholmod_T()
{
  // Generic front-end, explicitly switched into each factorization mode
  // (supernodal LL^T, simplicial LL^T, LDL^T).
  CholmodDecomposition<SparseMatrix<T>, Lower> g_chol_colmajor_lower; g_chol_colmajor_lower.setMode(CholmodSupernodalLLt);
  CholmodDecomposition<SparseMatrix<T>, Upper> g_chol_colmajor_upper; g_chol_colmajor_upper.setMode(CholmodSupernodalLLt);
  CholmodDecomposition<SparseMatrix<T>, Lower> g_llt_colmajor_lower; g_llt_colmajor_lower.setMode(CholmodSimplicialLLt);
  CholmodDecomposition<SparseMatrix<T>, Upper> g_llt_colmajor_upper; g_llt_colmajor_upper.setMode(CholmodSimplicialLLt);
  CholmodDecomposition<SparseMatrix<T>, Lower> g_ldlt_colmajor_lower; g_ldlt_colmajor_lower.setMode(CholmodLDLt);
  CholmodDecomposition<SparseMatrix<T>, Upper> g_ldlt_colmajor_upper; g_ldlt_colmajor_upper.setMode(CholmodLDLt);
  // Dedicated, single-mode solver classes.
  CholmodSupernodalLLT<SparseMatrix<T>, Lower> chol_colmajor_lower;
  CholmodSupernodalLLT<SparseMatrix<T>, Upper> chol_colmajor_upper;
  CholmodSimplicialLLT<SparseMatrix<T>, Lower> llt_colmajor_lower;
  CholmodSimplicialLLT<SparseMatrix<T>, Upper> llt_colmajor_upper;
  CholmodSimplicialLDLT<SparseMatrix<T>, Lower> ldlt_colmajor_lower;
  CholmodSimplicialLDLT<SparseMatrix<T>, Upper> ldlt_colmajor_upper;
  // Shared SPD solve checks, run on every variant declared above.
  check_sparse_spd_solving(g_chol_colmajor_lower);
  check_sparse_spd_solving(g_chol_colmajor_upper);
  check_sparse_spd_solving(g_llt_colmajor_lower);
  check_sparse_spd_solving(g_llt_colmajor_upper);
  check_sparse_spd_solving(g_ldlt_colmajor_lower);
  check_sparse_spd_solving(g_ldlt_colmajor_upper);
  check_sparse_spd_solving(chol_colmajor_lower);
  check_sparse_spd_solving(chol_colmajor_upper);
  check_sparse_spd_solving(llt_colmajor_lower);
  check_sparse_spd_solving(llt_colmajor_upper);
  check_sparse_spd_solving(ldlt_colmajor_lower);
  check_sparse_spd_solving(ldlt_colmajor_upper);
  // Determinant checks only exist for the dedicated classes.
  check_sparse_spd_determinant(chol_colmajor_lower);
  check_sparse_spd_determinant(chol_colmajor_upper);
  check_sparse_spd_determinant(llt_colmajor_lower);
  check_sparse_spd_determinant(llt_colmajor_upper);
  check_sparse_spd_determinant(ldlt_colmajor_lower);
  check_sparse_spd_determinant(ldlt_colmajor_upper);
}
// Entry point registered with Eigen's test harness: run the suite for
// real (double) and complex<double> scalars as separate subtests.
void test_cholmod_support()
{
  CALL_SUBTEST_1(test_cholmod_T<double>());
  CALL_SUBTEST_2(test_cholmod_T<std::complex<double> >());
}
<file_sep>// Copyright 2018 The AITS DNNC Authors.All Rights Reserved.
//
// Licensed to the Apache Software Foundation(ASF) under one
// or more contributor license agreements.See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.See the License for the
// specific language governing permissionsand limitations
// under the License.
//
// This file is part of AITS DNN compiler maintained at
// https://github.com/ai-techsystems/dnnCompiler
#include "core/datatypes.h"
namespace dnnc {
/*<! Map a C/C++ scalar type-name string (e.g. "float", "int32_t") to the
 * corresponding DNNC_DataType enumerator. Returns NOTYPE when the string
 * is not recognized. */
DNNC_DataType getDNNC_DataType(std::string stype) {
  DNNC_DataType type = NOTYPE;
  if (stype == "float")
    type = FLOAT;
  if (stype == "double")
    type = DOUBLE;
  if (stype == "int32_t")
    type = INT32;
  if (stype == "uint8_t")
    type = UINT8;
  if (stype == "int16_t")
    type = INT16;
  if (stype == "int8_t")
    type = INT8;
  if (stype == "string")
    type = STRING;
  if (stype == "int64_t")
    type = INT64;
  if (stype == "bool")
    type = BOOL;
  if (stype == "uint16_t")
    type = UINT16;
  if (stype == "half")
    type = FLOAT16;
  if (stype == "uint32_t")
    type = UINT32;
  // BUGFIX: this branch previously tested "int64_t" a second time, which
  // (a) silently remapped int64_t to UINT64 and (b) left "uint64_t"
  // unrecognized (falling through to NOTYPE).
  if (stype == "uint64_t")
    type = UINT64;
  return type;
}
/*<! Inverse of getDNNC_DataType: render a DNNC_DataType enumerator as its
 * canonical type-name string. Both half-precision tags (FLOAT16 and
 * BFLOAT16) render as "half"; NOTYPE and unknown values render "notype". */
std::string getDNNC_DataTypeStr(DNNC_DataType dtype) {
  std::string type = "";
  switch (dtype) {
  case FLOAT:
    type = "float";
    break;
  case DOUBLE:
    type = "double";
    break;
  case INT32:
    type = "int32_t";
    break;
  case UINT8:
    type = "uint8_t";
    break;
  case INT16:
    type = "int16_t";
    break;
  case INT8:
    type = "int8_t";
    break;
  case STRING:
    type = "string";
    break;
  case INT64:
    type = "int64_t";
    break;
  case BOOL:
    type = "bool";
    break;
  case FLOAT16:
  case BFLOAT16:
    type = "half";
    break;
  case UINT16:
    type = "uint16_t";
    break;
  case UINT32:
    type = "uint32_t";
    break;
  case UINT64:
    // BUGFIX: previously returned "int64_t" for UINT64, breaking the
    // round trip with getDNNC_DataType.
    type = "uint64_t";
    break;
  case NOTYPE:
  default:
    type = "notype";
    break;
  }
  return type;
}
/*<! Map a type-name string to its IR_DataType tag. Unlike
 * getDNNC_DataType, this also recognizes tensor and graph spellings.
 * Returns IR_DataType::NOTYPE for unknown strings. */
IR_DataType getDNNC_IRType(std::string stype) {
  IR_DataType type = IR_DataType::NOTYPE;
  if (stype == "float")
    type = IR_DataType::FLOAT;
  if (stype == "double")
    type = IR_DataType::DOUBLE;
  if (stype == "int32_t")
    type = IR_DataType::INT32;
  if (stype == "uint8_t")
    type = IR_DataType::UINT8;
  if (stype == "int16_t")
    type = IR_DataType::INT16;
  if (stype == "int8_t")
    type = IR_DataType::INT8;
  if (stype == "string")
    type = IR_DataType::STRING;
  if (stype == "int64_t")
    type = IR_DataType::INT64;
  if (stype == "bool")
    type = IR_DataType::BOOL;
  if (stype == "uint16_t")
    type = IR_DataType::UINT16;
  if (stype == "half")
    type = IR_DataType::FLOAT16;
  if (stype == "uint32_t")
    type = IR_DataType::UINT32;
  // BUGFIX: this branch previously tested "int64_t" a second time, which
  // remapped int64_t to UINT64 and left "uint64_t" unrecognized.
  if (stype == "uint64_t")
    type = IR_DataType::UINT64;
  // NOTE(review): "tensor<long int>" accepted here vs "tensor<int64_t>"
  // emitted by getDNNC_IRTypeStr — the two functions are not round-trip
  // consistent; confirm which spelling callers rely on before unifying.
  if (stype == "tensor<bool>")
    type = IR_DataType::TENSOR_BOOL;
  if (stype == "tensor<long int>")
    type = IR_DataType::TENSOR_INT;
  if (stype == "tensor<double>")
    type = IR_DataType::TENSOR_FLOAT;
  if (stype == "graph")
    type = IR_DataType::GRAPH;
  return type;
}
/*<! Inverse of getDNNC_IRType: render an IR_DataType tag as its canonical
 * type-name string. Both half-precision tags map to "half"; NOTYPE and
 * any unrecognized tag map to "notype". */
std::string getDNNC_IRTypeStr(IR_DataType dtype) {
  // Start from the fallback and overwrite it when the tag is recognized.
  std::string name = "notype";
  if (dtype == IR_DataType::FLOAT)
    name = "float";
  else if (dtype == IR_DataType::DOUBLE)
    name = "double";
  else if (dtype == IR_DataType::INT32)
    name = "int32_t";
  else if (dtype == IR_DataType::UINT8)
    name = "uint8_t";
  else if (dtype == IR_DataType::INT16)
    name = "int16_t";
  else if (dtype == IR_DataType::INT8)
    name = "int8_t";
  else if (dtype == IR_DataType::STRING)
    name = "string";
  else if (dtype == IR_DataType::INT64)
    name = "int64_t";
  else if (dtype == IR_DataType::BOOL)
    name = "bool";
  else if (dtype == IR_DataType::FLOAT16 || dtype == IR_DataType::BFLOAT16)
    name = "half";
  else if (dtype == IR_DataType::UINT16)
    name = "uint16_t";
  else if (dtype == IR_DataType::UINT32)
    name = "uint32_t";
  else if (dtype == IR_DataType::UINT64)
    name = "uint64_t";
  else if (dtype == IR_DataType::TENSOR_BOOL)
    name = "tensor<bool>";
  else if (dtype == IR_DataType::TENSOR_INT)
    name = "tensor<int64_t>";
  else if (dtype == IR_DataType::TENSOR_FLOAT)
    name = "tensor<double>";
  else if (dtype == IR_DataType::GRAPH)
    name = "graph";
  return name;
}
/*<! Position of a data type on the precision/precedence ladder used for
 * type promotion; a larger index means higher precedence. Unrecognized
 * tags (including NOTYPE) rank 0, i.e. lowest. */
short typePrecisionIndex(DNNC_DataType dtype) {
  if (dtype == BOOL)
    return 1;
  if (dtype == UINT8)
    return 2;
  if (dtype == INT8)
    return 3;
  if (dtype == UINT16)
    return 4;
  if (dtype == INT16)
    return 5;
  // Both half-precision variants share one rung of the ladder.
  if (dtype == FLOAT16 || dtype == BFLOAT16)
    return 6;
  if (dtype == UINT32)
    return 7;
  if (dtype == INT32)
    return 8;
  if (dtype == FLOAT)
    return 9;
  if (dtype == UINT64)
    return 10;
  if (dtype == INT64)
    return 11;
  if (dtype == DOUBLE)
    return 12;
  if (dtype == STRING)
    return 13;
  if (dtype == COMPLEX64)
    return 14;
  if (dtype == COMPLEX128)
    return 15;
  return 0;
}
/*<! return if ty1 has higher precedence over ty2, i.e. whether ty1 sits
 * strictly higher than ty2 on the typePrecisionIndex ladder. */
bool typePrecedence(DNNC_DataType ty1, DNNC_DataType ty2) {
  short lhs = typePrecisionIndex(ty1);
  short rhs = typePrecisionIndex(ty2);
  return lhs > rhs;
}
} // namespace dnnc
<file_sep>import os, sys, importlib
import unittest
# def load_tests(loader, tests, pattern):
def load_tests(loader, tests):
    """Append a TestCase for every module listed in swig/passingTests.txt.

    Each non-empty line names a test module file (e.g. ``add.py``); the
    module is imported from the ``swig`` package and its ``<module>Test``
    class is loaded via *loader* and appended to *tests*.  Prints a
    diagnostic and returns when the list file is absent.
    """
    test_file = "swig/passingTests.txt"
    if not os.path.isfile(test_file):
        print("no test file in ", os.getcwd() + "/swig")
        return
    # Context manager guarantees the handle is closed even if an import
    # below raises.
    with open(test_file, "r") as test_fp:
        for test in test_fp:
            module_name = test.strip().split(".")[0]
            if not module_name:
                # Skip blank lines instead of attempting an empty import.
                continue
            class_name = module_name + "Test"
            module = importlib.import_module("." + module_name, package="swig")
            class_ = getattr(module, class_name)
            tests.append(loader.loadTestsFromTestCase(class_))
    return
def load_test(loader, test, tests):
    """Load a single test module *test* (a filename such as ``add.py``).

    The module is imported from the ``swig`` package and its
    ``<module>Test`` class is loaded via *loader* and appended to *tests*.
    Prints a diagnostic and returns when the file does not exist.
    """
    test_file = "swig/" + test
    if not os.path.isfile(test_file):
        print("no test file in ", os.getcwd() + "/swig")
        return
    print("running test", test.strip())
    module_name = test.strip().split(".")[0]
    class_name = module_name + "Test"
    module = importlib.import_module("." + module_name, package="swig")
    class_ = getattr(module, class_name)
    tests.append(loader.loadTestsFromTestCase(class_))
    return
<file_sep>#pragma once
namespace dnnc {
// Use this define to declare both:
// - `iterator`
// - `const_iterator`:
// As members of your class
// (For every macro here: C = container type, T = content type,
//  S = user-supplied iteration-state struct.)
#define SETUP_ITERATORS(C, T, S) \
  SETUP_MUTABLE_ITERATOR(C, T, S) \
  SETUP_CONST_ITERATOR(C, T, S)
// Use this define to declare only `iterator`
#define SETUP_MUTABLE_ITERATOR(C, T, S) \
  typedef dnnc::iterator<C, T, S> iterator; \
  iterator begin() { return iterator::begin(this); } \
  iterator end() { return iterator::end(this); }
// Use this define to declare only `const_iterator`
#define SETUP_CONST_ITERATOR(C, T, S) \
  typedef dnnc::const_iterator<C, T, S> const_iterator; \
  const_iterator begin() const { return const_iterator::begin(this); } \
  const_iterator end() const { return const_iterator::end(this); }
// S should be the state struct used to forward iteration:
// Reverse iteration is obtained by deriving a state struct that swaps
// next/prev and maps begin/end onto the opposite ends of the sequence.
#define SETUP_REVERSE_ITERATORS(C, T, S) \
  struct S##_reversed : public S { \
    inline void next(const C *ref) { S::prev(ref); } \
    inline void prev(const C *ref) { S::next(ref); } \
    inline void begin(const C *ref) { \
      S::end(ref); \
      S::prev(ref); \
    } \
    inline void end(const C *ref) { \
      S::begin(ref); \
      S::prev(ref); \
    } \
  }; \
  SETUP_MUTABLE_RITERATOR(C, T, S) \
  SETUP_CONST_RITERATOR(C, T, S)
#define SETUP_MUTABLE_RITERATOR(C, T, S) \
  typedef dnnc::iterator<C, T, S##_reversed> reverse_iterator; \
  reverse_iterator rbegin() { return reverse_iterator::begin(this); } \
  reverse_iterator rend() { return reverse_iterator::end(this); }
#define SETUP_CONST_RITERATOR(C, T, S) \
  typedef dnnc::const_iterator<C, T, S##_reversed> const_reverse_iterator; \
  const_reverse_iterator rbegin() const { \
    return const_reverse_iterator::begin(this); \
  } \
  const_reverse_iterator rend() const { \
    return const_reverse_iterator::end(this); \
  }
// Conventional STL member typedefs for a container of value type T.
#define STL_TYPEDEFS(T) \
  typedef std::ptrdiff_t difference_type; \
  typedef size_t size_type; \
  typedef T value_type; \
  typedef T *pointer; \
  typedef const T *const_pointer; \
  typedef T &reference; \
  typedef const T &const_reference
// Forward declaration of const_iterator:
template <class C, typename T, class S> struct const_iterator;
/* * * * * MUTABLE ITERATOR TEMPLATE: * * * * */
// C - The container type
// T - The content type
// S - The state keeping structure
template <class C, typename T, class S>
// The non-specialized version is used for T=rvalue:
struct iterator {
  // Keeps a reference to the container:
  C *ref;
  // User defined struct to describe the iterator state:
  // This struct should provide the functions listed below,
  // however, note that some of them are optional
  S state;
  // Set iterator to next() state:
  void next() { state.next(ref); }
  // Initialize iterator to first state:
  void begin() { state.begin(ref); }
  // Initialize iterator to end state:
  void end() { state.end(ref); }
  // Returns current `value`
  T get() { return state.get(ref); }
  // Return true if `state != s`:
  bool cmp(const S &s) const { return state.cmp(s); }
  // Optional function for reverse iteration:
  void prev() { state.prev(ref); }

public:
  // Factory: iterator positioned at the first element of *ref.
  static iterator begin(C *ref) {
    iterator it(ref);
    it.begin();
    return it;
  }
  // Factory: iterator positioned one past the last element of *ref.
  static iterator end(C *ref) {
    iterator it(ref);
    it.end();
    return it;
  }

protected:
  iterator(C *ref) : ref(ref) {}

public:
  // Note: Instances build with this constructor should
  // be used only after copy-assigning from other iterator!
  iterator() {}

public:
  T operator*() { return get(); }
  iterator &operator++() {
    next();
    return *this;
  }
  iterator operator++(int) {
    iterator temp(*this);
    next();
    return temp;
  }
  iterator &operator--() {
    prev();
    return *this;
  }
  iterator operator--(int) {
    iterator temp(*this);
    prev();
    return temp;
  }
  // Iterators differ when they point into different containers or the
  // user-supplied S::cmp reports differing states.
  bool operator!=(const iterator &other) const {
    return ref != other.ref || cmp(other.state);
  }
  bool operator==(const iterator &other) const { return !operator!=(other); }

  friend struct dnnc::const_iterator<C, T, S>;

  // Comparisons between const and normal iterators:
  bool operator!=(const const_iterator<C, T, S> &other) const {
    // BUGFIX: was `cmd(other.state)` — a typo that only fails to compile
    // once this member template is actually instantiated.
    return ref != other.ref || cmp(other.state);
  }
  bool operator==(const const_iterator<C, T, S> &other) const {
    return !operator!=(other);
  }
};
template <class C, typename T, class S>
// This specialization is used for iterators to reference types:
struct iterator<C, T &, S> {
  // Keeps a reference to the container:
  C *ref;
  // User defined struct to describe the iterator state:
  // This struct should provide the functions listed below,
  // however, note that some of them are optional
  S state;
  // Set iterator to next() state:
  void next() { state.next(ref); }
  // Initialize iterator to first state:
  void begin() { state.begin(ref); }
  // Initialize iterator to end state:
  void end() { state.end(ref); }
  // Returns current `value`
  T &get() { return state.get(ref); }
  // Return true if `state != s`:
  bool cmp(const S &s) const { return state.cmp(s); }
  // Optional function for reverse iteration:
  void prev() { state.prev(ref); }

public:
  // Factory: iterator positioned at the first element of *ref.
  static iterator begin(C *ref) {
    iterator it(ref);
    it.begin();
    return it;
  }
  // Factory: iterator positioned one past the last element of *ref.
  static iterator end(C *ref) {
    iterator it(ref);
    it.end();
    return it;
  }

protected:
  iterator(C *ref) : ref(ref) {}

public:
  // Note: Instances build with this constructor should
  // be used only after copy-assigning from other iterator!
  iterator() {}

public:
  T &operator*() { return get(); }
  T *operator->() { return &get(); }
  iterator &operator++() {
    next();
    return *this;
  }
  iterator operator++(int) {
    iterator temp(*this);
    next();
    return temp;
  }
  iterator &operator--() {
    prev();
    return *this;
  }
  iterator operator--(int) {
    iterator temp(*this);
    prev();
    return temp;
  }
  bool operator!=(const iterator &other) const {
    return ref != other.ref || cmp(other.state);
  }
  bool operator==(const iterator &other) const { return !operator!=(other); }

  friend struct dnnc::const_iterator<C, T &, S>;

  // Comparisons between const and normal iterators:
  bool operator!=(const const_iterator<C, T &, S> &other) const {
    // BUGFIX: was `cmd(other.state)` — same latent typo as in the
    // non-specialized template.
    return ref != other.ref || cmp(other.state);
  }
  bool operator==(const const_iterator<C, T &, S> &other) const {
    return !operator!=(other);
  }
};
/* * * * * CONST ITERATOR TEMPLATE: * * * * */
// C - The container type
// T - The content type
// S - The state keeping structure
template <class C, typename T, class S>
// The non-specialized version is used for T=rvalue:
struct const_iterator {
  // Keeps a reference to the container:
  const C *ref;
  // User defined struct to describe the iterator state:
  // This struct should provide the functions listed below,
  // however, note that some of them are optional
  S state;
  // Set iterator to next() state:
  void next() { state.next(ref); }
  // Initialize iterator to first state:
  void begin() { state.begin(ref); }
  // Initialize iterator to end state:
  void end() { state.end(ref); }
  // Returns current `value`
  const T get() { return state.get(ref); }
  // Return true if `state != s`:
  bool cmp(const S &s) const { return state.cmp(s); }
  // Optional function for reverse iteration:
  void prev() { state.prev(ref); }

public:
  // Factory: const_iterator positioned at the first element of *ref.
  static const_iterator begin(const C *ref) {
    const_iterator it(ref);
    it.begin();
    return it;
  }
  // Factory: const_iterator positioned one past the last element.
  static const_iterator end(const C *ref) {
    const_iterator it(ref);
    it.end();
    return it;
  }

protected:
  const_iterator(const C *ref) : ref(ref) {}

public:
  // Note: Instances build with this constructor should
  // be used only after copy-assigning from other iterator!
  const_iterator() {}
  // To make possible copy-construct non-const iterators:
  const_iterator(const iterator<C, T, S> &other) : ref(other.ref) {
    state = other.state;
  }

public:
  const T operator*() { return get(); }
  const_iterator &operator++() {
    next();
    return *this;
  }
  const_iterator operator++(int) {
    const_iterator temp(*this);
    next();
    return temp;
  }
  const_iterator &operator--() {
    prev();
    return *this;
  }
  const_iterator operator--(int) {
    const_iterator temp(*this);
    prev();
    return temp;
  }
  // Iterators differ when they point into different containers or the
  // user-supplied S::cmp reports differing states.
  bool operator!=(const const_iterator &other) const {
    return ref != other.ref || cmp(other.state);
  }
  bool operator==(const const_iterator &other) const {
    return !operator!=(other);
  }
  // Allow re-seating a const_iterator from a mutable iterator.
  const_iterator &operator=(const iterator<C, T, S> &other) {
    ref = other.ref;
    state = other.state;
    return *this;
  }

  friend struct dnnc::iterator<C, T, S>;

  // Comparisons between const and normal iterators:
  bool operator!=(const iterator<C, T, S> &other) const {
    return ref != other.ref || cmp(other.state);
  }
  bool operator==(const iterator<C, T, S> &other) const {
    return !operator!=(other);
  }
};
// This specialization is used for iterators to reference types:
template <class C, typename T, class S> struct const_iterator<C, T &, S> {
  // Keeps a reference to the container:
  const C *ref;
  // User defined struct to describe the iterator state:
  // This struct should provide the functions listed below,
  // however, note that some of them are optional
  S state;
  // Set iterator to next() state:
  void next() { state.next(ref); }
  // Initialize iterator to first state:
  void begin() { state.begin(ref); }
  // Initialize iterator to end state:
  void end() { state.end(ref); }
  // Returns current `value`
  const T &get() { return state.get(ref); }
  // Return true if `state != s`:
  bool cmp(const S &s) const { return state.cmp(s); }
  // Optional function for reverse iteration:
  void prev() { state.prev(ref); }

public:
  // Factory: const_iterator positioned at the first element of *ref.
  static const_iterator begin(const C *ref) {
    const_iterator it(ref);
    it.begin();
    return it;
  }
  // Factory: const_iterator positioned one past the last element.
  static const_iterator end(const C *ref) {
    const_iterator it(ref);
    it.end();
    return it;
  }

protected:
  const_iterator(const C *ref) : ref(ref) {}

public:
  // Note: Instances build with this constructor should
  // be used only after copy-assigning from other iterator!
  const_iterator() {}
  // To make possible copy-construct non-const iterators:
  const_iterator(const iterator<C, T &, S> &other) : ref(other.ref) {
    state = other.state;
  }

public:
  const T &operator*() { return get(); }
  const T *operator->() { return &get(); }
  const_iterator &operator++() {
    next();
    return *this;
  }
  const_iterator operator++(int) {
    const_iterator temp(*this);
    next();
    return temp;
  }
  const_iterator &operator--() {
    prev();
    return *this;
  }
  const_iterator operator--(int) {
    const_iterator temp(*this);
    prev();
    return temp;
  }
  // Iterators differ when they point into different containers or the
  // user-supplied S::cmp reports differing states.
  bool operator!=(const const_iterator &other) const {
    return ref != other.ref || cmp(other.state);
  }
  bool operator==(const const_iterator &other) const {
    return !operator!=(other);
  }
  // Allow re-seating a const_iterator from a mutable iterator.
  const_iterator &operator=(const iterator<C, T &, S> &other) {
    ref = other.ref;
    state = other.state;
    return *this;
  }

  friend struct dnnc::iterator<C, T &, S>;

  // Comparisons between const and normal iterators:
  bool operator!=(const iterator<C, T &, S> &other) const {
    return ref != other.ref || cmp(other.state);
  }
  bool operator==(const iterator<C, T &, S> &other) const {
    return !operator!=(other);
  }
};
} // namespace dnnc
| c7fda71ef70a7ffcc59ba648e164f5b7666d3fc8 | [
"CMake",
"Markdown",
"Makefile",
"Dockerfile",
"Python",
"Text",
"C++",
"Shell"
] | 141 | Markdown | ai-techsystems/deepC | 064a7cc04f96e20013ad7ae26a6abd6ef16ab77c | 6c7df32ba61f2e85fc1eab5af4ef4f5c1368cf71 |
refs/heads/master | <repo_name>adrcoto/web-tasks<file_sep>/src/components/Groups/Groups.js
import React, {Component} from 'react';
import Layout from "../Misc/Layout";
import axios from "axios";
import '../../css/tasks.css';
import {Button, Col, Form, FormGroup, Input, Label, Modal, ModalBody, ModalFooter, ModalHeader, Row} from 'reactstrap';
export default class Home extends Component {
constructor(props) {
super(props);
this.state = {
groups: [],
users: [],
shouldRerender: false,
name: '',
usrs: [],
us: '',
open: false,
id: false,
}
}
async componentDidMount() {
let response = await axios.get(`${process.env.REACT_APP_API_URL}/groups`);
let usersResponse = await axios.get(`${process.env.REACT_APP_API_URL}/admin/users`);
this.setState({
groups: response.data.data,
users: usersResponse.data.data,
});
}
_toggle = () => {
this.setState({
open: !this.state.open
});
};
_onChange = (e) => {
const {name, value} = e.target;
this.setState({
[name]: value
});
};
_add = () => {
this.setState({
id: false,
name: '',
open: true
});
};
_showUser = user_id => {
const {users} = this.state;
let name = '';
users && users.map(user => {
if (user.id === user_id) {
name = user.name;
}
});
return name;
};
render() {
const {user} = this.props;
const {groups, users, open, name, id, us} = this.state;
return (
<Layout user={user}>
<Modal isOpen={open} toggle={this._toggle}>
<ModalHeader toggle={this._toggle}>{id ? 'Edit task' : 'Add task'}</ModalHeader>
<ModalBody>
<Form>
<FormGroup>
<Label for="name">Name</Label>
<Input type="text"
name="name"
id="name"
placeholder="Name"
value={name}
onChange={this._onChange}/>
</FormGroup>
<FormGroup>
<Label for="users">Select</Label>
<Input type="select"
name="users"
id="users"
onChange={this._onChange}
value={user}>
<option value={us}>Select</option>
{users.length > 1 && users.map((u, key) => {
return <option key={key} value={u.id}>{u.name}</option>;
})}
</Input>
</FormGroup>
</Form>
</ModalBody>
<ModalFooter>
<Button color="primary">Add group</Button>
<Button color="secondary" onClick={this._toggle}>Cancel</Button>
</ModalFooter>
</Modal>
<Button className={'add-new'} color="primary" onClick={this._add}>Add task</Button>
<div className={'tasks-list'}>
<Row className={'table-header'}>
<Col xs={1}>Id</Col>
<Col xs={2}>Name</Col>
<Col xs={3}>Owner</Col>
<Col xs={2}>Actions</Col>
</Row>
{groups.map((group, key) => {
return <Row key={key} className={`table-column ${key % 2 === 0 ? 'odd' : ''}`}>
<Col xs={1}>{group.id}</Col>
<Col xs={2}>{group.name}</Col>
<Col xs={3}>{this._showUser(group.owner)}</Col>
<Col xs={2}>
<Button color="info" size="sm">View</Button>
</Col>
</Row>;
})}
</div>
</Layout>
)
}
}
<file_sep>/src/components/Misc/LoggedUser.js
import React, {Component} from "react";
import axios from "axios";
export const LoggedUser = (WrappedComponent) => {
return class extends Component {
constructor(props) {
super(props);
this.state = {
user: false
};
}
async componentDidMount() {
if (sessionStorage.getItem('token')) {
let res = await axios.get(`${process.env.REACT_APP_API_URL}/user`);
if (res && res.data && res.data.responseType === 'success') {
this.setState({
user: res.data.data
});
}
}
}
render() {
return <WrappedComponent {...this.props} user={this.state.user}/>;
}
};
};
<file_sep>/src/components/Auth/Register.js
import React, {Component} from 'react';
import axios from 'axios';
import {Button, FormGroup, Input, Label, Alert} from "reactstrap";
import {Link} from "react-router-dom";
export default class Register extends Component {
state = {
name: '',
email: '',
password: '',
errorMessage: ''
};
_onChange = (e) => {
const {name, value} = e.target;
this.setState({
[name]: value
});
};
_register = async () => {
const {name, email, password} = this.state;
const response = await axios.post(`${process.env.REACT_APP_API_URL}/register`, {
email, password, name
});
if (response && response.data && response.data.responseType === 'success') {
this.props.history.push('/login');
} else {
this.setState({
errorMessage: response.data.errorMessage
});
}
};
render() {
const {name, email, password, errorMessage} = this.state;
return (
<div className={'card'}>
<h1>Register</h1>
{errorMessage !== '' && <Alert color="danger">{errorMessage}</Alert>}
<FormGroup>
<Label for="exampleName">Name</Label>
<Input type="text" name="name" id="exampleName" value={name} onChange={this._onChange}/>
</FormGroup>
<FormGroup>
<Label for="exampleEmail">Email</Label>
<Input type="email" name="email" id="exampleEmail" value={email} onChange={this._onChange}/>
</FormGroup>
<FormGroup>
<Label for="examplePassword">Password</Label>
<Input type="password" name="password" id="examplePassword" value={password}
onChange={this._onChange}/>
</FormGroup>
<Button color="primary" onClick={this._register}>Register</Button>
<Link to={'login'}>Login</Link>
</div>
)
}
}
<file_sep>/src/components/Misc/Header.js
import React, {Component} from 'react';
import {Link} from 'react-router-dom';
import {Nav, NavItem} from "reactstrap";
export default class Header extends Component {
render() {
const {user} = this.props;
return (
<div className={'header'}>
<Nav className={'menu'}>
<NavItem className={'menu-item'}>
<Link to={"/"}>Home</Link>
</NavItem>
{user && user.role_id === 1 &&
<NavItem className={'menu-item'}>
<Link to={"users"}>Users</Link>
</NavItem>}
<NavItem className={'menu-item'}>
<Link to={"tasks"}>Tasks</Link>
</NavItem>
<NavItem className={'menu-item'}>
<Link to={"groups"}>Groups</Link>
</NavItem>
<NavItem className={'menu-item'}>
<Link to={"logout"}>Logout</Link>
</NavItem>
</Nav>
</div>
);
}
}<file_sep>/src/components/Tasks/index.js
import React, {Component} from 'react';
import Layout from "../Misc/Layout";
import axios from "axios";
import {ModalFooter, Button, Modal, ModalHeader, ModalBody, FormGroup, Form, Label, Input, Col, Row} from 'reactstrap';
import '../../css/tasks.css';
export default class Tasks extends Component {
constructor(props) {
super(props);
this.state = {
tasks: [],
users: [],
shouldRerender: false,
name: '',
description: '',
assign: '',
open: false,
id: false,
page: 1
}
}
async componentDidMount() {
let response = await axios.get(`${process.env.REACT_APP_API_URL}/tasks`);
let usersResponse = await axios.get(`${process.env.REACT_APP_API_URL}/admin/users`);
this.setState({
tasks: response.data.data,
users: usersResponse.data.data,
page: response.data.data.current_page
});
}
_loadNextTasks = async () => {
const {page} = this.state;
let response = await axios.get(`${process.env.REACT_APP_API_URL}/tasks?page=${(page + 1)}`);
this.setState({
tasks: {
...response.data.data, data: [
...this.state.tasks.data,
...response.data.data.data
]
},
page: response.data.data.current_page
});
};
async componentDidUpdate() {
if (this.state.shouldRerender) {
let response = await axios.get(`${process.env.REACT_APP_API_URL}/tasks`);
this.setState({
tasks: response.data.data,
shouldRerender: false,
page: response.data.data.current_page
});
}
}
_toggle = () => {
this.setState({
open: !this.state.open
});
};
_onChange = (e) => {
const {name, value} = e.target;
this.setState({
[name]: value
});
};
_add = () => {
this.setState({
id: false,
name: '',
description: '',
assign: '',
open: true
});
};
_edit = (task) => {
this.setState({
id: task.id,
name: task.name,
description: task.description,
assign: task.assign,
open: true
});
};
_addTask = async () => {
const {name, description, assign} = this.state;
let res = await axios.post(`${process.env.REACT_APP_API_URL}/task`, {name, description, assign});
if (res && res.data && res.data.responseType === 'success') {
this.setState({
shouldRerender: true,
open: false
});
}
};
_editTask = async () => {
const {id, name, description, assign} = this.state;
let res = await axios.patch(`${process.env.REACT_APP_API_URL}/task/${id}`, {name, description, assign});
if (res && res.data && res.data.responseType === 'success') {
this.setState({
shouldRerender: true,
open: false
});
}
};
_showUser = user_id => {
const {users} = this.state;
let name = '';
users && users.map(user => {
if (user.id === user_id) {
name = user.name;
}
});
return name;
};
render() {
const {user} = this.props;
const {tasks, users, name, description, assign, open, id} = this.state;
return (
<Layout user={user}>
<Modal isOpen={open} toggle={this._toggle}>
<ModalHeader toggle={this._toggle}>{id ? 'Edit task' : 'Add task'}</ModalHeader>
<ModalBody>
<Form>
<FormGroup>
<Label for="name">Name</Label>
<Input type="text"
name="name"
id="name"
placeholder="Name"
value={name}
onChange={this._onChange}/>
</FormGroup>
<FormGroup>
<Label for="description">Description</Label>
<Input type="textarea"
name="description"
id="description"
placeholder="Description"
value={description}
onChange={this._onChange}/>
</FormGroup>
<FormGroup>
<Label for="assign">Select</Label>
<Input type="select"
name="assign"
id="assign"
onChange={this._onChange}
value={assign}>
<option value={''}>Select</option>
{users.length > 1 && users.map((u, key) => {
return <option key={key} value={u.id}>{u.name}</option>;
})}
</Input>
</FormGroup>
</Form>
</ModalBody>
<ModalFooter>
<Button color="primary"
onClick={id ? this._editTask : this._addTask}>{id ? 'Edit task' : 'Add task'}</Button>
<Button color="secondary" onClick={this._toggle}>Cancel</Button>
</ModalFooter>
</Modal>
<Button className={'add-new'} color="primary" onClick={this._add}>Add task</Button>
<div className={'tasks-list'}>
<Row className={'table-header'}>
<Col xs={1}>Id</Col>
<Col xs={2}>Name</Col>
<Col xs={3}>Description</Col>
<Col xs={2}>Created by</Col>
<Col xs={2}>Assigned to</Col>
<Col xs={2}>Actions</Col>
</Row>
{tasks && tasks.data && tasks.data.length > 1 && tasks.data.map((task, key) => {
return <Row key={key} className={`table-column ${key % 2 === 0 ? 'odd' : ''}`}>
<Col xs={1}>{task.id}</Col>
<Col xs={2}>{task.name}</Col>
<Col xs={3}>{task.description}</Col>
<Col xs={2}>{this._showUser(task.user_id)}</Col>
<Col xs={2}>{this._showUser(task.assign)}</Col>
<Col xs={2}>
<Button color="info" size="sm" onClick={() => this._edit(task)}>Edit</Button>
</Col>
</Row>;
})}
</div>
{tasks.current_page < tasks.last_page && <div onClick={this._loadNextTasks}>Load more</div>}
</Layout>
);
}
} | f1ad8396a9ea402f1b412f58b8de84ba9f9d805d | [
"JavaScript"
] | 5 | JavaScript | adrcoto/web-tasks | 26916b8b4784268c2e132ac381be56e3691a8666 | 910ba7405ed04aa8e0e726a15067b8732b2f95d5 |
refs/heads/master | <repo_name>rafaelvalentine/ivynaturals<file_sep>/src/Components/Home/Home.jsx
import React, { Component } from 'react'
import { Jumbotron, Button, Card, Container, Row, Col } from 'react-bootstrap'
import { Helmet } from 'react-helmet'
import modelTwo from '../../Img/model-three.jpeg'
import modelOne from '../../Img/model-two.jpg'
import Products from './Products/Products'
import './Home.css'
const ValueList = (props) => {
const { text, title, number, img } = props
return (
<Col xs={12}>
<div className='values-content_items'>
<img src={img} alt={img} />
<div className='values-content_items_bottom'>
<div className='number'>{number}</div>
<div className='title'>{title}</div>
<div className='text'>{text}</div>
</div>
</div>
</Col>
)
}
const BenefitsList = (props) => {
const { text, title, number, img } = props
return (
<Col xs={12} md={4} lg={4}>
<div className='values-content_items'>
<img src={img} alt={img} />
<div className='values-content_items_bottom'>
<div className='number'>{number}</div>
<div className='title'>{title}</div>
<div className='text'>{text}</div>
</div>
</div>
</Col>
)
}
const Testimony = (props) => {
const { text, author, date } = props
return (
<Col xs={12} md={12} lg={6}>
<div className='testimony'>
<div className='testimony-card'>
<p className='text'>"{text}"</p>
<p className='author'> - {author}</p>
<p className='date'>{date}</p>
</div>
</div>
</Col>
)
}
export default class Home extends Component {
static defaultProps = {
values:[
{
img:'assets/img/skin.jpeg',
id:'1',
number:'1',
title:'Enrich your skin',
text:'Let of our brand of naturally derived products rejuvinate you'
},
{
img:'assets/img/ingredients.jpeg',
id:'2',
number:'2',
title:'Made from nature',
text:'From the best known natural skin care ingredients on earth, the Skin Genome Project, with 20,238 ingredients'
},
{
img:'assets/img/product.jpeg',
id:'3',
number:'3',
title:'Designed for you',
text:'Custom-made for you by our a in-house Dermatologist.'
}
],
benefits:[
{
img:'assets/img/intelligence.svg',
title:'Enrich your skin',
text:'Let of our brand of naturally derived products rejuvinate you'
},
{
img:'assets/img/fingerprints.svg',
title:'Enrich your skin',
text:'Let of our brand of naturally derived products rejuvinate you'
},
{
img:'assets/img/badge.svg',
title:'Enrich your skin',
text:'Let of our brand of naturally derived products rejuvinate you'
}
],
testimony:[
{
date:'February 2019',
author:'Beauty Nigeria',
text:'One skin care brand that stands out in the sea of swag: Ivy Natural.'
},
{
date:'January 2019',
author:'Fashion house et Nigeria',
text:" I don't actually need the 27 other skincare products dominating my #shelfie any more. Consider me woke"
}
]
}
componentDidMount(){
window.onscroll = ()=>{
if (document.body.scrollTop > 200 || document.documentElement.scrollTop > 200) {
document.getElementById("scroll").style.opacity = ".75";
document.getElementById("scroll").style.transform = "translateX(0)";
// document.getElementById("scroll").style.display = "flex";
} else {
document.getElementById("scroll").style.opacity = "0";
document.getElementById("scroll").style.transform = "translateX(50px)";
}
}
// if ( window.scrollTop > 20 || document.body.scrollTop > 20) {
// document.getElementById("scroll").style.display = "block";
// } else {
// document.getElementById("scroll").style.display = "none";
// }
console.log('exampleComponent mounted');
}
render () {
const values = this.props.values.map((value)=>{
if (value.id === '2'){
return<Col xs={12}>
<div className='values-content_items row-reverse'>
<img src={value.img} alt={value.img} />
<div className='values-content_items_bottom'>
<div className='number'>{value.number}</div>
<div className='title'>{value.title}</div>
<div className='text'>{value.text}</div>
</div>
</div>
</Col>
}
return <ValueList key={value.id} {...value}/>;
})
const benefits = this.props.benefits.map((value, index)=>(
<BenefitsList key={index} {...value}/>
))
return (
<div id='home'>
<Helmet>
<title>Home | Ivy Naturals</title>
<meta name='description' content='Helmet application' />
</Helmet>
{/*
==========================================================================
Banner
==========================================================================
*/}
<section className='banner'>
<Jumbotron style={{ backgroundImage: 'url(' + modelOne + ')' }} className='banner-jumbotron small'>
<div>
<h4>welcome to,</h4>
<h1>Ivy Natural</h1>
<p>
Beauty and Skin Care that's Grown For You!
</p>
<p>
<Button className='banner_button' variant='primary'>Let's Get Started <i className='fas fa-caret-down' /></Button>
</p>
</div>
</Jumbotron>
<Jumbotron style={{ backgroundImage: 'url(' + modelTwo + ')' }} className='banner-jumbotron big'>
<div>
<h4>welcome to,</h4>
<h1>Ivy Natural</h1>
<p>
Beauty and Skin Care that's Grown For You!
</p>
<p>
<Button className='banner_button' variant='primary'>Let's Get Started <i className='fas fa-caret-down' /></Button>
</p>
</div>
</Jumbotron>
</section>
{/*
==========================================================================
Values
==========================================================================
*/}
<section id='values' className='values'>
<div className='header'>
<h2>
why choose us
</h2>
<hr/>
</div>
<div className='values-content'>
<Row>
{values}
</Row>
</div>
</section>
{/*
==========================================================================
Products
==========================================================================
*/}
<section id='products' className='products'>
<div className='header'>
<h2>
Products
</h2>
<hr/>
</div>
<div className='products-content'>
<Products/>
</div>
</section>
{/*
==========================================================================
Benefits
==========================================================================
*/}
<section id='benefits' className='benefits'>
<div className='header'>
<h2>
Ivy's Benefits
</h2>
<hr/>
</div>
<div className='benefits-content'>
<Row>
{benefits}
</Row>
</div>
</section>
{/*
==========================================================================
Team
==========================================================================
*/}
<section id='team' className='team'>
<div className='header'>
<h2>
meet the Crème de la Crème
</h2>
<hr/>
</div>
<div className='team-content'>
<Card>
<Card.Img className='team-img' variant="top" src="assets/img/ivy_owner.jpg" />
<Card.Body>
<Card.Title>Barr. <NAME></Card.Title>
<Card.Text className='team-text'>
Before founding Ivy Natural, <NAME> was a fully time law attorney. She has a Masters in Property Law from Stanford.
</Card.Text>
<Card.Text className='team-text'>
She is also a mother and health enthusiast.
</Card.Text>
</Card.Body>
</Card>
</div>
</section>
{/*
==========================================================================
Testimonies
==========================================================================
*/}
<section id='testimonies' className='testimonies'>
<div className='testimonies-content'>
<Row>
<Testimony
text={this.props.testimony[0].text}
author={this.props.testimony[0].author}
date={this.props.testimony[0].date}
/>
<div className='col-lg-4 vertical-line'/>
<hr/>
<Testimony
text={this.props.testimony[1].text}
author={this.props.testimony[1].author}
date={this.props.testimony[1].date}
/>
</Row>
</div>
</section>
{/*
==========================================================================
Scroll Top
==========================================================================
*/}
<div onClick={()=>(
window.scrollTo({
top: 0,
behavior: 'smooth'
})
)} id="scroll">
<i className="fas fa-sort-up"></i>
</div>
</div>
)
}
}
<file_sep>/src/Components/Navbar/CustomNavbar.jsx
import React, { Component } from 'react'
// import { ButtonToolbar, Dropdown } from 'react-bootstrap'
import { Link } from 'react-router-dom'
// import moduleName from 'react-router-dom'
import './CustomNavbar.css'
// const Tooltipz = (props) => {
// return (
// <ButtonToolbar>
// <Dropdown>
// <Dropdown.Toggle id='dropdown-custom-1'>
// </Dropdown.Toggle>
// <Dropdown.Menu className='super-colors'>
// <Dropdown.Item eventKey='1'>Action</Dropdown.Item>
// <Dropdown.Item eventKey='2'>Another action</Dropdown.Item>
// <Dropdown.Item eventKey='3' active>
// Active Item
// </Dropdown.Item>
// <Dropdown.Divider />
// <Dropdown.Item eventKey='4'>Separated link</Dropdown.Item>
// </Dropdown.Menu>
// </Dropdown>
// </ButtonToolbar>
// )
// }
/**
 * Top navigation bar plus a slide-in side navigation drawer.
 *
 * The drawer is opened/closed imperatively by mutating inline styles on
 * elements looked up by id (#mySidenav, #main, #nav-div, ...), so the
 * statement order below matters; treat it as a choreographed sequence.
 */
export default class CustomNavbar extends Component {
  constructor (props) {
    super(props)
    // No state is used yet; handlers are bound once so they can be passed
    // as event callbacks without re-binding on every render.
    this.state = {
    }
    this.openNav = this.openNav.bind(this)
    this.closeNav = this.closeNav.bind(this)
  }
  /* Set the width of the side navigation to 250px and the left margin of the page content to 250px and add a black background color to body */
  openNav () {
    // Desktop (>=700px): slide a 250px drawer in and push the page content
    // right. Mobile: the drawer covers the full viewport instead.
    let x = window.matchMedia('(min-width: 700px)')
    if (x.matches) {
      // If media query matches
      document.getElementById('mySidenav').style.width = '250px'
      document.getElementById('main').style.marginLeft = '250px'
      document.getElementById('nav-div').style.left = '60%'
      document.getElementById('main').style.overflow = 'hidden'
      document.body.style.backgroundColor = 'rgba(0,0,0, 1)'
      document.getElementById('mySidenav-list').style.opacity = '1'
      document.getElementById('copyright').style.opacity = '1'
      document.getElementById('mySidenav').style.opacity = '1'
      document.getElementById('main').style.overflowY = 'hidden'
    } else {
      document.getElementById('mySidenav').style.width = '100%'
      document.getElementById('closebtn').style.transform = 'rotate(0deg)'
      document.getElementById('copyright').style.opacity = '1'
      document.getElementById('mySidenav-list').style.opacity = '1'
      document.getElementById('main').style.overflowY = 'hidden'
    }
  }
  /* Set the width of the side navigation to 0 and the left margin of the page content to 0, and the background color of body to white */
  closeNav () {
    // Reverse of openNav: collapse the drawer, restore page layout, and
    // fade out the drawer's contents (also spins the close button back).
    document.getElementById('mySidenav').style.width = '0'
    document.getElementById('main').style.marginLeft = '0'
    document.body.style.backgroundColor = 'white'
    document.getElementById('closebtn').style.transform = 'rotate(90deg)'
    document.getElementById('copyright').style.opacity = '0'
    document.getElementById('main').style.overflow = 'visible'
    // document.getElementById('mySidenav').style.opacity = '0'
    document.getElementById('mySidenav-list').style.opacity = '0'
    document.getElementById('nav-div').style.left = '25%'
  }
  render () {
    return (
      <div id='nav-div'>
        {/* Slide-in drawer; width is animated by openNav/closeNav above. */}
        <aside id='mySidenav' className='sidenav'>
          <h4><Link onClick={this.closeNav} to='/'>ivy natural</Link></h4>
          <ul id='mySidenav-list'>
            {/* <li><a href="javascript:void(0)" id='closebtn' className='closebtn' onClick={this.closeNav}>×</a></li> */}
            <li><button id='closebtn' className='closebtn' onClick={this.closeNav}>×</button></li>
            <li className='link' onClick={this.closeNav}><Link to='/about'>About Us</Link></li>
            <li className='link' onClick={this.closeNav}><Link to='/services'>Market</Link></li>
            <li className='link' onClick={this.closeNav}><Link to='/blog'>Blog</Link></li>
            <li className='link' onClick={this.closeNav}><Link to='/contact'>Contact</Link></li>
          </ul>
          <p id='copyright' className='copyright'><small> ©</small> ivy natural { new Date().getUTCFullYear()} All Right Reserved</p>
        </aside>
        {/* Add all page content inside this div if you want the side nav to push page content to the right (not used if you only want the sidenav to sit on top of the page */}
        <div id='nav-main'>
          {/* Use any element to open the sidenav */}
          <span className='nav-main-title'> <img src='assets/img/ivy_logo-copy2.png' alt='ivy_logo' /> ivy natural</span>
          <div>
            {/* Inline nav links shown in the top bar (desktop layout). */}
            <ul id='nav-main-list' className='nav-main-list'>
              <li> <img src='assets/img/magnifying-glass.svg' alt='search' /> </li>
              {/* <li> <img src='assets/img/facebook.svg' alt='search' /> </li>
              <li> <img src='assets/img/twitter.svg' alt='search' /> </li>
              <li> <img src='assets/img/google-plus.svg' alt='search' /> </li> */}
              <li className='link'><Link to='/about'>About Us</Link></li>
              <li className='link' ><Link to='/services'>Market</Link></li>
              <li className='link' ><Link to='/blog'>Blog</Link></li>
              <li className='link'><Link to='/contact'>Contact</Link></li>
            </ul>
          </div>
          {/* Hamburger button that opens the drawer. */}
          <div className='nav-main-button-div'>
            <span className='nav-main-button' onClick={this.openNav}> <img src='assets/img/menu.svg' alt='navbar-button' /></span>
          </div>
        </div>
      </div>
    )
  }
}
| 315397ef33b68694438a1688516136a8afa07815 | [
"JavaScript"
] | 2 | JavaScript | rafaelvalentine/ivynaturals | 54d2d66d11ae5608aef8ecbbf292f1f5e52958a5 | a07d2744895d539a8ee0dd3085a499814b50d853 |
refs/heads/master | <file_sep>package edu.xcdq.tools;
import java.util.ArrayList;
import java.util.Collections;
/**
 * Demonstrates the java.util.Collections utility methods (reverse, sort,
 * shuffle, max/min, replaceAll, frequency, binarySearch) on a small list.
 */
public class Test04 {
    public static void main(String[] args) {
        // Fix: use a typed list — the original declared a raw ArrayList,
        // which compiles with unchecked warnings and loses type safety.
        ArrayList<Integer> nums = new ArrayList<>();
        nums.add(2);
        nums.add(0);
        nums.add(-5);
        nums.add(0);
        nums.add(3);
        System.out.println(nums);
        Collections.reverse(nums);   // reverse the element order in place
        System.out.println(nums);
        Collections.sort(nums);      // ascending natural-order sort
        System.out.println(nums);
        Collections.shuffle(nums);   // randomly permute the elements
        System.out.println(nums);
        System.out.println(Collections.max(nums));   // largest element
        System.out.println(Collections.min(nums));   // smallest element
        Collections.replaceAll(nums, 0, 1);          // replace every 0 with 1
        System.out.println(nums);
        System.out.println(Collections.frequency(nums, 1)); // occurrences of 1
        // binarySearch requires a sorted list; it returns the match index,
        // or a negative insertion point when the value is absent.
        Collections.sort(nums);
        System.out.println(Collections.binarySearch(nums, 3));
    }
}
<file_sep>package edu.xcdq.list;
import java.util.ArrayList;
import java.util.List;
/**
* @author huyuansong
* @date 2021/5/6 8:22
*/
/**
 * Walk-through of basic ArrayList operations: add, set, addAll, iteration,
 * remove, isEmpty, size and clear.
 *
 * @author huyuansong
 * @date 2021/5/6 8:22
 */
public class Test01ArrayList {
    public static void main(String[] args) {
        ArrayList<Integer> primary = new ArrayList<>();
        // add two elements, then replace the second one
        primary.add(123);
        primary.add(346);
        System.out.println(primary);
        primary.set(1, 777);
        System.out.println(primary);
        // merge a second list into the first with addAll
        List<Integer> extras = new ArrayList<>(); // default capacity is 10
        extras.add(666);
        extras.add(999);
        primary.addAll(extras);
        System.out.println(primary);
        // print each element of the second list
        for (Integer value : extras) {
            System.out.println(value);
        }
        // drop the first element of the second list
        extras.remove(0);
        System.out.println(extras);
        // report the size, then empty the first list
        if (!primary.isEmpty()) {
            System.out.println("list.size = " + primary.size());
            primary.clear();
        }
        System.out.println("list.size = " + primary.size());
    }
}
<file_sep>package edu.xcdq.set;
import javax.management.OperationsException;
import java.util.HashSet;
/**
* @author huyuansong
* @date 2021/5/6 11:07
*/
/**
 * Shows that a HashSet declared as HashSet&lt;Object&gt; can hold mixed
 * element types, and that duplicate insertions are silently dropped.
 */
public class HashSetTest01 {
    public static void main(String[] args) {
        HashSet<Object> items = new HashSet<>();
        // three strings and three distinct ints...
        items.add("aaa");
        items.add("bbb");
        items.add("ccc");
        items.add(111);
        items.add(222);
        items.add(333);
        // ...plus one duplicate: the set keeps a single 333
        items.add(333);
        for (Object item : items) {
            System.out.print(item + "\t");
        }
    }
}
<file_sep>package edu.xcdq.tools;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
/**
* @author huyuansong
* @date 2021/5/6 16:29
*/
/**
 * Converting between arrays and lists:
 * <ul>
 *   <li>Arrays.asList gives a fixed-size List view backed by the array;</li>
 *   <li>wrapping it in a new ArrayList yields a fully mutable copy;</li>
 *   <li>List.toArray converts back to an array.</li>
 * </ul>
 */
public class Test01 {
    public static void main(String[] args) {
        String str[] = { "中文", "计算机", "ABC", "123", "<EMAIL>" };
        // 1) Arrays.asList(): a fixed-size view — structural changes such as
        //    add/remove on this list would throw UnsupportedOperationException.
        List<String> strList = Arrays.asList(str);
        for (String string : strList) {
            System.out.print(string + "\t");
        }
        System.out.println();
        // 2) Copy into an ArrayList so structural changes (remove) work.
        //    Fix: removed the original's unused LinkedList copy of the same
        //    data, which was constructed and never read.
        ArrayList<String> alist = new ArrayList<>(Arrays.asList(str));
        alist.remove(2);
        for (String string : alist) {
            System.out.print(string + "\t");
        }
        // 3) Convert the list back to an array and print it.
        Object[] res = alist.toArray();
        for (Object element : res) {
            System.out.print(element + "\t");
        }
    }
}
<file_sep>package edu.xcdq.hashmap;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
* @author huyuansong
* @date 2021/5/6 15:20
*/
/**
 * Three ways to iterate a HashMap: the entry set, the key set (with a
 * lookup per key), and the values view.
 *
 * @author huyuansong
 * @date 2021/5/6 15:20
 */
public class HashMapTest01 {
    public static void main(String[] args) {
        // simple string -> string key/value map
        HashMap<String, String> profile = new HashMap<>();
        profile.put("name", "翠花");
        profile.put("age", "18");
        profile.put("sex", "男");
        System.out.println(profile);
        // iterate key/value pairs directly
        for (Map.Entry<String, String> entry : profile.entrySet()) {
            System.out.println(entry);
        }
        // iterate keys and look each value up
        for (String key : profile.keySet()) {
            System.out.println(key + "= " + profile.get(key));
        }
        // iterate the values only
        for (String value : profile.values()) {
            System.out.println(value + "\t ");
        }
    }
}
| 209ff57417ac9f2dca498c504dbd9d793a521b47 | [
"Java"
] | 5 | Java | WWW0121/Person.java | 11930356b5e4459b7ba9fb3f0eb47d7e6ffe8db4 | a97a94e91b216f3bcf4531033abe5f7769d46cd8 |
refs/heads/master | <file_sep>import firebase from 'firebase/app';
import 'firebase/storage';
import 'firebase/firestore';
// Your web app's Firebase configuration
// For Firebase JS SDK v7.20.0 and later, measurementId is optional
// NOTE(review): web Firebase config values (including apiKey) are shipped to
// clients by design and are not secrets by themselves, but access control
// must come from Firebase security rules — confirm Storage/Firestore rules
// are locked down for this project.
var firebaseConfig = {
  apiKey: "AIzaSyAIq-4FMc9i9MsrYho1u2qn1gn1A4DT564",
  authDomain: "firegram-3caf5.firebaseapp.com",
  projectId: "firegram-3caf5",
  storageBucket: "firegram-3caf5.appspot.com",
  messagingSenderId: "376396774116",
  appId: "1:376396774116:web:c974cbac236627faa28af1",
  measurementId: "G-4J1V7N1GZE"
};
// Initialize Firebase
firebase.initializeApp(firebaseConfig);
// Shared service handles exported for the rest of the app.
const projectStorage = firebase.storage();       // Cloud Storage handle
const projectFirestore = firebase.firestore();   // Firestore handle
const timestamp = firebase.firestore.FieldValue.serverTimestamp; // server-side timestamp factory
export {projectStorage, projectFirestore, timestamp}; | f5fb801932f8625077551197f7f5d95d90797051 | [
"JavaScript"
] | 1 | JavaScript | sthasadin/firegram-imagegallery | e87cdcb43f8d9f57d5a865e1fc4a7a2730fe1429 | cb58fabb00322cf61644ad8113282ab323bdbba2 |
refs/heads/master | <repo_name>robin249/Magazine<file_sep>/app/models/comment.rb
# A comment that can be attached to any record via the polymorphic
# `commentable` association — including another Comment, which gives
# threaded (nested) replies.
class Comment < ActiveRecord::Base
  # attr_accessible :content, :parent_id, :commentable_id, :commentable_type
  # attr_accessor :parent_id
  belongs_to :commentable, :polymorphic => true
  # Replies to this comment; destroying a comment destroys its reply subtree.
  has_many :comments, :as => :commentable, :dependent => :destroy
end
<file_sep>/app/controllers/comments_controller.rb
# Creates comments on any "commentable" parent record; the parent is
# inferred from the route parameters (see find_commentable).
class CommentsController < ApplicationController
  def index
  end

  # Builds an unsaved comment pre-bound to the parent record so the form
  # knows which commentable it belongs to.
  def new
    # @parent_id = params.delete(:parent_id)
    @commentable = find_commentable
    @comment = Comment.new( :commentable_id => @commentable.id,
                            :commentable_type => @commentable.class.to_s)
  end

  # Saves a new comment under the inferred parent and redirects back to
  # the page the user came from, with a success or failure notice.
  def create
    @commentable = find_commentable
    # binding.pry
    @comment = @commentable.comments.new comment_params
    if @comment.save
      redirect_to :back, notice: 'Your comment was successfully posted!'
    else
      redirect_to :back, notice: "Your comment wasn't posted!"
    end
  end

  private

  # Infers the parent record from the params: the first parameter whose
  # name ends in "_id" (e.g. article_id=5) is classified and looked up
  # (Article.find(5)). Returns nil when no such parameter is present.
  def find_commentable
    params.each do |name, value|
      if name =~ /(.+)_id$/
        return $1.classify.constantize.find(value)
      end
    end
    nil
  end

  # Strong parameters: only the comment body may be mass-assigned.
  def comment_params
    params.require(:comment).permit(:body)
  end
end
"Ruby"
] | 2 | Ruby | robin249/Magazine | 143c39c4585ea4aa3ec310a3b46e16d631ed3199 | 5dc95c5f38f6a16af2cfb1e2901cf14d64b06d3c |
refs/heads/master | <repo_name>dnarasi1/chandy_lamport_algorithm<file_sep>/controller.py
import bank_pb2
import sys
import socket
from time import sleep
import random
# Controller for the distributed bank (Python 2):
#   argv[1] = total money to split evenly across branches
#   argv[2] = branches file, one "<name> <ip> <port>" per line
# Phase 1: connect to every branch and send InitBranch with its share of the
# money and the full branch list. Phase 2: forever, trigger a Chandy-Lamport
# snapshot on a random branch, wait, then collect and print all local states.
# NOTE: indentation below is reconstructed — the source dump lost it.
if __name__ == '__main__':
    s_id = 1                       # monotonically increasing snapshot id
    amount = int(sys.argv[1])      # total money in the distributed bank
    fn = sys.argv[2]               # path to the branches file
    b = []                         # parsed rows: [name, ip, port]
    branchSocList = {}             # port string -> connected TCP socket
    port_map = {}                  # port string -> branch name (for printing)
    with open(fn) as f:
        for line in f:
            b.append(line.split())
    try:
        # Split the total evenly; integer division, so remainders are dropped.
        bal = int(amount / len(b))
        for branch in b:
            port_map[branch[2]] = branch[0]
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((branch[1], int(branch[2])))
            # InitBranch carries the starting balance plus the full roster.
            initMsg = bank_pb2.InitBranch()
            initMsg.balance = bal
            for banks in b:
                branches = initMsg.all_branches.add()
                branches.name = banks[0]
                branches.ip = banks[1]
                branches.port = int(banks[2])
            send_Message = bank_pb2.BranchMessage()
            send_Message.init_branch.CopyFrom(initMsg)
            s.sendall(send_Message.SerializeToString())
            branchSocList[branch[2]] = s
        while 1:
            # Let transfers flow for a while before starting a snapshot.
            sleep(10)
            initSnapshot = bank_pb2.InitSnapshot()
            initSnapshot.snapshot_id = s_id
            # Pick a random branch to initiate the snapshot.
            val = random.choice(branchSocList.keys())
            print 'Initiating snapshot for:', val
            send_message = bank_pb2.BranchMessage()
            send_message.init_snapshot.CopyFrom(initSnapshot)
            branchSocList[val].sendall(send_message.SerializeToString())
            # Give the marker protocol time to terminate before collecting.
            sleep(10)
            retrieveSnapshot = bank_pb2.RetrieveSnapshot()
            retrieveSnapshot.snapshot_id = s_id
            send_message2 = bank_pb2.BranchMessage()
            send_message2.retrieve_snapshot.CopyFrom(retrieveSnapshot)
            print "snapshot_id:", s_id
            for banks in branchSocList:
                # filled maps source-port -> recorded in-transit amount
                # (0 until a channel state from that source is seen).
                filled = {}
                for obj in port_map:
                    filled[obj] = 0
                branchSocList[banks].sendall(send_message2.SerializeToString())
                received_message = branchSocList[banks].recv(1024)
                data = bank_pb2.BranchMessage()
                data.ParseFromString(received_message)
                if data.WhichOneof('branch_message') == 'return_snapshot':
                    localSnap = data.return_snapshot.local_snapshot
                    # print localSnap
                    print port_map[banks], ":", localSnap.balance,
                    # channel_state entries are "<src_port><amount>" strings;
                    # match the source-port prefix to split them apart.
                    for val in localSnap.channel_state:
                        for val2 in port_map:
                            if str(val).startswith(str(val2)):
                                filled[val2] = str(val)[len(str(val2)):]
                                # print port_map[str(val2)], "->", port_map[banks], ":", str(val)[len(str(val2)):]
                    # Print every incoming channel for this branch on one line.
                    for branch in filled:
                        if branch != banks:
                            print port_map[branch], "->", port_map[banks], ":", filled[branch],
                    print
            s_id += 1
    except KeyboardInterrupt:
        print ("\n Server Stopped----\n")
<file_sep>/README.md
# chandy_lamport_algorithm
Chandy-Lamport global snapshot algorithm using protobuf
# 1 A Distributed Banking Application
You will first implement a distributed banking application. The distributed bank has multiple branches. Every
branch knows about all other branches. TCP connections are setup between all pairs of branches. Each branch
starts with an initial balance. The branch then randomly selects another destination branch and sends a random
amount of money to this destination branch at unpredictable times.
Each branch must handle the following two types of messages in a distributed bank:
InitBranch this message contains two pieces of information: the initial balance of a branch and a list of all
branches (including itself) in the distributed bank. Upon receiving this message, a branch will set its initial
balance and record the list of all branches.
Transfer this message indicates that a remote, source branch is transferring money to the current, target branch.
The message contains an integer representing the amount of money being transferred and the remote, source
branch’s name. The branch receiving the message should increase its balance by the amount of money
indicated in the message.
Every branch is both a sender and a receiver. A sender can only send positive amount of money. It needs to
first decrease its balance, then send out a message containing the amount of money to a remote branch. A branch’s
balance should not become negative. For simplicity, the amount of money should be drawn randomly between 1%
and 5% of the branch’s initial balance and can only be an integer. Intervals between consecutive sending operations
should be drawn uniformly at random between 0 and 5 seconds.
If you choose to implement the sender and receiver in different threads, you have to protect the balance of your
branch using a mutex or another synchronization method. In addition, you can assume that neither the branches
nor the communication channels will fail.
Your branch executable should take two command line inputs. The first one is a human-readable name of the
branch, e.g., “branch1”. The second one specifies the port number the branch runs on.
$> ./branch branch1 9090
It is expected that your branch executable will start a new branch called “branch1” which listens on port 9090 for
incoming TCP connections.
# 2.1 Controller
In this assignment, we rely on a controller to set a branch’s initial balance and notify every branch of all branches in
the distributed bank. This controller takes two command line inputs: the total amount of money in the distributed
bank and a local file that stores the names, IP addresses, and port numbers of all branches.
An example of how the controller program should operate is provided below:
$> ./controller 4000 branches.txt
The file (branches.txt) should contain a list of names, IP addresses, and ports, in the format “<name>
<public-ip-address> <port>”, of all of the running branches.
For example, if four branches with names: “branch1”, “branch2”, “branch3”, and “branch4” are running on
remote01.cs.binghamton.edu port 9090, 9091, 9092, and 9093, then branches.txt should contain:
branch1 172.16.31.10 9090
branch2 172.16.31.10 9091
branch3 172.16.31.10 9092
branch4 172.16.31.10 9093
The controller will distribute the total amount of money evenly among all branches, e.g., in the example above,
every branch will receive $1,000 initial balance. The controller initiates all branches by individually calling the
initBranch method described above. Note that the initial balance must be integer.
# 3 Taking Global Snapshots of the Bank
In this part, you will use the Chandy-Lamport global snapshot algorithm take global snapshots of your bank. In
case of the distributed bank, a global snapshot will contain both the local state of each branch (i.e., its balance) and
the amount of money in transit on all communication channels. Each branch will be responsible for recording and
reporting its own local state (balance) as well as the total money in transit on each of its incoming channels.
For simplicity, in this assignment, the controller will contact one of the branches to initiate the global
snapshot. It does so by sending a message indicating the InitSnapshot operation to the selected branch. The
selected branch will then initiate the snapshot by first recording its own local state and send out Marker messages
to all other branches. After some time (long enough for the snapshot algorithm to finish), the controller sends
RetrieveSnapshot messages to all branches to retrieve their recorded local and channel states.
If the snapshot is correct, the total amount of money in all branches and in transit should equal to the
command line argument given to the controller.
Each branch needs to support the following four types of messages to add snapshot capability to your distributed
bank.
InitSnapshot upon receiving this message, a branch records its own local state (balance) and sends out Marker
messages to all other branches. To identify multiple snapshots, the controller includes a snapshot_id to
this initSnapshot message, and all the Marker messages should include this snapshot_id as well.
Marker every Marker message includes a snapshot_id and the sending branch’s name branch_name. Upon
receiving this message, the receiving branch does the following:
1. If this is the first Marker message with the snapshot_id the receiving branch has seen, the re-
ceiving branch records its own local state (balance), records the state of the incoming channel from
the sender to itself as empty, immediately starts recording on other incoming channels, and sends out
Marker messages to all of its outgoing channels (i.e., all branches except itself).
Note that to ensure the correctness of the algorithm, it is important that no Transfer messages can
be sent out until all the necessary Marker messages have been sent out.
2. Otherwise, the receiving branch records the state of the incoming channel as the sequence of money
transfers that arrived between when it recorded its local state and when it received the Marker.
RetrieveSnapshot the controller sends retrieveSnapshot messages to all branches to collect snapshots. This mes-
sage will contain the snapshot_id that uniquely identifies a snapshot. A receiving branch should retrieve its
recorded local and channel states and return them to the caller (i.e., the controller) by sending a returnSnap-
shot message (next).
ReturnSnapshot a branch returns the controller its captured local snapshot in this message. This message should
include the snapshot_id, captured local state, as well as all incoming channel states.
The controller should be fully automated. It periodically sends the InitSnapshot message with mono-
tonically increasing snapshot_id on a randomly selected branch and outputs to the console the aggregated
global snapshot retrieved from all branches in the correct format. In addition, the snapshot taken by branches
needs to be identified by their names: e.g., “branch1” to represent branch1’s local state, and “branch2->branch1”
to represent the channel state. Here is an example controller output:
snapshot_id: 10
branch1: 1000, branch2->branch1: 10, branch3->branch1: 0
branch2: 1000, branch1->branch2: 0, branch3->branch2: 15
branch3: 960, branch1->branch3: 15, branch2->branch3: 0
# 3.1 FIFO message delivery
The correctness of the Chandy-Lamport snapshot algorithm relies on FIFO message delivery of all communica-
tion channels among all branches (processes). A communication channel is a one way connection between two
branches. For example, in this assignment, from “branch1” to “branch2” is one communication channel. From
“branch2” to “branch1” is another channel.
In order to ensure FIFO message delivery, in this assignment, we use TCP as the transport layer protocol for
branch communications – both banking messages described in Part 2 and snap-shot related messages described
in Part 3. TCP ensures reliability and FIFO message delivery. Because TCP is full duplex, allowing messages to
transmit in both directions, there are two ways to setup branch communications:
1. We can use TCP in the half duplex manner, setting up two TCP connections between every pair of
branches. In this way, suppose there exists 4 branches, we will setup a total of 12 TCP connections, with
each branch handling 3 incoming connections and 3 outgoing connections.
If you choose to implement the assignment in this way, you need to make sure that you do not mix up the use
of these connections. For example, if a connection is designated to be an incoming connection for branch1,
then branch1 should never use this connection for sending outgoing messages. Otherwise, the FIFO nature
of communication channels will be violated.
2. Or, we can take advantage of the full duplex nature of TCP and have each branch should set up exactly one
TCP connection with every other branches in the distributed bank. In this way, given 4 branches, we will
set up a total of 6 TCP connections.
You can use either design. Bottom line is the FIFO property of communication channels can never be violated.
<file_sep>/branches.py
#!/usr/bin/env python
import bank_pb2
import sys
import threading
import socket
from threading import Thread
from time import sleep
from random import randint
import random
# --- shared module state, touched by both the sender (sleep_thread) and the
# --- receiver threads; the intent is to guard mutations with `mutex`.
balance = 0                # this branch's current account balance
branches_all = []          # peer descriptors from InitBranch: {name, ip, port}
branch_soc_list = {}       # peer port -> outgoing TCP socket to that peer
mutex = threading.Lock()   # protects balance / branch_soc_list
this_branch = ''           # human-readable name of this branch
curr_snap_id = 0           # id of the snapshot currently being recorded
RECORD = {}                # peer port -> True while recording that incoming channel
global_snapshot = {}       # snapshot_id -> {'balance': ..., src_port: in-transit money}
marker_count = 0           # marker bookkeeping — usage not visible here; verify in receiver
port = 0                   # presumably this branch's listening port — TODO confirm where it is set
def record_list():
    """Reset the channel-recording flags: no incoming channel is being
    recorded until a snapshot begins."""
    global RECORD
    RECORD.update({peer["port"]: False for peer in branches_all})
def branches_add(branch_list):
    """Store every peer branch from the controller's InitBranch roster,
    skipping our own entry, then reset the recording flags."""
    own_port = int(sys.argv[2])
    for proto_branch in branch_list:
        # Flatten the protobuf message into a plain field-name -> value dict.
        info = {field.name: field_value
                for field, field_value in proto_branch.ListFields()}
        if info["port"] != own_port:
            branches_all.append(info)
    record_list()
def connect_branches():
    """Open one outgoing TCP connection to every peer branch.

    Each socket is stored in the shared branch_soc_list map
    (port -> socket); the map write is guarded by the module-level mutex.
    """
    print("connecting to other branches")
    try:
        for peer in branches_all:
            soc = create_soc(peer["ip"], peer["port"])
            # Fix: the original acquired the mutex before create_soc() and
            # released it manually, leaking the lock if connect raised;
            # `with` guarantees release on every exit path.
            with mutex:
                branch_soc_list[peer["port"]] = soc
    except KeyboardInterrupt:
        # Fix: the original closed only the `soc` local, which is unbound
        # if interrupted before the first connect; close what we opened.
        for opened in branch_soc_list.values():
            opened.close()
        exit()
def create_soc(ip, port_local):
    """Open and return a TCP connection to (ip, port_local)."""
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((ip, port_local))
    return conn
def sleep_thread():
global balance
global branches_all
global branch_soc_list
global port
t = float(sys.argv[3])
while 1:
print "Sleeping for", t/1000, "seconds"
sleep(t/1000)
print "Woke up"
transfer_msg = bank_pb2.Transfer()
withdraw = randint(10, 50)
val = random.choice(branch_soc_list.keys())
if withdraw <= balance:
transfer_msg.money = withdraw
transfer_msg.src_branch = sys.argv[2]
transfer_msg.dst_branch = str(val)
send_message = bank_pb2.BranchMessage()
send_message.transfer.CopyFrom(transfer_msg)
lock = threading.Lock()
lock.acquire()
if not RECORD[int(val)]:
print "Sending", withdraw, "to", val
balance -= withdraw
branch_soc_list[val].sendall(send_message.SerializeToString())
print "Remaining balance is:", balance
else:
print "Not sending to", val
lock.release()
def receiver(client_socket):
    # Per-connection message loop. Dispatches on the BranchMessage oneof and
    # implements this branch's side of the Chandy-Lamport snapshot protocol:
    # init_branch, transfer, init_snapshot, marker, retrieve_snapshot.
    global balance
    global branches_all
    global branch_soc_list
    global curr_snap_id
    global this_branch
    global marker_count
    global RECORD
    while 1:
        # NOTE(review): one recv() is parsed as exactly one protobuf message.
        # TCP does not preserve message boundaries, so split or coalesced
        # messages would fail to parse -- confirm messages stay small and the
        # controller paces its sends.
        received_message = client_socket.recv(1024)
        data = bank_pb2.BranchMessage()
        data.ParseFromString(received_message)
        if data.WhichOneof('branch_message') == 'init_branch':
            # Controller sent the starting balance plus the full branch list:
            # remember the peers, connect to them, then start transferring.
            lock = threading.Lock()
            lock.acquire()
            balance = data.init_branch.balance
            lock.release()
            branches_add(data.init_branch.all_branches)
            connect_branches()
            sleep(5)  # give the other branches time to finish connecting
            print("calling sleep thread")
            try:
                th = Thread(target=sleep_thread, args=())
                th.daemon = True
                th.start()
            except KeyboardInterrupt:
                exit()
        if data.WhichOneof('branch_message') == 'transfer':
            # Incoming money: apply it to the balance, or record it as channel
            # state when the sender's channel is being recorded for a snapshot.
            if not RECORD[int(data.transfer.src_branch)]:
                # NOTE(review): a fresh Lock per message gives no real mutual
                # exclusion with sleep_thread -- confirm intended.
                lock = threading.Lock()
                lock.acquire()
                balance += data.transfer.money
                print "Balance in the bank after getting transferred money:", balance
                lock.release()
            else:
                print "Recording", data.transfer.src_branch, "->", this_branch, data.transfer.money
                lock = threading.Lock()
                lock.acquire()
                if curr_snap_id not in global_snapshot:
                    global_snapshot[curr_snap_id] = {}
                global_snapshot[curr_snap_id][data.transfer.src_branch] = data.transfer.money
                lock.release()
        if data.WhichOneof('branch_message') == 'init_snapshot':
            # This branch was chosen to initiate a snapshot: save the local
            # balance, start recording all incoming channels, and send a
            # marker to every peer.
            print "Received init_snapshot for", this_branch, "with snap_id", data.init_snapshot.snapshot_id
            lock = threading.Lock()
            lock.acquire()
            for obj in RECORD:
                RECORD[obj] = True
            curr_snap_id = data.init_snapshot.snapshot_id
            if curr_snap_id not in global_snapshot:
                global_snapshot[curr_snap_id] = {}
            global_snapshot[curr_snap_id]['balance'] = balance
            lock.release()
            for banks in branch_soc_list:
                marker = bank_pb2.Marker()
                marker.src_branch = str(port)
                marker.dst_branch = str(banks)
                marker.snapshot_id = curr_snap_id
                send_message = bank_pb2.BranchMessage()
                send_message.marker.CopyFrom(marker)
                print "Sending marker to", banks, "for snapshot", curr_snap_id
                branch_soc_list[banks].sendall(send_message.SerializeToString())
        if data.WhichOneof('branch_message') == 'marker':
            if data.marker.snapshot_id != curr_snap_id:
                # First marker of a new snapshot: save local state, forward a
                # marker to every peer, and stop recording the channel the
                # marker arrived on.
                print "Received marker from", data.marker.src_branch, "and snap_id is", data.marker.snapshot_id
                lock = threading.Lock()
                lock.acquire()
                for obj in RECORD:
                    RECORD[obj] = True
                curr_snap_id = data.marker.snapshot_id
                if curr_snap_id not in global_snapshot:
                    global_snapshot[curr_snap_id] = {}
                global_snapshot[curr_snap_id]['balance'] = balance
                lock.release()
                for banks in branch_soc_list:
                    marker = bank_pb2.Marker()
                    marker.src_branch = str(port)
                    marker.dst_branch = str(banks)
                    marker.snapshot_id = curr_snap_id
                    send_message = bank_pb2.BranchMessage()
                    send_message.marker.CopyFrom(marker)
                    print "Sending marker to", banks, "for snapshot", curr_snap_id
                    branch_soc_list[banks].sendall(send_message.SerializeToString())
                    # NOTE(review): `banks` is an int port while src_branch is
                    # a string, so this comparison can never be True -- the
                    # sender's channel flag is likely never cleared; confirm.
                    if banks == data.marker.src_branch:
                        RECORD[banks] = False
            elif marker_count < len(branches_all):
                # Marker for the snapshot already in progress: close recording
                # on that channel; once all peers answered, snapshot is done.
                print "Reply marker from", data.marker.src_branch, "for snapshot", data.marker.snapshot_id
                marker_count += 1
                lock = threading.Lock()
                lock.acquire()
                RECORD[int(data.marker.src_branch)] = False
                if marker_count == len(branches_all):
                    print "Received markers from all. Snapshot created"
                    marker_count = 0
                lock.release()
        if data.WhichOneof('branch_message') == 'retrieve_snapshot':
            # Controller collects the snapshot: reply with the saved balance
            # and each recorded channel value (encoded by concatenating the
            # source port digits with the amount digits into one int).
            print "Received retrieve_snapshot for", this_branch
            snapshot = bank_pb2.ReturnSnapshot()
            snapshot.local_snapshot.snapshot_id = data.retrieve_snapshot.snapshot_id
            for key, val in global_snapshot.items():
                if key == data.retrieve_snapshot.snapshot_id:
                    for key2, val2 in val.items():
                        if key2 != 'balance':
                            snapshot.local_snapshot.channel_state.append(int(str(key2)+str(val2)))
                        else:
                            snapshot.local_snapshot.balance = val2
            send_message = bank_pb2.BranchMessage()
            send_message.return_snapshot.CopyFrom(snapshot)
            client_socket.sendall(send_message.SerializeToString())
def client_handler(client_socket):
    """Hand the accepted connection to a daemon thread running receiver()."""
    try:
        worker = Thread(target=receiver, args=(client_socket,))
        worker.daemon = True
        worker.start()
    except KeyboardInterrupt:
        exit()
def main():
    """Bind this branch's listening socket and serve incoming connections.

    Usage: branches.py <branch_name> <port> <sleep_interval_ms>
    Each accepted connection is handed to client_handler(), which spawns a
    daemon receiver thread, so this accept loop never blocks on a client.
    """
    global this_branch
    global port
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    this_branch = sys.argv[1]
    port = int(sys.argv[2])
    sock.bind((host, port))
    sock.listen(1)
    try:
        while True:
            client_socket, client_address = sock.accept()
            print("Connected to client: ", client_address)
            t = Thread(target=client_handler, args=([client_socket]))
            t.daemon = True
            t.start()
    except KeyboardInterrupt:
        sock.close()
        exit()

# Guard the entry point so importing this module does not start the server.
if __name__ == '__main__':
    main()
| d216f68a0b4bddf27d12fa150598cd700bded564 | [
"Markdown",
"Python"
] | 3 | Python | dnarasi1/chandy_lamport_algorithm | 1a8f6595e2c51146a9774a2e28ea47d4f59e7b79 | 4d865ff9f9d0faf13e9d36a381ebd6ff73c9f597 |
refs/heads/master | <file_sep><!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<title>SportsSocialRank</title>
<!-- Bootstrap core CSS -->
<link href="<?php echo base_url(); ?>assets/vendor/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<link rel="stylesheet" href="https://cdn.datatables.net/1.10.19/css/dataTables.bootstrap4.min.css">
<link rel="stylesheet" href="https://cdn.datatables.net/responsive/2.1.0/css/responsive.bootstrap.min.css">
<!-- Custom fonts for this template -->
<link href="<?php echo base_url(); ?>assets/vendor/fontawesome-free/css/all.min.css" rel="stylesheet" type="text/css">
<link href='https://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,800italic,400,300,600,700,800' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Merriweather:400,300,300italic,400italic,700,700italic,900,900italic' rel='stylesheet' type='text/css'>
<!-- Plugin CSS -->
<link href="<?php echo base_url(); ?>assets/vendor/magnific-popup/magnific-popup.css" rel="stylesheet">
<!-- Custom styles for this template -->
<link href="<?php echo base_url(); ?>assets/css/creative.min.css" rel="stylesheet">
<!-- Move After -->
<style>
.dataTables_scroll
{
overflow:auto;
}
.nav-tabs .nav-link.active {
background-color: #000;}
.ui-corner-all
{
-moz-border-radius: 3px;
-webkit-border-radius: 3px;
border-radius: 3px;
}
</style>
</head>
<file_sep>
<!-- End Navbar -->
<div class="content">
<div class="container-fluid">
<a class="navbar-brand" href=""><img style ="border-radius: 50%;" src="<?php echo $userInfo->profile_image_url;?>"> @<?php echo $userInfo->screen_name;
?></a>
<div class="row">
<?php foreach ($rankings as $rank) {
?>
<div class="col-lg-3 col-md-6 col-sm-6">
<div class="card card-stats">
<div class="card-header card-header-info card-header-icon">
<div class="card-icon">
<i class="fa fa-twitter"></i>
</div>
<p class="card-category"><?php echo $rank[1]; ?></p>
<h3 class="card-title">#<?php echo $rank[0]; ?>/<?php echo $rank[2]; ?></h3>
</div>
<div class="card-footer">
<div class="stats">
<i class="material-icons">update</i> Just Updated
</div>
</div>
</div>
</div>
<?php
} ?>
</div>
<div class="row">
<?php foreach ($rankings as $rank) {
?>
<div class="col-lg-12 col-md-12">
<div class="card">
<div class="card-header card-header-warning">
<h4 class="card-title"><?php echo $rank[1]; ?></h4>
</div>
<div class="card-body table-responsive">
<?php
if ($rank[1] == "College Football") {
$rank[1] = "cfb";
} ?>
<table id="<?php echo $rank[1]; ?>-accounts" class="table table-striped " style="width:100%">
<thead>
<tr>
<th>Rank</th>
<th>Name</th>
<th>Followers</th>
<th>Following</th>
<th>Followers Today</th>
</tr>
</thead>
<tbody>
</tbody>
</table>
</div>
</div>
</div> <?php
} ?>
</div>
<file_sep><?php
/**
 * Contact-form controller: validates and persists form submissions.
 */
class FormSubmission extends CI_Controller
{
public function __construct()
{
parent::__construct();
$this->load->model('FormSubmission_model');
}
/**
 * Validate the posted contact form and insert it via the model.
 * Echoes "Error" plus the validation messages on failure.
 */
public function demoForm()
{
$this->load->library('form_validation');
$this->form_validation->set_error_delimiters('<div class="error">', '</div>');
// Name: required, 1-20 characters
$this->form_validation->set_rules('full-name', 'full-name', 'required|min_length[1]|max_length[20]');
// E-mail: required, well-formed address
$this->form_validation->set_rules('email', 'Email', 'required|valid_email');
// NOTE(review): no rules are set for 'subject' or 'message'; they are stored as-is.
if ($this->form_validation->run() == false) {
echo "Error";
echo validation_errors();
} else {
$data = array(
'full-name' => $this->input->post('full-name'),
'email' => $this->input->post('email'),
'subject' => $this->input->post('subject'),
'message' => $this->input->post('message'),
);
$this->db->set($data);
$this->FormSubmission_model->contactFormSubmit($data);
}
}
}
<file_sep>#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import tweepy
import csv
import os
import mysql.connector
import datetime
import pytz
from dateutil import tz
mydb = mysql.connector.connect(
host="sportssocialrank.db.10366090.db2.hostedresource.net",
user="sportssocialrank",
passwd="<PASSWORD>",
database="sportssocialrank"
)
mycursor = mydb.cursor()
#twitter application credentials
consumer_key="6wR5l7KwDSDFmb6swY1seW5MP"
consumer_secret="<KEY>"
#twitter user credentials
access_token="<KEY>"
access_token_secret="<KEY>"
playerList = list();
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# which Twitter list and who owns it
slug = 'bundesliga-teams'
owner = 'Bundesliga_EN'
def get_list_members(api, owner, slug):
    """Return the screen name of every member of the given Twitter list.

    A tweepy Cursor is used so that all pages are fetched, not only the
    first 20 members a single api.list_members call would return.
    """
    cursor = tweepy.Cursor(api.list_members, owner, slug)
    return [member.screen_name for member in cursor.items()]
def get_userinfo(name, time):
    """Fetch one Twitter account's profile and upsert it into the users table.

    Parameters:
        name -- the account's screen name
        time -- timestamp string for this scrape run
    Returns the profile as one comma-separated, double-quoted string.

    Fixes over the previous version:
      - the duplicate-row handler caught psycopg2.IntegrityError and called
        conn.rollback(); neither name exists in this script (the DB is
        mysql.connector), so any integrity error raised NameError instead.
        It now catches mysql.connector.IntegrityError and rolls back mydb.
      - the error print emitted the literal text "'ERROR:', e[0]"; it now
        prints the actual exception.
    """
    # get all user data via a Tweepy API call
    user = api.get_user(screen_name=name)
    # create row data as a single quoted CSV string
    user_info = '"' + time + '"' + "," + '"' + name + '"' + "," + '"' + user.name + '"' + "," + '"' + str(user.followers_count) + '"' + "," + '"' + str(user.friends_count) + '"' + "," + '"' + user.profile_image_url + '"'
    nameString = name
    usernameString = user.name
    category = "Bundesliga"
    try:
        mycursor = mydb.cursor()
        try:
            # Parameterized insert: names come from Twitter and may contain quotes.
            mycursor.execute(
                "INSERT INTO users (Name, Category, Twitter_username, Instagram_username) VALUES (%s, %s, %s, %s)",
                (usernameString, category, nameString, nameString))
        except mysql.connector.IntegrityError:
            # Row already exists -- roll back this statement and carry on.
            mydb.rollback()
        else:
            mydb.commit()
    except Exception as e:
        print("ERROR:", e)
    print(mycursor.rowcount, "record inserted.")
    print(user_info)
    # send that one row back
    return user_info
def get_currenttime():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    now = datetime.datetime.fromtimestamp(time.time())
    return now.strftime('%Y-%m-%d %H:%M:%S')
# Convert from UTC to tz's local time.
def update_db(usernames):
    """Upsert a DB row for every screen name, all stamped with one run time."""
    run_time = get_currenttime()
    for screen_name in usernames:
        get_userinfo(screen_name, run_time)
def main():
    """Fetch every member of the configured Twitter list and update the DB."""
    usernames = get_list_members(api, owner, slug)
    update_db(usernames)


if __name__ == '__main__':
    main()
<file_sep><html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<!-- <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1"> -->
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="<?php echo base_url(); ?>assets/css/header.css">
<link rel="stylesheet" href="<?php echo base_url(); ?>assets/css/stylesheets.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.1.1/css/bootstrap.css">
<link rel="stylesheet" href="https://cdn.datatables.net/1.10.19/css/dataTables.bootstrap4.min.css">
<link rel="stylesheet" href="https://cdn.datatables.net/responsive/2.1.0/css/responsive.bootstrap.min.css">
<!-- Latest compiled and minified JavaScript -->
<title>Sports Social Rank</title>
<?php echo link_tag('assets/css/bootstrap.min.css'); //this will call the css?>
</head>
<nav class="navbar fixed-top navbar-expand-lg navbar-dark bg-social-blue">
<div class="container">
<a class="navbar-brand" href="/sportssocialrank"><img style="width:300px;" src="<?php echo base_url(); ?>assets/images/Logo.png"></a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarSupportedContent" aria-controls="navbarSupportedContent" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarSupportedContent">
<ul class="navbar-nav mr-auto">
<li class="nav-item active">
<a class="nav-link" href="<?php echo base_url();?>">Home <span class="sr-only">(current)</span></a>
</li>
<li class="nav-item">
<a class="nav-link" href="<?php echo base_url();?>users">Teams</a>
</li>
</ul>
</div> </div>
</nav>
<body>
<file_sep><?php
/**
 * Category_model: CRUD helpers for category_details and the
 * accounts_category link table.
 *
 * Changes over the previous version: bound parameter in getcategoryID
 * (was string-interpolated -- SQL injection risk), dead commented-out
 * code and empty foreach loops removed, debug echoes removed from
 * accountCategoryExist (they leaked into every response), unused
 * $lastQuery locals dropped. All method signatures and return values
 * are unchanged.
 */
class Category_model extends CI_Model
{
    public function __construct()
    {
        $this->load->database();
    }

    /**
     * Id of the category with this exact name, or 0 when it does not exist.
     */
    public function categoryExist($category)
    {
        $row = $this->db->get_where('category_details', array('name' => $category))->row();
        return $row ? $row->id : 0;
    }

    /**
     * Id of the category whose name matches $category once all spaces are
     * stripped from both sides (e.g. "CollegeFootball" matches
     * "College Football"). Returns null when no category matches.
     */
    public function getcategoryID($category)
    {
        // Bound parameter instead of string interpolation (injection-safe).
        $sql = "SELECT * FROM category_details WHERE REPLACE(name, ' ', '') = REPLACE(?, ' ', '')";
        $row = $this->db->query($sql, array($category))->row();
        return $row ? $row->id : null;
    }

    /**
     * Insert the category if it is not present yet, then return its id.
     */
    public function addNewCategory($category)
    {
        if ($this->categoryExist($category) == 0) {
            $this->db->insert('category_details', array('name' => $category));
        }
        $row = $this->db->get_where('category_details', array('name' => $category))->row();
        return $row ? $row->id : null;
    }

    /**
     * Delete the category row with this name. Returns the driver result.
     */
    public function removeCategory($category)
    {
        return $this->db->delete('category_details', array('name' => $category));
    }

    /**
     * Link an account to a category.
     * NOTE(review): no duplicate check is performed (the old guard was
     * commented out), so repeated calls insert repeated link rows.
     */
    public function addNewAccountCategory($accountId, $categoryId)
    {
        $this->db->insert('accounts_category', array(
            'accounts_id' => $accountId,
            'category_details_id' => $categoryId,
        ));
    }

    /**
     * All accounts_category rows for one account.
     */
    public function getAccountsCategories($accountId)
    {
        $this->db->distinct();
        return $this->db->get_where('accounts_category', array('accounts_id' => $accountId))->result();
    }

    /**
     * Every row of category_details.
     */
    public function getAllCategoriesId()
    {
        return $this->db->get('category_details')->result();
    }

    /**
     * Number of accounts linked to the given category.
     */
    public function categoryCount($categoryId)
    {
        return $this->db->get_where('accounts_category', array('category_details_id' => $categoryId))->num_rows();
    }

    /**
     * Name of the category with this id, or null when it does not exist.
     */
    public function getCategoryName($categoryId)
    {
        $row = $this->db->get_where('category_details', array('id' => $categoryId))->row();
        return $row ? $row->name : null;
    }

    /**
     * Id of the account/category link row if it exists, false otherwise.
     */
    public function accountCategoryExist($accountId, $categoryId)
    {
        $row = $this->db->get_where('accounts_category', array(
            'accounts_id' => $accountId,
            'category_details_id' => $categoryId,
        ))->row();
        return $row ? $row->id : false;
    }
}
<file_sep><?php
/**
 * TwitterAccount controller: thin HTTP entry points that delegate straight
 * to TwitterAccounts_model.
 */
class TwitterAccount extends CI_Controller
{
public function __construct()
{
parent::__construct();
$this->load->database();
$this->load->model('TwitterAccounts_model');
}
/**
 * Pass-through to TwitterAccounts_model::getAccountInfo().
 * NOTE(review): the model's return value is discarded here -- presumably
 * the model produces its own output; confirm against TwitterAccounts_model.
 */
public function getAccountInfo($accountId)
{
$this->TwitterAccounts_model->getAccountInfo($accountId);
}
/**
 * Pass-through to TwitterAccounts_model::getFollowers().
 */
public function getFollowers($accountId)
{
$this->TwitterAccounts_model->getFollowers($accountId);
}
}
<file_sep><?php
/**
 * TopFive model: college-football Twitter accounts ranked by followers.
 *
 * Fixes over the previous version:
 *  - removed an extra closing brace that made the file a parse error;
 *  - the SQL contained a semicolon in the middle of the statement and the
 *    filter `where user = collegeFootball` referenced an unquoted,
 *    non-existent identifier. It now filters on users.Category.
 *    NOTE(review): the literal 'College Football' matches the category
 *    label used by the views -- confirm against the users table data.
 */
class TopFive extends CI_Model {

    public function __construct()
    {
        $this->load->database();
    }

    /**
     * Twitter rows joined with their user rows for the College Football
     * category, ordered by follower count descending.
     */
    public function collegeFootball(){
        $query = $this->db->query("
            SELECT tt.*, users.Category
            FROM twitter tt
            INNER JOIN users ON users.Twitter_username = tt.display_name
            WHERE users.Category = 'College Football'
            ORDER BY tt.followers DESC");
        return $query->result();
    }
}
<file_sep><?php
/**
 * FormSubmission_model: persists contact-form submissions.
 */
class FormSubmission_model extends CI_Model
{
    public function __construct()
    {
        $this->load->database();
    }

    /**
     * Legacy alias kept for existing callers; identical to contactFormSubmit().
     */
    public function demoFormSubmit($data)
    {
        $this->contactFormSubmit($data);
    }

    /**
     * Insert one submission into contact_form and echo "1" as a success flag.
     */
    public function contactFormSubmit($data)
    {
        $this->db->insert('contact_form', $data);
        echo("1");
    }
}
<file_sep><?php
/**
 * Pages controller: renders static pages from views/pages/.
 */
class Pages extends CI_Controller
{
/**
 * Render the named static page (404 when the view file is missing).
 * NOTE(review): only 'about' and 'contact' are actually rendered below;
 * any other existing page -- including the 'home' default -- passes the
 * file_exists check but produces no output. Confirm this is intended.
 */
public function view($page = 'home')
{
if (! file_exists(APPPATH.'views/pages/'.$page.'.php')) {
// Whoops, we don't have a page for that!
show_404();
}
$this->load->helper('form');
$data['title'] = ucfirst($page); // Capitalize the first letter
// $this->load->model('DBUpdate_model');
//
// $data['latestUpdate'] = $this->DBUpdate_model->latestUpdate();
// $data['previousUpdate'] = $this->DBUpdate_model->previousUpdate();
if ($page == 'about') {
$this->load->view('templates/aboutheader');
$this->load->view('pages/'. $page, $data);
$this->load->view('admin/footer', $data);
}
if ($page == 'contact') {
$this->load->view('templates/contactheader');
$this->load->view('pages/'. $page, $data);
$this->load->view('admin/footer', $data);
}
}
/**
 * NOTE(review): debug stub -- echoes a placeholder string and never uses
 * the User_model it loads.
 */
public function user($name)
{
$this->load->model('User_model');
echo 'efwefwef';
}
}
<file_sep><?php
/**
 * DBUpdate_model: timestamps of the twitter_dbupdates scrape runs.
 */
class DBUpdate_model extends CI_Model {

    public function __construct()
    {
        $this->load->database();
    }

    /**
     * Timestamp of the most recent scrape run.
     */
    public function latestUpdate()
    {
        $query = $this->db->query("SELECT MAX(date) as max_date
        FROM twitter_dbupdates");
        return $query->row()->max_date;
    }

    /**
     * Timestamp of the run before the most recent one.
     */
    public function previousUpdate()
    {
        $query = $this->db->query("Select date as prev_date from twitter_dbupdates order by date DESC Limit 1,1");
        return $query->row()->prev_date;
    }
}
<file_sep><?php
// Category roll-up script: archives the current per-category aggregates into
// categorydata_old, then rebuilds categorydata_new from users joined with
// twitterdata_new (account count, total followers, followers gained today).
//
// Fix over the previous version: `echo '$sql';` used single quotes and
// printed the literal text "$sql" instead of the query; unused locals
// ($count, bare $num_* statements, $str) removed.
$categoryArray = array();

// NOTE(review): database credentials are hard-coded; move to configuration.
$servername = "sportssocialrank.db.10366090.db2.hostedresource.net";
$username = "sportssocialrank";
$password = "<PASSWORD>";
$dbname = "sportssocialrank";

// Create connection
$conn = new mysqli($servername, $username, $password, $dbname);
if ($conn->connect_error) {
    die("Connection failed: " . $conn->connect_error);
}

// Archive the previous aggregates, then clear the live table.
$sql = "INSERT INTO categorydata_old SELECT * FROM categorydata_new;";
$result = $conn->query($sql);
$sql = "DELETE FROM categorydata_new;";
$result = $conn->query($sql);

// Collect every distinct category name.
$sql = "SELECT DISTINCT category FROM users";
$result = $conn->query($sql);
if ($result->num_rows > 0) {
    while ($row = $result->fetch_assoc()) {
        $categoryArray[] = $row['category'];
    }
} else {
    echo "0 results";
}

foreach ($categoryArray as &$category) {
    // Number of accounts in this category.
    $sql = "SELECT COUNT(*) FROM users WHERE category = '$category'";
    $result = $conn->query($sql);
    $row = $result->fetch_assoc();
    $num_accounts = $row['COUNT(*)'];
    echo $sql;
    echo "\r\n";

    // Total follower count across the category.
    $sql = "SELECT sum(followers)
    FROM users
    INNER JOIN twitterdata_new ON users.Twitter_username = twitterdata_new.display_name
    Where category = '$category'
    ;";
    $result = $conn->query($sql);
    $row = $result->fetch_assoc();
    $num_followers = $row['sum(followers)'];
    echo $sql;
    echo "\r\n";

    // Followers gained today across the category.
    $sql = "SELECT sum(followers_today_count)
    FROM users
    INNER JOIN twitterdata_new ON users.Twitter_username = twitterdata_new.display_name
    Where category = '$category'
    ;";
    $result = $conn->query($sql);
    $row = $result->fetch_assoc();
    $num_followers_today = $row['sum(followers_today_count)'];
    echo $sql;
    echo "\r\n";

    // Write the rebuilt aggregate row.
    $sql = "INSERT INTO categorydata_new (name, num_teams, followers, followers_today_count) VALUES ('$category', '$num_accounts','$num_followers','$num_followers_today')";
    echo $sql;
    echo "\r\n";
    if ($conn->query($sql) === TRUE) {
    } else {
    }
}
$conn->close();
?>
<file_sep><?php
//File: TwitterFolldersUpdate.php
//Decription: This will update the twitter_dbupdates every 15 minutes with 900 accounts twitter info
//Author: <NAME>
//Notes:
//*******************************************************************************
//*******************************************************************************
//Globals: $Conn - Creates Connection to database
// $numUsers - Stores number of users in usersDB
// $
// Per-run shared state for the functions below.
$currentStartId;        // first twitter_accounts id covered by this run
$currentEndId;          // last twitter_accounts id covered by this run
$dateTime;              // timestamp of this run
$conn;                  // mysqli connection (created in db())
$differenceInFollowers; // follower delta since start-of-day for current account
run();
// Entry point: work out which slice of accounts this run covers, then pull
// their Twitter data and recompute the per-category rankings.
function run()
{
twitterDBRunInfo();
twitterApiRun();
}
////-------------------------------------------------------------------------------
//Name: twitterDBRunInfo
//Description: Collects all the info needed for running the twitter api call such as which Users
//info to collect and at what date the run was done on
//Methods Called: RunQuery();
//Variables: $conn - Connection to DB. $numUsers - Number of users in db
//$dateTime - Current date and time. $LastRunEndId - Where the last run ended.
////-------------------------------------------------------------------------------
// Collect run bookkeeping: connect to the DB, look up the total user count,
// the timestamp, and where the previous run stopped, then derive this run's
// start/end account ids (runs wrap around the user table in slices).
function twitterDBRunInfo()
{
global $currentStartId;
global $currentEndId;
global $dateTime;
global $numUsers;
global $categoryList;
// NOTE(review): the next two bare statements are no-ops, and $categoryList
// is never used in this function.
$LastRunEndId;
$numUsers;
$conn = db();
$numUsers = getNumUsers();
$dateTime = getDateTime();
$LastRunEndId = getLastRunEndId();
$currentStartId = getStartId($LastRunEndId, $numUsers);
$currentEndId = getEndId($LastRunEndId, $numUsers, $currentStartId);
}
// Execute the run: fetch the account slice, record this run in
// twitter_dbupdates, pull each account's Twitter data, then recompute ranks.
function twitterApiRun()
{
global $currentStartId;
global $currentEndId;
global $numUsers;
global $dateTime;
global $conn;
global $differenceInFollowers;
$usersArray = getUsers($currentStartId, $currentEndId, $numUsers);
addTwitterDbUpdate($dateTime, $currentEndId, $numUsers, $currentStartId);
$currentGroupRunId = getLastGroupRunID();
twitterAPI($currentGroupRunId, $usersArray, $dateTime);
insertTwitterRank($currentGroupRunId);
}
////-------------------------------------------------------------------------------
//Name: getLasGroupRunID
//Description: Starts calling the twitter API
//Methods Called: RunQuery();
////-------------------------------------------------------------------------------
/**
 * Id of the most recently inserted twitter_dbupdates row, i.e. the group-run
 * id of the run that was just recorded.
 */
function getLastGroupRunID()
{
    $latest = runQuery("SELECT MAX(id) FROM sportssocialrank.twitter_dbupdates", false);
    return $latest['MAX(id)'];
}
////-------------------------------------------------------------------------------
//Name: twitterApi
//Description: Starts calling the twitter API
//Methods Called: RunQuery();
////-------------------------------------------------------------------------------
// For each screen name: GET users/show from the Twitter 1.1 API, compute the
// follower delta since the first sample today, then write both the archive
// row and the current row.
// NOTE(review): OAuth credentials are hard-coded; move to configuration.
function twitterAPI($groupdId, $usersArray, $dateTime)
{
global $differenceInFollowers;
require_once('TwitterAPIExchange.php');
$settings = array(
'oauth_access_token' => "<KEY>",
'oauth_access_token_secret' => "<KEY>",
'consumer_key' => "6wR5l7KwDSDFmb6swY1seW5MP",
'consumer_secret' => "<KEY>"
);
$url = 'https://api.twitter.com/1.1/users/show.json';
$requestMethod = 'GET';
foreach ($usersArray as &$team) {
$getfield = "?screen_name=".$team;
$url = 'https://api.twitter.com/1.1/users/show.json';
$requestMethod = 'GET';
$twitter = new TwitterAPIExchange($settings);
$json = $twitter->setGetfield($getfield)
->buildOauth($url, $requestMethod)
->performRequest();
$twitterInfo = json_decode($json);
$teamDisplayName = $twitterInfo->screen_name;
$followersAtCurrentTime = $twitterInfo->followers_count;
// Followers gained since the first archived sample today (null on first run).
$differenceInFollowers = getDayFollowerCount($followersAtCurrentTime, $teamDisplayName);
insertTwitterArchive($groupdId, $twitterInfo, $differenceInFollowers);
insertTwitter($groupdId, $twitterInfo, $differenceInFollowers);
}
}
////-------------------------------------------------------------------------------
//Name: insertIntoArchiveDB
//Description: Queries database to find which users to include in run
//SQL Query: SELECT * FROM sportssocialrank.twitter_dbupdates;
//Methods Called: RunQuery();
////-------------------------------------------------------------------------------
// Append one history row to twitter_data_archive for this account/run.
// NOTE(review): values are interpolated into the SQL, not bound -- account
// names containing quotes will break the statement (injection risk).
function insertTwitterArchive($groupdId, $twitterInfo, $differenceInFollowers)
{
$sql = 'SELECT id FROM twitter_accounts where screen_name ="'.$twitterInfo->screen_name.'"';
$row = runQuery($sql, false);
$accounts_id = $row['id'];
$sql = "INSERT INTO twitter_data_archive (twitter_accounts_id, twitter_dbupdates_id, name, display_name, followers, following, profile_image_url, profile_banner_url,followers_today_count) "
. "VALUES ('$accounts_id','$groupdId', '$twitterInfo->name','$twitterInfo->screen_name','$twitterInfo->followers_count','$twitterInfo->friends_count','$twitterInfo->profile_image_url','$twitterInfo->profile_banner_url','$differenceInFollowers')";
runQuery($sql, true);
}
////-------------------------------------------------------------------------------
//Name: insertTwitter
//Description: Queries database to find which users to include in run
//SQL Query: SELECT * FROM sportssocialrank.twitter_dbupdates;
//Methods Called: RunQuery();
////-------------------------------------------------------------------------------
////-------------------------------------------------------------------------------
//Name: insertTwitter
//Description: Inserts (or updates) the current twitter_data row for one
//             account and refreshes the cached profile fields on
//             twitter_accounts. The profile refresh was previously duplicated
//             verbatim in both branches; it is now executed once after them.
//Methods Called: runQuery(); getDateTime();
////-------------------------------------------------------------------------------
function insertTwitter($groupdId, $twitterInfo, $differenceInFollowers)
{
    // Does this account already have a current twitter_data row?
    $sql = "SELECT id FROM twitter_data WHERE display_name = '".$twitterInfo->screen_name."';";
    $row = runQuery($sql, false);
    $id = $row['id'];
    $date = getDateTime();
    if ($id == null) {
        // First sighting: resolve the twitter_accounts id and insert a row.
        $sql = 'SELECT id FROM twitter_accounts where screen_name ="'.$twitterInfo->screen_name.'"';
        $row = runQuery($sql, false);
        $accounts_id = $row['id'];
        $sql = "INSERT INTO twitter_data(twitter_accounts_id, twitter_dbupdates_id, name, date, display_name, followers, following, profile_image_url, profile_banner_url,followers_today_count) "
            . "VALUES ('$accounts_id','$groupdId', '$twitterInfo->name','$date', '$twitterInfo->screen_name','$twitterInfo->followers_count','$twitterInfo->friends_count','$twitterInfo->profile_image_url','$twitterInfo->profile_banner_url','$differenceInFollowers')";
        runQuery($sql, true);
    } else {
        // Existing row: refresh every stat in place.
        $sql = "UPDATE twitter_data SET name ='".$twitterInfo->name."',date='".$date."',twitter_dbupdates_id='".$groupdId."', display_name = '".$twitterInfo->screen_name."',followers ='".$twitterInfo->followers_count.
            "', following ='".$twitterInfo->friends_count."' , profile_image_url ='".
            $twitterInfo->profile_image_url."' , profile_banner_url ='".$twitterInfo->profile_banner_url."', "
            . "followers_today_count ='".$differenceInFollowers."'
        WHERE display_name = '".$twitterInfo->screen_name."';";
        runQuery($sql, true);
    }
    // Refresh the cached profile fields on twitter_accounts (runs on both paths).
    // NOTE(review): values are interpolated, not bound -- injection risk.
    $sql = "UPDATE twitter_accounts
    SET name ='".$twitterInfo->name."', profile_image_url ='".
    $twitterInfo->profile_image_url."' , profile_banner_url ='".$twitterInfo->profile_banner_url."'
    WHERE screen_name = '".$twitterInfo->screen_name."';";
    echo $sql;
    runQuery($sql, true);
}
////-------------------------------------------------------------------------------
//Name: insertTwitterRank
//Description: Queries database to find which users to include in run
//SQL Query: SELECT * FROM sportssocialrank.twitter_dbupdates;
//Methods Called: RunQuery();
////-------------------------------------------------------------------------------
////-------------------------------------------------------------------------------
//Name: insertTwitterRank
//Description: Recomputes the per-category follower ranking for every account
//             and writes it to twitter_rank (current) and twitter_rank_archive
//             (history), with the rank 1 day / 1 week / 1 month / 1 year ago.
//Fixes:       - 1-year look-back query contained "and and" (SQL syntax error);
//             - existence check selected from twitter_rank_archive with an
//               undefined alias `tr.` instead of from twitter_rank;
//             - UPDATE used PHP variable names (rankOneMonthAgo/rankOneYearAgo)
//               as column names and misspelled twitter_account_id;
//             - the four duplicated look-back queries are now one helper.
//Methods Called: runQuery(); getHistoricalRank();
////-------------------------------------------------------------------------------
function insertTwitterRank($currentGroupRunId)
{
    global $conn;
    global $categoryIdList;
    $sql = "SELECT id FROM sportssocialrank.category_details";
    $result = $conn->query($sql);
    if ($result->num_rows > 0) {
        while ($row = $result->fetch_assoc()) {
            $categoryIdList[] = $row['id'];
        }
        foreach ($categoryIdList as $categoryId) {
            // Accounts in this category ordered by followers: row order == rank.
            $sql = "SELECT twitter_data.twitter_accounts_id FROM accounts
            INNER JOIN accounts_category ON accounts.id = accounts_category.accounts_id
            INNER JOIN category_details ON category_details.id = accounts_category.category_details_id
            INNER JOIN twitter_accounts ON twitter_accounts.accounts_id = accounts.id
            INNER JOIN twitter_data ON twitter_data.twitter_accounts_id = twitter_accounts.id
            where accounts_category.category_details_id ='".$categoryId."' ORDER BY followers DESC LIMIT 0, 5000";
            $result = $conn->query($sql);
            $i = 0;
            while ($row = $result->fetch_assoc()) {
                $i++;  // $i is this account's rank within the category
                $twitterAccounts_id = $row['twitter_accounts_id'];
                $rankOneDayAgo   = getHistoricalRank($twitterAccounts_id, $categoryId, "1 DAY");
                $rankOneWeekAgo  = getHistoricalRank($twitterAccounts_id, $categoryId, "7 DAY");
                $rankOneMonthAgo = getHistoricalRank($twitterAccounts_id, $categoryId, "30 DAY");
                $rankOneYearAgo  = getHistoricalRank($twitterAccounts_id, $categoryId, "1 YEAR");
                // Insert-or-update the current rank row for this account/category.
                $sql = "select id from twitter_rank where twitter_accounts_id = '".$twitterAccounts_id."' and category_details_id = '".$categoryId."'";
                $rankId = runQuery($sql, false);
                $idRanking = $rankId['id'];
                if ($idRanking == null) {
                    $sql = "INSERT INTO twitter_rank (rank, twitter_accounts_id, twitter_dbupdates_id, category_details_id,rank_day_change,rank_week_change,rank_month_change,rank_year_change)" . "VALUES "
                        . "('$i','$twitterAccounts_id', '$currentGroupRunId','$categoryId','$rankOneDayAgo','$rankOneWeekAgo','$rankOneMonthAgo','$rankOneYearAgo')";
                    runQuery($sql, true);
                } else {
                    $sql = "UPDATE twitter_rank SET rank ='".$i."',twitter_dbupdates_id='".$currentGroupRunId."', rank_day_change ='".$rankOneDayAgo.
                        "', rank_week_change ='".$rankOneWeekAgo."' , rank_month_change ='".
                        $rankOneMonthAgo."' , rank_year_change ='".$rankOneYearAgo."'
                    WHERE twitter_accounts_id = '".$twitterAccounts_id."' and category_details_id = '".$categoryId."';";
                    runQuery($sql, true);
                }
                // Always append the history row.
                $sql = "INSERT INTO twitter_rank_archive (rank, twitter_accounts_id, twitter_dbupdates_id, category_details_id,rank_day_change,rank_week_change,rank_month_change,rank_year_change)" . "VALUES "
                    . "('$i','$twitterAccounts_id', '$currentGroupRunId','$categoryId','$rankOneDayAgo','$rankOneWeekAgo','$rankOneMonthAgo','$rankOneYearAgo')";
                runQuery($sql, true);
            }
        }
    }
}

////-------------------------------------------------------------------------------
//Name: getHistoricalRank
//Description: Earliest archived rank for this account/category within the last
//             $interval (a MySQL INTERVAL expression, e.g. "7 DAY").
//Methods Called: runQuery();
////-------------------------------------------------------------------------------
function getHistoricalRank($twitterAccounts_id, $categoryId, $interval)
{
    $sql = "select * from twitter_rank_archive tr inner join
    twitter_dbupdates td on tr.twitter_dbupdates_id = td.id
    where tr.twitter_accounts_id = '".$twitterAccounts_id."' and tr.category_details_id = '".$categoryId."'
    and date BETWEEN DATE_SUB(NOW(), INTERVAL ".$interval.") AND NOW() order by date asc limit 1";
    $rankings = runQuery($sql, false);
    return $rankings['rank'];
}
////-------------------------------------------------------------------------------
//Name: getDayFollowerCount
//Description: Queries database to find which users to include in run
//SQL Query: SELECT * FROM sportssocialrank.twitter_dbupdates;
//Methods Called: RunQuery();
////-------------------------------------------------------------------------------
////-------------------------------------------------------------------------------
//Name: getDayFollowerCount
//Description: Returns followers gained since the first snapshot recorded today
//             (US Central time) for the given account, or null when no snapshot
//             exists yet for today.
//Methods Called: getDateShort(); runQuery();
////-------------------------------------------------------------------------------
function getDayFollowerCount($followersAtCurrentTime, $teamDisplayName)
{
    $today = getDateShort();
    // Earliest archive row written today for this display name.
    $sql = "SELECT followers FROM twitter_data_archive WHERE date BETWEEN '".$today." 00:00:00' AND '". $today ." 23:59:59' and display_name= '". $teamDisplayName. "' ORDER BY id ASC LIMIT 1;";
    $firstRow = runQuery($sql, false);
    $startOfDay = $firstRow['followers'];
    // No baseline yet today -> the delta cannot be computed.
    if ($startOfDay == null) {
        return null;
    }
    // Positive when followers were gained since midnight, negative when lost.
    return $followersAtCurrentTime - $startOfDay;
}
////-------------------------------------------------------------------------------
//Name: getUsers
//Description: Queries database to find which users to include in run
//SQL Query: SELECT * FROM sportssocialrank.twitter_dbupdates;
//Methods Called: RunQuery();
////-------------------------------------------------------------------------------
////-------------------------------------------------------------------------------
//Name: getUsers
//Description: Returns the screen names of the twitter accounts whose ids fall in
//             the current run's window. When the window wraps past the end of
//             the table (start > end), it covers start..max plus 1..end.
//Returns: array of screen_name strings (empty array when no rows match).
////-------------------------------------------------------------------------------
function getUsers($currentStartId, $currentEndId, $numUsers)
{
    global $conn;
    // BUG FIX: $usersArray was never initialized, so an empty result set made
    // the implode()/return below operate on an undefined variable (fatal on
    // PHP 8). Also removed the unused MIN(id)/MAX(id) lookups and the unused
    // $str variable.
    $usersArray = array();
    if ($currentStartId > $currentEndId) {
        // Window wraps around the end of the id space.
        $sql = "SELECT screen_name FROM twitter_accounts where (id BETWEEN '$currentStartId' AND '$numUsers') or (id BETWEEN 1 and '$currentEndId')";
    } else {
        $sql = "SELECT screen_name FROM twitter_accounts where id BETWEEN '$currentStartId' AND '$currentEndId'";
    }
    $result = $conn->query($sql);
    if ($result->num_rows > 0) {
        while ($row = $result->fetch_assoc()) {
            $usersArray[] = $row['screen_name'];
        }
    }
    return $usersArray;
}
////-------------------------------------------------------------------------------
//Name: db
//Description: Opens the shared mysqli connection used by runQuery() via the
//             global $conn, and also returns it. Dies on connection failure.
//NOTE(review): credentials are hard-coded here — move to configuration/env.
////-------------------------------------------------------------------------------
function db()
{
    global $conn;
    // (Removed an unused local $usersArray that was never read.)
    $servername = "172.16.17.32:3306";
    $username = "SportsSocialRank";
    $password = "<PASSWORD>!";
    $dbname = "sportssocialrank";
    // Create connection
    $conn = new mysqli($servername, $username, $password, $dbname);
    if ($conn->connect_error) {
        die("Connection failed: " . $conn->connect_error);
    }
    return $conn;
}
////-------------------------------------------------------------------------------
//Name: getNumUsers
//Description: Get number of users
//SQL Query: SELECT COUNT(*) FROM users;"
//Methods Called: RunQuery();
////-------------------------------------------------------------------------------
////-------------------------------------------------------------------------------
//Name: getNumUsers
//Description: Returns the highest id in twitter_accounts.
//NOTE(review): despite the name this is MAX(id), not COUNT(*) — the rest of the
//              file treats it as the user count, which assumes contiguous ids;
//              confirm before relying on it.
////-------------------------------------------------------------------------------
function getNumUsers()
{
    $row = runQuery("SELECT MAX(id) FROM sportssocialrank.twitter_accounts", false);
    return $row['MAX(id)'];
}
////-------------------------------------------------------------------------------
//Name: getDateTime
//Description: Gets Current date aand time in this format '2018-10-19 15:03:19'
////-------------------------------------------------------------------------------
////-------------------------------------------------------------------------------
//Name: getDateTime
//Description: Current timestamp formatted like '2018-10-19 15:03:19' (hour has
//             no leading zero), pinned to US Central time.
////-------------------------------------------------------------------------------
function getDateTime()
{
    // All timestamps in this system are recorded in US Central time.
    date_default_timezone_set('America/Chicago');
    return date('Y-m-d G:i:s', time());
}
////-------------------------------------------------------------------------------
//Name: getDateShort
//Description: Current date as 'YYYY-MM-DD', pinned to US Central time like
//             getDateTime().
////-------------------------------------------------------------------------------
function getDateShort()
{
    date_default_timezone_set('America/Chicago');
    return date('Y-m-d');
}
////-------------------------------------------------------------------------------
//Name: getLastRunEndId
//Description: Queries database to find where the last run ended
//SQL Query: SELECT * FROM sportssocialrank.twitter_dbupdates;
//Methods Called: RunQuery();
////-------------------------------------------------------------------------------
////-------------------------------------------------------------------------------
//Name: getLastRunEndId
//Description: Returns the end_id of the most recent row in twitter_dbupdates,
//             i.e. where the previous collection run stopped.
//Methods Called: runQuery();
////-------------------------------------------------------------------------------
function getLastRunEndId()
{
    $latest = runQuery("SELECT * FROM sportssocialrank.twitter_dbupdates ORDER BY id desc limit 1;", false);
    return $latest['end_id'];
}
////-------------------------------------------------------------------------------
//Name: getStartId
//Description: Finds which Id the new run should start on
//Methods Called: RunQuery();
////-------------------------------------------------------------------------------
////-------------------------------------------------------------------------------
//Name: getStartId
//Description: Finds which id the new run should start on. Runs process users
//             in windows of 900 ids; a run continues immediately after the
//             previous run's last id.
//Params: $LastRunEndId - end_id of the previous run; $numUsers - highest id.
//Returns: int starting id for the new run.
////-------------------------------------------------------------------------------
function getStartId($LastRunEndId, $numUsers)
{
    // Fewer than 900 users: a single run covers everyone, start at id 1.
    if ($numUsers < 900) {
        return 1;
    }
    // Otherwise continue right after the previous run. (The original elseif/
    // else branches for "reaches end" vs "does not reach end" were identical,
    // so they are collapsed here.)
    return $LastRunEndId + 1;
}
////-------------------------------------------------------------------------------
//Name: getEndId
//Description: Finds which Id the new run should end on
//Methods Called: RunQuery();
////-------------------------------------------------------------------------------
////-------------------------------------------------------------------------------
//Name: getEndId
//Description: Finds which id the new run should end on, given a window budget
//             of 900 users per run.
//Params: $LastRunEndId - end_id of the previous run; $numUsers - highest id;
//        $currentStartId - start id chosen for this run (see getStartId()).
//Returns: int ending id for the new run.
////-------------------------------------------------------------------------------
function getEndId($LastRunEndId, $numUsers, $currentStartId)
{
    if ($numUsers < 900) {
        // Small table: one run covers everyone, so end on the last user id.
        $lastId = $numUsers;
    } elseif ($LastRunEndId + 900 < $numUsers) {
        // Plenty of ids remain: take a full 900-wide window from the start id.
        $lastId = $currentStartId + 900;
    } else {
        // Window wraps past the end of the table: the rest of the 900 budget
        // continues from id 1, so the end id is 900 minus the ids still
        // available before the end of the table.
        $lastId = 900 - ($numUsers - $currentStartId);
    }
    return $lastId;
}
////-------------------------------------------------------------------------------
//Name: addTwitterDbUpdate
//Description: Records the window (start/end id, total users, timestamp) covered
//             by this collection run in twitter_dbupdates.
//Methods Called: runQuery();
////-------------------------------------------------------------------------------
function addTwitterDbUpdate($dateTime, $currentEndId, $numUsers, $currentStartId)
{
    $sql = "INSERT INTO twitter_dbupdates (date, start_id, end_id, total_users)"
        . "VALUES "
        . "('$dateTime','$currentStartId', '$currentEndId','$numUsers')";
    runQuery($sql, true);
}
////-------------------------------------------------------------------------------
//Name: runQuery
//Description: Executes $sql on the shared global $conn. For reads ($Insert is
//             not true) returns the first row via mysqli_fetch_array(), or null
//             when the query fails or returns no rows. For inserts/updates the
//             return value is unused (null).
////-------------------------------------------------------------------------------
function runQuery($sql, $Insert)
{
    // print $sql . PHP_EOL;
    global $conn; // shared connection opened by db()
    $result = $conn->query($sql);
    if ($Insert != true) {
        // BUG FIX: query() returns false on a SQL error; calling
        // mysqli_fetch_array(false) is a fatal error. Return null instead so
        // callers see "no row" just as they do for an empty result.
        if ($result === false) {
            return null;
        }
        return mysqli_fetch_array($result);
    }
}
<file_sep><?php
class Users_model extends CI_Model
{
    // URL-slug -> category_details.name translations shared by both queries.
    // (Previously each method carried its own partial copy of this mapping.)
    private $categoryAliases = array(
        "cfb" => "College Football",
        "nflplayers" => "NFL Player",
        "premierleague" => "Premier League",
        "FCSFootball" => "FCS Football",
        "collegebasketball" => "College Basketball",
    );

    public function __construct()
    {
        $this->load->database();
    }

    // Map a URL slug to the stored category name; unknown values pass through.
    private function resolveCategory($category)
    {
        if (isset($this->categoryAliases[$category])) {
            return $this->categoryAliases[$category];
        }
        return $category;
    }

    // All accounts in a category with their latest twitter data, ordered by
    // follower count (descending).
    public function getLatest($category)
    {
        $category = $this->resolveCategory($category);
        // SECURITY FIX: bound parameter (?) instead of interpolating the
        // user-supplied category name into the SQL string.
        $query = $this->db->query("
        SELECT acc.name, td.followers,td.following,td.followers_today_count, tc.screen_name, tc.profile_image_url
        FROM accounts_category c
        INNER JOIN accounts acc
        on acc.id = c.accounts_id
        INNER JOIN twitter_accounts tc
        on acc.id = tc.accounts_id
        INNER JOIN twitter_data td
        on tc.id = td.twitter_accounts_id
        INNER JOIN category_details cd
        on c.category_details_id = cd.id
        WHERE cd.name = ?
        order by td.followers desc;", array($category));
        return $query->result();
    }

    // Top five accounts in a category by follower count.
    // NOTE: now resolves the full alias set like getLatest(); previously the
    // "FCSFootball"/"collegebasketball" slugs were not translated here and
    // matched nothing.
    public function getTopFive($category)
    {
        $category = $this->resolveCategory($category);
        $query = $this->db->query("
        SELECT acc.name, td.followers,td.following,td.followers_today_count, tc.screen_name, tc.profile_image_url
        FROM accounts_category c
        INNER JOIN accounts acc
        on acc.id = c.accounts_id
        INNER JOIN twitter_accounts tc
        on acc.id = tc.accounts_id
        INNER JOIN twitter_data td
        on tc.id = td.twitter_accounts_id
        INNER JOIN category_details cd
        on c.category_details_id = cd.id
        WHERE cd.name = ?
        ORDER
        BY followers desc limit 5;", array($category));
        return $query->result();
    }
}
<file_sep>
<body id="page-top">
<!-- Navigation -->
<nav class="navbar navbar-expand-lg navbar-light fixed-top" id="mainNav">
<div class="container">
<a class="navbar-brand js-scroll-trigger" href="#page-top">Sports Social Rank</a>
<button class="navbar-toggler navbar-toggler-right" type="button" data-toggle="collapse" data-target="#navbarResponsive" aria-controls="navbarResponsive" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarResponsive">
<ul class="navbar-nav ml-auto">
<li class="nav-item">
<a class="nav-link js-scroll-trigger" href="#about">About</a>
</li>
<li class="nav-item">
<a class="nav-link js-scroll-trigger" href="#services">Why Sports Social Rank</a>
</li>
<li class="nav-item">
<a class="nav-link js-scroll-trigger" href="#demo">Demo</a>
</li>
</ul>
</div>
</div>
</nav>
<header class="masthead text-center text-white d-flex">
<div class="container my-auto">
<div class="row">
<div class="col-lg-10 mx-auto">
<h1 class="text-uppercase">
<strong style="background-color: black;">Gain <span style="color:#f05f40;"> Insights</span> into sports social media</strong>
</h1>
<hr>
</div>
<div class="col-lg-8 mx-auto">
<!-- <p class="text-faded mb-5">See not only where your team stands but see every team across every major sports league</p> -->
<a class="btn btn-primary btn-xl js-scroll-trigger" href="#about">Find Out how</a>
</div>
</div>
</div>
</header>
<div class="modal" id="exampleModal" tabindex="100000" role="dialog" aria-labelledby="exampleModalLabel">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Modal title</h5>
<button type="button" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
...
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-dismiss="modal">Close</button>
<button type="button" class="btn btn-primary">Save changes</button>
</div>
</div>
</div>
</div>
<section class="bg-primary" id="about">
<div class="container">
<div class="row">
<div class="col-lg-8 mx-auto text-center">
<h2 class="section-heading text-white">We collect twitter data on players and teams accounts from some of the top leagues</h2>
<hr class="light my-4">
<p class="text-faded mb-4">Choose a league to explore some of our data</p>
<ul class="nav nav-tabs" id="myTab" role="tablist">
<li class="nav-item">
<a style="color:white;"class="nav-link active" id="cfb-tab" data-toggle="tab" href="#cfb" role="tab" aria-controls="cfb-tab" aria-selected="true">College Football</a>
</li>
<li class="nav-item">
<a style="color:white;" class="nav-link" id="nfl" data-toggle="tab" href="#nfl-tab" role="tab" aria-controls="nfl-tab" aria-selected="false">NFL</a>
</li>
<li class="nav-item">
<a style="color:white;" class="nav-link" id="contact-tab" data-toggle="tab" href="#nba-tab" role="tab" aria-controls="contact" aria-selected="false">NBA</a>
</li>
<li class="nav-item">
<a style="color:white;" class="nav-link" id="contact-tab" data-toggle="tab" href="#mlb-tab" role="tab" aria-controls="contact" aria-selected="false">MLB</a>
</li>
<li class="nav-item">
<a style="color:white;" class="nav-link" id="contact-tab" data-toggle="tab" href="#mls-tab" role="tab" aria-controls="contact" aria-selected="false">MLS</a>
</li>
<li class="nav-item">
<a style="color:white;" class="nav-link" id="contact-tab" data-toggle="tab" href="#nflplayers-tab" role="tab" aria-controls="contact" aria-selected="false">NFL Players</a>
</li>
<li class="nav-item">
<a style="color:white;" class="nav-link" id="contact-tab" data-toggle="tab" href="#premierleague-tab" role="tab" aria-controls="contact" aria-selected="false">Premier League</a>
</li>
</ul>
<div class="tab-content" id="myTabContent">
<div class="tab-pane fade show active" id="cfb" role="tabpanel" aria-labelledby="cfb-tab">
<table id="cfb-table" class="cfbtable table table-bordered table-striped table-hover" style="background-color:white; width:100%">
<thead>
<tr>
<th>Rank</th>
<th>Name</th>
<th>Followers</th>
<th>Followers Today</th>
</tr>
</thead>
<tbody>
</tbody>
</table>
</div>
<div class="tab-pane fade" id="nfl-tab" role="tabpanel" aria-labelledby="nfl-tab">
<table id="nfl-table" class="table table-bordered table-striped table-hover" style="background-color:white; width:100%">
<thead>
<tr>
<th>Rank</th>
<th>Name</th>
<th>Followers</th>
<th>Followers Today</th>
</tr>
</thead>
<tbody>
</tbody>
</table>
</div>
<div class="tab-pane fade" id="nba-tab" role="tabpanel" aria-labelledby="contact-tab"><table id="nba-table" class="table table-bordered table-striped table-hover" style="background-color:white; width:100%">
<thead>
<tr>
<th>Rank</th>
<th>Name</th>
<th>Followers</th>
<th>Followers Today</th>
</tr>
</thead>
<tbody>
</tbody>
</table></div>
<div class="tab-pane fade" id="mlb-tab" role="tabpanel" aria-labelledby="contact-tab"><table id="mlb-table" class="table table-bordered table-striped table-hover" style="background-color:white; width:100%">
<thead>
<tr>
<th>Rank</th>
<th>Name</th>
<th>Followers</th>
<th>Followers Today</th>
</tr>
</thead>
<tbody>
</tbody>
</table></div>
<div class="tab-pane fade" id="mls-tab" role="tabpanel" aria-labelledby="contact-tab"><table id="mls-table" class="table table-bordered table-striped table-hover" style="background-color:white; width:100%">
<thead>
<tr>
<th>Rank</th>
<th>Name</th>
<th>Followers</th>
<th>Followers Today</th>
</tr>
</thead>
<tbody>
</tbody>
</table></div>
<div class="tab-pane fade" id="nflplayers-tab" role="tabpanel" aria-labelledby="contact-tab"><table id="nflplayers-table" class="table table-bordered table-striped table-hover" style="background-color:white; width:100%">
<thead>
<tr>
<th>Rank</th>
<th>Name</th>
<th>Followers</th>
<th>Followers Today</th>
</tr>
</thead>
<tbody>
</tbody>
</table></div>
<div class="tab-pane fade" id="premierleague-tab" role="tabpanel" aria-labelledby="contact-tab"><table id="premierleague-table" class="table table-bordered table-striped table-hover" style="background-color:white; width:100%">
<thead>
<tr>
<th>Rank</th>
<th>Name</th>
<th>Followers</th>
<th>Followers Today</th>
</tr>
</thead>
<tbody>
</tbody>
</table></div>
</div>
</br>
</br>
<a class="btn btn-light btn-xl js-scroll-trigger" href="#demo">Sign up For Demo</a>
</div>
</div>
</div>
</section>
<section id="services">
<div class="container">
<div class="row">
<div class="col-lg-12 text-center">
<h2 class="section-heading">Why Sports Social Rank?</h2>
<hr class="my-4">
</div>
</div>
</div>
<div class="container">
<div class="row">
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fas fa-4x fa-gem text-primary mb-3 sr-icon-1"></i>
<h3 class="mb-3">Know your value</h3>
<p class="text-muted mb-0">We allow you to see where your social media accounts stands against others in your league and all sports.</p>
</div>
</div>
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fas fa-4x fa-chart-line text-primary mb-3 sr-icon-2"></i>
<h3 class="mb-3">Know when someone goes viral</h3>
<p class="text-muted mb-0">Track viral trends in sports.</p>
</div>
</div>
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fas fa-4x fa-chart-pie text-primary mb-3 sr-icon-3"></i>
<h3 class="mb-3">Competitor Insight</h3>
<p class="text-muted mb-0">Gain insight into your competitions content and growth patterns.</p>
</div>
</div>
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fas fa-4x fa-cogs text-primary mb-3 sr-icon-4"></i>
<h3 class="mb-3">Custom Reports</h3>
<p class="text-muted mb-0">Create custom reports around data being collected.</p>
</div>
</div>
</div>
</div>
</section>
<section id="demo" class="bg-dark text-white">
<div class="container text-center">
<h2 class="mb-4">Sign up for a Free Demo and Walkthrough</h2>
<form id="demo-form">
</br>
</br>
<div class="row">
<div class="col">
<input name="first-name" class="form-control" placeholder="<NAME>" value="">
</div>
<div class="col">
<input type="text" name="last-name" class="form-control" placeholder="<NAME>">
</div>
</div>
</br> </br>
<div class="row">
<div class="col">
<input type="text" name="email" class="form-control" placeholder="Email">
</div>
<div class="col">
<input type="tel" name="phone" class="form-control" placeholder="Phone">
</div>
</div>
</br> </br>
<div class="row">
<div class="col">
<input type="text" name="organization"class="form-control" placeholder="organization">
</div>
<div class="col">
<input type="text" name="job-title"class="form-control" placeholder="Job Title">
</div>
</div>
</br> </br>
<button type="submit" class="btn btn-success">Submit <span class="fa fa-arrow-right"></span></button>
<p id="success" style="display:none; color:green; padding-top:15px;">Successfully Submitted!</p>
</form>
</div>
</section>
<script
src="https://code.jquery.com/jquery-3.3.1.min.js"
integrity="<KEY>
crossorigin="anonymous"></script>
<script>
$(document).ready(function(){
$('a[data-toggle="tab"]').on('shown.bs.tab', function(e){
$($.fn.dataTable.tables(true)).DataTable()
.columns.adjust()
.fixedColumns().relayout();
});
});
$(function(){
$("#demo-form").submit(function(){
dataString = $("#demo-form").serialize();
$.ajax({
type: "POST",
url: "<?php echo base_url(); ?>/FormSubmission/demoform",
data: dataString,
success: function(data){
if(data != "1"){
alert('Error Submiting');
}
else {
alert('Successfully Submitted!');
location.reload();
}
}
});
return false; //stop the actual form post !important!
});
});
var uri = window.location.toString();
if (uri.indexOf("?") > 0) {
var clean_uri = uri.substring(0, uri.indexOf("?"));
window.history.replaceState({}, document.title, clean_uri);
}
</script>
<!-- Bootstrap core JavaScript -->
<script src="<?php echo base_url(); ?>assets/vendor/jquery/jquery.min.js"></script>
<script src="<?php echo base_url(); ?>assets/vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Plugin JavaScript -->
<script src="<?php echo base_url(); ?>assets/vendor/jquery-easing/jquery.easing.min.js"></script>
<script src="<?php echo base_url(); ?>assets/vendor/scrollreveal/scrollreveal.min.js"></script>
<script src="<?php echo base_url(); ?>assets/vendor/magnific-popup/jquery.magnific-popup.min.js"></script>
<!-- Custom scripts for this template -->
<script src="<?php echo base_url(); ?>assets/js/creative.min.js"></script>
</body>
</html>
<file_sep><?php
// Controller exposing per-user rank lookups via UserRank_model.
class UserRank extends CI_Controller
{
    public function __construct()
    {
        parent::__construct();
        $this->load->database();
        $this->load->helper('url_helper');
        $this->load->model('UserRank_model');
        $this->load->model('User_model');
        $this->load->model('Category_model');
    }
    // Fetch all current ranks for the given user's account.
    // NOTE(review): $users is computed but never returned or echoed, so this
    // action produces no output — presumably unfinished; confirm intent.
    public function getCurrentRank($user)
    {
        $accountId = $this->User_model->getUserId($user);
        $users = $this->UserRank_model->getAllRanks($accountId);
    }
    // NOTE(review): body is identical to getCurrentRank(); there is no
    // "yesterday" filtering here — presumably unfinished; confirm intent.
    public function getYesterdayRank($user)
    {
        $accountId = $this->User_model->getUserId($user);
        $users = $this->UserRank_model->getAllRanks($accountId);
    }
}
<file_sep><?php
// Minimal landing controller.
class Welcome extends CI_Controller
{
    public function __construct()
    {
        parent::__construct();
        $this->load->database();
        $this->load->helper('url_helper');
        $this->load->model('User_model');
    }

    // Greets the visitor with a plain-text message.
    public function welcome()
    {
        echo 'welcome user';
    }
}
<file_sep>
<!-- End Navbar -->
<div class="content">
<div class="container-fluid">
<div class="alert alert-block">
<a class="close" data-dismiss="alert" href="#">×</a>
<h4 class="alert-heading">What is Sports Social Rank?</h4>
Sports Social Rank is a ranking of sports teams and players based on twitter followers being updated
every 15 minutes.
</br></br><em>Followers Today</em> is followers gained since 12:00 am central time of the current day
</div>
<div class="dropdown">
<button class="btn btn-secondary dropdown-toggle" type="button" id="dropdownMenuButton" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
Select Category
</button>
<div class="dropdown-menu" aria-labelledby="dropdownMenuButton">
<?php foreach ($categories as $category) {
?>
<a class="dropdown-item" href="#<?php echo $category->name; ?>"><?php echo $category->name; ?></a>
<?php
} ?>
</div>
</div>
<?php foreach ($categories as $category) {
?>
<div class="col-lg-12 col-md-12">
<div class="card">
<div id="<?php echo $category->name; ?>" class="card-header card-header-warning">
<h4 class="card-title"><?php echo $category->name; ?></h4>
</div>
<div class="card-body table-responsive">
<?php if ($category->name == "College Football") {
$rank[1] = "cfb";
}
if ($category->name == "College Football") {
$category->name = "cfb";
}
if ($category->name == "NFL Player") {
$category->name = "nflplayer";
}
if ($category->name == "Premier League") {
$category->name = "premierleague";
}
if ($category->name == "College Basketball") {
$category->name = "collegebasketball";
}
if ($category->name == "FCS Football") {
$category->name = "FCSFootball";
} ?>
<table id="<?php echo $category->name; ?>-accounts" class="table table-striped " style="width:100%">
<thead>
<tr>
<th>Rank</th>
<th>Name</th>
<th>Followers</th>
<th>Following</th>
<th>Followers Today</th>
</tr>
</thead>
<tbody>
</tbody>
</table>
</div>
</div>
</div> <?php
} ?>
</div>
<file_sep><?php
// AJAX endpoints serving category standings from Users_model as JSON.
class Users extends CI_Controller
{
    public function __construct()
    {
        parent::__construct();
        $this->load->database();
        $this->load->helper('url_helper');
        $this->load->model('Users_model');
    }

    // Full standings for a category, newest data, as a JSON array.
    public function get_latest($category)
    {
        echo json_encode($this->Users_model->getLatest($category));
        exit();
    }

    // Top five accounts of a category by followers, as a JSON array.
    public function getTopFive($category)
    {
        echo json_encode($this->Users_model->getTopFive($category));
        exit();
    }
}
<file_sep><?php
// Controller for importing and listing tracked accounts.
class Accounts extends CI_Controller
{
    public function __construct()
    {
        parent::__construct();
        $this->load->database();
        $this->load->helper('url_helper');
        $this->load->model('Users_model');
        $this->load->model('Accounts_model');
        $this->load->library('ion_auth');
        $this->ion_auth->user();
    }

    // Import every member of a twitter list (identified by slug + owner
    // screen name from POST) into the given category.
    public function insertTwitterList()
    {
        $slug = $this->input->post('slug');
        $owner_screen_name = $this->input->post('owner_screen_name');
        $category = $this->input->post('category');
        $this->load->model('Accounts_model');
        $this->Accounts_model->getTwitterList($slug, $owner_screen_name, $category);
        // redirect('/dashboard/accounts', 'refresh');
    }

    // Every account known to the system.
    public function getAllAccounts()
    {
        $this->load->model('Accounts_model');
        return $this->Accounts_model->getAllAccounts();
    }

    // Accounts added most recently.
    // NOTE(review): print_r() looks like leftover debug output rather than an
    // intended response format — confirm before removing.
    public function getAccountsRecentlyAdded()
    {
        $this->load->model('Accounts_model');
        $recent = $this->Accounts_model->getRecentlyAdded();
        print_r($recent);
        return $recent;
    }
}
<file_sep><?php
// Controller for the per-account page plus its AJAX data endpoints
// (DataTables history and Google Charts follower series).
class User extends CI_Controller
{
    public function __construct()
    {
        parent::__construct();
        $this->load->database();
        $this->load->model('User_model');
        $this->load->helper('url_helper');
        $this->load->model('Users_model');
        $this->load->model('Accounts_model');
        $this->load->model('UserRank_model');
        $this->load->model('TwitterAccounts_model');
        $this->load->model('Category_model');
        $this->load->helper('html');
        $this->load->library('ion_auth');
        $this->ion_auth->user();
    }

    // Route /user/get_team and /user/get_team_chart to their handlers; any
    // other URI segment is treated as a twitter screen name for index().
    public function _remap($param)
    {
        if ($param == "get_team") {
            $this->get_team();
        } elseif ($param == "get_team_chart") {
            $this->get_team_chart();
        } else {
            $this->index($param);
        }
    }

    // Render the account page for the given twitter screen name.
    public function index($user)
    {
        // Auth check is currently disabled (commented out below).
        // if ($this->ion_auth->logged_in()) {
        $accountName = $this->User_model->getAccountNameFromTwitter($user);
        // NOTE(review): chart data is fetched for hard-coded twitter account
        // id 3 — looks like a leftover from testing; confirm intended.
        $data['chartData'] = $this->TwitterAccounts_model->getFollowers(3);
        $data['rankings'] = $this->UserRank_model->getAllRanks($accountName);
        $this->load->view('admin/header', $data);
        $data['userInfo'] = $this->TwitterAccounts_model->getAccountInfoTwitter($user);
        $this->load->view('members/user', $data);
        $this->load->view('admin/footer');
        // } else {
        //     redirect('auth/login', 'refresh');
        // }
    }

    // All users as JSON.
    public function get_users()
    {
        $users = $this->User_model->getAllUsers();
        echo json_encode($users);
        exit();
    }

    //Ajax for Returning all data on a team (DataTables server-side format)
    public function get_team()
    {
        $draw = intval($this->input->get("draw"));
        $start = intval($this->input->get("start"));
        $length = intval($this->input->get("length"));
        // BUG FIX: default to '' so a missing POST field no longer leaves
        // $team undefined in the query below.
        $team = isset($_POST['team']) ? $_POST['team'] : '';
        // SECURITY FIX: bound parameter instead of interpolating the
        // user-supplied team name into the SQL string.
        $query = $this->db->query("
        SELECT * FROM twitter_archive where display_name = ?
        ORDER
        BY date desc ;", array($team));
        $data = [];
        foreach ($query->result() as $r) {
            // (Removed unused $baseurl and the unused $growthRate computation,
            // which divided by $followers and could fault on a zero count.)
            $data[] = array(
                number_format((int) $r->followers),
                number_format((int) $r->following),
                number_format((int) $r->followers_today_count),
                $r->date,
                $r->group_run_id,
            );
        }
        $result = array(
            "draw" => $draw,
            "recordsTotal" => $query->num_rows(),
            "recordsFiltered" => $query->num_rows(),
            "data" => $data
        );
        echo json_encode($result);
        exit();
    }

    //Ajax for Returning a team's follower history as a Google Charts DataTable
    public function get_team_chart()
    {
        $team = isset($_POST['team']) ? $_POST['team'] : '';
        $query = $this->db->query("
        SELECT * FROM twitter_archive where display_name = ?
        ORDER
        BY date asc;", array($team));
        // BUG FIX: $responce was used without ever being created — appending
        // properties to an undefined variable is a fatal error on PHP >= 8.
        $responce = new stdClass();
        $responce->cols = array();
        $responce->rows = array();
        $responce->cols[] = array(
            "id" => "",
            "label" => "Topping",
            "pattern" => "",
            "type" => "string"
        );
        $responce->cols[] = array(
            "id" => "",
            "label" => "followers",
            "pattern" => "",
            "type" => "number"
        );
        foreach ($query->result() as $r) {
            $responce->rows[]["c"] = array(
                array(
                    "v" => "$r->date",
                    "f" => null
                ),
                array(
                    "v" => (int) $r->followers,
                    "f" => null
                )
            );
        }
        echo json_encode($responce);
        exit();
    }
}
<file_sep><?php
class TwitterAccounts_model extends CI_Model
{
    public function __construct()
    {
        $this->load->database();
    }

    // Twitter account row for an account looked up by display name.
    // NOTE(review): relies on User_model having been loaded by the calling
    // controller; this model does not load it itself — confirm.
    public function getAccountInfo($accountName)
    {
        $accountId = $this->User_model->getUserId($accountName);
        $this->db->where('accounts_id', $accountId);
        $query = $this->db->get('twitter_accounts');
        return $query->row();
    }

    // Twitter account row looked up directly by screen name.
    public function getAccountInfoTwitter($accountName)
    {
        $this->db->where('screen_name', $accountName);
        $query = $this->db->get('twitter_accounts');
        return $query->row();
    }

    // Full follower history (newest first) for a twitter account, encoded as
    // a JSON array of rows.
    public function getFollowers($twitterAccountId)
    {
        // SECURITY FIX: bound parameter instead of interpolation. Also removed
        // a dead foreach that assigned $followers/$date without using them.
        $query = $this->db->query(
            "select * from twitter_data_archive where twitter_accounts_id = ? order by date desc",
            array($twitterAccountId)
        );
        return json_encode($query->result());
    }
}
<file_sep>memory_limit = 128M
max_input_vars = 2000
upload_max_filesize = 64M
post_max_size = 64M
max_execution_time = 120
magic_quotes_gpc = Off
magic_quotes_runtime = Off
magic_quotes_sybase = Off<file_sep><?php
class UserRank_model extends CI_Model
{
    public function __construct()
    {
        $this->load->database();
        $this->load->model('Category_model');
        $this->load->model('User_model');
    }

    // Latest rank for a twitter account within one category.
    public function getRank($twitterId, $categoryId)
    {
        // SECURITY FIX: bound parameters; also dropped a redundant row()
        // fetch whose result was never used.
        $sql = "SELECT * FROM twitter_rank where twitter_accounts_id = ? and category_details_id = ? order by id desc limit 1";
        $query = $this->db->query($sql, array($twitterId, $categoryId));
        return $query->row()->rank;
    }

    // Most recent rank recorded within the last $days days for the account
    // in the given category.
    public function getRankRange($twitterId, $categoryId, $days)
    {
        $sql = "SELECT rank FROM twitter_rank tr inner join twitter_dbupdates td on td.id = tr.twitter_dbupdates_id where date >= (CURDATE() - INTERVAL ? DAY)
        AND (tr.twitter_accounts_id = ? and tr.category_details_id = ?) order by td.id desc limit 1";
        $query = $this->db->query($sql, array($days, $twitterId, $categoryId));
        return $query->row()->rank;
    }

    // For an account name, return an array of [rank, categoryName,
    // categoryCount] triples — one per category the account belongs to.
    public function getAllRanks($accountName)
    {
        $accountId = $this->User_model->getUserId($accountName);
        // BUG FIX: the original iterated $categoryId and then reassigned
        // $categoryId inside the loop (shadowing its own iteration source);
        // renamed for clarity. Also call $this->getRank() directly instead of
        // going through $this->UserRank_model, which only resolves when the
        // model happens to be loaded elsewhere.
        $categoryRows = $this->Category_model->getAccountsCategories($accountId);
        $twitterId = $this->User_model->getTwitterID($accountId);
        $ranks = array();
        $i = 0;
        foreach ($categoryRows as $row) {
            $categoryId = $row->category_details_id;
            $ranks[$i][0] = $this->getRank($twitterId, $categoryId);
            $ranks[$i][1] = $this->Category_model->getCategoryName($categoryId);
            $ranks[$i][2] = $this->Category_model->categoryCount($categoryId);
            $i++;
        }
        return $ranks;
    }
}
<file_sep><!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/dt/dt-1.10.13/datatables.min.css"/>
<script type="text/javascript" src="https://cdn.datatables.net/v/dt/dt-1.10.13/datatables.min.js"></script>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript" src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script>
// Load the Visualization API and the piechart package.
google.charts.load('current', {'packages':['corechart']});
// Set a callback to run when the Google Visualization API is loaded.
google.charts.setOnLoadCallback(drawChart);
function drawChart() {
var jsonData = $.ajax({
type: "POST",
url: "get_team_chart",
data: {
team:'<?php echo $team ?>'
},
dataType:"json", async: false
}).responseText;
// Create our data table out of JSON data loaded from server.
var data = new google.visualization.DataTable(jsonData);
// Instantiate and draw our chart, passing in some options.
var chart = new google.visualization.LineChart(document.getElementById('chart_div'));
chart.draw(data, {width: 900, height: 500});
}
</script>
<style>
h1 {
text-align: center;
}
</style>
</head>
<style>
.body{
padding-top: 0px;
}
</style>
<body style="padding-top:0px;">
<div class="container">
<h1>All Time</h1>
<div id="chart_div"></div>
<h1>@<?php echo $team ?></h1>
<table id="teamTable" class="table table-bordered table-striped table-hover" style="width:100%">
<thead>
<tr>
<th>Followers</th>
<th>Following</th>
<th>Followers Today</th>
<th>Date</th>
<th>Group Run ID</th>
</tr>
</thead>
<tbody>
</tbody>
</table>
</div>
</body>
<script type="text/javascript">
$(document).ready(function() {
$('#teamTable').DataTable({
// This shows just the table,
responsive: true,
"deferRender": true,
paging: false,
"bAutoWidth": false,
"ajax": {
'type': 'POST',
url : "get_team",
'data': {
team: '<?php echo $team ?>',
}
},
"order": [[ 3, "desc" ]],
}) ;
});
</script>
</html>
<file_sep><?php
// Thin controller delegating category CRUD to Category_model.
class Category extends CI_Controller
{
    public function __construct()
    {
        parent::__construct();
        $this->load->database();
        $this->load->helper('url_helper');
        $this->load->model('Category_model');
        $this->load->model('DBUpdate_model');
    }

    // Category ids linked to an account.
    public function getAccountsCategories($accountId)
    {
        // FIX: return the model result (previously it was discarded).
        return $this->Category_model->getAccountsCategories($accountId);
    }

    // Add the category only if it does not already exist.
    public function insertCategory($category)
    {
        $categoryExist = $this->Category_model->categoryExist($category);
        if ($categoryExist == 0) {
            $this->Category_model->addNewCategory($category);
        }
    }

    public function removeCategory($category)
    {
        return $this->Category_model->removeCategory($category);
    }

    public function categoryExist($category)
    {
        // BUG FIX: this previously delegated to removeCategory(), so asking
        // whether a category existed actually DELETED it.
        return $this->Category_model->categoryExist($category);
    }

    // Link an account to a category.
    public function addNewAccountCategory($accountId, $categoryId)
    {
        return $this->Category_model->addNewAccountCategory($accountId, $categoryId);
    }

    public function getCategoryId($category)
    {
        return $this->Category_model->getCategoryId($category);
    }
}
<file_sep><?php
// Dashboard controller: renders the admin dashboard, per-user pages, and the
// accounts listing. Auth/role gating is present but largely commented out.
class Dashboard extends CI_Controller
{
    public function __construct()
    {
        parent::__construct();
        $this->load->database();
        $this->load->helper('url_helper');
        $this->load->model('Users_model');
        $this->load->model('Accounts_model');
        $this->load->model('UserRank_model');
        $this->load->model('TwitterAccounts_model');
        $this->load->model('Category_model');
        $this->load->helper('html');
        $this->load->library('ion_auth');
        $this->ion_auth->user();
    }
    // Render the admin dashboard with every category's table.
    // NOTE(review): both the login check and the member (non-admin) branch are
    // commented out, so the admin dashboard is currently served to everyone —
    // confirm this is intentional before shipping.
    public function home()
    {
        // if ($this->ion_auth->logged_in()) {
        // if ($this->ion_auth->is_admin()) {
        $this->load->view('admin/header');
        $data['categories']=$this->Category_model->getAllCategoriesId();
        $this->load->view('admin/dashboard', $data);
        $this->load->view('admin/footer', $data);
        // } else {
        // $data['user']=$this->ion_auth->user()->row();
        // $user = $this->ion_auth->user()->row();
        // $accountName = $user->company;
        // $data['rankings']=$this->UserRank_model->getAllRanks($accountName);
        // $this->load->view('members/header');
        // $data['userInfo']=$this->TwitterAccounts_model->getAccountInfo($accountName);
        // $this->load->view('members/dashboard', $data);
        // $this->load->view('members/footer');
        // }
        // } else {
        // redirect('auth/login', 'refresh');
        // }
    }
    // Render a member page for one account name; requires login.
    public function user($user)
    {
        if ($this->ion_auth->logged_in()) {
            $accountName = $user;
            $data['rankings']=$this->UserRank_model->getAllRanks($accountName);
            $this->load->view('members/header');
            $data['userInfo']=$this->TwitterAccounts_model->getAccountInfoTwitter($accountName);
            $this->load->view('members/user', $data);
            $this->load->view('members/footer');
        } else {
            redirect('auth/login', 'refresh');
        }
    }
    // Render the accounts listing; requires login.
    // NOTE(review): the admin and non-admin branches differ only in the view
    // variable name ('accountsArray' vs 'accountsList') and the non-admin
    // branch never loads a footer — presumably unintended; confirm against
    // the admin/accounts view.
    public function accounts()
    {
        if ($this->ion_auth->logged_in()) {
            if ($this->ion_auth->is_admin()) {
                $this->load->view('admin/header');
                $data['accountsArray'] = $this->Accounts_model->getRecentlyAdded();
                $this->load->view('admin/accounts', $data);
            } else {
                $this->load->view('admin/header');
                $data['accountsList'] = $this->Accounts_model->getRecentlyAdded();
                $this->load->view('admin/accounts', $data);
            }
        } else {
            redirect('auth/login', 'refresh');
        }
    }
}
<file_sep><head>
<meta charset="utf-8" />
<link rel="apple-touch-icon" sizes="76x76" href="../assets/img/apple-icon.png">
<link rel="icon" type="image/png" href="../assets/img/favicon.png">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1" />
<title>
Sports Social Rank
</title>
<meta content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=0, shrink-to-fit=no' name='viewport' />
<!-- Fonts and icons -->
<link rel="stylesheet" type="text/css" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700|Roboto+Slab:400,700|Material+Icons" />
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/latest/css/font-awesome.min.css">
<!-- CSS Files -->
<link href="../assets/css/material-dashboard.css?v=2.1.1" rel="stylesheet" />
<!-- CSS Just for demo purpose, don't include it in your project -->
<link href="../assets/demo/demo.css" rel="stylesheet" />
<!--Load the AJAX API-->
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript" src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script type="text/javascript" src="js/jquery.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.5.0/Chart.js"></script>
<script type="text/javascript" src="js/app.js"></script>
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-64395606-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-64395606-2');
</script>
</head>
<body class="">
<div class="wrapper ">
<div class="sidebar" data-color="purple" data-background-color="white" data-image="../assets/img/sidebar-1.jpg">
<!--
Tip 1: You can change the color of the sidebar using: data-color="purple | azure | green | orange | danger"
Tip 2: you can also add an image using data-image tag
-->
<div class="logo">
<a href="<?=base_url()?>dashboard" class="simple-text logo-normal">
Sports Social Rank
</a>
</div>
<div class="sidebar-wrapper">
<ul class="nav">
<li class="nav-item ">
<a class="nav-link" href="<?=base_url()?>">
<i class="material-icons">dashboard</i>
<p>Home</p>
</a>
</li>
<li class="nav-item active">
<a class="nav-link" href="<?=base_url()?>about">
<i class="material-icons">people</i>
<p>About Us</p>
</a>
</li>
<!-- <li class="nav-item">
<a class="nav-link" href="<?=base_url()?>contact">
<i class="material-icons">chat</i>
<p>Contact Us</p>
</a>
</li> -->
<li style="padding-top:55px;" class="nav-item active-pro ">
<center><a href="https://www.samford.edu/sports-analytics/"><img style="width:100px" src="https://www.samford.edu/sports-analytics/images/logos/sports-analytics-logo.jpg"></a></center>
<p style="font-size:15px;padding-left:10px;">
Samford Center For Sports Analytics</p>
</li>
</ul>
</div>
</div>
<div class="main-panel">
<!-- Navbar -->
<nav class="navbar navbar-expand-lg navbar-transparent navbar-absolute fixed-top ">
<div class="container-fluid">
<div class="navbar-wrapper">
</div>
<button class="navbar-toggler" type="button" data-toggle="collapse" aria-controls="navigation-index" aria-expanded="false" aria-label="Toggle navigation">
<span class="sr-only">Toggle navigation</span>
<span class="navbar-toggler-icon icon-bar"></span>
<span class="navbar-toggler-icon icon-bar"></span>
<span class="navbar-toggler-icon icon-bar"></span>
</button>
<div class="collapse navbar-collapse justify-content-end">
<form class="navbar-form">
</form>
<ul class="navbar-nav">
<li class="nav-item">
</li>
</ul>
</div>
</div>
</nav>
<file_sep><?php
/**
 * Lookups that bridge account names, account ids, and Twitter rows.
 * All queries now use CodeIgniter query bindings (`?`) instead of string
 * concatenation, closing the SQL-injection holes in the originals.
 */
class User_model extends CI_Model
{
    public function __construct()
    {
        $this->load->database();
    }

    /**
     * Resolve an account name to accounts.id, ignoring spaces on both sides
     * of the comparison. Returns the id, or null when there is no match.
     */
    public function getUserId($accountName)
    {
        $sql = "SELECT id FROM accounts WHERE REPLACE(name, ' ', '') = REPLACE(?, ' ', '')";
        $query = $this->db->query($sql, array($accountName));
        $row = $query->row();
        return $row ? $row->id : null;
    }

    /**
     * twitter_accounts.id for a given accounts.id, or null when absent.
     * (The original applied REPLACE() space-stripping to a numeric id column,
     * which was a no-op; a plain equality is equivalent.)
     */
    public function getTwitterId($accountId)
    {
        $sql = "SELECT id FROM twitter_accounts WHERE accounts_id = ?";
        $query = $this->db->query($sql, array($accountId));
        $row = $query->row();
        return $row ? $row->id : null;
    }

    /** Account name owning a Twitter screen name, or null when unknown. */
    public function getAccountNameFromTwitter($twitterScreenName)
    {
        $sql = "SELECT accounts.name FROM accounts "
             . "INNER JOIN twitter_accounts ON twitter_accounts.accounts_id = accounts.id "
             . "WHERE twitter_accounts.screen_name = ?";
        $query = $this->db->query($sql, array($twitterScreenName));
        $row = $query->row();
        return $row ? $row->name : null;
    }

    /**
     * Whether a display name appears in the Twitter archive.
     * BUG FIX: the original had empty if/else branches and always returned
     * null; it now returns a boolean.
     */
    public function isUser($user)
    {
        $sql = "SELECT id FROM sportssocialrank.twitter_archive WHERE display_name = ? LIMIT 1";
        $query = $this->db->query($sql, array($user));
        return $query->num_rows() > 0;
    }

    /** Every row of the `twitter` table. */
    public function getAllUsers()
    {
        $query = $this->db->get('twitter'); // get_where() without a filter is just get()
        return $query->result();
    }
}
<file_sep><?php
/**
 * Read-only access to a team's Twitter archive history.
 */
class Team_model extends CI_Model {

    public function __construct()
    {
        $this->load->database();
    }

    /**
     * Every twitter_archive row for one display name, as an array of
     * associative arrays.
     * BUG FIX: a stray `";` line after the SQL string was a fatal parse
     * error; the query now also uses a binding instead of concatenating the
     * user-supplied name into the SQL (injection fix).
     */
    public function getTeamArchive($user)
    {
        $sql = "SELECT * FROM sportssocialrank.twitter_archive WHERE display_name = ?";
        $query = $this->db->query($sql, array($user));
        return $query->result_array();
    }
}
<file_sep><?php
//File: TwitterFolldersUpdate.php
//Decription: This will update the twitter_dbupdates every 15 minutes with 900 accounts twitter info
//Author: <NAME>
//Notes:
//*******************************************************************************
//*******************************************************************************
//Globals: $Conn - Creates Connection to database
// $numUsers - Stores number of users in usersDB
// $
// Script-wide state, shared via `global` by the functions below.
// NOTE: these bare statements only *mention* the variables — in PHP they
// declare nothing and could be removed; they serve as documentation only.
$currentStartId;
$currentEndId;
$dateTime;
$conn;
run();
// Entry point: record this run's bookkeeping, then call the Twitter API for
// the selected slice of users.
function run (){
twitterDBRunInfo();
twitterApiRun();
}
////-------------------------------------------------------------------------------
//Name: twitterDBRunInfo
//Description: Collects the bookkeeping for one API run: how many users exist,
//which user-id range this run covers, and when it ran; then records the run
//in twitter_dbupdates.
//Methods Called: db(); getNumUsers(); getDateTime(); getLastRunEndId();
//getStartId(); getEndId(); addTwitterDbUpdate()
////-------------------------------------------------------------------------------
function twitterDBRunInfo (){
global $currentStartId;
global $currentEndId;
global $dateTime;
global $numUsers;
$conn = db(); // db() also stores the connection in the global $conn used by runQuery()
$numUsers = getNumUsers();
$dateTime = getDateTime();
$LastRunEndId = getLastRunEndId();
$currentStartId = getStartId($LastRunEndId, $numUsers);
$currentEndId = getEndId($LastRunEndId, $numUsers, $currentStartId);
addTwitterDbUpdate($dateTime, $currentEndId,$numUsers,$currentStartId);
// BUG FIX: removed a stray call to insertTwitterArchive($twitterInfo,
// $differenceInFollowers) — both variables are undefined in this scope, so
// it inserted a garbage archive row on every run.
}
// Pulls this run's slice of user screen names and feeds them through the
// Twitter API stage.
function twitterApiRun(){
global $currentStartId, $currentEndId, $numUsers, $dateTime, $conn;
$screenNames = getUsers($currentStartId, $currentEndId, $numUsers);
twitterAPI($screenNames, $dateTime);
}
////-------------------------------------------------------------------------------
//Name: twitterAPI
//Description: For each screen name, fetches users/show.json from Twitter,
//computes today's follower delta and writes the result to both the archive
//table and the current-state table.
//Methods Called: getDayFollowerCount(); insertTwitterArchive(); insertTwitter()
////-------------------------------------------------------------------------------
function twitterAPI($usersArray,$dateTime){
require_once('TwitterAPIExchange.php');
// OAuth 1.0a application credentials (redacted placeholders).
$settings = array(
'oauth_access_token' => "<KEY>",
'oauth_access_token_secret' => "<KEY>",
'consumer_key' => "6wR5l7KwDSDFmb6swY1seW5MP",
'consumer_secret' => "<KEY>"
);
$url = 'https://api.twitter.com/1.1/users/show.json';
$requestMethod = 'GET';
// BUG FIX: iterate by value — the original `foreach (... as &$team)` left a
// dangling reference to the last element after the loop. Also removed the
// redundant per-iteration reassignment of $url/$requestMethod and a leftover
// debug echo ('HHHHHHHHHHHHHH').
foreach ($usersArray as $team) {
$getfield = "?screen_name=".$team;
$twitter = new TwitterAPIExchange($settings);
$json = $twitter->setGetfield($getfield)
->buildOauth($url, $requestMethod)
->performRequest();
$twitterInfo = json_decode($json);
$teamDisplayName = $twitterInfo->screen_name;
$followersAtCurrentTime = $twitterInfo->followers_count;
$differenceInFollowers = getDayFollowerCount($followersAtCurrentTime,$teamDisplayName);
insertTwitterArchive($twitterInfo,$differenceInFollowers );
insertTwitter($twitterInfo, $differenceInFollowers);
}
}
////-------------------------------------------------------------------------------
//Name: insertTwitterArchive
//Description: Appends one snapshot row (name, follower/following counts,
//profile image URLs and today's follower delta) to twitter_archive.
//NOTE(review): values come straight from the Twitter API response and are
//interpolated into the SQL string — a name containing a quote would break or
//inject into the query; consider escaping via the mysqli connection.
//Methods Called: runQuery()
////-------------------------------------------------------------------------------
function insertTwitterArchive($twitterInfo,$differenceInFollowers){
$sql = "INSERT INTO twitter_archive (name, display_name, followers, following, profile_image_url, profile_banner_url,followers_today_count) "
. "VALUES ('$twitterInfo->name', '$twitterInfo->screen_name','$twitterInfo->followers_count','$twitterInfo->friends_count','$twitterInfo->profile_image_url','$twitterInfo->profile_banner_url','$differenceInFollowers')";
runQuery($sql, True);
}
////-------------------------------------------------------------------------------
//Name: insertTwitter
//Description: Upserts one account's latest stats into the `twitter` table:
//INSERT when the display name is unseen, UPDATE otherwise.
//Methods Called: runQuery()
////-------------------------------------------------------------------------------
function insertTwitter($twitterInfo,$differenceInFollowers){
$sql = "SELECT id FROM twitter WHERE display_name = '".$twitterInfo->screen_name."';";
$row = runQuery($sql, False);
// runQuery() returns null/false when no row matched; guard the array access.
$id = ($row && isset($row['id'])) ? $row['id'] : null;
// BUG FIX: was `if($id = ' ')` — an assignment that is always truthy, so the
// UPDATE branch was unreachable and every run INSERTed a duplicate row.
// (Also removed the leftover debug echoes of $sql and $id.)
if($id === null){
$sql = "INSERT INTO twitter (name, display_name, followers, following, profile_image_url, profile_banner_url,followers_today_count) "
. "VALUES ('$twitterInfo->name', '$twitterInfo->screen_name','$twitterInfo->followers_count','$twitterInfo->friends_count','$twitterInfo->profile_image_url','$twitterInfo->profile_banner_url','$differenceInFollowers')";
runQuery($sql, True);
}
else{
// BUG FIX: the WHERE clause was missing the closing quote after the screen
// name, which made the UPDATE statement invalid SQL.
$sql = "UPDATE twitter SET name ='".$twitterInfo->name."', display_name = '".$twitterInfo->screen_name."',followers ='".$twitterInfo->followers_count."' , following ='".$twitterInfo->friends_count."' , profile_image_url ='".$twitterInfo->profile_image_url."' , profile_banner_url ='".$twitterInfo->profile_banner_url."', followers_today_count ='".$differenceInFollowers."' WHERE display_name = '".$twitterInfo->screen_name."';";
runQuery($sql, True);
}
}
////-------------------------------------------------------------------------------
//Name: getDayFollowerCount
//Description: Follower delta since the first archive snapshot taken today for
//the given display name; null when there is no snapshot today yet.
//Methods Called: getDateShort(); runQuery()
////-------------------------------------------------------------------------------
function getDayFollowerCount($followersAtCurrentTime,$teamDisplayName){
$date = getDateShort();
$sql = "SELECT followers FROM twitter_archive WHERE date BETWEEN '".$date." 00:00:00' AND '". $date ." 23:59:59' and display_name= '". $teamDisplayName. "' ORDER BY id ASC
LIMIT 1;";
$row = runQuery($sql,false);
// No baseline snapshot for today: nothing to diff against.
if($row === null || !isset($row['followers']) || $row['followers'] === ''){
return null;
}
// BUG FIX: was `if($startOfDayFollowers= ' ')` — an assignment (always
// truthy), so the function unconditionally returned null and the daily
// delta column was never populated.
$startOfDayFollowers = $row['followers'];
$differenceInFollowers = $followersAtCurrentTime - $startOfDayFollowers;
return $differenceInFollowers;
}
////-------------------------------------------------------------------------------
//Name: getUsers
//Description: Returns the Twitter usernames for this run's id slice. When the
//slice wraps past the end of the table (end < start), it takes the tail of
//the table plus the head.
//Variables: $conn (global mysqli connection)
////-------------------------------------------------------------------------------
function getUsers($currentStartId,$currentEndId,$numUsers){
global $conn;
$usersArray = array(); // BUG FIX: was undefined when the query matched 0 rows (fatal implode/return)
if($currentEndId > $currentStartId){
$sql = "SELECT Twitter_username FROM users WHERE id BETWEEN ". $currentStartId . " and ". $currentEndId;
}
else{
// BUG FIX: the two id ranges were combined with AND — two disjoint ranges
// can never both hold, so a wrapped run always selected 0 users. The
// intended set is the union, i.e. OR.
$sql = "SELECT Twitter_username FROM users WHERE (id BETWEEN ". $currentStartId . " and ". $numUsers . ") OR (id BETWEEN 1 and ". $currentEndId . ")";
}
$result = $conn->query($sql);
if ($result->num_rows > 0) {
while($row = $result->fetch_assoc()) {
$usersArray[] = $row['Twitter_username'];
}
} else {
echo "0 results";
}
// (removed an unused `$str = implode(...)` left over from debugging)
return $usersArray;
}
// Opens the MySQL connection, stores it in the global $conn (used by
// runQuery() and getUsers()) and also returns it. Dies on connection failure.
// NOTE(review): credentials are hard-coded here (password redacted in this
// copy) — they should live in configuration, not source.
function db () {
global $conn;
$usersArray = array(); // unused leftover; kept byte-identical in this doc pass
$servername = "sportssocialrank.db.10366090.db2.hostedresource.net";
$username = "sportssocialrank";
$password = "<PASSWORD>";
$dbname = "sportssocialrank";
// Create connection
$conn = new mysqli($servername, $username, $password, $dbname);
if ($conn->connect_error) {
die("Connection failed: " . $conn->connect_error);
}
return $conn;
}
////-------------------------------------------------------------------------------
//Name: getNumUsers
//Description: Row count of the users table.
//SQL Query: SELECT COUNT(*) FROM users;
//Methods Called: runQuery()
////-------------------------------------------------------------------------------
function getNumUsers(){
$countSql = "SELECT COUNT(*) FROM users;";
$countRow = runQuery($countSql, false);
return $countRow['COUNT(*)'];
}
////-------------------------------------------------------------------------------
//Name: getDateTime
//Description: Current America/Chicago timestamp formatted for a MySQL
//DATETIME column, e.g. '2018-10-19 15:03:19'.
////-------------------------------------------------------------------------------
function getDateTime(){
date_default_timezone_set('America/Chicago');
return date('Y-m-d G:i:s', time());
}
// Current America/Chicago calendar date, e.g. '2018-10-19'.
function getDateShort(){
date_default_timezone_set('America/Chicago');
return date('Y-m-d');
}
////-------------------------------------------------------------------------------
//Name: getLastRunEndId
//Description: endId recorded by the most recent run in twitter_dbupdates
//(0 when no run has been recorded yet).
//Methods Called: runQuery()
////-------------------------------------------------------------------------------
function getLastRunEndId(){
// BUG FIX: the query had no ORDER BY, and runQuery() returns the *first*
// fetched row — i.e. the oldest run, not the last one. Order newest-first
// and take a single row.
$sql = "SELECT * FROM sportssocialrank.twitter_dbupdates ORDER BY date DESC LIMIT 1;";
$row = runQuery($sql,false);
if($row === null || !isset($row['endId'])){
return 0; // no runs yet: behave as if the previous run ended at id 0
}
return $row['endId'];
}
////-------------------------------------------------------------------------------
//Name: getStartId
//Description: First users.id that the new run should cover, given where the
//previous run ended and how many users exist.
////-------------------------------------------------------------------------------
function getStartId($LastRunEndId,$numUsers){
// Fewer users than one full batch of 900: always start from the first id.
if($numUsers < 900){
return 1;
}
// Otherwise resume immediately after the previous run's last id. (The
// original elseif/else branches were byte-identical, so they are merged —
// same behavior in all cases.)
// NOTE(review): when $LastRunEndId == $numUsers this yields $numUsers + 1,
// which getUsers() cannot match — confirm whether a wrap back to 1 was
// intended here.
return $LastRunEndId + 1;
}
////-------------------------------------------------------------------------------
//Name: getEndId
//Description: Last users.id that the new run should cover. Mirrors
//getStartId(): small tables are covered whole; otherwise the run advances by
//a batch of 900, wrapping to the head of the table when it passes the end.
//NOTE(review): with inclusive BETWEEN, start + 900 spans 901 ids, and the
//wraparound arithmetic (900 - distance) looks off by one as well — confirm
//the intended batch size before relying on exact slice boundaries.
////-------------------------------------------------------------------------------
function getEndId($LastRunEndId,$numUsers,$currentStartId){
//Fewer users than one batch: the run covers the whole table.
if($numUsers < 900){
$currentEndId = $numUsers;
return $currentEndId;
}
//A full batch fits before the end of the table.
elseif($LastRunEndId + 900 < $numUsers){
$currentEndId = $currentStartId + 900;
return $currentEndId;
}
//The batch passes the end of the table: wrap, ending this far into the head.
else{
$numDistance = $numUsers - $currentStartId;
$currentEndId = 900 - $numDistance;
return $currentEndId;
}
}
// Records one run's bookkeeping row (timestamp, id slice, table size) in
// twitter_dbupdates.
function addTwitterDbUpdate($dateTime, $currentEndId, $numUsers,$currentStartId){
$insertSql = "INSERT INTO twitter_dbupdates (date, startId, endId, totalUsers)"
. "VALUES ('$dateTime','$currentStartId', '$currentEndId','$numUsers')";
runQuery($insertSql, true);
}
// Executes $sql on the shared global connection. For non-insert queries
// ($Insert falsy) returns the first result row as fetched by
// mysqli_fetch_array(), or null when the query matched nothing; for inserts
// it returns nothing.
function runQuery($sql,$Insert){
global $conn; // the connection opened by db()
$result = $conn->query($sql);
// BUG FIX: a failed query returns false, and fetching from false was a
// fatal error with no diagnostic. Surface the MySQL error instead.
if($result === false){
die("Query failed: " . $conn->error . " -- " . $sql);
}
if($Insert != True){
$row = mysqli_fetch_array($result);
return $row;
}
}
?>
<file_sep><?php
/**
 * Imports the members of a Twitter list into `accounts` / `twitter_accounts`
 * and links every imported account to a category (plus the "ALL" category,
 * id 1).
 */
class Accounts_model extends CI_Model
{
    public function __construct()
    {
        $this->load->database();
        require_once(APPPATH.'libraries/TwitterAPIExchange.php');
        $this->load->model('Category_model');
    }

    /** All account names as a CI query object (callers iterate result()). */
    public function getAllAccounts()
    {
        $this->db->select('name');
        return $query = $this->db->get('accounts');
    }

    /** Accounts inserted within the last 5 minutes. */
    public function getRecentlyAdded()
    {
        return $this->db->query("SELECT * FROM accounts WHERE date_added BETWEEN DATE_SUB(NOW(), INTERVAL 5 MINUTE) AND NOW()");
    }

    /** Fetch a Twitter list and persist every member under $category. */
    public function getTwitterList($slug, $owner_screen_name, $category)
    {
        $accountsList = $this->getTwitterListJSON($slug, $owner_screen_name);
        $this->addNewAccountsJSON($accountsList, $category);
    }

    /**
     * Calls lists/members.json for one list and returns the decoded response.
     * Exits with the API's message on error 34 ("page does not exist").
     */
    public function getTwitterListJSON($slug, $owner_screen_name)
    {
        // OAuth 1.0a application credentials (redacted placeholders).
        $settings = array(
            'oauth_access_token' => "<KEY>",
            'oauth_access_token_secret' => "<KEY>",
            'consumer_key' => "6wR5l7KwDSDFmb6swY1seW5MP",
            'consumer_secret' => "<KEY>"
        );
        //Get Twitter List
        $url = "https://api.twitter.com/1.1/lists/members.json";
        $getfield = "?slug=".$slug."&owner_screen_name=".$owner_screen_name."&count=5000";
        $requestMethod = 'GET';
        $twitter = new TwitterAPIExchange($settings);
        $json = $twitter->setGetfield($getfield)
            ->buildOauth($url, $requestMethod)
            ->performRequest();
        $accountsList = json_decode($json);
        echo "</br>";
        // BUG FIX: the error guard previously tested $arr, which was only
        // assigned in commented-out code, so API errors were never detected
        // and the caller crashed iterating ->users. The comparison was also
        // written `code = 34` (an assignment). Check the response directly.
        if (isset($accountsList->errors) && !empty($accountsList->errors)) {
            if ($accountsList->errors[0]->code == 34) {
                echo $accountsList->errors[0]->message;
                exit;
            }
        }
        return $accountsList;
    }

    /**
     * Walks the decoded list response and upserts each member:
     * new account  -> insert accounts row + category links + twitter row;
     * known account with no twitter row -> category links + twitter row;
     * known account with twitter row    -> skip.
     * (The duplicated row-building is factored into buildTwitterAccountData.)
     */
    public function addNewAccountsJSON($accountsList, $category)
    {
        //Ensure the category row exists and get its id.
        $categoryId = $this->Category_model->addNewCategory($category);
        $i = -1;
        foreach ($accountsList->users as $key=>$users) {
            $i++;
            //Check to see if the account exists
            $accountName = $accountsList->users[$i]->name;
            $accountId = $this->accountExist($accountName);
            echo $i;
            echo ": ";
            echo $accountName;
            echo "</br>";
            if ($accountId != false) {
                //Account row exists; add the twitter row only if missing.
                $twitterId = $this->twitterAccountExist($accountId);
                if ($twitterId != false) {
                    //Both rows already present: nothing to do.
                } else {
                    //Add Category to Account
                    $categoryAccountId = $this->Category_model->addNewAccountCategory($accountId, $categoryId);
                    //Add ALL CATEGORY
                    $categoryAccountId = $this->Category_model->addNewAccountCategory($accountId, 1);
                    $data = $this->buildTwitterAccountData($accountId, $accountName, $accountsList->users[$i]);
                    print_r($data);
                    $this->insertTwitterAccount($accountId, $data);
                }
            } else {
                //Brand-new account: create the accounts row first.
                $data = array(
                    'name' => $accountName,
                );
                echo $accountName;
                $this->db->insert('accounts', $data);
                $accountId = $this->accountExist($accountName);
                //Add Category to Account
                $categoryAccountId = $this->Category_model->addNewAccountCategory($accountId, $categoryId);
                //Add ALL CATEGORY
                $categoryAccountId = $this->Category_model->addNewAccountCategory($accountId, 1);
                //Add Account to Twitter Accounts
                $data = $this->buildTwitterAccountData($accountId, $accountName, $accountsList->users[$i]);
                print_r($data);
                $this->insertTwitterAccount($accountId, $data);
            }
        }
    }

    /**
     * Row for twitter_accounts built from one Twitter user object.
     * NOTE(review): the 'url ' key has a trailing space in the original —
     * kept byte-identical here; confirm whether the column is really named
     * with a trailing space before "fixing" it.
     */
    private function buildTwitterAccountData($accountId, $accountName, $user)
    {
        return array(
            'accounts_id' => $accountId,
            'name' => $accountName,
            'screen_name'=> $user->screen_name,
            'location'=> $user->location,
            'url '=> $user->url,
            'description'=> $user->description,
            'verified'=> $user->verified,
            'profile_banner_url'=> $user->profile_banner_url,
            'profile_image_url'=> $user->profile_image_url
        );
    }

    /** accounts.id for a name, or 0 (falsy) when the account does not exist. */
    public function accountExist($account)
    {
        $this->db->where('name', $account);
        $query = $this->db->get('accounts');
        $result = $query->row();
        if ($query->num_rows() > 0) {
            $accountId= $result->id;
            return $accountId;
        } else {
            return 0;
        }
    }

    /** twitter_accounts.id for an accounts.id, or false when absent. */
    public function twitterAccountExist($accounts_id)
    {
        $this->db->where('accounts_id', $accounts_id);
        $query = $this->db->get('twitter_accounts');
        $result = $query->row();
        if ($query->num_rows() > 0) {
            $id= $result->id;
            echo $id;
            return $id;
        } else {
            return false;
        }
    }

    /** Insert a twitter_accounts row unless one already exists for the account. */
    public function insertTwitterAccount($accounts_id, $data)
    {
        $twitterAccountExist = $this->twitterAccountExist($accounts_id);
        if ($twitterAccountExist == false) {
            $this->db->insert('twitter_accounts', $data);
        }
    }
}
<file_sep><div class="content">
<div class="container-fluid">
<div style="padding:50px" class="alert alert-block">
<div class="container text-center">
<h2 class="text-left mb-4">Contact Us</h2>
<form id="demo-form">
</br>
</br>
<div class="row">
<div class="col">
<input type="text" name="first-name" class="form-control" placeholder="First name" value="">
</div>
<div class="col">
<input type="text" name="last-name" class="form-control" placeholder="Last name">
</div>
</div>
</br> </br>
<div class="row">
<div class="col">
<input type="text" name="email" class="form-control" placeholder="Email">
</div>
<div class="col">
<input type="text" name="subject" class="form-control" placeholder="subject">
</div>
</div>
</br> </br>
<div class="row">
<div class="col">
<input type="text" name="organization"class="form-control" placeholder="organization">
</div>
</div>
</br> </br>
<button type="submit" class="btn btn-success">Submit <span class="fa fa-arrow-right"></span></button>
<p id="success" style="display:none; color:green; padding-top:15px;">Successfully Submitted!</p>
</form>
</div>
</div>
</div>
</div>
<script
src="https://code.jquery.com/jquery-3.3.1.min.js"
integrity="<KEY>
crossorigin="anonymous"></script>
<script>
// Re-fit DataTables whenever a Bootstrap tab is shown (tables rendered in a
// hidden tab have zero-width columns until adjusted).
$(document).ready(function(){
$('a[data-toggle="tab"]').on('shown.bs.tab', function(e){
$($.fn.dataTable.tables(true)).DataTable()
.columns.adjust()
.fixedColumns().relayout();
});
});
// AJAX submission of the contact form: POST the serialized fields to
// FormSubmission/demoform, which replies "1" on success.
// NOTE(review): `dataString` is an implicit global (no var/let), and the
// failure alert text 'Error Submiting' has a typo — both are runtime behavior
// and left untouched in this documentation pass.
$(function(){
$("#demo-form").submit(function(){
dataString = $("#demo-form").serialize();
$.ajax({
type: "POST",
url: "<?php echo base_url(); ?>FormSubmission/demoform",
data: dataString,
success: function(data){
if(data != "1"){
alert('Error Submiting');
}
else {
alert('Successfully Submitted!');
location.reload();
}
}
});
return false; //stop the actual form post !important!
});
});
// Strip any query string from the address bar (e.g. after a redirect) without
// reloading the page.
var uri = window.location.toString();
if (uri.indexOf("?") > 0) {
var clean_uri = uri.substring(0, uri.indexOf("?"));
window.history.replaceState({}, document.title, clean_uri);
}
</script>
<!-- Bootstrap core JavaScript -->
<script src="<?php echo base_url(); ?>assets/vendor/jquery/jquery.min.js"></script>
<script src="<?php echo base_url(); ?>assets/vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Plugin JavaScript -->
<script src="<?php echo base_url(); ?>assets/vendor/jquery-easing/jquery.easing.min.js"></script>
<script src="<?php echo base_url(); ?>assets/vendor/scrollreveal/scrollreveal.min.js"></script>
<script src="<?php echo base_url(); ?>assets/vendor/magnific-popup/jquery.magnific-popup.min.js"></script>
<!-- Custom scripts for this template -->
<script src="<?php echo base_url(); ?>assets/js/creative.min.js"></script>
</body>
</html>
| 7256a7952b58f631ecb02614c8fa4474d0131de5 | [
"Python",
"PHP",
"INI"
] | 33 | PHP | gchamoun/SportsSocialRank | 2a77884b42b1e6d36fcfb67244c1460d6cfcdc21 | c6625e6b872b28499fa5345d1b369e5e295b3ed2 |
refs/heads/master | <repo_name>IsharaDissanayaka/Health-Care-System-PatientManagement<file_sep>/README.md
# Health-Care-System-PatientManagement
A RESTful API web service for managing patients in a health-care system.
<file_sep>/DB/hcs.sql
-- phpMyAdmin SQL Dump
-- version 5.0.1
-- https://www.phpmyadmin.net/
--
-- Host: 127.0.0.1
-- Generation Time: May 06, 2020 at 09:04 AM
-- Server version: 10.4.11-MariaDB
-- PHP Version: 7.4.3
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET AUTOCOMMIT = 0;
START TRANSACTION;
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
--
-- Database: `hcs`
--
-- --------------------------------------------------------
--
-- Table structure for table `patient`
-- One row per registered patient. `PatientId` is the primary key and is made
-- AUTO_INCREMENT by the ALTER TABLE statements further down in this dump.
--

CREATE TABLE `patient` (
`PatientId` int(10) NOT NULL, -- surrogate key (auto-assigned)
`PatientName` varchar(30) NOT NULL,
`Age` int(10) NOT NULL,
`PhoneNo` varchar(15) NOT NULL, -- stored as text to keep leading zeros
`Email` varchar(30) NOT NULL,
`Address` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
--
-- Dumping data for table `patient`
--
INSERT INTO `patient` (`PatientId`, `PatientName`, `Age`, `PhoneNo`, `Email`, `Address`) VALUES
(34, 'Saviru', 18, '118529637', '<EMAIL>', 'Homagama'),
(35, 'Dilon', 17, '701472583', '<EMAIL>', 'Matara'),
(36, 'Lakshan', 28, '773692581', '<EMAIL>', 'Malabe'),
(58, 'Ishara', 23, '712013654', '<EMAIL>', 'Homagama'),
(60, 'Subhani', 30, '713210456', '<EMAIL>', 'Galle');
--
-- Indexes for dumped tables
--
--
-- Indexes for table `patient`
--
ALTER TABLE `patient`
ADD PRIMARY KEY (`PatientId`);
--
-- AUTO_INCREMENT for dumped tables
--
--
-- AUTO_INCREMENT for table `patient`
--
ALTER TABLE `patient`
MODIFY `PatientId` int(10) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=61;
COMMIT;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
<file_sep>/HelthCare-System/src/com/hcs/controller/PatientController.java
package com.hcs.controller;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import com.hcs.model.Patient;
import com.hcs.util.DBConnection;
/**
 * JDBC data-access layer for the patient table. Every method opens its own
 * connection via DBConnection and releases all JDBC resources with
 * try-with-resources — the original kept statements/result sets in static
 * fields and never closed them (resource leak), and built the search query by
 * string concatenation (SQL injection).
 */
public class PatientController {

	/**
	 * Inserts a new patient. Returns a human-readable status string.
	 */
	public String AddPatient(Patient patient) {
		try (Connection connection = DBConnection.getConnection()) {
			if (connection == null) {
				return "Error while connecting to the database for inserting.";
			}
			try (PreparedStatement ps = connection.prepareStatement(
					"INSERT INTO patient(PatientId,PatientName,Age,PhoneNo,Email,Address) "
							+ " VALUES (?,?,?,?,?,?)")) {
				ps.setInt(1, 0); // 0 lets MySQL assign the AUTO_INCREMENT id
				ps.setString(2, patient.getPatientName());
				ps.setInt(3, patient.getAge());
				ps.setInt(4, patient.getPhoneNo());
				ps.setString(5, patient.getEmail());
				ps.setString(6, patient.getAddress());
				ps.execute();
			}
			return "Inserted successfully";
		} catch (Exception e) {
			System.err.println(e.getMessage());
			return "Error while inserting the patient.";
		}
	}

	/**
	 * All patients, in table order. Returns an empty list on error.
	 */
	public List<Patient> readPatients() {
		List<Patient> patients = new ArrayList<>();
		try (Connection connection = DBConnection.getConnection()) {
			if (connection == null) {
				System.err.println("connecting failed.");
				return patients;
			}
			try (Statement stmt = connection.createStatement();
					ResultSet rs = stmt.executeQuery("select * from patient")) {
				// iterate through the rows in the result set
				while (rs.next()) {
					Patient pt = new Patient();
					pt.setPatientId(rs.getInt("PatientId"));
					pt.setPatientName(rs.getString("PatientName"));
					pt.setAge(rs.getInt("Age"));
					pt.setPhoneNo(rs.getInt("PhoneNo"));
					pt.setEmail(rs.getString("Email"));
					pt.setAddress(rs.getString("Address"));
					patients.add(pt);
				}
			}
		} catch (Exception e) {
			System.err.println(e.getMessage());
		}
		return patients;
	}

	/**
	 * Updates every mutable column of the patient identified by
	 * patient.getPatientId(). Returns a human-readable status string.
	 */
	public String updatepatient(Patient patient) {
		try (Connection connection = DBConnection.getConnection()) {
			if (connection == null) {
				return "Error while connecting to the database for updating.";
			}
			try (PreparedStatement ps = connection.prepareStatement(
					"UPDATE patient SET PatientName=?,Age=?,PhoneNo=?,Email=?,Address=? WHERE PatientId=?")) {
				ps.setString(1, patient.getPatientName());
				ps.setInt(2, patient.getAge());
				ps.setInt(3, patient.getPhoneNo());
				ps.setString(4, patient.getEmail());
				ps.setString(5, patient.getAddress());
				ps.setInt(6, patient.getPatientId());
				ps.execute();
			}
			return "Updated successfully";
		} catch (Exception e) {
			System.err.println(e.getMessage());
			return "Error while updating the patient.-" + e;
		}
	}

	/**
	 * Deletes one patient by id (id arrives as text from the REST layer).
	 */
	public String deletePatient(String PatientId) {
		try (Connection connection = DBConnection.getConnection()) {
			if (connection == null) {
				return "Error while connecting to the database for deleting.";
			}
			try (PreparedStatement ps = connection.prepareStatement("delete from patient where PatientId=?")) {
				ps.setInt(1, Integer.parseInt(PatientId));
				ps.execute();
			}
			return "Deleted successfully";
		} catch (Exception e) {
			System.err.println(e.getMessage());
			return "Error while deleting the patient. -" + e.getMessage();
		}
	}

	/**
	 * Looks up one patient by id. Returns an empty Patient bean when the id is
	 * unknown or an error occurs (mirrors the original behavior).
	 * SECURITY FIX: the id was previously concatenated into the SQL string;
	 * it is now bound as a parameter.
	 */
	public Patient searchPatients(String PatientId) {
		Patient pt = new Patient();
		try (Connection connection = DBConnection.getConnection()) {
			if (connection == null) {
				System.out.println("connection failed");
				return pt;
			}
			try (PreparedStatement ps = connection.prepareStatement("select * from patient p where PatientId=?")) {
				ps.setInt(1, Integer.parseInt(PatientId));
				try (ResultSet rs = ps.executeQuery()) {
					while (rs.next()) {
						pt.setPatientId(rs.getInt("PatientId"));
						pt.setPatientName(rs.getString("PatientName"));
						pt.setAge(rs.getInt("Age"));
						pt.setPhoneNo(rs.getInt("PhoneNo"));
						pt.setEmail(rs.getString("Email"));
						pt.setAddress(rs.getString("Address"));
					}
				}
			}
		} catch (Exception e) {
			System.out.println(e.getMessage());
		}
		return pt;
	}
}
<file_sep>/HelthCare-System/src/com/hcs/service/PatientService.java
package com.hcs.service;
import java.text.ParseException;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.FormParam;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import com.google.gson.Gson;
import com.hcs.controller.PatientController;
import com.hcs.model.Patient;
@Path("/patients")
public class PatientService {
PatientController patientController = new PatientController();
@GET
@Path("/read")
@Produces({ MediaType.TEXT_PLAIN })
public String readItems() {
return new Gson().toJson(patientController.readPatients());
}
@POST
@Path("/insert")
@Consumes(MediaType.APPLICATION_FORM_URLENCODED)
@Produces(MediaType.TEXT_PLAIN)
public String insertPatient(@FormParam("PatientName") String PatientName,@FormParam("Age") String Age,@FormParam("PhoneNo") String PhoneNo,
@FormParam("Email") String Email, @FormParam("Address") String Address) throws ParseException {
Patient patient = new Patient();
patient.setPatientName(PatientName);
patient.setAge(Integer.parseInt(Age));
patient.setPhoneNo(Integer.parseInt(PhoneNo));
patient.setEmail(Email);
patient.setAddress(Address);
return patientController.AddPatient(patient);
}
@DELETE
@Path("/{id}")
@Produces(MediaType.TEXT_PLAIN)
public String deletePatient(@PathParam("id")String PatientId) {
return patientController.deletePatient(PatientId);
}
@PUT
@Path("/update")
@Consumes(MediaType.APPLICATION_FORM_URLENCODED)
@Produces(MediaType.TEXT_PLAIN)
public String updatePatient(@FormParam("PatientId") String PatientId,@FormParam("PatientName") String PatientName,@FormParam("Age") String Age,@FormParam("PhoneNo") String PhoneNo,
@FormParam("Email") String Email, @FormParam("Address") String Address) throws ParseException {
Patient patient = new Patient();
patient.setPatientId(Integer.parseInt(PatientId));
patient.setPatientName(PatientName);
patient.setAge(Integer.parseInt(Age));
patient.setPhoneNo(Integer.parseInt(PhoneNo));
patient.setEmail(Email);
patient.setAddress(Address);
return patientController.updatepatient(patient);
}
@GET
@Path("/search/{id}")
@Produces({MediaType.TEXT_PLAIN})
public String searchPatient(@PathParam("id")String PatientId) {
return new Gson().toJson(patientController.searchPatients(PatientId));
}
}
| b534ddb7f1bc3a2da46ede9323ce9919bd7e6106 | [
"Markdown",
"SQL",
"Java"
] | 4 | Markdown | IsharaDissanayaka/Health-Care-System-PatientManagement | 7aa39bc0752003a09cdc70663847373fe4a29028 | 8ab805310ec659edc2db625f3c92772a4edca5c1 |
refs/heads/master | <repo_name>devquasar/task-app-backend<file_sep>/src/modules/tasks/dto/tasks.dto.ts
import { ApiProperty } from '@nestjs/swagger';
import { IsString } from 'class-validator';
import { Tasks } from '../model/tasks.entity';
import { User } from '../../../user.decorator';
export class TasksDTO implements Readonly<TasksDTO> {
  @ApiProperty({ required: true })
  id: number;

  @ApiProperty({ required: true })
  @IsString()
  title: string;

  @ApiProperty({ required: true })
  @IsString()
  status: string;

  @ApiProperty({ required: true })
  @IsString()
  date: string;

  @ApiProperty()
  @IsString()
  description: string;

  /** Copy the transferable fields of a partial DTO onto a new instance. */
  public static from(dto: Partial<TasksDTO>) {
    return Object.assign(new TasksDTO(), {
      id: dto.id,
      title: dto.title,
      status: dto.status,
      date: dto.date,
      description: dto.description,
    });
  }

  /** Project a persisted entity into its DTO representation. */
  public static fromEntity(entity: Tasks) {
    return this.from({
      id: entity.id,
      title: entity.title,
      status: entity.status,
      date: entity.date,
      description: entity.description,
    });
  }

  /** Build a Tasks entity from a DTO. */
  public static toEntity(dto: Partial<TasksDTO>, user: User = null) {
    const entity = new Tasks();
    entity.id = dto.id;
    entity.title = dto.title;
    entity.description = dto.description;
    entity.status = dto.status;
    // The incoming dto.date is deliberately ignored; the creation
    // timestamp is always stamped server-side.
    entity.date = '' + new Date();
    return entity;
  }
}
<file_sep>/src/modules/auth/controller/auth.controller.ts
import {
Controller,
Request,
Res,
Post,
HttpStatus,
Body,
UseGuards,
Get,
} from '@nestjs/common';
import { AuthGuard } from '@nestjs/passport';
import { AuthService } from '../service/auth.service';
import { Connection, Repository } from 'typeorm';
import { Admin } from '../../admin/model/admin.entity';
@Controller('auth')
export class AuthController {
  /** Direct repository access to admin records, needed for token refresh. */
  private adminRepository: Repository<Admin>;

  constructor(
    private authService: AuthService,
    private connection: Connection,
  ) {
    this.adminRepository = this.connection.getRepository(Admin);
  }

  /** Create a new admin account from the raw request body. */
  @Post('/register')
  async register(@Body() payload) {
    return this.authService.addUser(payload);
  }

  /** Username/password login (local strategy); issues a token on success. */
  @UseGuards(AuthGuard('local'))
  @Post('login')
  async login(@Request() req) {
    return this.authService.login(req.user);
  }

  /** Echo back the authenticated principal decoded from the JWT. */
  @UseGuards(AuthGuard('jwt'))
  @Get('profile')
  getProfile(@Request() req) {
    return req.user;
  }

  /** Re-issue a token for an already-authenticated admin. */
  @UseGuards(AuthGuard('jwt'))
  @Post('refresh')
  async refresh(@Request() req) {
    const adminRecord = await this.adminRepository.findOne(req.user.id);
    return this.authService.login(adminRecord);
  }
}
<file_sep>/src/modules/tasks/controller/tasks.controller.ts
import { Controller, UseGuards } from '@nestjs/common';
import { Crud, CrudController } from '@nestjsx/crud';
import { TasksService } from '../service/tasks.service';
import { TasksDTO } from '../dto/tasks.dto';
import { Tasks } from '../model/tasks.entity';
// REST CRUD endpoints for tasks under /tasks.  The @Crud decorator
// generates the standard routes (GET/POST/PATCH/DELETE) and delegates all
// persistence to the injected TasksService.
@Crud({
  model: {
    type: Tasks,
  },
})
@Controller('tasks')
export class TasksController implements CrudController<Tasks> {
  constructor(public service: TasksService) {}
}
<file_sep>/src/modules/tasks/service/tasks.service.ts
import { Injectable } from '@nestjs/common';
import { InjectRepository } from '@nestjs/typeorm';
import { Repository } from 'typeorm';
import { TypeOrmCrudService } from '@nestjsx/crud-typeorm';
import { Tasks } from '../model/tasks.entity';
import { TasksDTO } from '../dto/tasks.dto';
import { User } from '../../../user.decorator';
// Thin persistence service: every standard find/create/update/delete
// operation is inherited from TypeOrmCrudService, bound to the Tasks
// repository supplied by dependency injection.
@Injectable()
export class TasksService extends TypeOrmCrudService<Tasks> {
  constructor(@InjectRepository(Tasks) repo) {
    super(repo);
  }
}
<file_sep>/README.md
# Task app Backend part
Dev stack:

- NestJS
- PostgreSQL
| c776565fa6ef9e771f6eabd870032854ddb63fdf | [
"Markdown",
"TypeScript"
] | 5 | TypeScript | devquasar/task-app-backend | 4977231ec7bece7b55cb97dd231cb0b224f7b983 | 75121eacd8c8df14438b3999a63ae2901ff4b939 |
refs/heads/master | <repo_name>NaldoRonz/info3180-Project1<file_sep>/migrations/versions/c9174ff09bef_.py
"""empty message
Revision ID: c9174ff09bef
Revises: <PASSWORD>
Create Date: 2020-03-30 13:59:36.345481
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c9174ff09bef'
# NOTE(review): the parent revision id was redacted to "<PASSWORD>" in this
# copy of the file; the real id must be restored or Alembic cannot order
# this migration in the chain.
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: drop the obsolete ``my_followers`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('my_followers')
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: recreate ``my_followers`` exactly as dropped."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('my_followers',
    sa.Column('follower_id', sa.INTEGER(), autoincrement=True, nullable=False),
    sa.Column('follower_fname', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
    sa.Column('follower_lname', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
    sa.PrimaryKeyConstraint('follower_id', name='my_followers_pkey')
    )
    # ### end Alembic commands ###
<file_sep>/app/models.py
from . import my_db
# This creates table
class my_users(my_db.Model):
    """Flask-SQLAlchemy model: one row per registered user profile."""
    # NOTE(review): ``_tablename_`` uses single underscores, so SQLAlchemy
    # silently ignores it and derives the table name from the class name
    # ("my_users") instead of the intended "user_profiles".  Renaming it to
    # the dunder ``__tablename__`` would change which DB table is used --
    # confirm against the live schema before fixing.
    _tablename_ = "user_profiles"
    user_id = my_db.Column(my_db.Integer, primary_key=True)
    firstname = my_db.Column(my_db.String(20), nullable=False)
    lastname = my_db.Column(my_db.String(20),nullable=False)
    gender = my_db.Column(my_db.String(10),nullable=False)
    email = my_db.Column(my_db.String(35),nullable=False, unique=True)
    location = my_db.Column(my_db.String(50),nullable=False)
    biography = my_db.Column(my_db.String(1000),nullable=False)
    filename = my_db.Column(my_db.String(20),nullable=False)
    date_created = my_db.Column(my_db.String(20), nullable=False)

    def __init__(self,firstname,lastname,gender,email,location,biography,filename,date_created):
        """Populate every column except the auto-increment primary key."""
        self.firstname = firstname
        self.lastname = lastname
        self.gender = gender
        self.email = email
        self.location = location
        self.biography = biography
        self.filename = filename
        self.date_created = date_created

    def __repr__(self):
        # Debug representation; shows the identifying fields only.
        return f"my_users('{self.firstname}','{self.lastname}','{self.email}','{self.filename}','{self.date_created}')"
# Side effect of importing this module: create any missing tables.
my_db.create_all()
#class my_followers(my_db.Model):
#_tablename_ = "followers"
#follower_id = my_db.Column(my_db.Integer, primary_key=True)
#follower_fname = my_db.Column(my_db.String(20), nullable=False)
#follower_lname = my_db.Column(my_db.String(20), nullable=False)
#def__init__(self,follower_fname,follower_lname):
# self.follower_fname = follower_fname
# self.follower_lname = follower_lname
#def __repr__(self):
# return f"followes('{self.follower_fname}','{self.follower_lname}')"<file_sep>/flask-migrate.py
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from app import app
from app import my_db
# Expose Alembic migration commands through Flask-Script, e.g.
# ``python flask-migrate.py my_db migrate`` / ``... my_db upgrade``.
migrate = Migrate(app,my_db)
manager = Manager(app)
# The Alembic command group is registered under the name "my_db"
# (conventionally this is called "db").
manager.add_command("my_db",MigrateCommand)

if __name__ == '__main__':
    manager.run()
#<NAME> 620109753
from flask import Flask
from flask_wtf.csrf import CSRFProtect
from flask_sqlalchemy import SQLAlchemy
from werkzeug.utils import secure_filename
csrf = CSRFProtect()
app = Flask(__name__)

# For Flask Form
# Secret key signs session cookies and WTForms CSRF tokens.
# NOTE(review): secrets and DB credentials are hard-coded in source; they
# should be supplied via environment variables instead.
app.config["SECRET_KEY"] = "<KEY>"

# Runs on port 5432 only works with default server postgres
#app.config["SQLALCHEMY_DATABASE_URI"] = "postgresql://postgres:RSK4LFEg@localhost/postgres"
app.config["SQLALCHEMY_DATABASE_URI"] = 'postgresql://ujmngpfpsldxvj:<EMAIL>@ec2-54-147-209-121.<EMAIL>.com:5432/de54p28jgcb3kp'
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False

# For file uploads: profile photos are written under the static folder so
# they can be served directly by Flask.
UPLOAD_FOLDER = "./app/static/uploads"
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
# Reject request bodies larger than 10 MiB to bound upload size.
app.config["MAX_CONTENT_LENGTH"] = 10*1024*1024

app.config.from_object(__name__)
csrf.init_app(app)

my_db = SQLAlchemy(app)

# Imported last on purpose: views and models both import ``app``/``my_db``
# from this module, so a top-of-file import would be circular.
from app import views, models
<file_sep>/requirements.txt
alembic==1.4.2
Click==7.0
Flask==1.1.1
Flask-Migrate==2.5.3
Flask-Script==2.0.6
Flask-SQLAlchemy==2.4.1
Flask-Uploads==0.2.1
Flask-WTF==0.14.3
gunicorn==19.9.0
itsdangerous==1.1.0
Jinja2==2.11.1
Mako==1.1.2
MarkupSafe==1.1.0
psycopg2==2.8.4
python-dateutil==2.8.1
python-editor==1.0.4
six==1.14.0
SQLAlchemy==1.3.15
Werkzeug==1.0.0
WTForms==2.2.1
<file_sep>/app/forms.py
#<NAME> 620109753
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, FileField, TextAreaField
from wtforms.validators import Required, Regexp, Length, Email
from flask_wtf.file import FileRequired, FileAllowed, FileRequired
from wtforms import ValidationError
class add_Profile(FlaskForm):
    """WTForms form for creating a user profile (details + photo upload).

    Bug fix: the name patterns used ``/s`` where the whitespace class
    ``\\s`` was intended, so a literal "/" (and redundant "s") were
    accepted in names.  Names are now letters and whitespace only.
    """
    Firstname = StringField("Firstname", validators=[Required(), Length(min=2, max=20), Regexp(r"^[A-Za-z\s]+$")])
    Lastname = StringField("Lastname", validators=[Required(), Length(min=2, max=20), Regexp(r"^[A-Za-z\s]+$")])
    Gender = SelectField("Gender", choices=[("Male","Male"),("Female","Female"), ("O","Other")])
    Email = StringField("Email", validators=[Required(), Email(), Length(max=35)])
    Location = SelectField("Location", choices=[("Kings Jamaica","Kings JA"),("Linst JA", "Linst JA"),("Mont JA","Mont JA"),("SpnTwn JA","SpnTwn JA"),("Miami FL","Miami FL"),("Queens NY","Queens NY")])
    Biography = TextAreaField("Biography", validators=[Required(), Length(max=1000)])
    Photo = FileField("Photo", validators=[FileRequired(), FileAllowed(['jpg', 'png', 'jpeg'], "Please only upload image files only!!!")])
| ca16e82ffe879d49923fd5b27bc8592267347c0c | [
"Python",
"Text"
] | 6 | Python | NaldoRonz/info3180-Project1 | bacae43d8e7032b784d07cfcfea9527b9840b312 | 1e889f504b6fc308b78af78c1c065cb4f4ca1fc8 |
refs/heads/master | <file_sep>package com.kgc.controller;
import com.kgc.pojo.Studentinfo;
import com.kgc.service.StudentinfoService;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.RequestMapping;
import javax.annotation.Resource;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import java.util.List;
// Spring MVC controller for the student list / edit pages.
@Controller
public class IndexController {

    @Resource
    StudentinfoService studentinfoService;

    // Landing page: load every student for rendering by the index view.
    @RequestMapping("/")
    public String index(Model model){
        List<Studentinfo> list=studentinfoService.selectAll();
        model.addAttribute("list",list);
        return "index";
    }

    // Load a single student into the edit form (upd view).
    @RequestMapping("/cha")
    public String cha(Model model,Integer sid){
        Studentinfo Studentinfo = studentinfoService.selectById(sid);
        model.addAttribute("cha",Studentinfo);
        return "/upd";
    }

    // Apply the edit; on success return to the list, otherwise to the form.
    @RequestMapping("/upd")
    public String dotinajia(Studentinfo studentinfo, HttpServletRequest request){
        int upd=studentinfoService.upd(studentinfo);
        if(upd>0){
            // NOTE(review): this attribute is set immediately before a
            // redirect, so it is discarded with the current request --
            // flash/RedirectAttributes were probably intended; confirm.
            request.setAttribute("upd1","更新成功");
            return "redirect:/";
        }else{
            // NOTE(review): "/cha" expects a ``sid`` request parameter that
            // this redirect does not carry -- verify the failure path.
            return "redirect:/cha";
        }
    }
}
<file_sep>package com.kgc.service;
import com.kgc.pojo.Person;
import java.util.List;
// Service contract for Person CRUD operations (implementation elsewhere).
public interface PersonService {
    // All persons, unfiltered.
    List<Person> selectAll();
    // Insert a new person record.
    void inser(Person person);
    // Delete the person with the given primary key.
    void del(int id);
}
<file_sep>package com.kgc.service;
import com.kgc.pojo.Studentinfo;
import java.util.List;
// Service contract for student queries and updates (implementation elsewhere).
public interface StudentinfoService {
    // All students, unfiltered.
    List<Studentinfo> selectAll();
    // Single student by primary key.
    Studentinfo selectById(Integer sid);
    // Update a student; returns the number of affected rows.
    int upd(Studentinfo studentinfo);
}
| 548f9f1e761bb23cfed559ac9cf7dbdf6293b633 | [
"Java"
] | 3 | Java | shangfuchenggithup/10- | a45305a2dbe2d577f3c3281a1ea1b27abfc181b2 | c7e931ced4d5d477f8608af6da9a6e19288fb6eb |
refs/heads/master | <repo_name>koshevoytimur/Grampus<file_sep>/Grampus/PieChartViewController.swift
//
// ViewController.swift
// Pie Chart View
//
//
import UIKit
/// Demo screen that stacks two pie charts vertically: a labelled chart of
/// per-category tallies on top, and a simple unlabelled chart below.
class PieChartViewController : UIViewController {

    let pieChartView = PieChartView()
    let simplePieChartView = SimplePieChartView()

    // Tallies for each personality category; used as segment values below.
    let bestLooker = 1
    let superWorker = 2
    let extrovert = 3
    let untidy = 4
    let deadLiner = 5
    let introvert = 6

    override func viewDidLoad() {
        super.viewDidLoad()

        // Split the view into two equal-height chart bands, with `padding`
        // points above, between and below them.
        let padding: CGFloat = 20
        let height = (view.frame.height - padding * 3) / 2

        pieChartView.frame = CGRect(
            x: 0, y: padding, width: view.frame.size.width, height: height
        )

        // Segment names are intentionally empty strings; the colors alone
        // distinguish the categories (see the trailing comments).
        pieChartView.segments = [
            LabelledSegment(color: #colorLiteral(red: 1.0, green: 0.121568627, blue: 0.28627451, alpha: 1.0), name: "", value: CGFloat(bestLooker)), //Red -
            LabelledSegment(color: #colorLiteral(red: 1, green: 0.541176471, blue: 0, alpha: 1), name: "", value: CGFloat(superWorker)), //Orange
            LabelledSegment(color: #colorLiteral(red: 0.5791940689, green: 0.1280144453, blue: 0.5726861358, alpha: 1), name: "", value: CGFloat(extrovert)), //Purple
            LabelledSegment(color: #colorLiteral(red: 0.0, green: 0.870588235, blue: 1.0, alpha: 1.0), name: "", value: CGFloat(untidy)), //Light Blue
            LabelledSegment(color: #colorLiteral(red: 0.9994240403, green: 0.9855536819, blue: 0, alpha: 1), name: "", value: CGFloat(deadLiner)), //Yellow
            LabelledSegment(color: #colorLiteral(red: 0.0, green: 0.392156863, blue: 1.0, alpha: 1.0), name: "", value: CGFloat(introvert)) //Blue
        ]

        pieChartView.segmentLabelFont = .systemFont(ofSize: 10)
        view.addSubview(pieChartView)

        simplePieChartView.frame = CGRect(
            x: 0, y: height + padding * 2,
            width: view.frame.size.width, height: height
        )

        // Fixed demo data for the unlabelled chart.
        simplePieChartView.segments = [
            Segment(color: .red, value: 57),
            Segment(color: .blue, value: 30),
            Segment(color: .green, value: 25),
            Segment(color: .yellow, value: 40)
        ]
        view.addSubview(simplePieChartView)
    }
}
| e18b5025a2b4adca09b780cdba0f0c2e6a7dbaad | [
"Swift"
] | 1 | Swift | koshevoytimur/Grampus | c62ddf5de4a80f81013fdab35a618dc2ff25f5b8 | 4eb36d865f2e788b4cb2c01f96087fc7c9b476a6 |
refs/heads/master | <file_sep>first_number=3 # Add your variables here
second_number=-5

# Derived values checked by the lab's spec file; the variable names
# themselves are the interface the tests read, so do not rename them.
sum=first_number + second_number
difference=first_number - second_number
product=first_number * second_number
quotient = first_number / second_number
| ae86e428d311b234eada59d795484a5b08d854bd | [
"Ruby"
] | 1 | Ruby | myildiz17/reading-errors-and-debugging-how-tests-work-chi01-seng-ft-062220 | 3d82ab8cca486869d288ed5c895a2c33c357710c | 33fa0183b2ef59fbb703eb4784465c1d3c58087a |
refs/heads/master | <file_sep>from setuptools import setup
# Minimal setuptools packaging metadata for the cloudcarver package;
# install with ``pip install .``.
setup(name="CloudCarver",
      version="0.01",
      description="Stuff",
      author="<NAME>",
      packages=['cloudcarver']
      )
<file_sep>from cloudcarver.errors import HandlerError
__author__ = 'jdenning'
import logging
log = logging.getLogger()
try:
    import salt.client

    # Resolve the minion-side event functions once at import time; they are
    # used to relay CloudFormation requests onto the salt event bus.
    _caller = salt.client.Caller()
    fire_local = _caller.sminion.functions['event.fire']
    fire_master = _caller.sminion.functions['event.fire_master']
except Exception:
    # Was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; any real failure still surfaces as HandlerError.
    log.error("Unable to import salt.client!")
    raise HandlerError("Unable to import salt libraries!")
class SaltHandler(object):
    """Custom-resource handler that relays CloudFormation requests onto the
    salt event bus (``event.fire_master``) for minions to act on."""

    def __init__(self, request):
        fire_master({"data": "Initialized SaltHandler"}, "MyCustomResource")
        self.request = request
        self.request.physical_resource_id = "Foo1234"

    def generate_physical_id(self):
        """Return a new unique physical resource id.

        ``set_physical_id`` called this method, but it was never defined on
        this class, so that branch raised AttributeError at runtime.
        """
        import uuid
        return "cloudcarver-%s" % uuid.uuid4().hex

    def set_physical_id(self, id=None):
        """Ensure the request carries a physical resource id."""
        if self.request.physical_resource_id:
            # Keep the physical_resource_id already on the request.
            pass
        elif id:
            # Use the id passed into this function.
            self.request.physical_resource_id = id
        else:
            # Generate a new id.
            self.request.physical_resource_id = self.generate_physical_id()

    def relay_request(self, req_type):
        """Fire the request onto the master event bus.

        Two bugs fixed: the tag format string had only two placeholders for
        three values (TypeError at runtime), and a second debug definition
        of this method later in the class silently shadowed this one, so
        the event was never actually fired.
        """
        tag = "sns/%s/%s/%s" % (self.request.resource_type,
                                self.request.request_id, req_type)
        log.debug("Relaying %s request: %s", req_type, self.request)
        fire_master(self.request.to_dict(), tag)

    def wait_for_response(self, maxwait=30):
        # TODO: poll the event bus for the minion's answer (not implemented).
        pass

    def create(self):
        self.relay_request("CREATE")

    def update(self):
        self.relay_request("UPDATE")

    def delete(self):
        self.relay_request("DELETE")
if __name__ == "__main__":
    # Smoke-test entry point: watch the test queue and route MyResource
    # requests to SaltHandler.
    print("Starting the loop!")
    import sys
    from cloudcarver.controller import watch_sqs_queue

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    logfile_path = '/tmp/cloudcarver.log'
    logfile_handler = logging.FileHandler(logfile_path)
    logfile_handler.setLevel(logging.DEBUG)
    logfile_handler.setFormatter(formatter)

    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.DEBUG)
    stdout_handler.setFormatter(formatter)

    # Bug fix: the file handler was created but never attached, and the
    # logger level was left at the default (WARNING), so DEBUG records
    # were silently dropped.
    log.setLevel(logging.DEBUG)
    log.addHandler(logfile_handler)
    log.addHandler(stdout_handler)

    config = {
        'sqs' : {
            'num_messages': 1,
            'visibility_timeout' : 5,
            'wait_time' : 5,
            'sleep_time' : 10,
        },
        'routes' : {
            'MyResource' : SaltHandler,
        }
    }
    print(config)

    queue_name = "test-custom-resource-queue"
    log.debug("Beginning watch_sqs_queue loop")
    watch_sqs_queue(queue_name, config)
<file_sep>import json
import requests
__author__ = 'jdenning'
class RequestMessage(object):
    """A CloudFormation custom-resource request.

    Instances are normally built from the JSON document AWS delivers via
    SNS/SQS (see :meth:`from_AWS_json`); CamelCase wire keys are mapped to
    pythonic attribute names.
    """

    #: AWS wire-format key -> pythonic attribute name.
    _AWS_KEYS = {
        'RequestType': 'request_type',
        'StackId': 'stack_id',
        'RequestId': 'request_id',
        'LogicalResourceId': 'logical_resource_id',
        'ResourceType': 'resource_type',
        'ResponseURL': 'response_url',
        'PhysicalResourceId': 'physical_resource_id',
        'ResourceProperties': 'properties',
        'OldResourceProperties': 'old_properties',
    }

    def __init__(self, **kwargs):
        # Accept any keyword and expose it as an attribute (unchanged
        # behaviour; normalization happens in from_AWS_format).
        for k, v in kwargs.items():
            setattr(self, k, v)

    def to_dict(self):
        """Render the request back into AWS's CamelCase dict format.

        Attributes that were never supplied (e.g. PhysicalResourceId and
        OldResourceProperties on a Create request) are emitted as None;
        previously this raised AttributeError.
        """
        return dict(
            (aws_key, getattr(self, attr, None))
            for aws_key, attr in self._AWS_KEYS.items()
        )

    @classmethod
    def from_AWS_format(cls, **kwargs):
        """Build a RequestMessage from CamelCase and/or pythonic kwargs.

        Unknown keys are ignored; debug ``print()`` calls were removed.
        """
        parsed_kw_args = {}
        attr_names = set(cls._AWS_KEYS.values())
        for key, value in kwargs.items():
            if key in attr_names:
                parsed_kw_args[key] = value
            elif key in cls._AWS_KEYS:
                # Non-pythonic AWS key was used; translate it.
                parsed_kw_args[cls._AWS_KEYS[key]] = value
        return cls(**parsed_kw_args)

    @classmethod
    def from_AWS_json(cls, json_str):
        """
        Alternate constructor for use with JSON data
        :param json_str: String of JSON data
        :return: RequestMessage instance
        """
        params = json.loads(json_str)
        return cls.from_AWS_format(**params)

    def __str__(self):
        return "RequestMessage - %s" % self.request_id
class ResponseMessage(object):
    """The response document that is uploaded to a CloudFormation custom
    resource's pre-signed S3 ``ResponseURL``."""

    @property
    def status(self):
        """"FAILED" once an error reason is recorded, otherwise "SUCCESS"."""
        return "FAILED" if self.error else "SUCCESS"

    def __init__(self, request):
        self.stack_id = request.stack_id
        self.request_id = request.request_id
        self.logical_resource_id = request.logical_resource_id
        self.error = None
        self.data = None
        # Reuse the request's id when present (Update/Delete); mint a fresh
        # one for Create requests, which carry none.
        existing = getattr(request, 'physical_resource_id', None)
        self.physical_resource_id = existing or self.generate_physical_id()

    def generate_physical_id(self):
        """Return a new, unique physical resource id.

        Previously returned the fixed placeholder "FOO54321", which would
        collide across resources; a uuid4 suffix guarantees uniqueness.
        """
        import uuid
        return "cloudcarver-%s" % uuid.uuid4().hex

    def to_dict(self):
        """Assemble the response in AWS's expected key format."""
        response_dict = {
            'Status': self.status,
            'StackId': self.stack_id,
            'RequestId': self.request_id,
            'LogicalResourceId': self.logical_resource_id,
            'PhysicalResourceId': self.physical_resource_id,
        }
        # 'Reason' is only meaningful when the response failed.
        if self.error:
            response_dict['Reason'] = self.error
        # Optional outputs, readable in templates via Fn::GetAtt.
        if self.data:
            response_dict['Data'] = self.data
        return response_dict

    def to_json(self):
        return json.dumps(self.to_dict())
<file_sep>__author__ = 'jdenning'
import logging
log = logging.getLogger()
class PrintHandler(object):
    """Debug handler: echoes each CloudFormation request to stdout instead
    of provisioning anything; useful for smoke-testing the dispatch loop."""

    def __init__(self, request):
        self.request = request
        # Stub physical id so responses can always be built.
        self.request.physical_resource_id = "Foo1234"

    def print_handler(self, req_type):
        """Announce the request type, then dump the request itself."""
        for line in ("Got %s request!" % req_type,
                     "Request: %s" % self.request):
            print(line)

    def create(self):
        self.print_handler("CREATE")

    def update(self):
        self.print_handler("UPDATE")

    def delete(self):
        self.print_handler("DELETE")

    def Delete(self):
        # Legacy mis-cased entry point; kept for compatibility.
        log.debug("Called the wrong handler!")
        self.delete()
if __name__ == "__main__":
    # Smoke-test entry point: watch the test queue and route MyResource
    # requests to the stdout-only PrintHandler.
    print("Starting the loop!")
    import sys
    from cloudcarver.controller import watch_sqs_queue

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    logfile_path = '/tmp/cloudcarver.log'
    logfile_handler = logging.FileHandler(logfile_path)
    logfile_handler.setLevel(logging.DEBUG)
    logfile_handler.setFormatter(formatter)

    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.DEBUG)
    stdout_handler.setFormatter(formatter)

    # Bug fix: the file handler was created but never attached, and the
    # logger level was left at the default (WARNING), so DEBUG records
    # were silently dropped.
    log.setLevel(logging.DEBUG)
    log.addHandler(logfile_handler)
    log.addHandler(stdout_handler)

    config = {
        'sqs' : {
            'num_messages': 1,
            'visibility_timeout' : 5,
            'wait_time' : 5,
            'sleep_time' : 10,
        },
        'routes' : {
            'MyResource' : PrintHandler,
        }
    }
    print(config)

    queue_name = "test-custom-resource-queue"
    log.debug("Beginning watch_sqs_queue loop")
    watch_sqs_queue(queue_name, config)
<file_sep>__author__ = 'jdenning'
<file_sep>============
CloudCarver
============
Framework for developing custom resource providers for AWS CloudFormation.
<file_sep>from unittest import TestCase
from cloudcarver.message import RequestMessage
from cloudcarver.tests import valid_request_kw, valid_request_json
__author__ = 'jdenning'
class TestRequestMessage(TestCase):
    """Unit tests for RequestMessage keyword construction and JSON parsing.

    Bare ``assert`` statements were replaced with unittest's assert*
    methods, which report the offending values on failure.
    """

    def setUp(self):
        # Pythonic kwargs mirroring the shared AWS-format fixture.
        self._valid_kw = {
            "request_type" : valid_request_kw['request_type'],
            "stack_id" : valid_request_kw['stack_id'],
            "request_id" : valid_request_kw['request_id'],
            "logical_resource_id" : valid_request_kw['logical_resource_id'],
            "physical_resource_id" : valid_request_kw['physical_resource_id'],
            "resource_type" : valid_request_kw['resource_type'],
            "response_url" : valid_request_kw['response_url'],
            "properties" : valid_request_kw['properties'],
            "old_properties" : valid_request_kw['old_properties'],
        }
        self._valid_json = valid_request_json

    def _assert_round_trip(self, name):
        """Shared check: kwarg ``name`` is exposed as an attribute."""
        req = RequestMessage(**self._valid_kw)
        self.assertEqual(getattr(req, name), self._valid_kw[name])

    def test_from_AWS_json_returns_RequestMessage_object(self):
        req = RequestMessage.from_AWS_json(self._valid_json)
        self.assertIsInstance(req, RequestMessage)

    def test_init_kwarg(self):
        """
        Test that all params in self_valid_kw are accepted by __init__
        """
        RequestMessage(**self._valid_kw)

    def test_has_stack_id(self):
        self._assert_round_trip("stack_id")

    def test_has_request_id(self):
        self._assert_round_trip("request_id")

    def test_has_logical_resource_id(self):
        self._assert_round_trip("logical_resource_id")

    def test_has_physical_resource_id(self):
        self._assert_round_trip("physical_resource_id")

    def test_has_resource_type(self):
        self._assert_round_trip("resource_type")

    def test_has_response_url(self):
        self._assert_round_trip("response_url")

    def test_has_request_type(self):
        self._assert_round_trip("request_type")

    def test_has_properties(self):
        self._assert_round_trip("properties")

    def test_has_old_properties(self):
        self._assert_round_trip("old_properties")
<file_sep>__author__ = 'jdenning'
class HandlerError(Exception):
    """Raised when a request cannot be routed to, or serviced by, a handler."""
    pass
class AWSError(Exception):
    """Raised when communication with AWS (e.g. the S3 response PUT) fails."""
    pass
<file_sep>from unittest import TestCase, skip
from cloudcarver.tests import MockRequestMessage
from cloudcarver.message import ResponseMessage
import json
__author__ = 'jdenning'
class TestResponseMessage(TestCase):
    """Unit tests for ResponseMessage: response-dict shape, status logic
    and JSON serialization.

    Bare ``assert`` statements were replaced with unittest's assert*
    methods for informative failure messages.
    """

    def setUp(self):
        self.request = MockRequestMessage()

    def _response_dict(self):
        """Build a fresh response from the mock request, as a dict."""
        return ResponseMessage(self.request).to_dict()

    def _response_json_dict(self):
        """Build a fresh response and round-trip it through JSON."""
        return json.loads(ResponseMessage(self.request).to_json())

    def test_to_dict_has_key_Status(self):
        self.assertIn('Status', self._response_dict())

    def test_to_dict_has_key_StackId(self):
        self.assertIn('StackId', self._response_dict())

    def test_to_dict_has_key_RequestId(self):
        self.assertIn('RequestId', self._response_dict())

    def test_to_dict_has_key_LogicalResourceId(self):
        self.assertIn('LogicalResourceId', self._response_dict())

    def test_to_dict_has_key_PhysicalResourceId(self):
        self.assertIn('PhysicalResourceId', self._response_dict())

    def test_to_dict_StackId_same_as_request(self):
        self.assertEqual(self._response_dict()['StackId'],
                         self.request.stack_id)

    def test_to_dict_RequestId_same_as_request(self):
        self.assertEqual(self._response_dict()['RequestId'],
                         self.request.request_id)

    def test_to_dict_LogicalResourceId_same_as_request(self):
        self.assertEqual(self._response_dict()['LogicalResourceId'],
                         self.request.logical_resource_id)

    def test_default_status_is_SUCCESS(self):
        self.assertEqual(ResponseMessage(self.request).status, "SUCCESS")

    def test_status_is_FAILED_if_error(self):
        resp = ResponseMessage(self.request)
        resp.error = True
        self.assertEqual(resp.status, "FAILED")

    def test_to_json_StackId_same_as_request(self):
        self.assertEqual(self._response_json_dict()['StackId'],
                         self.request.stack_id)

    def test_to_json_RequestId_same_as_request(self):
        self.assertEqual(self._response_json_dict()['RequestId'],
                         self.request.request_id)

    def test_to_json_LogicalResourceId_same_as_request(self):
        self.assertEqual(self._response_json_dict()['LogicalResourceId'],
                         self.request.logical_resource_id)
<file_sep>__author__ = 'jdenning'
valid_request_kw = {
"request_type" : "Update",
"stack_id" : "arn:aws:cloudformation:us-east-1:287482246495:stack/test-custom03/92cb9a50-56d0-11e3-b2de-500150b34cb4",
"request_id" : "de8d6e36-8b45-46a0-a47e-bb7e6a9640ec",
"logical_resource_id" : "MyCustomResourceTest",
"physical_resource_id" : "test-custom03-MyCustomResourceTest-Y39E4LV9QT44",
"resource_type" : "Custom::MyResource",
"response_url" : "https://cloudformation-custom-resource-response-useast1.s3.amazonaws.com/arn:aws:cloudformation:us-east-1:287482246495:stack/test-custom03/92cb9a50-56d0-11e3-b2de-500150b34cb4|MyCustomResourceTest|de8d6e36-8b45-46a0-a47e-bb7e6a9640ec?Expires=1387250657&AWSAccessKeyId=<KEY>&Signature=m87YeA+eAttIwtS+dBtkJkiT8wM=",
"properties" : {"ServiceToken":"arn:aws:sns:us-east-1:287482246495:test-customresource"},
"old_properties" : {}
}
valid_request_json = '{"request_type": "Update", "physical_resource_id": "test-custom03-MyCustomResourceTest-Y39E4LV9QT44", "request_id": "de8d6e36-8b45-46a0-a47e-bb7e6a9640ec", "stack_id": "arn:aws:cloudformation:us-east-1:287482246495:stack/test-custom03/92cb9a50-56d0-11e3-b2de-500150b34cb4", "logical_resource_id": "MyCustomResourceTest", "old_properties": {}, "response_url": "https://cloudformation-custom-resource-response-useast1.s3.amazonaws.com/arn:aws:cloudformation:us-east-1:287482246495:stack/test-custom03/92cb9a50-56d0-11e3-b2de-500150b34cb4|MyCustomResourceTest|de8d6e36-8b45-46a0-a47e-bb7e6a9640ec?Expires=1387250657&AWSAccessKeyId=<KEY>Signature=<KEY> "resource_type": "Custom::MyResource", "properties": {"ServiceToken": "arn:aws:sns:us-east-1:287482246495:test-customresource"}}'
class MockRequestMessage(object):
    """Stand-in for RequestMessage, pre-populated from the canonical
    ``valid_request_kw`` fixture defined above."""

    _FIELDS = (
        "request_type", "stack_id", "request_id", "logical_resource_id",
        "physical_resource_id", "resource_type", "response_url",
        "properties", "old_properties",
    )

    def __init__(self):
        # Copy each fixture field onto the instance verbatim.
        for field in self._FIELDS:
            setattr(self, field, valid_request_kw[field])
"""
valid_kw_args = {
'RequestType' : 'request_type',
'StackId' : 'stack_id',
'RequestId' : 'request_id',
'LogicalResourceId' : 'logical_resource_id',
'ResourceType' : 'resource_type',
'ResponseURL' : 'response_url',
'PhysicalResourceId' : 'physical_resource_id',
'ResourceProperties' : 'resource_properties',
'OldResourceProperties' : 'old_resource_properties',
}
for key, value in kwargs.items():
if key in valid_kw_args.values():
setattr(self, key, value)
elif key in valid_kw_args.keys():
# Non-pythonic AWS key was used, translate to pythonic version
setattr(self, valid_kw_args[key], value)
"""<file_sep>import requests
import boto
from cloudcarver.errors import HandlerError, AWSError
from cloudcarver.message import ResponseMessage, RequestMessage
import time
import logging
__author__ = 'jdenning'
log = logging.getLogger("cloudcarver")
def get_message_from_sqs(sqs_queue, num_messages=1, visibility_timeout=5,
                         wait_time_seconds=5):
    """Fetch one message from *sqs_queue* and parse it into a RequestMessage.

    The polling knobs were previously hard-coded even though the docstring
    advertised them; they are now keyword parameters with the same
    defaults, so existing callers are unaffected.

    :param sqs_queue: boto SQS queue to poll
    :param num_messages: max messages to fetch (only the first is used)
    :param visibility_timeout: seconds the message stays hidden from others
    :param wait_time_seconds: long-poll duration in seconds
    :return: RequestMessage, or None when the queue is empty
    """
    result_set = sqs_queue.get_messages(num_messages=num_messages,
                                        visibility_timeout=visibility_timeout,
                                        wait_time_seconds=wait_time_seconds)
    if not result_set:
        return None
    sqs_msg = result_set[0]
    req_msg = parse_sqs_message(sqs_msg)
    # Delete only after a successful parse so malformed messages are not
    # silently lost before they can be inspected.
    sqs_msg.delete()
    return req_msg
def parse_sqs_message(message):
    """Decode the JSON body of a boto SQS *message* into a RequestMessage.

    Bug fix: the body previously referenced the undefined global
    ``sqs_msg`` instead of the ``message`` parameter, raising NameError
    whenever this function was called.
    """
    return RequestMessage.from_AWS_json(message.get_body())
def send_response(request_msg, data=None, error=None):
    """Upload the response document to the request's pre-signed S3 URL.

    :param request_msg: the originating RequestMessage
    :param data: optional output data exposed to the stack (Fn::GetAtt)
    :param error: optional failure reason; its presence makes Status FAILED
    :raises AWSError: if the PUT to S3 does not succeed
    """
    response = ResponseMessage(request_msg)
    if data:
        response.data = data
    if error:
        response.error = error
    try:
        # PUT the response file to the S3 pre-signed URL.  Content-Type is
        # deliberately empty: it was part of the signature AWS generated.
        requests.put(url=request_msg.response_url,
                     data=response.to_json(),
                     headers={"Content-Type": ""},
                     verify=True
                     ).raise_for_status()
        log.debug(response.to_json())
        log.info("Successfully send response %s for RequestID:%s" % (request_msg.response_url, request_msg.request_id))
    except Exception as e:
        # ``except Exception, e`` was Python-2-only syntax; ``as e`` works
        # on 2.6+ and 3.x alike.  The stray debug print() was removed --
        # log.exception already records the traceback.
        log.exception("Failed sending response %s for RequestID:%s" % (request_msg.response_url, request_msg.request_id))
        log.exception("Error: %s" % e.message)
        raise AWSError("Unable to send response!")
def handler_not_found(resource_type):
    """Log and raise a HandlerError for an unroutable resource type.

    :param resource_type: the resource type that has no configured handler
    :raises HandlerError: always
    """
    message = "Handler not found for resource type '%s'" % resource_type
    log.error(message)
    raise HandlerError(message)
def _get_handler_instance(request, routes):
    """
    Figure out which handler to use for the message and instantiate it.

    The optional leading "Custom" component of the resource type (as used
    by CloudFormation custom resources) is stripped before the lookup.

    :param request: RequestMessage to process
    :type request: RequestMessage
    :param routes: dict mapping resource type -> handler class
    :type routes: dict
    :return: handler instance constructed with `request`
    :raises HandlerError: if no handler class is registered
    """
    resource_type_arr = request.resource_type.split("::")
    # If the first component of the resource_type is 'Custom', delete it
    if resource_type_arr[0].lower() == "custom":
        del resource_type_arr[0]
    resource_type = "::".join(resource_type_arr)
    # Fix: these were bare print() debug statements; route them through the
    # module logger instead of leaking to stdout.
    log.debug("Looking up handler class for resource type `%s`"% resource_type)
    log.debug("Routes: %s"% routes)
    handler_cls = routes.get(resource_type, None)
    log.debug("Looking up handler class `%s`"% handler_cls)
    if not handler_cls:
        # handler_not_found() always raises; the explicit raise below is a
        # defensive fallback and is normally unreachable.
        handler_not_found(resource_type)
        raise HandlerError("Can't find handler class for %s!"% resource_type)
    else:
        # Handler class found, initialize it
        handler = handler_cls(request)
        return(handler)
def get_handler(request, routes):
    """
    Resolve the bound handler method for `request`.

    The handler instance is looked up via the routing table, then the
    method matching the lower-cased request type (e.g. "Create" ->
    handler.create) is returned.

    :param request: RequestMessage to process
    :param routes: dict mapping resource type -> handler class
    :return: bound method of the handler instance
    :raises HandlerError: if the handler lacks a matching method
    """
    handler = _get_handler_instance(request, routes)
    log.debug("Got handler instance")
    method_name = request.request_type.lower()
    log.debug("Getting method `%s`"% method_name)
    handler_method = getattr(handler, method_name, None)
    if handler_method is None:
        # Fix: the original message used a malformed format spec ("%" with no
        # conversion) and interpolated the wrong values — instances have no
        # __name__, and handler_method is always None on this branch.
        error = "Handler class `%s` does not have a method `%s`"% (type(handler).__name__, method_name)
        # Fix: log.error instead of log.exception — there is no active
        # exception here, so log.exception would log a bogus traceback.
        log.error(error)
        raise HandlerError(error)
    else:
        return(handler_method)
def error_handling_sqs_request(request, exception=None, error_msg=None):
    """
    Log a failed request and push a FAILED response back to the requester.

    :param request: RequestMessage that could not be handled
    :param exception: optional exception that caused the failure
    :param error_msg: optional human-readable error to report instead of
        the generic failure text
    """
    log.debug("Error handling request!")
    log.debug("Request: %s"% request)
    failure_text = "Unable to handle request!"
    if exception:
        log.error("Caught exception handling message!")
        log.exception(exception.message)
    if error_msg:
        # A caller-supplied message overrides the generic failure text.
        log.error(error_msg)
        failure_text = error_msg
    log.info("Sending FAILED response")
    send_response(request, error=failure_text)
def handle_sqs_request(request, config):
    """
    Parse a raw SQS message, dispatch it to its handler, and send the
    response (success or failure) back via the pre-signed response URL.

    :param request: boto SQS message whose body is AWS-format JSON
    :param config: dict with a 'routes' mapping of resource type -> handler
    :raises AWSError: re-raised so the caller can decide whether to retry
    """
    req_msg = RequestMessage.from_AWS_json(request.get_body())
    try:
        handler = get_handler(req_msg, config['routes'])
        data = handler()
        # Handlers may stash a non-fatal error on themselves instead of raising.
        error = getattr(handler, 'error', None)
        send_response(req_msg, data=data, error=error)
    except HandlerError, e:
        # Routing/dispatch problem: report the failure back to the requester.
        error_handling_sqs_request(req_msg, exception=e)
    except AWSError, e:
        # AWS Connection error or other unknown error connecting to SQS
        log.error("Unable to process Request!")
        log.exception(e.message)
        raise e
    except Exception, e:
        # NOTE(review): this print looks like leftover debug output — confirm
        # whether it should be removed in favour of the logging below.
        print(e)
        error_handling_sqs_request(req_msg, exception=e)
def get_queue(name):
try:
log.debug("Connecting to SQS")
conn = boto.connect_sqs()
log.debug("Getting queue - `%s`"% name)
queue = conn.get_queue(name)
return(queue)
except Exception, e:
raise AWSError("Unable to connect to SQS queue `%s` - %s"% e.message)
def process_request_message(sqs_message, config):
    """
    Handle one SQS request message, then delete it from the queue.

    The delete only runs if handle_sqs_request returns normally; when it
    re-raises (e.g. AWSError), the message stays on the queue and becomes
    visible again for a retry after the visibility timeout.

    :param sqs_message: raw message to process
    :type sqs_message: boto.sqs.Message
    :param config: dict with 'routes' and other settings consumed downstream
    """
    handle_sqs_request(sqs_message, config)
    sqs_message.delete()
def watch_sqs_queue(name, config):
    """
    Watch an SQS queue forever, dispatching each received message
    according to the configured routes.

    :param name: SQS queue name to poll
    :param config: dict; reads config['sqs'] keys 'num_messages',
        'visibility_timeout', 'wait_time' and 'sleep_time'; the 'routes'
        entry is consumed downstream by the message handlers
    """
    log.debug("Watching SQS queue named `%s`"% name)
    log.debug("Config: %s"% config)
    queue = get_queue(name)
    sqs_config = config['sqs']
    # Infinite polling loop: this function never returns.
    while 1:
        messages = queue.get_messages(num_messages=sqs_config['num_messages'],
                                      visibility_timeout=sqs_config['visibility_timeout'],
                                      wait_time_seconds=sqs_config['wait_time'])
        if messages:
            for m in messages:
                process_request_message(m, config)
        # Sleep between polls even when messages were just received.
        log.debug("Waiting for %s seconds"% sqs_config['sleep_time'])
        time.sleep(sqs_config['sleep_time'])
| d9d213f586d89bcf84a697d096dfed2d41e7e3b0 | [
"Python",
"reStructuredText"
] | 11 | Python | jasondenning/cloudcarver | 65c5b18a98e122de9d943ad591e0b286d602f611 | ebac2194b077d3a8a058c9c3331dafcc65c0cf81 |
refs/heads/master | <file_sep>export interface CongressMemberLeavingOffice {
congress: string;
chamber: string;
num_results: number;
offset: number;
members: LeavingOfficeMember[];
}
export interface LeavingOfficeMember {
id: string;
api_uri: string;
first_name: string;
middle_name: string;
last_name: string;
suffix: null;
party: string;
state: string;
district: number;
begin_date: string;
end_date: string;
status: string;
note: string;
}
<file_sep>import { CongressMemberDetail } from "./CongressMemberDetail";
import { CongressMemberLeavingOffice } from "./CongressMemberLeavingOffice";
import { CongressMembersResult } from "./CongressMembers";
export type Chamber = "senate" | "house";
export type CongressResponse = {
copyright: string;
results:
| CongressMembersResult[]
| CongressMemberDetail[]
| CongressMemberLeavingOffice[];
status: string;
};
<file_sep># USA Congress public api
<h3 align="center">
<img alt="Logo" title="#logo" width="450px" src="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTYKK-LkqyNtWLD0IcdAzEy-PwvjKJgOcjGaA&usqp=CAU">
<br><br>
<b>USA Congress Public API - React</b>
<br>
</h3>
<p align="center">
<a href="https://beyoung.com.br">
<img alt="Made with React" src="https://img.shields.io/badge/created%20with-React-blue">
</a>
<a>
<img alt="License" src="https://img.shields.io/github/license/vitorserrano/ecoleta?color=%237519C1">
<br><br>
</p>
## :triangular_flag_on_post: What is it?
Consume the public API to list USA congressman while implementing multiple filters and pagination consuming webservices (GET) and filtering data with multiple filters. It was developed using React Hooks. Since the API doesn't change too frequently it was decided to cache the data using localstorage for each filter - chamber and session - this way we avoid making multiple request
<h3 align="center"> :iphone: Demo on Mobile: </h3>
<p align="center">
<img src="https://media.giphy.com/media/pOAFvSXpkQcZ4ATbyj/giphy.gif" width="600px" align="center" alt="demo-mobile">
</p>
<h3 align="center"> :iphone: Demo on Mobile 2: </h3>
<p align="center">
<img src="https://media.giphy.com/media/g1VslggCAYQk9PKpVE/giphy.gif" width="600px" align="center" alt="demo-desktop">
</p>
## :fireworks: Developing Tools:
- React v.17.0.2 with React Hooks (Frontend)
- Lint and prettier
- Working with scss modules to avoid styles conflicts
- React Icons for icons
- React routes to manage routes and query-string
- VSCode (Editor)
- Postman to test get requests
- Firefox Devtools
## :rocket: Goal:
Consume the public API to list USA congressman while implementing multiple filters and pagination
## :feet: How to clone or run?
- Download the zip or clone using github
- In the main folder run in your command line ```yarn``` then ```yarn start```
## :metal: Contributions
Pull requests are always welcomed. For major alterations please think about openning an issue first.
Any improvments advices would be great, don't hesitate on contacting me :smile:
## License
- Project 100% solo - concluded in 3 days.
- MIT License - Copyright (c) 2021 william-takayama
<file_sep>export type TypographyType =
| "heading1"
| "heading2"
| "heading3"
| "heading4"
| "heading5"
| "heading6"
| "body-heading"
| "body";
<file_sep>export interface Events {
"form-finalize": {
id: string;
type: "request" | "response" | "error";
data: any;
fields?: string[];
};
"search-change": {
id: string | number;
value: string;
// values: FormValues;
};
}
<file_sep>declare module "*.scss" {
const classes: { [key: string]: string };
export default classes;
}
declare module "clsx" {
const cn: (
...args: (string | undefined | Record<string, boolean | undefined>)[]
) => string;
export default cn;
}
<file_sep>export interface CongressMemberDetail {
id: string;
member_id: string;
first_name: string;
middle_name: string;
last_name: string;
suffix: null;
date_of_birth: Date;
gender: string;
url: string;
times_topics_url: string;
times_tag: string;
govtrack_id: string;
cspan_id: string;
votesmart_id: string;
icpsr_id: string;
twitter_account: string;
facebook_account: string;
youtube_account: null;
crp_id: string;
google_entity_id: string;
rss_url: null;
in_office: boolean;
current_party: string;
most_recent_vote: Date;
last_updated: string;
roles: Role[];
}
export interface Role {
congress: string;
chamber: string;
title: string;
short_title: string;
state: string;
party: string;
leadership_role: null;
fec_candidate_id: string;
seniority: string;
district: string;
at_large: boolean;
ocd_id: string;
start_date: Date;
end_date: Date;
office: null | string;
phone: null | string;
fax: null;
contact_form: null;
cook_pvi: null | string;
dw_nominate: number;
ideal_point: null;
next_election: string;
total_votes: number;
missed_votes: number;
total_present: number;
senate_class: string;
state_rank: string;
lis_id: string;
bills_sponsored: number;
bills_cosponsored: number;
missed_votes_pct: number;
votes_with_party_pct: number;
votes_against_party_pct: number;
committees: Committee[];
subcommittees: Committee[];
}
export interface Committee {
name: string;
code: string;
api_uri: string;
side?: Side;
title?: Title;
rank_in_party: number;
begin_date: string;
end_date: Date;
parent_committee_id?: ParentCommitteeID;
}
export enum ParentCommitteeID {
Hsag = "HSAG",
Hsas = "HSAS",
Hssm = "HSSM",
}
export enum Side {
Majority = "majority",
Minority = "minority",
}
export enum Title {
Chair = "Chair",
Member = "Member",
RankingMember = "Ranking Member",
}
<file_sep>import { Chamber } from "./Congress";
export type Member = {
api_uri: string;
contact_form: string;
cook_pvi: null;
crp_id: string;
cspan_id: string;
date_of_birth: string;
dw_nominate: number;
facebook_account: string;
fax: string;
fec_candidate_id: string;
first_name: string;
gender: string;
google_entity_id: string;
govtrack_id: string;
icpsr_id: string;
id: string;
ideal_point: null;
in_office: false;
last_name: string;
last_updated: string;
leadership_role: null;
lis_id: string;
middle_name: null;
missed_votes: number;
missed_votes_pct: number;
next_election?: string;
ocd_id: string;
office: string;
party: string;
phone: string;
rss_url: string;
senate_class: string;
seniority: string;
short_title: string;
state: string;
state_rank: string;
suffix: null;
title: string;
total_present: number;
total_votes: number;
twitter_account: string;
url: string;
votes_against_party_pct: number;
votes_with_party_pct: number;
votesmart_id: string;
youtube_account: string;
};
export type CongressMembersResult = {
chamber: Chamber;
congress: string;
members: Member[];
num_results: number;
offset: number;
};
<file_sep>import React from 'react';
import ReactDOM from 'react-dom';
import App from './App';
// fetch is not supported in node, which is the context in which these jest tests are run
// this means that the external API calls won't work. That's OK though, we probably don't
// want to rely on real API calls for our tests, so we can use this to stub out a dummy response.
// keep in mind you will need to edit this mock response with the appropriate mock API data
// so that your components recieve the data they expect.
beforeEach(() => {
global.fetch = jest.fn().mockImplementation(() => {
return new Promise((resolve, reject) => {
resolve({
ok: true,
Id: '123',
json: () => {
return {
// YOUR MOCK RESPONSE HERE
}
}
});
});
});
});
it('renders without crashing', () => {
const div = document.createElement('div');
ReactDOM.render(<App />, div);
ReactDOM.unmountComponentAtNode(div);
});
<file_sep>import axios from "axios";
import { Chamber } from "../types/Congress";
import { CongressMemberDetail } from "../types/CongressMemberDetail";
import { CongressMemberLeavingOffice } from "../types/CongressMemberLeavingOffice";
import { CongressMembersResult } from "../types/CongressMembers";
const BASE_URL = "https://api.propublica.org/congress/v1";
type CongressService = {
getMembersBySessionAndChamber: (
session?: number,
chamber?: Chamber
) => Promise<CongressMembersResult["members"] | undefined>;
getMember: (id: string) => Promise<CongressMemberDetail | undefined>;
getMemberLeavingOffice: (
session?: number,
chamber?: Chamber
) => Promise<CongressMemberLeavingOffice["members"] | undefined>;
};
export const congressService: CongressService = {
async getMembersBySessionAndChamber(session = 115, chamber = "senate") {
try {
const { data } = await axios.get(
`${BASE_URL}/${session}/${chamber}/members.json`,
{
headers: {
"content-type": "application/json",
"X-API-Key": "<KEY>",
},
}
);
const { results }: { results: CongressMembersResult[] } = data;
return results[0]?.members;
} catch (e) {
console.error(e);
}
},
async getMember(id: string) {
try {
const { data } = await axios.get(`${BASE_URL}/members/${id}.json`, {
headers: {
"content-type": "application/json",
"X-API-Key": "<KEY>",
},
});
const { results }: { results: CongressMemberDetail[] } = data;
return results[0];
} catch (e) {
console.error(e);
}
},
async getMemberLeavingOffice(session = 115, chamber = "senate") {
try {
const { data } = await axios.get(
`${BASE_URL}/${session}/${chamber}/members/leaving.json`,
{
headers: {
"content-type": "application/json",
"X-API-Key": "<KEY>",
},
}
);
const { results }: { results: CongressMemberLeavingOffice[] } = data;
return results[0]?.members;
} catch (e) {
console.error(e);
}
},
};
| b208d612f3d776effe77a10a94b5c2d093bd8570 | [
"Markdown",
"TypeScript",
"JavaScript"
] | 10 | TypeScript | william-takayama/react-usa-congress | 6b1e89d46f502a84ca229a9a488b9f1689a2d25b | 9346bbd8e85261b9a474fd0e271a400d41b463f6 |
refs/heads/master | <repo_name>jyolo/atcmf<file_sep>/app/crm/validate/ServiceApplyValidate.php
<?php
namespace app\crm\validate;
use app\crm\model\Enterprises;
use app\crm\model\ServiceGoods;
use think\Validate;
class ServiceApplyValidate extends Validate
{
    // Validation rules for service-package purchase applications.
    protected $rule = [
        'company_id' => 'require|number|isExistCompany',
        'goods_id' => 'require|number|isExistGoods',
        'apply_realname' => 'require|max:10',
        'apply_mobile' => 'require|mobile',
        'apply_email' => 'require|email',
        // Fix: the custom-rule name carried the typo "checkRealPayMongy";
        // renamed consistently (rule string + method) to checkRealPayMoney.
        'real_pay_money' => 'checkRealPayMoney',
        'status' => 'number|max:1'
    ];
    protected $message = [
        'company_id.require' => '企业id必填',
        'company_id.number' => '企业id必须是数字',
        'goods_id.require' => '产品套餐id必填',
        'goods_id.number' => '产品套餐id必须是数字',
        'apply_realname.require' => '申请人姓名必填',
        'apply_realname.max' => '真实姓名最多10个字符',
        'apply_mobile.require' => '申请人手机号必填',
        'apply_mobile.mobile' => '申请人手机号不合法',
        'apply_email.require' => '申请人邮箱必填',
        'apply_email.email' => '申请人邮箱不合法',
        'status.number' => '状态必须是数字',
        'status.max' => '状态最多1个字符',
    ];
    /**
     * Custom rule: the real payment amount must not be negative.
     * Returns true on success or an error string (ThinkPHP convention).
     */
    protected function checkRealPayMoney($value ,$rule ,$data){
        if(floatval($value) < 0.00) return '真实付款金额不能为负数';
        return true;
    }
    /**
     * Custom rule: the company must exist, be audited and enabled.
     */
    protected function isExistCompany($value ,$rule ,$data){
        $company = Enterprises::where('id',$value)
            ->where('audited_time','>',0)
            ->where('record_status', '=', 2)
            ->where('status', '=', 1)
            ->find();
        if (!$company) return '企业不存在或者未通过审核';
        return true;
    }
    /**
     * Custom rule: the goods item must exist and be on sale (is_up = 1).
     */
    protected function isExistGoods($value ,$rule ,$data){
        $goods = ServiceGoods::where('id',intval($value))
            ->where('is_up',1)
            ->find();
        if (!$goods) return '商品不存在或者未上架';
        return true;
    }
}<file_sep>/app/crm/controller/Manager.php
<?php
namespace app\crm\controller;
use app\crm\controller\Base;
use think\facade\Cache;
use app\crm\model\Manager as ManagerModel;
use app\crm\model\ManagerRoleMap as ManagerRoleMapModel;
use app\crm\validate\ManagerRoleMapValidate;
/**
* @des 企业公告
* @package app\crm\controller
*/
class Manager extends Base
{
    /**
     * @OA\Get(path="/v1/manager/list",
     *     tags={"权限管理"},
     *     summary="获取管理员列表 [管理员管理]",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="pageSize",in="query",description="分页数量 默认10",required=false,@OA\Schema(type="integer")),
     *     @OA\Parameter(name="page",in="query",description="分页页码",required=false,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function lists(){
        // Paged list of enabled managers together with their bound role info.
        $pageSize = (int) input('get.pageSize',10) ;
        $page = (int) input('get.page',0) ;
        $total = ManagerModel::where('status', '=', 1)->count();
        $list = ManagerModel::where('status', '=', 1)
            ->with(['roleInfo'=>function($query){
                $query->field('id,role_name,status');
            }])
            ->field('id,name,nickname,real_name,tel,email')->page($page,$pageSize)->select();
        $return['list'] = $list;
        $return['total'] = $total;
        return (count($list)) ? $this->jsonSuccess($return) : $this->jsonError('暂无数据');
    }
    /**
     * @OA\Post(path="/v1/manager/bindRole",
     *     tags={"权限管理"},
     *     summary="管理员绑定角色 ",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="multipart/form-data",
     *             @OA\Schema(
     *                 @OA\Property(description="管理员id", property="manager_id", type="string", default="1"),
     *                 @OA\Property(description="角色id 可多选 最多2个 逗号分隔", property="role_ids", type="integer", default="1"),
     *                 required={"manager_id","role_ids"})
     *         )
     *     ),
     *     @OA\Response(response="200",description="绑定成功"),
     * )
     */
    public function bindRole(){
        $data = input('post.');
        try{
            Validate(ManagerRoleMapValidate::class)->check($data);
            $model = new ManagerRoleMapModel();
            // Reject a second binding: each manager may be bound only once.
            $isset = $model->where('manager_id',$data['manager_id'])->column('id');
            if($isset) return $this->jsonError('该管理员已经绑定过角色');
            $arr = [];
            foreach(explode(',',$data['role_ids']) as $k => $v){
                $arr[$k]['manager_id'] = $data['manager_id'];
                $arr[$k]['role_id'] = $v;
            }
            $flag = $model->saveAll($arr);
            return ($flag) ? $this->jsonSuccess('添加成功') : $this->jsonError('添加失败');
        }catch (\think\exception\ValidateException $e){
            // Fix: the bare `ValidateException` resolved to a non-existent
            // class in this namespace (no matching use import), so validation
            // failures were never caught; fully qualify the class.
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Put(path="/v1/manager/editBindRole",
     *     tags={"权限管理"},
     *     summary="管理员修改角色 ",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="application/x-www-form-urlencoded" ,
     *             @OA\Schema(
     *                 @OA\Property(description="管理员id", property="manager_id", type="string", default="1"),
     *                 @OA\Property(description="角色id 可多选 最多2个 逗号分隔", property="role_ids", type="integer", default="1"),
     *                 required={"manager_id","role_ids"})
     *         )
     *     ),
     *     @OA\Response(response="200",description="绑定成功"),
     * )
     */
    public function editBindRole(){
        $data = input('post.');
        try{
            Validate(ManagerRoleMapValidate::class)->check($data);
            $model = new ManagerRoleMapModel();
            $arr = [];
            foreach(explode(',',$data['role_ids']) as $k => $v){
                $arr[$k]['manager_id'] = $data['manager_id'];
                $arr[$k]['role_id'] = $v;
            }
            try{
                // Replace the bindings atomically: delete old rows, insert new.
                $model->startTrans();
                $model->where('manager_id',$data['manager_id'])->delete();
                // Fix: removed the stray var_dump() debug output that
                // corrupted the JSON response body.
                $flag = $model->saveAll($arr);
                if($flag){
                    $model->commit();
                    return $this->jsonSuccess('修改成功');
                }
                // Fix: roll back the open transaction before reporting
                // failure; previously it was left dangling on this path.
                $model->rollback();
                return $this->jsonError('修改失败');
            }catch (\Exception $e){
                $model->rollback();
                return $this->jsonError('修改失败');
            }
        }catch (\think\exception\ValidateException $e){
            // Fully qualified for the same reason as in bindRole() above.
            return $this->jsonError($e->getMessage());
        }
    }
}
<file_sep>/app/admin/model/Manager.php
<?php
namespace app\admin\model;
use think\Model;
/**
* 自动化模型的model模板文件
*/
class Manager extends Model
{
    protected $autoWriteTimestamp = 'datetime';// auto-write timestamp columns as datetime
    protected $createTime = 'add_time'; // column used for creation time
    protected $updateTime = 'update_time'; // column used for update time
    // Per-save salt produced in setLoginPwdAttr, persisted by setEncryptAttr.
    protected $encrypt = null;
    protected $insert = ['encrypt'];
    protected $update = ['encrypt'];
//    public static function init()
//    {
//        self::event('before_update',function($data){
//            p($data);
//            die();
//        });
//    }
    // Attribute mutators ("auto-complete") below.
    protected function setRoleIdAttr($value)
    {
        // Store the multi-select role ids as a comma-joined string; 0 = none.
        if(!$value) return 0;
        return join(',',$value);
    }
    protected function setLoginPwdAttr($value,$data)
    {
        // Strip every kind of whitespace (spaces, full-width spaces, newlines, tabs).
        $search = array(" "," ","\n","\r","\t");
        $replace = array("","","","","");
        $value = str_replace($search, $replace, $value);
        // Empty after stripping means "password unchanged": keep old hash + salt.
        if(strlen($value) == 0){
            // NOTE(review): the primary-key value is concatenated straight
            // into the where clause — confirm it can never carry untrusted input.
            $old = $this->field('login_pwd,encrypt')->where($this->pk .' = '. $data[$this->pk])->find();
            $this->encrypt = $old['encrypt'];
            return $old['login_pwd'];
        }else{
            // New password: fresh unique salt, md5(md5(pwd) . salt).
            $this->encrypt = uniqid('',true);
            $pwd = md5(md5($value).$this->encrypt);
            return $pwd;
        }
    }
    protected function setEncryptAttr($value,$data)
    {
        // Persist whatever salt setLoginPwdAttr decided on; relies on
        // login_pwd being processed before the encrypt field.
        return $this->encrypt;
    }
}<file_sep>/app/crm/controller/Notice.php
<?php
namespace app\crm\controller;
use app\crm\controller\Base;
use app\crm\model\Manager;
use Auth\MemberLib;
use Jwt\Exception\TokenExistsException;
use Jwt\JwtToken;
use think\facade\Request;
/**
* @des 企业公告
* @package app\crm\controller
*/
class Notice extends Base
{
    // Enterprise-announcement endpoints: all four actions are empty stubs.
    public function lists(){
        // TODO: implement announcement listing.
    }
    public function add(){
        // TODO: implement announcement creation.
    }
    public function edit(){
        // TODO: implement announcement update.
    }
    public function del(){
        // TODO: implement announcement deletion.
    }
}
<file_sep>/app/common/model/BaseCrmModel.php
<?php
namespace app\common\model;
use think\Model;
class BaseCrmModel extends Model
{
    // Base model routing all CRM models to the dedicated
    // "company_cloud" database connection.
    protected $connection = 'company_cloud';
}<file_sep>/app/apidoc/controller/Index.php
<?php
namespace app\apidoc\controller;
use app\common\controller\BaseController;
class Index extends BaseController
{
    /**
     * Render the OpenAPI JSON for the "company" application.
     */
    public function company()
    {
        $this->outputApiJson('company', 'domain_bind.company_domain');
    }
    /**
     * Render the OpenAPI JSON for the "crm" application.
     */
    public function crm()
    {
        $this->outputApiJson('crm', 'domain_bind.crm_domain');
    }
    /**
     * Scan an application directory for OpenAPI annotations and echo the
     * resulting JSON, rewriting the server host from the env configuration.
     * (Extracted: company() and crm() were byte-for-byte duplicates.)
     *
     * @param string $appName   application folder name under app/
     * @param string $domainKey env key holding the host to bind into servers[0]
     */
    private function outputApiJson($appName, $domainKey)
    {
        $scanPath = root_path() . 'app/' . $appName;
        $s = \OpenApi\scan($scanPath);
        $arr = json_decode($s->toJson(),true);
        // Replace the host part of the first server URL with the configured
        // domain for this application.
        $arr['servers'][0]['url'] = preg_replace('/\/\/(\S+)/' , '//'.env($domainKey) , $arr['servers'][0]['url']);
        $json = json_encode($arr);
        echo $json;
    }
}
<file_sep>/app/crm/controller/Enterprises.php
<?php
namespace app\crm\controller;
use app\crm\controller\Base;
use app\crm\model\Enterprises as EnterprisesModel;
use app\crm\model\EnterprisesPrivate;
use app\crm\model\Manager;
use Auth\MemberLib;
use Jwt\Exception\TokenExistsException;
use Jwt\JwtToken;
use think\facade\Cache;
use think\facade\Request;
/**
* @des 企业公告
* @package app\crm\controller
*/
class Enterprises extends Base
{
    /**
     * @OA\Get(path="/v1/enterprises/passed_lists",
     *     tags={"企业管理[not_menu]"},
     *     summary="获取所有已审核的企业列表 [企业列表]",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="pageSize",in="query",description="分页数量 默认10",required=false,@OA\Schema(type="integer")),
     *     @OA\Parameter(name="page",in="query",description="分页页码",required=false,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function passedLists(){
        $pageSize = (int) input('get.pageSize',10) ;
        $page = (int) input('get.page',0) ;
        $company_name = input('get.company_name'); // corporate_name
        // Shared condition builder so the page query and the count query
        // always agree (the old count ignored the keyword filter, so the
        // reported total was wrong whenever a name was supplied).
        $buildQuery = function() use ($company_name){
            $query = EnterprisesModel::where('audited_time','>',0)
                ->where('record_status', '=', 2)
                ->where('status', '=', 1);
            if($company_name){
                // Security fix: bind the keyword instead of concatenating it
                // into raw SQL — the old splicing was an SQL-injection vector.
                $query->whereRaw('instr(corporate_name, :kw)', ['kw' => $company_name]);
            }
            return $query;
        };
        $list = $buildQuery()
            ->field('id,user_name,contacts,contact_number,corporate_name')
            ->page($page,$pageSize)
            ->select();
        $total = $buildQuery()->count();
        $return['list'] = $list;
        $return['total'] = $total;
        return (count($list)) ? $this->jsonSuccess($return) : $this->jsonError('暂无数据');
    }
    /**
     * @OA\Get(path="/v1/enterprises/detail",
     *     tags={"企业管理[not_menu]"},
     *     summary="获取企业详情 ",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="company_id",in="query",description="企业id",required=false,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function detail(){
        $id = input('get.company_id',0) ;
        $detail = EnterprisesModel::field('id,user_name,contacts,contact_number,corporate_name')->find(intval($id));
        return ($detail) ? $this->jsonSuccess($detail) : $this->jsonError('暂无数据');
    }
    /**
     * NOTE(review): this annotation duplicates the detail() path above —
     * confirm the real route for this action before regenerating docs.
     * @OA\Get(path="/v1/enterprises/detail",
     *     tags={"企业管理[not_menu]"},
     *     summary="获取私有认证企业申请信息列表",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="page",in="query",description="分页页数",required=false,@OA\Schema(type="integer")),
     *     @OA\Parameter(name="pageSize",in="query",description="分页条数",required=false,@OA\Schema(type="integer")),
     *     @OA\Parameter(name="enterprise_name",in="query",description="名称搜索",required=false,@OA\Schema(type="string")),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function privateCompanyList(){
        $pageSize = (int) input('get.pageSize',10) ;
        $page = (int) input('get.page',0) ;
        $enterprise_name = input('get.enterprise_name',false);
        $m = EnterprisesPrivate::page($page,$pageSize);
        if ($enterprise_name){
            // Security fix: parameter binding instead of string splicing.
            $m->whereRaw('instr(enterprise_name, :kw)', ['kw' => $enterprise_name]);
        }
        $list = $m->select();
        return ($list) ? $this->jsonSuccess($list) : $this->jsonError('暂无数据');
    }
}
<file_sep>/app/common.php
<?php
/**
 * Debug helper: dump any value wrapped in <pre> tags for readable output.
 *
 * @param mixed $var value to dump
 */
function p($var){
    echo '<pre>';
    var_dump($var);
    echo '</pre>';
}
/**
 * Read an env value and split it into an array.
 *
 * @param string $name      env key to read
 * @param string $defualt   default value when the key is missing (name kept
 *                          as-is for backward compatibility)
 * @param string $delimiter separator used to split the value
 * @return array
 */
function envStrToArray($name ,$defualt = '',$delimiter = ','){
    // Fix: the $delimiter parameter was ignored and ',' was hard-coded.
    return explode($delimiter, env($name,$defualt));
}
<file_sep>/app/crm/controller/Crontab.php
<?php
namespace app\crm\controller;
use app\crm\controller\Base;
use think\facade\Db;
class Crontab extends Base
{
    /**
     * @OA\Get(path="/crontab/checkApplyOverTime",
     *     tags={"首页 [not_menu]"},
     *     summary="首页相关",
     *     security={{"api_key": {}}},
     *     @OA\Response(response=200,description="成功"),
     * )
     */
    public function checkApplyOverTime()
    {
        // For each company, look at its most recent audited application and
        // keep only those whose end_time is already in the past (expired).
        // 已审核 截止日志小于当前时间(已过期)
        $sql = '
        select company_id from (
            select * from (
                SELECT DISTINCT company_id ,apply_time,end_time
                FROM `xfb_service_apply` FORCE INDEX ( over_time )
                WHERE `status` = 1 AND `xfb_service_apply`.`delete_time` IS NULL
                ORDER BY `apply_time` DESC
            ) as a
            GROUP BY a.company_id
        ) as b where UNIX_TIMESTAMP(end_time) < '.time().'
        ';
        $company_ids = Db::query($sql);
        // Idiom fix: array_column replaces the manual foreach/array_push loop.
        $ids = array_column($company_ids, 'company_id');
        // Downgrade the expired companies' self_support flag.
        \app\crm\model\Enterprises::where('self_support',1)->whereIn('id',$ids)->update(['self_support' => 0]);
        return $this->jsonSuccess('ok');
    }
}
<file_sep>/app/crm/validate/ServiceGoodsValidate.php
<?php
namespace app\crm\validate;
use think\Validate;
class ServiceGoodsValidate extends Validate
{
    // Validation rules for service-package goods create/edit payloads.
    protected $rule = [
        'name' => 'require|min:3|max:12',
        'type' => 'require|number|max:2',
        'expiry_type' => 'require|number|max:2',
        'expiry_num' => 'require|number|max:2',
        'desc' => 'require|min:6|max:200',
    ];
    // Per-rule error messages shown to API clients.
    protected $message = [
        'name.require' => '产品套餐名称必填',
        'name.max' => '产品套餐名称最长12个字符',
        'name.min' => '产品套餐名称最少3个字符',
        'type.require' => '产品套餐类型必填',
        'type.number' => '产品套餐类型必须是数字',
        'type.max' => '产品套餐类型最多2个字符',
        'expiry_type.require' => '有效期类型必填',
        'expiry_type.number' => '有效期类型必须是数字',
        'expiry_type.max' => '有效期类型最多2个字符',
        'expiry_num.require' => '有效期时长必填',
        'expiry_num.number' => '有效期时长必须是数字',
        'expiry_num.max' => '有效期时长最多2个字符',
        'desc.require' => '产品套餐描述必填',
        'desc.min' => '产品套餐描述名最少3个字符',
        'desc.max' => '产品套餐描述名最多200个字符',
    ];
}
}<file_sep>/app/provider.php
<?php
use app\common\ExceptionHandle;
use RedisClient\RedisClient;
// 容器Provider定义文件
return [
'think\exception\Handle' => ExceptionHandle::class,
'redisClient' => RedisClient::class,
'ip2Region' => Ip2Region::class
];
<file_sep>/app/crm/controller/Auth.php
<?php
namespace app\crm\controller;
use app\crm\controller\Base;
use app\crm\model\Manager;
use Auth\MemberLib;
use Jwt\Exception\TokenExistsException;
use Jwt\JwtToken;
use think\facade\Request;
class Auth extends Base
{
    // Token TTL, seconds. Originally 24h (86400); temporarily extended to one
    // year per product request so old and new tokens can coexist.
    const TOKEN_EXP_SECENDS = 31536000;
    /**
     * @OA\Post(path="/v1/auth/login",
     *     tags={"用户认证 [not_menu]"},
     *     summary="登录",
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="multipart/form-data",
     *             @OA\Schema(
     *                 @OA\Property(description="登录账号", property="account", type="string", default="<EMAIL>"),
     *                 @OA\Property(description="账号类型 [email,mobile,uname]", property="account_type", type="string", default="email"),
     *                 @OA\Property(description="密码", property="pwd", type="string",default="<PASSWORD>"),
     *                 @OA\Property(description="客户端类型 [web,wechat,andriod,ios]", property="client_type", type="string",default="web"),
     *                 @OA\Property(description="设备id [仅针对andriod,ios]", property="device_id", type="string",default=""),
     *                 required={"account","account_type", "pwd","client_type","device_id"})
     *         )
     *     ),
     *
     *     @OA\Response(response="200",description="登录成功",
     *          @OA\Header( header="Authorization",description="授权token", @OA\Schema(type="string") ),
     *     ),
     *
     * )
     */
    public function login()
    {
        $jwt = new JwtToken();
        $param = input('post.');
        $platform = $param['client_type'];
        $deviceId = $param['device_id'];
        try{
            $jwt->setRedis(app('redisClient'));
            $jwt->sub($platform) // issued to this client type
                ->aud($deviceId) // device id, for android/ios clients
                ->exp(self::TOKEN_EXP_SECENDS) // token lifetime
                ->useHook('defualtHook'); // enable the default hook
            // Bind the user model and its column mapping into MemberLib.
            // NOTE(review): '<PASSWORD>' / '<EMAIL>' look like redacted
            // placeholders from the dump — restore the real column names.
            MemberLib::bindModel(Manager::class ,[
                'uname' => 'name',
                'account' => ['email' => 'email','uname' => 'name','mobile' => 'tel'],
                'pwd' => '<PASSWORD>',
                'pwd_encrypt' => 'md5',
                'pwd_salt' => '<PASSWORD>',
                'user_status' => 'status',
                'frozen_value' => 0
            ],'id,name,real_name as truename,password,email,mobile,salt,status');
            MemberLib::bindRedis(app('redisClient'));
            // Perform the credential check.
            $memberInfo = MemberLib::login($param);
            $jwt->payload($memberInfo); // carry only non-sensitive user info
            try{
                $token = $jwt->getToken();
                return $this->jsonSuccess(['login' => 'ok','Authorization'=>$token] );
            }catch (TokenExistsException $e){
                // Already logged in: hand back the existing token.
                return $this->jsonError('登录失败:您已登录请不要重复登录',['login'=>'fail'] ,['Authorization'=>$e->getMessage() ]);
            }
        }catch (\Exception $e){
            return $this->jsonError('登录失败'.$e->getMessage());
        }
    }
    /**
     * @OA\Post(path="/v1/auth/loginout",
     *     tags={"用户认证 [not_menu]"},
     *     summary="退出",
     *     security={{"api_key": {}}},
     *     @OA\Response(response=200,description="退出成功"),
     * )
     *
     */
    public function loginOut()
    {
        $jwt = new JwtToken();
        $jwt->setRedis(app('redisClient'))->useHook('defualtHook');
        $token = Request::header('Authorization');
        if(strlen(trim($token)) == 0 ) return $this->jsonError('token not found');
        $flag = $jwt->delToken($token);
        return $flag ? $this->jsonSuccess('退出成功') : $this->jsonError('退出失败');
    }
    /**
     * @OA\Post(path="/v1/auth/sendVerifyCode",
     *     tags={"用户认证 [not_menu]"},
     *     summary="发送验证码",
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="multipart/form-data",
     *             @OA\Schema(
     *                 @OA\Property(description="登录账号", property="account", type="string", default="百事可乐"),
     *                 @OA\Property(description="账号类型 [email,mobile,uname]", property="account_type", type="string", default="uname"),
     *                 required={"account","account_type", "pwd","client_type","device_id"}
     *             )
     *         )
     *     ),
     *     @OA\Response(response="200",description="登录成功")
     * )
     */
    public function sendVerifyCode($account ,$account_type){
        MemberLib::bindRedis(app('redisClient'));
        $type = MemberLib::getAccountType($account ,$account_type);
        switch ($type){
            case 'mobile':
                $flag = MemberLib::sendMobileVerifyCode($account);
                break;
            case 'email':
                $flag = MemberLib::sendEmailVerifyCode($account);
                break;
            case 'uname':
                // Fix: dropped the unreachable `break` after this return.
                return $this->jsonError('uname 账户类型不支持发送验证码');
            default:
                // Fix: an unrecognised type previously fell through with
                // $flag undefined (PHP notice); fail fast instead.
                return $this->jsonError('验证码发送失败!请重试!');
        }
        return ($flag) ? $this->jsonSuccess('验证码已发送') : $this->jsonError('验证码发送失败!请重试!');
    }
    /**
     * @OA\Post(path="/v1/auth/checktoken",
     *     tags={"用户认证 [not_menu]"},
     *     summary="验证token是否正确",
     *     security={{"api_key": {}}},
     *     @OA\Response(response=200,description="验证成功"),
     * )
     */
    public function checktoken(){
        $token = Request::header('Authorization');
        $jwt = new JwtToken();
        $jwt->setRedis(app('redisClient'))->useHook('defualtHook');
        try{
            return ($jwt->verify($token)) ? $this->jsonSuccess('success') : $this->jsonError('error');
        }catch (\Exception $e){
            // Any verification failure is reported uniformly.
            return $this->jsonError('error');
        }
    }
}
<file_sep>/app/crm/controller/Index.php
<?php
namespace app\crm\controller;
use app\crm\controller\Base;
use app\crm\model\Manager as ManagerModel;
use think\Db;
use think\facade\Cache;
use Uploader\Uploader;
use Upyun\Config;
use Upyun\Upyun;
class Index extends Base
{
    /**
     * @OA\Get(path="/v1/index",
     *     tags={"首页 [not_menu]"},
     *     summary="首页相关",
     *     security={{"api_key": {}}},
     *     @OA\Response(response=200,description="成功"),
     * )
     */
    public function index()
    {
        // Simple liveness endpoint for the CRM backend.
        return $this->jsonSuccess('企业客服CRM ');
    }
    /**
     * Upload the posted $_FILES to UpYun cloud storage and return the
     * stored file descriptors; failures surface as a JSON error.
     */
    public function upload()
    {
        try{
            $upfile = Uploader::init('upyun')->upfile($_FILES);
            return $this->jsonSuccess(['files' => $upfile],'上传成功');
        }catch (\Exception $e){
            return $this->jsonError($e->getMessage());
        }
    }
}
<file_sep>/app/crm/controller/ServiceGoodsRule.php
<?php
namespace app\crm\controller;
use app\crm\model\ServiceGoodsRule as ServiceGoodsRuleModel;
use app\crm\validate\ServiceGoodsRuleValidate;
/**
* @des 服务商品权限管理
* @package app\crm\controller
*/
class ServiceGoodsRule extends Base
{
    /**
     * @OA\Post(path="/v1/service_goods_rule/create",
     *     tags={"套餐权限管理 [not_menu]"},
     *     summary="套餐权限绑定",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="multipart/form-data",
     *             @OA\Schema(
     *                 @OA\Property(description="套餐产品id", property="service_goods_id", type="string", default="1"),
     *                 @OA\Property(description="权限功能id", property="module_id", type="string", default="4,5,7"),
     *                 required={"service_goods_id","module_id"})
     *         )
     *     ),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function create(){
        $data = input('post.');
        try{
            Validate(ServiceGoodsRuleValidate::class)->check($data);
            $flag = (new ServiceGoodsRuleModel())->add($data['service_goods_id'] , $data['module_id']);
            return ($flag) ? $this->jsonSuccess('添加成功') : $this->jsonError('添加失败');
        }catch (\think\exception\ValidateException $e){
            // Fix: the bare `ValidateException` resolved to a non-existent
            // class in this namespace (no use import), so it was never caught.
            return $this->jsonError($e->getMessage());
        }catch (\Exception $e){
            // Consistency fix: mirror edit()'s generic fallback so model
            // errors surface as a JSON error instead of an unhandled fatal.
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Put(path="/v1/service_goods_rule/edit",
     *     tags={"套餐权限管理 [not_menu]"},
     *     summary="修改套餐权限",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="application/x-www-form-urlencoded" ,
     *             @OA\Schema(
     *                 @OA\Property(description="套餐产品id", property="service_goods_id", type="string", default="1"),
     *                 @OA\Property(description="权限功能id", property="module_id", type="string", default="4,5,7"),
     *                 required={"service_goods_id","module_id"})
     *         )
     *     ),
     *     @OA\Response(response="200",description="修改成功")
     * )
     */
    public function edit(){
        try{
            $data = input('post.');
            Validate(ServiceGoodsRuleValidate::class)->check($data);
            $flag = (new ServiceGoodsRuleModel())->edit($data['service_goods_id'] ,$data['module_id']);
            return ($flag) ? $this->jsonSuccess('修改成功') : $this->jsonError('修改失败');
        }catch (\think\exception\ValidateException $e){
            // Fully qualified for the same reason as in create() above.
            return $this->jsonError($e->getMessage());
        }catch (\Exception $e){
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Delete(path="/v1/service_goods_rule/delete",
     *     tags={"套餐权限管理 [not_menu]"},
     *     summary="删除套餐权限",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="service_goods_id",in="query",description="套餐产品id",required=true,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="修改成功")
     * )
     */
    public function delete(){
        // Despite the DELETE verb, the id is read from the query string.
        $id = intval(input('get.service_goods_id'));
        if(!$id) return $this->jsonError('id 参数必填');
        $flag = (new ServiceGoodsRuleModel())->del($id);
        return ($flag) ? $this->jsonSuccess('删除成功') : $this->jsonError('删除失败');
    }
}
<file_sep>/app/crm/config/cache.php
<?php
// +----------------------------------------------------------------------
// | Cache settings (crm application)
// +----------------------------------------------------------------------
return [
    // Default cache store to use (overridable via env cache_crm.driver)
    'default' => env('cache_crm.driver', 'redis'),
    // Per-store connection configuration
    'stores' => [
        'file' => [
            // Driver type
            'type' => 'File',
            // Directory where cache files are stored ('' = framework default)
            'path' => '',
            // Cache key prefix
            'prefix' => '',
            // Time-to-live in seconds; 0 means cache forever
            'expire' => 0,
            // Prefix used for cache tags
            'tag_prefix' => 'tag:',
            // Serialization callables, e.g. ['serialize', 'unserialize']
            'serialize' => [],
        ],
        // Additional cache connection
        'redis' => [
            'type' => env('cache_crm.type', 'redis'),
            'host' => env('cache_crm.host', '127.0.0.1'),
            'port' => env('cache_crm.port', 6379),
            'password' => env('cache_crm.password', ''),
            'prefix' => env('cache_crm.prefix', ''),
            // Redis database index (env values arrive as strings, hence intval)
            'select' => intval(env('cache_crm.select', 0))
        ]
    ],
];
<file_sep>/app/crm/validate/ServiceModuleValidate.php
<?php
namespace app\crm\validate;
use think\Validate;
class ServiceModuleValidate extends Validate
{
    // Validation rules for service-module records: every field is required
    // and the application name is capped at 25 characters.
    protected $rule = [
        'app_name'        => 'require|max:25',
        'controller_desc' => 'require',
        'controller_name' => 'require',
        'method_desc'     => 'require',
        'method_name'     => 'require',
    ];

    // Human-readable error messages, keyed by "field.rule".
    protected $message = [
        'app_name.require'        => '应用名称必填',
        'app_name.max'            => '应用名称最长25个字符',
        'controller_desc.require' => '控制器功能描述必填',
        'controller_name.require' => '控制器名称必填',
        'method_desc.require'     => '方法功能描述必填',
        'method_name.require'     => '方法名称必填',
    ];
}
<?php
namespace app\common;
use think\db\exception\DataNotFoundException;
use think\db\exception\ModelNotFoundException;
use think\Exception;
use think\exception\Handle;
use think\exception\HttpException;
use think\exception\HttpResponseException;
use think\exception\RouteNotFoundException;
use think\exception\ValidateException;
use think\Response;
use think\response\Json;
use Throwable;
/**
* 应用异常处理类
*/
class ExceptionHandle extends Handle
{
    /**
     * Exception classes that should not be written to the log.
     * @var array
     */
    protected $ignoreReport = [
        HttpException::class,
        HttpResponseException::class,
        ModelNotFoundException::class,
        DataNotFoundException::class,
        ValidateException::class,
    ];

    /**
     * Record the exception (log or other sink).
     *
     * @param Throwable $exception
     * @return void
     */
    public function report(Throwable $exception): void
    {
        // Use the framework's built-in exception logging.
        parent::report($exception);
    }

    /**
     * Render an exception into an HTTP response.
     *
     * Builds a JSON-ready payload ($data) — file/line/code details are only
     * included in debug mode. NOTE: the payload is currently unused (the
     * `return json(...)` below is intentionally commented out) and rendering
     * is still delegated to the framework; kept for the pending log-collection
     * TODO.
     *
     * @param \think\Request $request
     * @param Throwable      $e
     * @return Response
     */
    public function render($request, Throwable $e): Response
    {
        $debug = (bool) env('app_debug');
        [$code, $msg] = $this->classify($e, $debug);
        $data = ['error_message' => $msg];
        if ($debug) {
            $data['file'] = $e->getFile();
            $data['line'] = $e->getLine();
            $data['code'] = $e->getCode();
        }
        // todo collect the error log
        // .......
        // return json($data, $code);
        // Hand everything else to the framework renderer.
        return parent::render($request, $e);
    }

    /**
     * Map an exception to [http-ish status code, message]. Internal messages
     * for unclassified (500) errors are hidden outside debug mode.
     * (Deduplicates what used to be two near-identical if/else chains.)
     */
    private function classify(Throwable $e, bool $debug): array
    {
        if ($e instanceof RouteNotFoundException) {
            return [404, 'page not found'];
        }
        if ($e instanceof ValidateException) {
            return [202, $e->getMessage()];
        }
        if ($e instanceof Exception) {
            return [203, $e->getMessage()];
        }
        return [500, $debug ? $e->getMessage() : 'server error'];
    }
}
<file_sep>/app/admin/model/Rule.php
<?php
/**
* Created by PhpStorm.
* User: jyolo
* Date: 2017/2/8
* Time: 17:49
*/
namespace app\admin\model;
use think\Model;
class Rule extends Model
{
    /**
     * List distinct controllers with every rule row of each controller
     * attached under the 'son' key.
     */
    public function getList(){
        $controllers = toArray(
            $this->field('rule_title,module,controller')->group('controller')->select()
        );
        foreach($controllers as $idx => $row){
            $children = $this->where([
                'module' => $row['module'],
                'controller' => $row['controller'],
            ])->select();
            $controllers[$idx]['son'] = toArray($children);
        }
        return $controllers;
    }
}
}<file_sep>/app/admin/theme/defualt/recycle/info/index.html
{extend name='../base/index/main'}
{block name='main'}
<style>
    .search-quote{float:left;width: 100%;}
    .layui-form-item{float:left;clear: none;}
    .layui-boxs{padding:15px;}
    .layui-elem-quote{padding:10px 0;}
</style>
<div class="layui-layout">
    <!-- Search panel: every field posts back to Info/index as where[] filters -->
    <fieldset class="layui-elem-field layui-field-title" >
        <legend>搜索</legend>
        <blockquote class="layui-elem-quote search-quote">
            <div class="layui-boxs">
                <form class="layui-form layui-form-pane" action="{:url('Info/index')}" method="post" lay-filter="search">
                    <div class="layui-form-item" component-name="text">
                        <label class="layui-form-label">关键词</label>
                        <div class="layui-input-inline" style="margin-right:1px;">
                            <input lay-verify="" type="text" class="preview layui-input" name="keyword" value="" placeholder="请输入搜索的关键字">
                        </div>
                        <div class="layui-input-inline" style="width: 100px;">
                            <select name="keyword_type">
                                <option value="title" selected>标题</option>
                                <option value="content">内容</option>
                                <option value="brand_name">品牌</option>
                                <option value="memo">备注</option> <!--todo-->
                                <option value="suqiu">投诉诉求</option>
                                <option value="problem">投诉问题</option>
                                <option value="supplement_operator">专员补充</option> <!--todo-->
                                <option value="supplement">用户补充</option> <!--todo-->
                                <option value="attr">附加属性</option> <!--todo-->
                                <option value="keywords">关键词</option> <!--todo-->
                            </select>
                        </div>
                        <div class="layui-form-mid layui-word-aux"></div>
                    </div>
                    {:CMaker("text")->label('品牌名称')->name('where[brand_name][like]')->placeholder('请输入搜索的品牌名称')->render()}
                    <!-- BUGFIX: placeholder previously said 品牌名称 (brand name) on the complaint-number field -->
                    {:CMaker("text")->label('投诉编号')->name('where[tsnumber][=]')->placeholder('请输入搜索的投诉编号')->render()}
                    {:CMaker("select")->label('是否显示')->option('1-是|0-否')->name('where[is_show~int][=]')->render()}
                    {:CMaker("select")->label('是否好评')->option('1-是|0-否')->name('where[is_good~int][=]')->render()}
                    {:CMaker("select")->label('是否推荐')->option('1-是|0-否')->name('where[is_recommend~int][=]')->render()}
                    {:CMaker("select")->label('是否热点')->option('1-是|0-否')->name('where[is_hot~int][=]')->render()}
                    {:CMaker("select")->label('客服备注')->option('1-有|0-无')->name('where[is_has_note~int][=]')->render()}
                    {:CMaker("select")->label('性别')->option('1-男|0-女')->name('where[sex~int][=]')->render()}
                    {:CMaker("select")->label('来源')->option('pc-pc|ios-ios|wechat-wechat|android-android|h5-h5')->name('where[source][=]')->render()}
                    {:CMaker("text")->label('手机号')->name('where[mobile][=]')->render()}
                    {:CMaker("text")->label('投诉人')->name('where[real_name][like]')->render()}
                    {:CMaker("text")->label('投诉编号')->name('where[tsnumber][like]')->render()}
                    {:CMaker("datepicker")->label('投诉时间区间')->placeholder('开始时间 , 结束时间')->range(',')->type('datetime')->name('where[add_time][between]')->shownowtime(false)->render()}
                    {:CMaker("datepicker")->label('转出时间区间')->placeholder('开始时间 , 结束时间')->range(',')->type('datetime')->name('where[turnout_time][between]')->shownowtime(false)->render()}
                    {:CMaker("datepicker")->label('完结时间区间')->placeholder('开始时间 , 结束时间')->range(',')->type('datetime')->name('where[finish_time][between]')->shownowtime(false)->render()}
                    {:CMaker("relation")->label("所属专员")->helpinfo("")->table("manager")->field("id,truename")->showtype("select")->name('where[operator_id~int][=]')->render()}
                    {:CMaker('linkselect')->label('一级分类|二级分类|所属品牌')
                    ->helpinfo('')
                    ->linkfield('cat_pid|where[cat_id~int][=]|where[brand_id~int][=]')
                    ->serverUrl(url('info/linkselect'))
                    ->param('type-topCat| type-sonCat | type-brand')
                    ->showfield('id-cat_name|id-cat_name|id-brand_name')
                    ->render()}
                    <div class="layui-form-item">
                        <div class="layui-inline">
                            <div class="layui-input-inline" style="width: 300px;">
                                <button class="layui-btn lay-submit" lay-submit data-type="search_reload_table" table-index="0">
                                    <i class="layui-icon"></i> 搜索
                                </button>
                                <button type="reset" class="layui-btn lay-submit" >重置</button>
                                <button type="reset" class="layui-btn lay-submit" >导出</button>
                            </div>
                        </div>
                    </div>
                </form>
            </div>
        </blockquote>
    </fieldset>
    <!-- Result table + batch action bar -->
    <form class="layui-form layui-form-pane" action="{:url('Info/index')}" method="post" lay-filter="list-table">
        {:CMaker("table")->filter("complaint_brand")->height('900')->cols([
        ['type'=>'checkbox'] ,
        ['field' => "_id",'title' => 'id','sort' => true ,'width' => '100'],
        ['field' => 'tsnumber','title' => '投诉编号','sort' => true ,'width' => '100' ,'templet'=> '#opentComplaint'] ,
        ['field' => 'title','title' => '标题','sort' => true ,'width' => '100'] ,
        ['field' => 'real_name','title' => '投诉人','sort' => true,'width' => '100' ] ,
        ['field' => 'mobile','title' => '手机号','sort' => true ,'width' => '120'] ,
        ['field' => 'is_show','title' => '是否显示','sort' => true ,'width' => '100'] ,
        ['field' => 'cat_pid_name','title' => '一级分类','sort' => true ,'width' => '100'] ,
        ['field' => 'cat_name','title' => '二级分类','sort' => true ,'width' => '100'] ,
        ['field' => 'brand_name','title' => '品牌','sort' => true ,'width' => '100'] ,
        ['field' => 'add_time','title' => '投诉时间','sort' => true ,'width' => '160'] ,
        ['field' => 'turnout_time','title' => '转出时间','sort' => true ,'width' => '160'] ,
        ['field' => 'finish_time','title' => '完成时间','sort' => true ,'width' => '160' ] ,
        ['field' => 'status','title' => '状态','sort' => true ,'width' => '100'] ,
        ['field' => 'is_good','title' => '是否好评','sort' => true ,'width' => '100'] ,
        ['field' => 'is_has_note','title' => '备注','sort' => true,'width' => '100' ] ,
        ['field' => 'source','title' => '来源','sort' => true ,'width' => 'auto'] ,
        ['toolbar' => '#actionTpl' ,'title' => '操作','fixed' => 'right','width' => '80']
        ])->page(true)->limit(20)->limits([40,80,100])->url('/admin.php/complaint/info/index.html')->editUrl('/admin.php/complaint/info/table_edit.html')->render()}
        <script type="text/html" id="opentComplaint" lay-filter="opentComplaint">
            <span data-url="{:url('info/detail')}" data-field="_id" lay-event="edit" >{{d.tsnumber}}</span>
        </script>
        <script type="text/html" id="actionTpl" lay-filter="opentComplaint">
            <div class="layui-btn-group">
                <span data-url="{:url('info/detail')}" data-field="_id" class="layui-btn layui-btn-sm" lay-event="edit">处理</span>
            </div>
        </script>
        <!-- NOTE(review): batch actions below target ComplaintBrand while the table lists Info rows — confirm this is intended -->
        <blockquote class="layui-elem-quote layui-quote-nm">
            <div class="layui-boxs">
                <button class="layui-btn layui-btn-sm" lay-submit data-url="{:url('ComplaintBrand/pdel')}" data-call-back="reload_table" table_index="0">
                    批量删除
                </button>
                <div class="layui-btn-group">
                    <button class="layui-btn layui-btn-sm" lay-submit data-url="{:url('ComplaintBrand/batch',['type' => 'is_show' ,'value' => "1" ])}" data-call-back="reload_table" >
                        显示
                    </button>
                    <button class="layui-btn layui-btn-sm" lay-submit data-url="{:url('ComplaintBrand/batch',['type' => 'is_show' ,'value' => "0"])}" data-call-back="reload_table" >
                        取消显示
                    </button>
                </div>
                <button class="layui-btn layui-btn-sm link" data-type="page" data-url="{:url('ComplaintBrand/add')}" style="float: right">
                    添加
                </button>
            </div>
        </blockquote>
    </form>
</div>
<script type="text/javascript" src="__PLUGIN_PATH__/layui/layui.js"></script>
<script type="text/javascript" src="__PLUGIN_PATH__/lay-extend-module/config.js"></script>
{:CMakerJs("all")}
{/block}
<?php
declare (strict_types = 1);
namespace app\crm\controller;
use app\common\controller\BaseController;
use think\App;
/**
 * @OA\Info(title="客服CRM系统文档", version="0.1")
 * @OA\Server(
 *     url="{schema}://crm.local.company.com",
 *     description="客服CRM系统接口文档",
 *     @OA\ServerVariable(
 *         serverVariable="schema",
 *         enum={"https", "http"},
 *         default="http"
 *     )
 * )
 *
 * @OA\SecurityScheme(
 *     securityScheme="api_key",
 *     type="apiKey",
 *     in="header",
 *     name="Authorization"
 * )
 */
class Base extends BaseController
{
    /**
     * Build a JSON error response: {code: 0, msg, data} with HTTP status 203.
     *
     * @param string $msg    error message shown to the caller
     * @param mixed  $data   optional extra payload (defaults to '')
     * @param array  $header extra HTTP headers to send
     * @return \think\response\Json
     */
    protected function jsonError($msg = 'fail', $data = '', array $header = [])
    {
        $result = [
            'code' => 0,
            'msg' => $msg,
            'data' => $data,
        ];
        return json($result,203,$header);
    }
    /**
     * Build a JSON success response: {code: 1, msg, data} with HTTP status 200.
     *
     * Convenience: when $data is a string it is treated as the message and
     * the payload becomes an empty array.
     *
     * @param mixed  $data   payload, or the message itself when a string
     * @param string $msg    success message
     * @param array  $header extra HTTP headers to send
     * @return \think\response\Json
     */
    protected function jsonSuccess( $data = '',$msg = 'success', array $header = [])
    {
        if( is_string($data)){
            $msg = $data;
            $data = [];
        }
        $result = [
            'code' => 1,
            'msg' => $msg,
            'data' => $data,
        ];
        return json($result,200,$header);
    }
}
<file_sep>/app/crm/controller/ServiceGoods.php
<?php
namespace app\crm\controller;
use \app\crm\model\ServiceApply as ServiceApplyModel;
use app\crm\model\ServiceGoods as ServiceGoodsModel;
use app\crm\model\ServiceGoodsRule as ServiceGoodsRuleModel;
use app\crm\validate\ServiceGoodsValidate;
/**
* @des 服务商品套餐
* @package app\crm\controller
*/
class ServiceGoods extends Base
{
    /**
     * @OA\Get(path="/v1/service_goods/lists",
     *     tags={"套餐管理"},
     *     summary="获取套餐商品列表 [套餐列表]",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="pageSize",in="query",description="取多少条 默认 10",required=false,@OA\Schema(type="integer")),
     *     @OA\Parameter(name="pageNum",in="query",description="分页条数 默认 1 ",required=false,@OA\Schema(type="integer")),
     *     @OA\Parameter(name="is_up",in="query",description="是否上架 默认 0(下架), 1(上架) ",required=false,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function lists(){
        $pageSize = input('get.pageSize',10) ;
        $pageNum = input('get.pageNum',1);
        $is_up = input('get.is_up',null);
        $model = new ServiceGoodsModel();
        $countModel = null;
        // Only filter on is_up when an explicit 0/1 value was supplied.
        if (isset($is_up) && in_array(intval($is_up) ,[0,1])){
            $countModel = $model = $model->where('is_up',intval($is_up));
        }else{
            $countModel = $model;
        }
        $total = $countModel->count();
        $list = $model->page($pageNum , $pageSize)->order('create_time','desc')->select();
        $_return['list'] = $list;
        $_return['total'] = $total;
        return (count($list)) ? $this->jsonSuccess($_return) : $this->jsonError('暂无数据');
    }
    /**
     * @OA\Get(path="/v1/service_goods/detail",
     *     tags={"套餐管理"},
     *     summary="套餐详情",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="id",in="query",description="套餐商品的id",required=true,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function detail(){
        $id = intval(input('get.id',0));
        if(!$id ) return $this->jsonError('缺少id');
        $list = ServiceGoodsModel::with(['moduleInfo'])->find(intval($id));
        if($list->moduleInfo){
            // Re-shape the flat module list into a two-level tree:
            // top-level modules (pid == 0) each carry their children in 'son'.
            $arr = $list->moduleInfo->toArray();
            $newModuleInfo = [];
            foreach($arr as $k => $v){
                if($v['pid'] != 0) continue;
                $newModuleInfo[$v['id']] = $v;
                $newModuleInfo[$v['id']]['son'] = [];
            }
            foreach($arr as $k => $v){
                if($v['pid'] == 0) continue;
                foreach($newModuleInfo as $sk => $sv){
                    if($v['pid'] == $sk){
                        array_push($newModuleInfo[$sk]['son'],$v);
                    }
                }
            }
            // Re-index from module-id keys to a plain 0..n list.
            sort($newModuleInfo);
            unset($list->moduleInfo);
            $list->moduleInfo = $newModuleInfo;
        }
        return ($list) ? $this->jsonSuccess($list) : $this->jsonError('暂无数据');
    }
    /**
     * @OA\Post(path="/v1/service_goods/create",
     *     tags={"套餐管理"},
     *     summary="添加套餐",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="multipart/form-data",
     *             @OA\Schema(
     *                 @OA\Property(description="产品名称", property="name", type="string", default="基础套餐"),
     *                 @OA\Property(description="产品类型 1 公开发售 2 业务定制", property="type", type="string", default="1"),
     *                 @OA\Property(description="产品价格", property="price", type="string",default="15000"),
     *                 @OA\Property(description="有效期类型 1 天 2 月 3 年", property="expiry_type", type="string",default="2"),
     *                 @OA\Property(description="有效期时长", property="expiry_num", type="string",default="3"),
     *                 @OA\Property(description="产品套餐描述", property="desc", type="string",default="套餐描述描述描述描述......."),
     *                 @OA\Property(description="是否上架 0 下架 1 上架", property="is_up", type="string",default="0"),
     *                 required={"name","type", "price","expiry_type","expiry_num"})
     *         )
     *     ),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function create(){
        $data = input('post.');
        try{
            Validate(ServiceGoodsValidate::class)->check($data);
            $flag = (new ServiceGoodsModel())->save($data);
            return ($flag) ? $this->jsonSuccess('添加成功') : $this->jsonError('添加失败');
        // BUGFIX: the bare `ValidateException` was unimported in this namespace,
        // so this catch never matched and validation errors escaped. Fully
        // qualify the framework class.
        }catch (\think\exception\ValidateException $e){
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Put(path="/v1/service_goods/edit",
     *     tags={"套餐管理"},
     *     summary="修改套餐",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="application/x-www-form-urlencoded" ,
     *             @OA\Schema(
     *                 @OA\Property(description="产品id", property="id", type="integer", default="1"),
     *                 @OA\Property(description="产品名称", property="name", type="string", default=""),
     *                 @OA\Property(description="产品类型 1 公开发售 2 业务定制", property="type", type="string", default=""),
     *                 @OA\Property(description="产品价格", property="price", type="string",default=""),
     *                 @OA\Property(description="有效期类型 1 天 2 月 3 年", property="expiry_type", type="string",default=""),
     *                 @OA\Property(description="有效期时长", property="expiry_num", type="string",default=""),
     *                 @OA\Property(description="产品套餐描述", property="desc", type="string",default=""),
     *                 @OA\Property(description="是否上架 0 下架 1 上架", property="is_up", type="string",default=""),
     *                 required={"id"}
     *             )
     *         )
     *     ),
     *     @OA\Response(response="200",description="修改成功")
     * )
     */
    public function edit(){
        try{
            $data = input('post.');
            if(!intval($data['id'])) return $this->jsonError('id 参数必填');
            $model = ServiceGoodsModel::find($data['id']);
            if (!$model) return $this->jsonError('数据不存在');
            // Drop empty fields so a partial update does not blank out columns.
            $data = filterEmptyVars($data);
            $flag = $model->save($data);
            return ($flag) ? $this->jsonSuccess('修改成功') : $this->jsonError('修改失败');
        // BUGFIX: fully qualified (was an unresolvable bare class name).
        }catch (\think\exception\ValidateException $e){
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Delete(path="/v1/service_goods/delete",
     *     tags={"套餐管理"},
     *     summary="删除套餐",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="id",in="query",description="商品id",required=true,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="修改成功")
     * )
     */
    public function delete(){
        $id = intval(input('get.id'));
        if(!$id) return $this->jsonError('id 参数必填');
        // Refuse to delete a package that any enterprise has applied for.
        $isset = ServiceApplyModel::where('goods_id',$id)->find();
        if($isset) return $this->jsonError('该套餐已被企业申请使用 禁止删除');
        $model = ServiceGoodsModel::find($id);
        if (!$model) return $this->jsonError('数据不存在');
        $flag = $model->delete($id);
        return ($flag) ? $this->jsonSuccess('删除成功') : $this->jsonError('删除失败');
    }
}
<file_sep>/app/crm/controller/ManagerRole.php
<?php
namespace app\crm\controller;
use app\crm\model\ManagerRoleMap;
use app\crm\model\ManagerRoleRule as ManagerRoleRuleModel;
use app\crm\validate\ManagerRoleMenusValidate;
use app\crm\validate\ManagerRoleValidate;
use app\crm\model\ManagerRole as ManagerRoleModel;
use think\exception\ValidateException;
use think\facade\Db;
/**
* @des 权限角色
* @package app\crm\controller
*/
class ManagerRole extends Base
{
    /**
     * @OA\Get(path="/v1/manager_role/list",
     *     tags={"权限管理"},
     *     summary="获取管理员角色列表 [角色管理]",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="pageSize",in="query",description="分页数量 默认10",required=false,@OA\Schema(type="integer")),
     *     @OA\Parameter(name="page",in="query",description="分页页码",required=false,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function lists(){
        $page = (int) input('page',0);
        $pageSize = (int) input('pageSize',10);
        $total = ManagerRoleModel::count();
        // NOTE(review): select() returns a Collection, which is always truthy
        // as an object — the '暂无数据' branch below may be unreachable; confirm.
        $list = ManagerRoleModel::page($page,$pageSize)->select();
        $_return['list'] = $list;
        $_return['total'] = $total;
        return ($list) ? $this->jsonSuccess($_return) : $this->jsonError('暂无数据');
    }
    /**
     * @OA\Post(path="/v1/manager_role/create",
     *     tags={"权限管理"},
     *     summary="创建管理员角色 ",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="multipart/form-data",
     *             @OA\Schema(
     *                 @OA\Property(description="角色名称", property="role_name", type="string", default="超级管理员"),
     *                 required={"role_name"})
     *         )
     *     ),
     *     @OA\Response(response="200",description="添加成功"),
     * )
     */
    public function create(){
        $data = input('post.');
        try{
            Validate(ManagerRoleValidate::class)->check($data);
            $model = new ManagerRoleModel();
            // Role names must be unique.
            $isset = $model->where('role_name',$data['role_name'])->find();
            if($isset) throw new ValidateException($data['role_name'] .' 已存在');
            $flag = $model->save($data);
            return ($flag) ? $this->jsonSuccess('添加成功') : $this->jsonError('添加失败');
        }catch (ValidateException $e){
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Post(path="/v1/manager_role/bind_rule_menu",
     *     tags={"权限管理"},
     *     summary="角色绑定权限节点",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="multipart/form-data",
     *             @OA\Schema(
     *                 @OA\Property(description="角色id", property="role_id", type="string", default="7"),
     *                 @OA\Property(description="menu_ids", property="menu_ids", type="string", default="176,188,189"),
     *                 required={"role_id","menu_ids"})
     *         )
     *     ),
     *     @OA\Response(response="200",description="添加成功"),
     * )
     */
    public function bindRuleMenu(){
        $data = input('post.');
        try{
            Validate(ManagerRoleMenusValidate::class)->check($data);
            $model = new ManagerRoleRuleModel();
            // Initial bind only; use edit_bind_rule_menu to change an existing set.
            $isset = $model->where('role_id',$data['role_id'])->column('id');
            if($isset) return $this->jsonError('该角色已绑定过权限菜单节点');
            $arr = [] ;
            $menu_id_arr = explode(',',$data['menu_ids']);
            foreach($menu_id_arr as $k => $v){
                $arr[$k]['role_id'] = $data['role_id'];
                $arr[$k]['menu_id'] = $v;
            }
            $flag = $model->saveAll($arr);
            return ($flag) ? $this->jsonSuccess('添加成功') : $this->jsonError('添加失败');
        }catch (ValidateException $e){
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Put(path="/v1/manager_role/edit_bind_rule_menu",
     *     tags={"权限管理"},
     *     summary="修改角色权限节点",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="application/x-www-form-urlencoded" ,
     *             @OA\Schema(
     *                 @OA\Property(description="角色id", property="role_id", type="string", default="7"),
     *                 @OA\Property(description="menu_ids", property="menu_ids", type="string", default="176,188,189"),
     *                 required={"role_id","menu_ids"})
     *         )
     *     ),
     *     @OA\Response(response="200",description="添加成功"),
     * )
     */
    public function editBindRuleMenu(){
        $data = input('post.');
        try{
            Validate(ManagerRoleMenusValidate::class)->check($data);
            $arr = [] ;
            $menu_id_arr = explode(',',$data['menu_ids']);
            foreach($menu_id_arr as $k => $v){
                $arr[$k]['role_id'] = $data['role_id'];
                $arr[$k]['menu_id'] = $v;
            }
            $model = new ManagerRoleRuleModel();
            // Replace-all strategy: delete the old bindings and save the new
            // set inside one transaction.
            try{
                $model->startTrans();
                $model->where('role_id',$data['role_id'])->delete();
                $flag = $model->saveAll($arr);
                if($flag){
                    $model->commit();
                    return $this->jsonSuccess('修改成功');
                }
                return $this->jsonError('修改失败');
            }catch (\Exception $e){
                $model->rollback();
                // BUGFIX: previously returned a ternary on $flag, which is
                // undefined when the exception was thrown before saveAll()
                // completed — always report failure here.
                return $this->jsonError('修改失败');
            }
        }catch (ValidateException $e){
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Get(path="/v1/manager_role/get_rule_menu",
     *     tags={"权限管理"},
     *     summary="获取管理员角色权限节点 ",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="role_id",in="query",description="角色id",required=true,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function getRuleMenu(){
        $role_id = (int) input('role_id',0);
        if(!$role_id) return $this->jsonError('缺少 角色id');
        $menus = Db::name('manager_role_rule')->alias('r')->field('m.menu_name,m.id')
            ->leftJoin('xfb_manager_role_menus m','r.menu_id = m.id')
            ->where('r.role_id',$role_id)
            ->select();
        return ($menus) ? $this->jsonSuccess($menus) : $this->jsonError('暂无数据');
    }
    /**
     * @OA\Put(path="/v1/manager_role/edit",
     *     tags={"权限管理"},
     *     summary="修改管理员角色",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="application/x-www-form-urlencoded" ,
     *             @OA\Schema(
     *                 @OA\Property(description="角色id", property="id", type="integer", default="2"),
     *                 @OA\Property(description="角色名称", property="role_name", type="string", default="kkk"),
     *                 required={"id","role_name"})
     *         )
     *     ),
     *     @OA\Response(response="200",description="修改成功"),
     * )
     */
    public function edit(){
        $data = input('post.');
        try{
            // Role id 1 is the super administrator and is immutable.
            if(intval($data['id']) == 1) throw new ValidateException('超级管理员 角色不允许修改');
            Validate(ManagerRoleValidate::class)->check($data);
            $model = new ManagerRoleModel();
            $isset = $model->where('id',$data['id'])->find();
            if(!$isset) throw new ValidateException('数据不存在 修改失败');
            $flag = $isset->save($data);
            return ($flag) ? $this->jsonSuccess('修改成功') : $this->jsonError('修改失败');
        }catch (ValidateException $e){
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Delete(path="/v1/manager_role/delete",
     *     tags={"权限管理"},
     *     summary="删除管理员角色 ",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="id",in="query",description="角色id",required=true,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="添加成功"),
     * )
     */
    public function delete(){
        $id = intval(input('get.id'));
        try{
            if(! $id ) throw new ValidateException('角色id 必填');
            // Role id 1 is the super administrator and cannot be removed.
            if($id == 1) throw new ValidateException('超管角色不允许删除');
            $model = new ManagerRoleModel();
            $isset = $model->where('id',$id)->find();
            if(!$isset) throw new ValidateException('数据不存在 修改失败');
            $flag = $isset->delete();
            return ($flag) ? $this->jsonSuccess('删除成功') : $this->jsonError('删除失败');
        }catch (ValidateException $e){
            return $this->jsonError($e->getMessage());
        }
    }
}
<file_sep>/app/crm/controller/BetterLive.php
<?php
namespace app\crm\controller;
use app\crm\controller\Base;
use app\crm\model\BetterLiveArticleConf;
use app\crm\model\Manager as ManagerModel;
use think\Db;
use think\facade\Cache;
use think\Request;
use Uploader\Uploader;
use Upyun\Config;
use Upyun\Upyun;
use \app\crm\model\ServiceApply as ServiceApplyModel;
use \app\crm\model\Enterprises as EnterprisesModel;
class BetterLive extends Base
{
    /**
     * @OA\Get(path="/v1/better_live/company_list",
     *     tags={"优质生活 [not_menu]"},
     *     summary="优质生活企业列表",
     *     security={{"api_key": {}}},
     *     @OA\Response(response=200,description="成功"),
     * )
     */
    public function companyList(Request $request)
    {
        $pageNum = $request->get('page',0);
        $limit = $request->get('limit',10);
        $company_name = $request->get('company_name',null);
        // Companies whose active (non-expired) service package includes
        // module 44 (the "better live" feature).
        $list = ServiceApplyModel::alias('sa')
            ->field('sa.company_id,sg.name as goods_name,sa.end_time,blac.max_article_num,blac.manager_name')
            ->leftJoin('xfb_service_goods sg','sg.id = sa.goods_id')
            ->leftJoin('xfb_service_goods_rule ru','sg.id = ru.service_goods_id')
            ->leftJoin('xfb_better_live_article_conf blac' ,'blac.company_id = sa.company_id')
            ->whereIn('ru.module_id',[44])
            ->where('sa.end_time','>',date('Y-m-d H:i:s'))
            ->order('sa.apply_time','desc');
        if($company_name){
            $company_ids = EnterprisesModel::whereRaw(' (instr(corporate_name,"'.$company_name.'")) ')->column('id');
            $list->whereIn('sa.company_id',$company_ids);
        }
        $total = $list->count();
        $listItem = $list->page($pageNum,$limit)->select();
        foreach ($listItem as $k => $v){
            $listItem[$k]->company_name = EnterprisesModel::where('id',$v->company_id)->value('corporate_name');
            // Fall back to the configured default article quota.
            // (env key spelling "DEFUALT" must match the .env file — kept as-is.)
            if(!$listItem[$k]->max_article_num) {
                $listItem[$k]->max_article_num = env('BETTER_LIVE_COMPANY_DEFUALT_ARTICLE_NUM',5);
            }
        }
        return $this->jsonSuccess(['list' => $listItem ,'total' => $total]);
    }
    /**
     * @OA\Post(path="/v1/better_live/saveArticleNum",
     *     tags={"修改优质生活发布文章数量"},
     *     summary="修改优质生活发布文章数量 ",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="multipart/form-data",
     *             @OA\Schema(
     *                 @OA\Property(description="最大发布文章数量", property="max_article_num", type="string", default="5"),
     *                 @OA\Property(description="企业id", property="company_id", type="string", default="0"),
     *                 @OA\Property(description="企业名称", property="company_name", type="string", default=""),
     *                 required={"max_article_num"})
     *         )
     *     ),
     *     @OA\Response(response="200",description="修改成功"),
     * )
     */
    public function saveArticleNum(Request $request){
        $max_article_num = (int) $request->post('max_article_num',0);
        $company_id = (int) $request->post('company_id',0);
        $company_name = (string) $request->post('company_name',0);
        if(!$max_article_num) return $this->jsonError('max_article_num 参数必填');
        $id = BetterLiveArticleConf::where('company_id',$company_id)->value('id');
        $data = [
            'company_id' => $company_id,
            'company_name' => $company_name,
            'max_article_num' => $max_article_num,
            'manager_id' => $request->manager_id ,
            'manager_name' => \app\crm\model\Manager::where('id',$request->manager_id)->value('real_name') ,
        ];
        $redis = app('redisClient');
        $key = 'better_live_article_conf';
        if($id){
            $data['update_time'] = date('Y-m-d H:i:s');
            // Mirror the quota into the sorted set (score = quota, member = company).
            $redis->zadd($key,$data['max_article_num'],$data['company_id']);
            $res = BetterLiveArticleConf::where('id',$id)->save($data);
        }else{
            // Mirror the quota into the sorted set (score = quota, member = company).
            $redis->zadd($key,$data['max_article_num'],$data['company_id']);
            $res = BetterLiveArticleConf::create($data);
        }
        // BUGFIX: the failure branch previously said '修改成功' (success).
        return ($res) ? $this->jsonSuccess('修改成功'): $this->jsonError('修改失败');
    }
}
<file_sep>/app/crm/middleware/ActionLogger.php
<?php
namespace app\crm\middleware;
use Jwt\JwtToken;
use think\facade\Db;
use think\facade\Log;
use think\facade\Response;
use think\facade\Request;
use MongoDB\Driver\Exception as MongoDbException;
class ActionLogger
{
    // IP blacklist (currently empty — requests are never filtered here).
    const BLACK_IP = [
    ];
    // Per-day MongoDB collection name prefix.
    // NOTE(review): "PRRFIX" is a typo for "PREFIX"; renaming would change
    // nothing functionally here but the const is public — kept as-is.
    const TABLE_PRRFIX = 'crm_visit_log_';
    /**
     * Middleware entry point: record one visit-log document per request into
     * a date-partitioned MongoDB collection, then continue the pipeline.
     * Logging failures are swallowed (logged) so they never break the request.
     */
    public function handle($request, \Closure $next)
    {
        try{
            // Skip CORS preflight requests.
            if ($request->method() == 'OPTIONS') return $next($request);
            $actionData = [];
            $actionData['request_url'] = $request->url();
            $actionData['domain'] = $request->domain();
            $actionData['rule'] = $request->rule()->getRule();
            $actionData['route'] = $request->rule()->getRoute();
            $actionData['method'] = $request->method();
            $actionData['params'] = $request->param();
            // Route option "prefix" + route string is "Controller@action".
            $controllerWithAction = explode('@' ,$request->rule()->getOption('prefix') . $request->rule()->getRoute()) ;
            $actionData['controller'] = $controllerWithAction[0];
            $actionData['action'] = $controllerWithAction[1];
            $actionData['ip'] = $request->ip();
            try{
                // Geo-resolve the client IP; region string is pipe-delimited:
                // country|?|province|city|isp (index 1 unused here).
                $ip2regin = app('ip2Region');
                $info = $ip2regin->memorySearch($actionData['ip']);
                $arr = explode('|', $info['region'] );
                $actionData['country'] = $arr[0];
                $actionData['province'] = $arr[2];
                $actionData['city'] = $arr[3];
                $actionData['isp'] = $arr[4];
            }catch (\Exception $e){
                // On lookup failure the error message is stored in 'country'.
                $actionData['country'] = $e->getMessage();
                $actionData['province'] = null;
                $actionData['city'] = null;
                $actionData['isp'] = null;
            }
            $actionData['action_time'] = date('Y-m-d H:i:s' , $request->time());
            $jwt = $request->header('authorization');
            if($jwt){
                // Enrich the log with the authenticated user, and stash the
                // manager identity on the request for downstream controllers.
                $jwtArr = JwtToken::parseToken($jwt);
                $actionData['uid'] = $jwtArr['uid'];
                $actionData['nickname'] = $jwtArr['nickname'];
                $actionData['token_type'] = $jwtArr['sub'];
                $actionData['token_iss'] = $jwtArr['iss'];
                $request->manager_id = $jwtArr['uid'];
                $request->manager_name = $jwtArr['nickname'];
            }else{
                $actionData['uid'] = $actionData['token_type'] = $actionData['token_iss'] = $actionData['nickname'] = '';
            }
            // One collection per calendar day, e.g. crm_visit_log_2024-01-01.
            $collection = self::TABLE_PRRFIX . date('Y-m-d');
            Db::connect('mongo')->table($collection)->insert($actionData);
        }catch (MongoDbException $e){
            Log::error($e->getMessage());
        }catch (\Exception $e){
            Log::error($e->getMessage());
        }
        return $next($request);
    }
}
<file_sep>/app/crm/controller/ServiceApply.php
<?php
namespace app\crm\controller;
use app\crm\model\Enterprises;
use app\crm\model\EnterprisesPrivate;
use app\crm\model\ServiceApply as ServiceAppluModel;
use app\crm\model\ServiceGoods as ServiceGoodsModel;
use app\crm\model\ServiceGoodsRule as ServieGocodsRuleModel;
use app\crm\validate\ServiceApplyValidate;
use app\crm\validate\ServiceGoodsValidate;
use think\exception\ValidateException;
use think\Request;
/**
 * Service-package application management.
 *
 * Endpoints to list, inspect, create, edit, audit and delete enterprise
 * applications for service packages. Enterprise data lives in a different
 * database, so company info is fetched per row rather than joined.
 *
 * NOTE(review): the catch blocks below reference ValidateException, which is
 * not imported in this file — without `use think\exception\ValidateException;`
 * the catch resolves to app\crm\controller\ValidateException and never
 * matches; confirm the intended class.
 *
 * @package app\crm\controller
 */
class ServiceApply extends Base
{
    /**
     * @OA\Get(path="/v1/service_apply/lists",
     *     tags={"套餐申请管理"},
     *     summary="申请列表 [申请列表]",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="pageSize",in="query",description="取多少条 默认 10",required=false,@OA\Schema(type="integer")),
     *     @OA\Parameter(name="pageNum",in="query",description="分页条数 默认 1 ",required=false,@OA\Schema(type="integer")),
     *     @OA\Parameter(name="apply_realname",in="query",description="申请人姓名",required=false,@OA\Schema(type="string")),
     *     @OA\Parameter(name="company_name",in="query",description="公司名称",required=false,@OA\Schema(type="string")),
     *     @OA\Parameter(name="goods_id",in="query",description="服务等级 (套餐商品) 名称 ",required=false,@OA\Schema(type="integer")),
     *     @OA\Parameter(name="manager_id",in="query",description="选择申请对接的专员 ",required=false,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function lists(){
        // Pagination and optional filters, all read from the query string.
        $pageSize = input('get.pageSize',10) ;
        $pageNum = input('get.pageNum',1);
        $apply_realname = input('get.apply_realname',null);
        $company_name = input('get.company_name',null);
        $goods_id = input('get.goods_id',null);
        $manager_id = input('get.manager_id',null);
        $model = new ServiceAppluModel();
        $countModel = null;
        if($company_name){
            // Company names live in the enterprises table: resolve the fuzzy
            // name to ids first, then filter applications by company_id.
            $company_ids = Enterprises::where('corporate_name','like','%'.$company_name.'%')
                ->column('id');
            if(count($company_ids)){
                $countModel = $model = $model->whereIn('company_id',$company_ids);
            }else{
                return $this->jsonError('暂无数据');
            }
        }
        // $countModel mirrors $model so count() and select() share conditions.
        $countModel = $model = $model->withJoin([
            'serviceGoodsInfo' => ['name','type','price','expiry_type','expiry_num'] ,
            'managerInfo' => ['name','real_name','nickname'],
            'applyManagerInfo' => ['name','real_name','nickname'],
        ],'LEFT');
        if ($apply_realname){
            $countModel = $model = $model->where('apply_realname','like','%'.$apply_realname.'%');
        }
        if ($manager_id){
            $countModel = $model = $model->where('manager_id',intval($manager_id));
        }
        if ($goods_id){
            $countModel = $model = $model->where('goods_id',intval($goods_id));
        }
        $total = $countModel->count();
        $list = $model->page(intval($pageNum) , intval($pageSize))->order('create_time','desc')->select();
        // Cross-database lookup: attach company info row by row.
        foreach($list as $k => $v){
            $list[$k]['companyInfo'] = Enterprises::field('id,user_name,contacts,contact_number,corporate_name')->where('id',$v['company_id'])->find();
        }
        $return['list'] = $list;
        $return['total'] = $total;
        return ($list) ? $this->jsonSuccess($return) : $this->jsonError('暂无数据');
    }
    /**
     * @OA\Get(path="/v1/service_apply/detail",
     *     tags={"套餐申请管理"},
     *     summary="申请详情",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="id",in="query",description="申请的 id",required=true,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function detail(){
        $id = intval(input('get.id',0));
        if(!$id ) return $this->jsonError('缺少id');
        $data = ServiceAppluModel::with(['serviceGoodsInfo'])->find($id);
        if($data){
            // Cross-database lookup for the owning company.
            $data['companyInfo'] = Enterprises::field('user_name,corporate_name,legal_person_name')->where('id',$data['company_id'])->find();
        }
        return ($data) ? $this->jsonSuccess($data) : $this->jsonError('暂无数据');
    }
    /**
     * @OA\Post(path="/v1/service_apply/create",
     *     tags={"套餐申请管理"},
     *     summary="提交申请",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="multipart/form-data",
     *             @OA\Schema(
     *                 @OA\Property(description="企业 id", property="company_id", type="integer", default="1"),
     *                 @OA\Property(description="服务套餐 id", property="goods_id", type="integer", default="1"),
     *                 @OA\Property(description="申请人真实姓名", property="apply_realname", type="string",default="赵四"),
     *                 @OA\Property(description="申请人真手机号", property="apply_mobile", type="string",default="15926900789"),
     *                 @OA\Property(description="申请人真邮箱", property="apply_email", type="string",default="<EMAIL>"),
     *                 @OA\Property(description="合同编号", property="contract_sn", type="string",default=""),
     *                 @OA\Property(description="实收金额", property="real_pay_money", type="string",default=""),
     *                 @OA\Property(description="有效时长", property="apply_expiry_num", type="integer",default=""),
     *                 @OA\Property(description="有效期类型 1 天 2 月 3 年", property="apply_expiry_type", type="integer",default=""),
     *                 required={"company_id","goods_id", "apply_realname","apply_mobile","apply_email"})
     *         )
     *     ),
     *     @OA\Response(response="200",description="添加成功")
     * )
     */
    public function create(Request $request){
        $data = input('post.');
        try{
            // Ownership: the manager recording the application owns it.
            $data['manager_id'] = $request->manager_id;
            Validate(ServiceApplyValidate::class)->check($data);
            $goods_info = (new ServiceGoodsModel())->find($data['goods_id']);
            if($goods_info['is_up'] == 0 ) return $this->jsonError('该套餐商品 未上架');
            // The target company must exist and be fully approved.
            $company = (new Enterprises())
                ->where('id','=',intval($data['company_id']))
                ->where('audited_time','>',0)
                ->where('record_status', '=', 2)
                ->where('status', '=', 1)
                ->find();
            if(!$company) return $this->jsonError('企业不存在 或 未通过审核');
            $data['order_sn'] = ServiceAppluModel::getOrderSn();
            $model = new ServiceAppluModel();
            // Reject if the company already has an approved, unexpired order.
            $isset = $model->where('company_id',intval($data['company_id']))
                ->where('status' ,1)
                ->where('end_time' ,'>', date('Y-m-d H:i:s' ,time()))
                ->find();
            if($isset) return $this->jsonError('该企业已经申请过套餐且还未到期');
            // NOTE(review): apply_expiry_type is read without an isset() check;
            // a request that sets apply_expiry_num but omits apply_expiry_type
            // triggers an undefined-index notice — confirm intended behaviour.
            if(!isset($data['apply_expiry_num']) || !(intval($data['apply_expiry_num']) > 0) || !(intval($data['apply_expiry_type']) > 0) )
            {
                return $this->jsonError('申请订单缺少 有效时长 和 有效时长类型');
            }
            // New applications always start as status 0 (pending audit).
            $data['status'] = 0 ;
            $flag = $model->save($data);
            return ($flag) ? $this->jsonSuccess('添加成功') : $this->jsonError('添加失败');
        }catch (ValidateException $e){
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Put(path="/v1/service_apply/edit",
     *     tags={"套餐申请管理"},
     *     summary="修改申请",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="application/x-www-form-urlencoded" ,
     *             @OA\Schema(
     *                 @OA\Property(description="申请 id", property="id", type="integer", default="30"),
     *                 @OA\Property(description="企业 id", property="company_id", type="integer", default=""),
     *                 @OA\Property(description="服务套餐 id", property="goods_id", type="integer", default=""),
     *                 @OA\Property(description="申请人真实姓名", property="apply_realname", type="string",default=""),
     *                 @OA\Property(description="申请人真手机号", property="apply_mobile", type="string",default=""),
     *                 @OA\Property(description="申请人真邮箱", property="apply_email", type="string",default=""),
     *                 @OA\Property(description="合同编号", property="contract_sn", type="string",default=""),
     *                 @OA\Property(description="实收金额", property="real_pay_money", type="integer",default=""),
     *                 required={"id","company_id","goods_id", "apply_realname","apply_mobile","apply_email"}
     *             )
     *         )
     *     ),
     *     @OA\Response(response="200",description="修改成功")
     * )
     */
    public function edit(Request $request){
        try{
            $data = input('post.');
            if(!intval($data['id'])) return $this->jsonError('id 参数必填');
            Validate(ServiceApplyValidate::class)->check($data);
            $model = new ServiceAppluModel();
            $now = date('Y-m-d H:i:s' ,time());
            // Editing is blocked while the company has a live approved order.
            $isSameApply = $model->where('company_id',intval($data['company_id']))
                ->where('status' ,1)
                ->where('end_time' ,'>', $now)
                ->find();
            if($isSameApply) return $this->jsonError('该企业已经申请过套餐且还未到期');
            $isset = $model->withJoin([
                'serviceGoodsInfo' => ['name','type','price','expiry_type','expiry_num'] ,
            ])->find(intval($data['id']));
            if (!$isset) return $this->jsonError('申请数据不存在');
            $data = filterEmptyVars($data);
            // Status can only be changed via the dedicated apply() endpoint.
            if(isset($data['status']) ) {
                unset($data['status']);
            }
            // Ownership (manager_id) was fixed at creation; keep the original.
            $data['manager_id'] = $isset['manager_id'];
            $data['update_time'] = $now;
            $flag = $model->where('id',$isset['id'])->update($data);
            return ($flag) ? $this->jsonSuccess('修改成功') : $this->jsonError('修改失败');
        }catch (ValidateException $e){
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Put(path="/v1/service_apply/apply",
     *     tags={"套餐申请管理"},
     *     summary="审核申请",
     *     security={{"api_key": {}}},
     *     @OA\RequestBody(
     *         @OA\MediaType(
     *             mediaType="application/x-www-form-urlencoded" ,
     *             @OA\Schema(
     *                 @OA\Property(description="申请 id", property="id", type="integer", default="30"),
     *                 @OA\Property(description="订单状态 0:待审核 1 :已审核", property="status", type="integer",default=""),
     *                 required={"id","status"}
     *             )
     *         )
     *     ),
     *     @OA\Response(response="200",description="修改成功")
     * )
     */
    public function apply(Request $request){
        try{
            $id = input('post.id');
            $status = (int)input('post.status');
            if(!$id) return $this->jsonError('id 参数必填');
            if(!in_array($status,[0,1]) ) return $this->jsonError('status 参数不正确');
            $model = new ServiceAppluModel();
            $isset = $model->withJoin([
                'serviceGoodsInfo' => ['name','type','price','expiry_type','expiry_num'] ,
            ])->find($id);
            if (!$isset) return $this->jsonError('数据不存在');
            // Only when approving: reject if the company already holds an
            // approved, unexpired order.
            if($status == 1) {
                $hasApplyOrder = ServiceAppluModel::where('company_id',$isset['company_id'])
                    ->where('status',1)
                    ->where('end_time','>=',date('Y-m-d H:i:s'))
                    ->count();
                if($hasApplyOrder > 0 ) return $this->jsonError('审核失败:该企业已有审过通过且还未到期的订单');
            }
            $company = (new Enterprises())
                ->where('id','=',$isset['company_id'])
                ->where('audited_time','>',0)
                ->where('record_status', '=', 2)
                ->where('status', '=', 1)
                ->find();
            if(!$company) return $this->jsonError('企业不存在 或 未通过审核');
            // Private-company detection via package rules was retired:
            // $private_company_rule_id = env('PRIVATE_COMPANY_RUEL_ID', 45) ; // 固定
            // $is_private_company = ServieGocodsRuleModel::where('service_goods_id',$isset->goods_id)->where('module_id',$private_company_rule_id)->count();
            $update_company_data = [];
            switch ($status){
                case 0:
                    // Revoke: clear service window and self-support flag.
                    $data['end_time'] = $data['apply_time'] = null;
                    $update_company_data['self_support'] = 0;
                    break;
                case 1:
                    if($isset->apply_expiry_num && $isset->apply_expiry_type){
                        // Service window is derived from the application itself
                        // (not from the package defaults as it used to be).
                        $time_arr = $model->getEndTimeByApply($isset);
                        $data['apply_time'] = $time_arr['apply_time'];
                        $data['end_time'] = $time_arr['end_time'];
                        $update_company_data['self_support'] = 1;
                        $update_company_data['self_support_number'] = EnterprisesPrivate::getEnterprisesPrivateApplySn($isset->company_id);
                    }else{
                        return $this->jsonError('申请订单缺少 有效时长 和 有效时长类型');
                    }
                    break;
            }
            // Record which manager performed the audit.
            $data['apply_manager_id'] = $request->manager_id;
            $data['update_time'] = date('Y-m-d H:i:s');
            $data['status'] = $status;
            // Applications flagged as private-company also update the
            // enterprise's self-support state.
            if($isset['is_private_company'] == 1){
                try{
                    Enterprises::where('id',$isset->company_id)->update($update_company_data);
                }catch (\Exception $e){
                    return $this->jsonError('修改私有企业认证标识失败');
                }
            }
            $flag = $model->where('id',$isset['id'])->update($data);
            return ($flag) ? $this->jsonSuccess('修改成功') : $this->jsonError('修改失败');
        }catch (ValidateException $e){
            return $this->jsonError($e->getMessage());
        }
    }
    /**
     * @OA\Delete(path="/v1/service_apply/delete",
     *     tags={"套餐申请管理"},
     *     summary="删除申请",
     *     security={{"api_key": {}}},
     *     @OA\Parameter(name="id",in="query",description="申请id",required=true,@OA\Schema(type="integer")),
     *     @OA\Response(response="200",description="修改成功")
     * )
     */
    public function delete(){
        $id = intval(input('get.id'));
        if(!$id) return $this->jsonError('id 参数必填');
        $info = ServiceAppluModel::find($id);
        if (!$info) return $this->jsonError('数据不存在');
        $now = date('Y-m-d H:i:s' ,time());
        // An approved, still-active application must not be deleted.
        if($info['status'] == 1 && $info['end_time'] > $now) return $this->jsonError('该申请已通过审核且还未到期 禁止删除');
        $flag = $info->delete($id);
        return ($flag) ? $this->jsonSuccess('删除成功') : $this->jsonError('删除失败');
    }
}
<file_sep>/app/admin/model/CustomFormComponent.php
<?php
namespace app\admin\model;
use think\Model;
/**
 * Model for custom-form components (auto-generated model template).
 */
class CustomFormComponent extends Model
{
    protected $autoWriteTimestamp = 'datetime'; // auto-write datetime timestamp fields
    protected $createTime = 'add_time';         // creation-time column name
    protected $updateTime = 'update_time';      // update-time column name
}
<?php
namespace app\admin\model;
use think\Model;
use think\Db;
/**
 * Admin menu model.
 *
 * Menus form a tree; each row stores a materialized path of ancestor ids in
 * `path` (e.g. "0,3,17"). _save() keeps descendant paths consistent when a
 * menu is re-parented.
 */
class Menu extends Model
{
    protected $autoWriteTimestamp = 'datetime'; // auto-write datetime timestamp fields
    protected $createTime = 'add_time';         // creation-time column name
    protected $updateTime = 'update_time';      // update-time column name
    protected $auto = ['path'];                 // recompute `path` on every save

    // Mutator: treat an empty pid as the root (0).
    public function setPidAttr($value)
    {
        if(!strlen($value)) return 0;
        return $value;
    }

    // Accessor: map the isshow flag to its display label ("是"/"否").
    public function getIsshowAttr($value)
    {
        $status = json_decode('["\u662f","\u5426"]',true);
        if($value == 1)return $status[0];
        return $status[1];
    }

    // Mutator: derive this row's materialized path from its parent's path.
    public function setPathAttr($value,$data){
        // Without a pid we cannot recompute, so keep the stored path.
        if(!isset($data['pid']))return $this->where('id', $data['id'])->value('path');
        if($data['pid'] == 0 || $data['pid'] == null) return '0';
        $parent_path = $this->where('id', $data['pid'])->value('path');
        $parent_path = trim($parent_path ,',');
        return $parent_path.','.$data['pid'] ;
    }

    /**
     * Save an edited menu row; when its parent (and therefore its path)
     * changed, rewrite the `path` of the row and of every descendant in one
     * transaction.
     *
     * @param array $post posted row data; must contain `id`
     * @return bool true on success, false when any single update fails
     * @throws \Exception when the transaction fails unexpectedly
     */
    public function _save($post){
        $oldpath = $this->where('id','=',$post['id'])->value('path');
        $path = $this->setPathAttr($post['pid'],$post);
        // Hierarchy unchanged: a plain update is enough.
        if($oldpath == $path)return $this->isUpdate(true)->save($post);
        $all_update_data = [];
        // Every descendant contains ",<id>," somewhere in its path.
        $son = $this->field('id,path')
            ->where('','exp','instr(path,",'.$post['id'].',")')
            ->select()->toArray();
        // Rewrite each descendant's path prefix to the new one.
        foreach($son as $k => $v){
            $new_path = str_replace($oldpath,$path ,$v['path']);
            $v['path'] = $new_path;
            $all_update_data[$k] = $v;
        }
        $parent_update = [
            'id' => $post['id'] ,
            'pid' => (intval($post['pid']) ? $post['pid'] : 0) ,
            'path' => $path
        ];
        $parent_update = array_merge($post,$parent_update);
        // The edited row itself is updated first.
        array_unshift($all_update_data , $parent_update );
        Db::startTrans();
        try{
            foreach($all_update_data as $k => $v){
                $flag = Db::table($this->getTable())->update($v);
                if(!$flag){
                    Db::rollback();
                    return false;
                }
            }
            Db::commit();
            return true;
        }catch (\Exception $e){
            // Fixed: `Exception` was previously unqualified, resolving to the
            // non-existent app\admin\model\Exception, so this catch could
            // never match and the throw referenced a missing class.
            Db::rollback();
            throw new \Exception($e->getMessage());
        }
    }
}
<?php
/**
* Created by PhpStorm.
* User: jyolo
* Date: 2017/2/6
* Time: 10:22
*/
namespace app\admin\model;
use think\Model;
class Form extends Model
{
    protected $autoWriteTimestamp = 'datetime'; // auto-write datetime timestamp fields
    protected $createTime = 'add_time';         // creation-time column name
    protected $updateTime = 'update_time';      // update-time column name
    // Mutator: HTML-encode the stored form markup before it reaches the DB.
    protected function setHtmlAttr($value){
        return htmlentities($value);
    }
}
<?php
namespace app\crm\validate;
use app\crm\model\ManagerRoleMenus as ManagerRoleMenusModel;
use app\crm\model\Manager;
use app\crm\model\ManagerRole;
use app\crm\model\ManagerRoleMap;
use think\Validate;
/**
 * Validates role -> permission-menu binding payloads.
 */
class ManagerRoleMenusValidate extends Validate
{
    protected $rule = [
        'role_id' => 'require|ifRoleExists',
        'menu_ids' => 'require|ifMenusExists',
    ];
    // Fixed: the second key used to be 'menu_ids.max', which matches no rule
    // above, so the custom "required" message was never shown.
    protected $message = [
        'role_id.require' => '角色 id必填',
        'menu_ids.require' => '权限菜单id 必填',
    ];
    // Custom rule: the referenced role must exist.
    protected function ifRoleExists($value ,$rule ,$data){
        $isset = ManagerRole::where('id',$value)->find();
        if (!$isset) return '角色 id不存在';
        return true;
    }
    // Custom rule: every menu id in the comma-separated list must exist;
    // reports the missing ids in the error message.
    protected function ifMenusExists($value ,$rule ,$data){
        $arr = explode(',',$value);
        $isset = ManagerRoleMenusModel::whereIn('id',$arr)->column('id');
        if(count($arr) != count($isset)) {
            $diff = array_diff($arr,$isset);
            return join(',',$diff) . ' id 权限菜单不存在';
        }
        return true;
    }
}
<?php
namespace app\crm\service;
use app\crm\model\ManagerRoleMenus;
use app\crm\model\ServiceRuleMenu as ServiceRuleMenuModel;
use app\crm\model\RuleMenu as RuleMenuModel;
use think\Exception;
use think\exception\HttpException;
use think\facade\Cache;
use think\facade\Db;
/**
 * Permission menu / permission-node builder.
 *
 * Scans an application's OpenApi (swagger) annotations and materialises a
 * two-level permission-menu tree into that app's menu model, with caching.
 * Top-level menus come from annotation tags; second-level entries come from
 * each route's summary.
 *
 * NOTE(review): $model, $menuCacheKey and $moduleCacheKey are created as
 * dynamic properties in the constructor (deprecated since PHP 8.2) —
 * declare them explicitly when upgrading.
 *
 * Class RuleMenu
 * @package app\crm\service
 */
class RuleMenuBuilder
{
    // Supported apps and the menu model each one persists into.
    private const APP_LIST = [
        'crm' => ManagerRoleMenus::class,
        'company' => ServiceRuleMenuModel::class
    ];
    private $swaggerArr ;   // decoded swagger document of the scanned app
    private $topMenu = [] ; // menu_name => id map of persisted top-level menus
    public $appName ;
    /**
     * @param string $appName one of the APP_LIST keys ('crm' / 'company')
     * @throws Exception for an unknown app name
     */
    public function __construct($appName)
    {
        if (!in_array($appName,array_keys(self::APP_LIST))) throw new Exception('未知的应用');
        // Scan the app's source tree for OpenApi annotations.
        $scanPath = root_path() . 'app/'.$appName;
        $s = \OpenApi\scan($scanPath);
        $this->appName = $appName;
        $this->swaggerArr = json_decode($s->toJson(),true);
        $modelClass = self::APP_LIST[$appName];
        $this->model = (new $modelClass());
        $this->menuCacheKey = $appName.'_rule_menu';
        $this->moduleCacheKey = $appName.'_module_tree';
    }
    /**
     * Get the two-level tree of visible menus (is_show = 1), cached.
     *
     * @param int $reCache pass non-zero to bypass and rebuild the cache
     * @return array top-level rows, each with its children under 'son'
     */
    public function getAllMenuTree($reCache = 0){
        $data = Cache::get($this->menuCacheKey);
        if($data && $reCache === 0) return $data;
        $data = $this->model->field('id,pid,menu_name,sort,is_show')->where([
            ['is_show' ,'=' ,1],
            ['pid' ,'=' ,0],
        ])->order('sort','desc')->select()->toArray();
        foreach($data as $k => $v){
            $sub = $this->model->field('id,pid,menu_name,sort,is_show')->where([
                ['is_show' ,'=' ,1],
                ['pid' ,'=' , $v['id']],
            ])->order('sort','desc')->select()->toArray();
            $data[$k]['son'] = $sub;
        }
        Cache::set($this->menuCacheKey,$data);
        return $data;
    }
    /**
     * Get the two-level tree of checkable permission nodes (un_check = 0),
     * cached.
     *
     * @param int $reCache pass non-zero to bypass and rebuild the cache
     * @return array
     */
    public function getModuleTree($reCache = 0){
        $data = Cache::get($this->moduleCacheKey);
        if($data && $reCache === 0) return $data;
        $data = $this->model->field('id,pid,menu_name,sort,is_show')->where([
            ['un_check' ,'=' ,0],
            ['pid' ,'=' ,0],
        ])->order('sort','desc')->select()->toArray();
        foreach($data as $k => $v){
            $sub = $this->model->field('id,pid,menu_name,sort,is_show')->where([
                ['un_check' ,'=' ,0],
                ['pid' ,'=' , $v['id']],
            ])->order('sort','desc')->select()->toArray();
            $data[$k]['son'] = $sub;
        }
        Cache::set($this->moduleCacheKey,$data);
        return $data;
    }
    /**
     * (Re)build the whole permission menu inside one transaction.
     *
     * @return true|string true on success, otherwise the error message
     */
    public function initMenu(){
        try{
            Db::startTrans();
            $this->buildTopMenu();
            $this->buildSubMenu();
            Db::commit();
            return true;
        }catch (\Exception $e){
            Db::rollback();
            return $e->getMessage();
        }
    }
    // Derive top-level menu rows (one per swagger tag) from the scanned paths.
    // A "[not_menu]" marker in the tag hides the menu (is_show = 0); every
    // controller sharing the tag is collected into controller_name (CSV).
    private function getTopMenuNodeInfo(){
        $topNode = $topNodeTemp = [];
        foreach($this->swaggerArr['paths'] as $k => $v){
            $v = array_values($v)[0];
            $data = [];
            if(strpos($v['tags'][0],'[not_menu]') !== false){
                $data['pid'] = $data['is_show'] = 0;
                $data['menu_name'] = str_replace(['[not_menu]',' '],'',$v['tags'][0]);
            }else{
                $data['pid'] = 0;
                $data['is_show'] = 1;
                $data['menu_name'] = $v['tags'][0];
            }
            $data['app_name'] = $this->appName;
            // operationId is "Fully\Qualified\Controller::method".
            $controller = explode('::',$v['operationId'])[0];
            $controllerName = explode('\\',$controller);
            $controllerName = array_pop($controllerName);
            $data['controller_name'] = [];
            $topNodeTemp[$data['menu_name'] .'_'. $controllerName] = [];
            $topNode[$data['menu_name']] = $data;
        }
        // Fold the collected (menu, controller) pairs back into each menu row.
        foreach($topNodeTemp as $k => $v){
            $var = explode('_',$k);
            foreach ($topNode as $sk => $sv){
                if($var[0] == $sk){
                    array_push($topNode[$sk]['controller_name'],$var[1]);
                }
            }
        }
        unset($topNodeTemp);
        foreach($topNode as $k =>$v){
            $topNode[$k]['controller_name'] = join(',',$topNode[$k]['controller_name']);
        }
        sort($topNode);
        return $topNode;
    }
    // Derive second-level menu rows (one per route) from the scanned paths.
    // A "[label]" bracket in the summary makes the entry visible with that
    // label; otherwise the raw summary is stored hidden.
    private function getSubMenuInfo(){
        $node = [];
        foreach($this->swaggerArr['paths'] as $k => $v){
            $v = array_values($v)[0];
            $data = [] ;
            $data['app_name'] = $this->appName;
            $top_menu_name =str_replace(['[not_menu]',' '],'',$v['tags'][0]);
            // Parent id comes from the topMenu map built by buildTopMenu().
            $data['pid'] = $this->topMenu[$top_menu_name];
            $oprateId = explode('::',$v['operationId']);
            $data['controller_name'] = $oprateId[0];
            preg_match('/\[(\S+)\]/',$v['summary'],$match);
            if($match){
                $data['menu_name'] = $match[1] ;
                $data['is_show'] = 1;
            }else{
                $data['menu_name'] = $v['summary'];
                $data['is_show'] = 0 ;
            }
            $data['method_name'] = $oprateId[1];
            $data['request_route'] = $k;
            array_push($node,$data);
        }
        return $node;
    }
    /**
     * Upsert the top-level menus, then cache their (menu_name => id) map in
     * $this->topMenu for buildSubMenu().
     */
    private function buildTopMenu(){
        $topNode = $this->getTopMenuNodeInfo();
        foreach($topNode as $k => $v){
            // Match an existing row by either name or controller list.
            $isset = $this->model->field('id,menu_name,controller_name')
                ->whereOr('menu_name',$v['menu_name'])
                ->whereOr('controller_name',$v['controller_name'])
                ->find();
            if($isset){
                $this->model->where([
                    ['id','=',$isset['id']],
                ])->save($v);
            }else{
                $v['create_time'] = $v['update_time'] = date('Y-m-d H:i:s');
                $this->model->insert($v);
            }
        }
        $topMenu = $this->model->field('id,menu_name')->where([
            ['pid','=',0],
        ])->select()->toArray();
        $this->topMenu = [];
        foreach($topMenu as $k => $v){
            $this->topMenu[$v['menu_name']] = $v['id'];
        }
    }
    /**
     * Upsert the second-level menus, matching existing rows by their route.
     */
    private function buildSubMenu(){
        $subNode = $this->getSubMenuInfo();
        foreach($subNode as $k => $v){
            $isset = $this->model->where([
                ['request_route','=',$v['request_route']],
                ['pid','>',0],
            ])->find();
            if($isset){
                $this->model->where([
                    ['id','=',$isset['id']],
                ])->update($v);
            }else{
                $v['create_time'] = $v['update_time'] = date('Y-m-d H:i:s');
                $this->model->insert($v);
            }
        }
    }
}<file_sep>/app/crm/validate/ManagerRoleMapValidate.php
<?php
namespace app\crm\validate;
use app\crm\model\Manager;
use app\crm\model\ManagerRole;
use app\crm\model\ManagerRoleMap;
use think\Validate;
/**
 * Validates manager -> role binding payloads.
 */
class ManagerRoleMapValidate extends Validate
{
    protected $rule = [
        'manager_id' => 'require|ifManagerExists',
        'role_ids' => 'require|MaxRoleId|ifRoleExists',
    ];
    // Fixed: the second key used to be 'role_ids.max', which matches no rule
    // above (the custom MaxRoleId rule supplies its own message), so the
    // "required" message was never shown.
    protected $message = [
        'manager_id.require' => '管理员id必填',
        'role_ids.require' => '角色id 必填',
    ];
    // Custom rule: the manager must exist.
    protected function ifManagerExists($value ,$rule ,$data){
        $isset = Manager::where('id',$value)->find();
        if (!$isset) return '管理员id不存在';
        return true;
    }
    // Custom rule: a manager may be bound to at most two roles.
    protected function MaxRoleId($value ,$rule ,$data){
        $arr = explode(',',$value);
        if(count($arr) > 2) return '一个管理员最多绑定2个角色';
        return true;
    }
    // Custom rule: every role id in the comma-separated list must exist;
    // reports the missing ids in the error message.
    protected function ifRoleExists($value ,$rule ,$data){
        $arr = explode(',',$value);
        $isset = ManagerRole::whereIn('id',$arr)->column('id');
        if(count($arr) != count($isset)) {
            $diff = array_diff($arr,$isset);
            return join(',',$diff) . ' id 角色不存在';
        }
        return true;
    }
}
<?php
namespace app\admin\model;
use think\Model;
/**
 * Setting-group model: maps form/role id lists (stored as CSV strings) to
 * human-readable names, and normalises array input back to CSV.
 */
class SettingGroup extends Model
{
    protected $autoWriteTimestamp = 'datetime'; // auto-write datetime timestamp fields
    protected $createTime = 'add_time';         // creation-time column name
    protected $updateTime = 'update_time';      // update-time column name

    // Accessor: turn a CSV of custom-form ids into their comma-joined titles.
    public function getFormIdAttr($value)
    {
        if(!$value) return '暂无';
        if(strpos($value,',')){
            $arr = explode(',' ,$value);
            // Fixed: this branch used to query the `role` table while reading
            // form_title, so multi-value form_id fields never resolved.
            $res = $this->name('custom_form')->where('id','in',$arr)->select();
            $return = '';
            foreach($res as $k => $v){
                $return .= $v['form_title'].',';
            }
            return trim($return ,',');
        }else{
            return $this->name('custom_form')->where('id',$value)->value('form_title');
        }
    }
    // Mutator: accept either an array of ids or a ready CSV string.
    public function setFormIdAttr($value)
    {
        $value = is_array($value) ? join(',',$value) : $value;
        return $value;
    }
    // Accessor: turn a CSV of role ids into their comma-joined role names.
    public function getRoleIdsAttr($value)
    {
        if(!$value) return '暂无';
        if(strpos($value,',')){
            $arr = explode(',' ,$value);
            $res = $this->name('role')->where('id','in',$arr)->select();
            $return = '';
            foreach($res as $k => $v){
                $return .= $v['role_name'].',';
            }
            return trim($return ,',');
        }else{
            return $this->name('role')->where('id',$value)->value('role_name');
        }
    }
    // Mutator: accept either an array of ids or a ready CSV string.
    public function setRoleIdsAttr($value)
    {
        $value = is_array($value) ? join(',',$value) : $value;
        return $value;
    }
}
<?php
namespace app\admin\controller;
use app\admin\model\Menu;
use CMaker\Component;
use think\facade\View;
class Index
{
    // NOTE(review): this class extends nothing, yet clear_cache() calls
    // $this->success() — presumably it should extend an admin base controller
    // providing success(); confirm before relying on that endpoint.
    /**
     * Render the admin layout with the visible menu tree.
     */
    public function index()
    {
        $where[] = ['isshow','=','1'];
        // Non-super-admins would only see their own menus (disabled for now):
        // if(!in_array(1 ,explode(',',session('role.id')))){
        //     $where[] = ['id','in',session('role.auth_ids')];
        // }
        $config['treeData'] = Menu::where($where)->select()->toArray();
        $config['field'] = 'id,pid,name';
        // Build the nested menu tree for the layout template.
        $menu = Component::get_tree_array($config ,true ,true);
        View::assign('menu',$menu);
        return View::fetch();
    }
    // Dashboard main frame.
    public function main(){
        return View::fetch();
    }
    /**
     * Clear the runtime cache directory (everything under runtime/cache
     * except the log folder), optionally asking the front-end domain to
     * clear its cache as well.
     */
    public function clear_cache(){
        // RUNTIME_PATH is expected to be defined by the framework bootstrap.
        $runtimePath = RUNTIME_PATH . 'cache';
        // Asynchronously tell the configured front-end to clear its cache too.
        if(config('runtime.domain')){
            AnsyncCurlTask('clearCache');
        }
        $ingore_dir = ['.','..','log'];
        // Recursive closure: delete files first, then the emptied directories.
        $func = function ($dir) use(&$func ,$ingore_dir){
            $dirs = scandir($dir);
            foreach ($dirs as $k => $v){
                if(in_array($v,$ingore_dir))continue;
                if(is_dir($dir.DIRECTORY_SEPARATOR.$v)) $func($dir.DIRECTORY_SEPARATOR.$v);
                is_file($dir.DIRECTORY_SEPARATOR.$v) ? unlink($dir.DIRECTORY_SEPARATOR.$v) : rmdir($dir.DIRECTORY_SEPARATOR.$v);
            }
        };
        $func($runtimePath);
        $this->success('清除成功');
    }
}
<?php
/**
* Created by PhpStorm.
* User: jyolo
* Date: 2017/2/8
* Time: 17:49
*/
namespace app\admin\model;
use think\Model;
class Role extends Model
{
    protected $autoWriteTimestamp = 'datetime'; // auto-write datetime timestamp fields
    protected $createTime = 'add_time';         // creation-time column name
    protected $updateTime = 'update_time';      // update-time column name
    /**
     * Fetch one role row as an array.
     *
     * @param array|int|string $where full where array, or a bare role id
     * @param array|string     $field columns to select
     * @return array
     */
    public function getOne($where,$field = ['*']){
        if(gettype($where) !== 'array' ){
            $where = ['id' => $where];
        }
        $return = $this->where($where)->field($field)->find();
        $return = toArray($return);
        return $return;
    }
    /**
     * Resolve a role's bound rule ids to (module, controller, action) rows.
     *
     * @param int $id role id
     * @return array|false rule rows, or false when none are bound
     */
    public function getRule($id){
        $res = $this->getOne(['id' => $id],'rule_ids');
        // Guard: a missing row or empty rule list used to reach the query and
        // produce the broken raw SQL "id in()".
        if(!$res || !strlen(trim((string)$res['rule_ids']))) return false;
        $rule = new Rule();
        // Builder-style IN condition instead of hand-concatenated raw SQL.
        $rules = $rule->field('module,controller,action')->where('id','in',$res['rule_ids'])->select();
        $rules = toArray($rules);
        if($rules) return $rules;
        return false;
    }
}
}<file_sep>/app/admin/model/Setting.php
<?php
namespace app\admin\model;
use think\Model;
/**
 * Generic key/value settings model ('keys' column is the primary key;
 * 'values' may hold a JSON-encoded array).
 */
class Setting extends Model
{
    protected $pk = 'keys'; // non-standard primary key column
    /**
     * Return every setting as a flat [key => value] map.
     */
    public function _getAll(){
        $res = $this->select()->toArray();
        $set = [];
        foreach($res as $k => $v){
            $set[$v['keys']] = $v['values'];
        }
        return $set;
    }
    /**
     * Upsert a batch of settings inside a transaction and refresh the
     * 'at_setting' cache.
     *
     * NOTE(review): the cache is rewritten with ONLY the keys posted in this
     * call — confirm no reader expects the complete settings map there.
     *
     * @param array $post [key => value] pairs; array values are JSON-encoded
     * @return bool
     */
    public function _save($post){
        $insert = $update = [];
        // Shared index, so $insert/$update may have gaps — harmless for
        // saveAll()/insertAll().
        $i = 0;
        foreach($post as $k => $v){
            // Route each key into the update or insert batch.
            $isset = $this->where('keys','=',$k)->count('keys');
            if(!$isset){
                $insert[$i]['keys'] = $k;
                $insert[$i]['values'] = is_array($v) ? json_encode($v) : $v;
            }else{
                $update[$i]['keys'] = $k;
                $update[$i]['values'] = is_array($v) ? json_encode($v) : $v;
            }
            $i++;
        }
        $this->startTrans();
        if(count($update)){
            $flag = $this->isUpdate(true)->saveAll($update);
            if(!$flag){
                $this->rollback();
                return false;
            }
        }
        if(count($insert)){
            $flag = $this->insertAll($insert);
            if(!$flag){
                $this->rollback();
                return false;
            }
        }
        // Refresh the settings cache (0 = no expiry).
        $all = array_merge($update,$insert);
        $at_setting_cache = [];
        foreach($all as $k => $v){
            $at_setting_cache[$v['keys']] = $v['values'];
        }
        cache('at_setting',$at_setting_cache,0);
        $this->commit();
        return true;
    }
    // Mutator: JSON-encode array values before storage.
    protected function setValuesAttr($value)
    {
        return is_array($value) ? json_encode($value) : $value;
    }
    // Accessor: transparently decode JSON values; plain strings pass through.
    protected function getValuesAttr($value)
    {
        return json_decode($value,true) ? json_decode($value,true) : $value;
    }
}
<?php
// +----------------------------------------------------------------------
// | ThinkPHP [ WE CAN DO IT JUST THINK ]
// +----------------------------------------------------------------------
// | Copyright (c) 2006~2018 http://thinkphp.cn All rights reserved.
// +----------------------------------------------------------------------
// | Licensed ( http://www.apache.org/licenses/LICENSE-2.0 )
// +----------------------------------------------------------------------
// | Author: liu21st <<EMAIL>>
// +----------------------------------------------------------------------
use think\facade\Route;
// Root route of the crm app; the full v1 route table below is commented out
// (routing is presumably handled elsewhere — verify against the app config).
Route::get('/',function (){
    echo 'crm index';
});
//// 定时器相关
//Route::group('crontab',function(){
// // 检查套餐申请是否过期
// Route::get('checkApplyOverTime','Crontab@checkApplyOverTime');
//
//
//})->prefix('app\crm\controller\\');
//Route::get('crontab','Crontab@checkApplyOverTime');
//
//Route::group('v1',function(){
//
// // 首页相关
// Route::get('index','Index@index');
//
// // 测试上传
// Route::post('upload','Index@upload');
//
//
//
// // 登录相关
// Route::group('auth',function (){
// // 登录
// Route::post('login','Auth@login');
// // 注册
// Route::post('register','Auth@register');
// // 发送验证码
// Route::post('sendVerifyCode','Auth@sendVerifyCode');
// // 退出
// Route::post('loginout','Auth@loginOut')->middleware([\middleware\CheckToken::class]);
// // 检查token合法性 for test
// Route::post('checktoken','Auth@checktoken')->middleware([\middleware\CheckToken::class]);
// });
//
// // 必须要登录的 token的 认证的接口
// Route::group(function(){
//
// Route::group(function(){
//
// // 套餐服务商品
// Route::group('service_goods',function (){
// // 添加
// Route::post('create','ServiceGoods@create');
// // 修改
// Route::put('edit','ServiceGoods@edit');
// // 获取列表
// Route::get('lists','ServiceGoods@lists');
// // 获取详情
// Route::get('detail','ServiceGoods@detail');
// // 删除
// Route::delete('delete','ServiceGoods@delete');
// });
//
// // 套餐服务商品对应的权限
// Route::group('service_goods_rule',function (){
// // 添加
// Route::post('create','ServiceGoodsRule@create');
// // 修改
// Route::put('edit','ServiceGoodsRule@edit');
// // 获取详情
// Route::get('detail','ServiceGoodsRule@detail');
// // 删除
// Route::delete('delete','ServiceGoodsRule@delete');
// });
//
//
// // 套餐服务对应功能权限模块
// Route::group('service_rule_menu',function (){
// // 初始化 / 刷新 企业客服应用权限菜单节点
// Route::post('init','ServiceRuleMenu@initMenu');
// // 获取所有企业客服应用权限菜单节点
// Route::get('lists','ServiceRuleMenu@lists');
// });
//
//
// // 套餐商品服务申请管理
// Route::group('service_apply',function (){
// // 添加
// Route::post('create','ServiceApply@create');
// // 修改
// Route::put('edit','ServiceApply@edit');
// // 修改
// Route::put('apply','ServiceApply@apply');
// // 获取列表
// Route::get('lists','ServiceApply@lists');
// // 获取详情
// Route::get('detail','ServiceApply@detail');
// // 删除
// Route::delete('delete','ServiceApply@delete');
// });
//
// // 套餐商品服务申请管理
// Route::group('enterprises',function (){
// // 获取审核通过的 企业列表
// Route::get('passed_lists','Enterprises@passedLists');
// // 获取企业详情
// Route::get('detail','Enterprises@detail');
//
// // 获取私有企业认证申请列表
// Route::get('private_list','Enterprises@privateCompanyList');
//
// });
//
// // 后台管理员
// Route::group('manager',function (){
// // 获取管理员列表
// Route::get('list','Manager@lists');
// // 管理员绑定角色
// Route::post('bindRole','Manager@bindRole');
// // 管理员修改绑定角色
// Route::put('editBindRole','Manager@editBindRole');
// });
//
// // 后台管理员角色管理
// Route::group('manager_role',function (){
// // 获取管理员角色列表
// Route::get('list','ManagerRole@lists');
// // 创建管理员角色列表
// Route::post('create','ManagerRole@create');
// // 角色绑定权限节点
// Route::post('bind_rule_menu','ManagerRole@bindRuleMenu');
// // 获取角色已绑定权限节点
// Route::get('get_rule_menu','ManagerRole@getRuleMenu');
// // 修改角色权限节点
// Route::put('edit_bind_rule_menu','ManagerRole@editBindRuleMenu');
// // 修改管理员角色列表
// Route::put('edit','ManagerRole@edit');
// // 删除管理员角色列表
// Route::delete('delete','ManagerRole@delete');
// });
//
// // 后台管理员角色权限菜单
// Route::group('manager_role_menus',function (){
// // 获取管理员权限菜单初始化 // 只允许超管操作
// Route::get('init','ManagerRoleMenus@init')->middleware(\app\crm\middleware\CheckRoleRuleOnlyAdmin::class);
// // 获取管理员权限菜单
// Route::get('getMenu','ManagerRoleMenus@getMenu');
// Route::get('getModuleTree','ManagerRoleMenus@getModuleTree');
// });
//
//
// // 后台优质生活
// Route::group('better_live',function (){
// // 获取管理员权限菜单
// Route::get('company_list','BetterLive@companyList');
// Route::post('saveArticleNum','BetterLive@saveArticleNum');
//
// });
//
//
// })->middleware([
// \app\crm\middleware\CheckRoleRule::class
// ]);
//
//
//
// })->middleware([
// \app\crm\middleware\CheckToken::class
// ]);
//
//
//
//})->prefix('app\crm\controller\\')
// ->middleware([
// \middleware\CheckRequsetMethod::class,
// \app\crm\middleware\ActionLogger::class,
// ])
// ->allowCrossDomain();
<file_sep>/app/admin/common.php
<?php
// +----------------------------------------------------------------------
// | ThinkPHP [ WE CAN DO IT JUST THINK ]
// +----------------------------------------------------------------------
// | Copyright (c) 2006-2016 http://thinkphp.cn All rights reserved.
// +----------------------------------------------------------------------
// | Licensed ( http://www.apache.org/licenses/LICENSE-2.0 )
// +----------------------------------------------------------------------
// | Author: 流年 <<EMAIL>>
// +----------------------------------------------------------------------
use think\facade\Request;
use think\exception\HttpResponseException;
use think\facade\Db;
/**
 * Generate a unique, fixed-length id.
 *
 * Two high-entropy uniqid() values are concatenated and hashed, giving a
 * 32-character lowercase hex string.
 *
 * @return string
 */
function get_uniqid() {
    return md5(uniqid('', true) . uniqid('', true));
}
// Template helper: render a form component of the given type.
// NOTE: the function intentionally shares its name with the FormMaker class;
// PHP keeps function and class names in separate symbol tables.
function FormMaker($type){
    return FormMaker::build($type);
}
//获取js脚本的钩子
/**
 * Template helper: return the JS/static-asset hook accumulated by FormMaker.
 *
 * @return mixed output of FormMaker::getStaticHook()
 */
function FormMakerStaticHook(){
    return FormMaker::getStaticHook();
}
//监听sql语句 需在sql语句之前调用
/**
 * Debug helper: register a listener that echoes every executed SQL statement
 * together with its elapsed time, then dumps the EXPLAIN analysis.
 *
 * Must be called BEFORE the query you want to inspect. Output goes straight
 * into the response body, so this is strictly a development aid.
 *
 * @return void
 */
function showSql(){
    $reporter = function ($sql, $time, $explain) {
        // Statement plus elapsed seconds.
        echo $sql . ' [' . $time . 's]';
        // Performance analysis result.
        dump($explain);
    };
    Db::listen($reporter);
}
/**
 * 解析post where 条件 — translate a posted filter structure into a
 * ThinkPHP-style where array.
 *
 * Array-valued entries (e.g. where[open_time][between time][]) describe a
 * range: both bounds present -> use the posted operator key ($sk, e.g.
 * "between time"); only the first bound -> '>'; only the second -> '<'.
 * Scalar entries become an `instr(column,'value')` expression match.
 *
 * NOTE(review): the scalar branch interpolates $v directly into an SQL
 * expression — presumably callers pre-sanitize it; verify upstream.
 *
 * @param array $postWhere raw filter data from the request
 * @return array where-condition array consumable by the query builder
 */
function parseWhere($postWhere){
    $where = [];
    foreach($postWhere as $k => $v){
        if(is_array($v)){ // array form: where[open_time][between time][]
            // $sv is the [low, high] pair, $sk the operator name posted by the UI
            array_walk($v,function($sv ,$sk)use($k,$v,&$where){
                // both bounds filled -> ranged condition using the posted operator
                if(strlen($sv[0]) && strlen($sv[1])){
                    $where[$k] = [$sk,[$sv[0] ,$sv[1]] ];
                }
                // only the lower bound filled -> greater-than
                if(strlen($sv[0]) && !strlen($sv[1])){
                    $where[$k] = ['>',$sv[0] ];
                }
                // only the upper bound filled -> less-than
                if(!strlen($sv[0]) && strlen($sv[1])){
                    $where[$k] = ['<',$sv[1] ];
                }
            });
        }else{ // scalar form: where[admin_name]
            if(strlen($v)){
                // substring match via SQL instr(); used e.g. for auto-generated
                // path columns (original note: wrap with commas for path fields)
                $where[] = ['' ,'exp' ,'instr('.$k.',\''.$v.'\')'];
            }
        }
    }
    return $where;
}
/**
 * 随机字符串生成 — random alphanumeric string generator.
 *
 * Draws each character uniformly from A-Z, 0-9, a-z using mt_rand().
 * Not cryptographically secure; suitable for display codes only.
 *
 * @param int $length number of characters to produce (default 4)
 * @return null|string random string, or null when $length <= 0
 */
function getRandChar($length = 4){
    $str = null;
    $pool = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz";
    $lastIndex = strlen($pool) - 1;
    for ($i = 0; $i < $length; $i++) {
        // pick one pool position per output character
        $str .= $pool[mt_rand(0, $lastIndex)];
    }
    return $str;
}
/**
 * 返回封装后的API数据到客户端
 * @access protected
 * @param mixed   $data   要返回的数据 (an optional 'count' key is extracted
 *                        and surfaced as the top-level pagination count)
 * @param integer $code   返回的code
 * @param mixed   $msg    提示信息
 * @param string  $type   返回数据格式 (empty = use configured default)
 * @param array   $header 发送的Header信息
 * @return void   never returns normally — throws HttpResponseException so
 *                the framework sends the response and aborts the action
 */
function result($data, $code = 1, $msg = '', $type = '', array $header = []){
    // Pull the optional pagination count out of the payload.
    if(isset($data['count'])){
        $count = $data['count'];
        unset($data['count']);
    }else{
        $count = 0;
    }
    $result = [
        'code'  => $code,
        'msg'   => $msg,
        'time'  => $_SERVER['REQUEST_TIME'],
        'data'  => $data,
        'count' => $count,
    ];
    // Ajax and normal requests may be configured with different formats.
    $isAjax = Request::isAjax();
    // FIX: this file only imports Request/HttpResponseException/Db, so the
    // bare `Config` / `Response` names resolved to non-existent global
    // classes and fatally erred — use the fully-qualified facades instead.
    $ResponseType = $isAjax
        ? \think\facade\Config::get('default_ajax_return')
        : \think\facade\Config::get('default_return_type');
    $type = $type ?: $ResponseType;
    $response = \think\Response::create($result, $type)->header($header);
    throw new HttpResponseException($response);
}
/**
 * Render a stored custom form as an HTML string.
 *
 * Loads every component row belonging to $form_id (left-joined with the
 * setting table so each component picks up its stored value), rebuilds each
 * component via CMaker\Maker::build(), applies its base settings, and
 * concatenates the rendered markup.
 *
 * @param int|string $form_id custom form primary key (cast with intval)
 * @return string concatenated rendered HTML of all components, in sort order
 */
function get_custom_form($form_id){
    $res = Db::name('custom_form_component')
        ->alias('a')
        ->field('action_name,component_name,setting,b.values')
        ->leftJoin('setting b','a.action_name = b.keys')
        ->where('form_id','=',intval($form_id))->order('sorts asc')->select();
    $str = '';
    foreach($res as $k => $v){
        // 'setting' is stored as JSON; decode to reach the 'base' options.
        $set = json_decode($v['setting'] ,true);
        $obj = CMaker\Maker::build($v['component_name']);
        $obj->name($v['action_name']);
        $obj->value($v['values']);
        // Apply each base option as a fluent setter call on the component.
        foreach($set['base'] as $sk => $sv){
            $obj->$sk($sv);
        }
        $str .= $obj->render();
    }
    return $str;
}
/**
 * 获取系统配置 — read one system setting by key.
 *
 * The full setting table is loaded once into the 'system_setting' cache
 * entry (keys column -> values column); subsequent calls hit the cache.
 *
 * @param string $key setting name (the `keys` column)
 * @return string setting value, or '' when the key (or table) is absent
 */
function _config($key){
    $settings = cache('system_setting');
    if (!$settings) {
        $rows = Db::name('setting')->select();
        if (!$rows) return '';
        $settings = [];
        foreach ($rows as $row) {
            $settings[$row['keys']] = $row['values'];
        }
        cache('system_setting', $settings);
    }
    return array_key_exists($key, $settings) ? $settings[$key] : '';
}
/**
 * 修复在php 7.2 中 count 不是数组 就会报错的问题
 * Safe count(): returns 0 for any non-array instead of raising a warning.
 *
 * @param mixed $var value to count
 * @return int element count, or 0 when $var is not an array
 */
function _count($var){
    return is_array($var) ? count($var) : 0;
}
/**
 * Current server time formatted as "Y-m-d H:i:s".
 *
 * @return string e.g. "2024-01-31 08:15:00"
 */
function getNow(){
    // date() defaults to the current timestamp, same as date(..., time()).
    return date('Y-m-d H:i:s');
}
<file_sep>/app/crm/validate/ServiceGoodsRuleValidate.php
<?php
namespace app\crm\validate;
use app\crm\model\ServiceRuleMenu;
use think\Validate;
use app\crm\model\ServiceGoods;
class ServiceGoodsRuleValidate extends Validate
{
    // Validation rules: both fields required; the two custom checks below
    // (checkGoodsIdExists / checkRuleMenuIdExists) verify DB existence.
    protected $rule = [
        'service_goods_id' => 'require|number|checkGoodsIdExists',
        'module_id' => 'require|checkRuleMenuIdExists',
    ];
    // Chinese error messages keyed as 'field.rule' (think\Validate convention).
    protected $message = [
        'service_goods_id.require' => '产品套餐id 必填',
        'service_goods_id.number' => '产品套餐id 必须是数字',
        'module_id.require' => '权限功能id 必填',
    ];
// 自定义验证规则 检查商品 套餐 id 是否存在
protected function checkGoodsIdExists($value, $rule, $data=[])
{
$flag = ServiceGoods::where('id',intval($value))->column('id');
if(!$flag) return '套餐不存在';
return true;
}
// 自定义验证规则 检查权限 id 是否有 不存在的
protected function checkRuleMenuIdExists($value, $rule, $data=[])
{
$ids = explode(',',$value);
$union = '';
foreach ($ids as $k => $v){
$union .= ' select '.$v.' as id from xfb_service_rule_menu union';
}
$union = rtrim($union,'union');
$sql = 'select B.id from (
'.$union.'
) as B
left join (
select id from xfb_service_rule_menu where id in ('.$value.')
) as A
on A.id = B.id
where A.id is null
';
$res = ServiceRuleMenu::query($sql);
if (count($res)){
$notFoundIds = [] ;
foreach($res as $k => $v){
array_push($notFoundIds, $v['id']);
}
return join(',',$notFoundIds) . ' 权限 id 不存在';
}
return true;
}
}<file_sep>/app/crm/controller/ServiceRuleMenu.php
<?php
namespace app\crm\controller;
use app\crm\service\RuleMenu;
use app\crm\service\RuleMenuBuilder;
use think\Db;
use think\Exception;
use think\exception\ValidateException;
use app\crm\validate\ServiceModuleValidate;
use app\crm\model\ServiceRuleMenu as ServiceRuleMenuModel;
use app\crm\model\ServiceGoodsRule as ServiceGoodsRuleModel;
use think\Request;
/**
* @des 企业客服权限菜单
* @package app\crm\controller
*/
class ServiceRuleMenu extends Base
{
    /**
     * @OA\Get(path="/v1/service_rule_menu/lists",
     *   tags={"套餐权限管理 [not_menu]"},
     *   summary="获取所有企业客服应用权限菜单节点",
     *   security={{"api_key": {}}},
     *   @OA\Parameter(name="refreshCache",in="query",description="刷新缓存 [1 , 0 ] 默认 0",required=false,@OA\Schema(type="string")),
     *   @OA\Response(response="200",description="添加成功")
     * )
     */
    public function lists(){
        // refreshCache=1 forces the cached menu tree to be rebuilt.
        $forceRefresh = (int) input('refreshCache', 0);
        $builder = new RuleMenuBuilder('company');
        $tree = $builder->getAllMenuTree($forceRefresh);
        if ($tree) {
            return $this->jsonSuccess($tree, 'success');
        }
        return $this->jsonError($tree);
    }

    /**
     * @OA\Post(path="/v1/service_rule_menu/init",
     *   tags={"套餐权限管理 [not_menu]"},
     *   summary="初始化/刷新 企业客服权限菜单节点",
     *   security={{"api_key": {}}},
     *   @OA\Response(response="200",description="同步成功")
     * )
     */
    public function initMenu(){
        // Automatic menu synchronisation is intentionally disabled; the
        // table must be maintained by hand (see message below).
        return $this->jsonError('关闭自动化; 需手动修改表');
    }
}
<file_sep>/app/admin/model/AdminLog.php
<?php
/**
* Created by PhpStorm.
* User: Administrator
* Date: 2018-04-10
* Time: 17:25
*/
namespace app\admin\model;
use think\Model;
use think\facade\Request;
/**
 * Admin action log model.
 *
 * Every field listed in $insert is auto-populated on insert through the
 * matching set<Field>Attr() mutator (ThinkPHP magic naming), capturing the
 * logged-in manager and the request's module/controller/action/method/ip.
 */
class AdminLog extends Model
{
    protected $autoWriteTimestamp = 'datetime';//开启自动写入时间字段 (auto-write timestamps, datetime format)
    protected $createTime = 'action_time'; //定义创建时间字段 (creation-time column name)
    // Fields auto-filled on insert via the mutators below.
    protected $insert = ['manager_id','login_name','module','controller','action','method','client_ip'];

    // Id of the manager currently stored in the session.
    protected function setManagerIdAttr(){
        return session('manager.id');
    }
    // Login name of the manager currently stored in the session.
    protected function setLoginNameAttr(){
        return session('manager.login_name');
    }
    // Request routing info: module / controller / action.
    protected function setModuleAttr(){
        return Request::module();
    }
    protected function setControllerAttr(){
        return Request::controller();
    }
    protected function setActionAttr(){
        return Request::action();
    }
    // HTTP verb of the request (GET/POST/...).
    protected function setMethodAttr(){
        return Request::method();
    }
    // Client IP the request originated from.
    protected function setClientIpAttr(){
        return Request::ip();
    }
}<file_sep>/app/crm/validate/ManagerRoleValidate.php
<?php
namespace app\crm\validate;
use app\crm\model\ManagerRole;
use think\Validate;
/**
 * Validation rules for manager-role create/edit requests:
 * role_name is required and limited to 30 characters.
 */
class ManagerRoleValidate extends Validate
{
    protected $rule = [
        'role_name' => 'require|max:30',
    ];
    // Chinese error messages keyed as 'field.rule'.
    protected $message = [
        'role_name.require' => '角色名称必填',
        'role_name.max' => '角色名称最多30个字符',
    ];
}<file_sep>/app/crm/common.php
<?php
/**
* 过滤空的数组元素
* @param $arr
* @return mixed
*/
/**
 * 过滤空的数组元素 — drop every element whose trimmed string form is empty
 * ("" or whitespace-only). Keys of the surviving elements are preserved,
 * exactly as array_filter guarantees.
 *
 * @param array $arr input array of scalar values
 * @return array the array without empty elements
 */
function filterEmptyVars($arr){
    return array_filter($arr, function ($item) {
        return strlen(trim($item)) > 0;
    });
}
<file_sep>/app/crm/controller/ManagerRoleMenus.php
<?php
namespace app\crm\controller;
use app\crm\service\RuleMenuBuilder;
use app\crm\validate\ManagerRoleValidate;
use app\crm\model\ManagerRole as ManagerRoleModel;
use think\exception\ValidateException;
/**
* @des 企业公告
* @package app\crm\controller
*/
class ManagerRoleMenus extends Base
{
    /**
     * @OA\Get(path="/v1/manager_role_menus/init",
     *   tags={"权限菜单 [not_menu]"},
     *   summary="初始化权限菜单 ",
     *   security={{"api_key": {}}},
     *   @OA\Response(response="200",description="添加成功")
     * )
     */
    public function init(){
        // Rebuild the CRM permission-menu tree from its source definitions.
        $menuBuilder = new RuleMenuBuilder('crm');
        return $this->jsonSuccess($menuBuilder->initMenu());
    }

    /**
     * @OA\Get(path="/v1/manager_role_menus/getMenu",
     *   tags={"权限菜单 [not_menu]"},
     *   summary="获取左侧菜单 ",
     *   security={{"api_key": {}}},
     *   @OA\Parameter(name="refreshCache",in="query",description="刷新缓存 默认0 ; 1 为刷新缓存",required=false,@OA\Schema(type="integer")),
     *   @OA\Response(response="200",description="获取成功")
     * )
     */
    public function getMenu(){
        // refreshCache=1 bypasses/rebuilds the cached tree.
        $forceRefresh = intval(input('get.refreshCache', 0));
        $menuBuilder = new RuleMenuBuilder('crm');
        return $this->jsonSuccess($menuBuilder->getAllMenuTree($forceRefresh));
    }

    /**
     * @OA\Get(path="/v1/manager_role_menus/getModuleTree",
     *   tags={"权限菜单 [not_menu]"},
     *   summary="获取所有权限节点 ",
     *   security={{"api_key": {}}},
     *   @OA\Parameter(name="refreshCache",in="query",description="刷新缓存 默认0 ; 1 为刷新缓存",required=false,@OA\Schema(type="integer")),
     *   @OA\Response(response="200",description="获取成功")
     * )
     */
    public function getModuleTree(){
        // Same cache-refresh convention as getMenu().
        $forceRefresh = intval(input('get.refreshCache', 0));
        $menuBuilder = new RuleMenuBuilder('crm');
        return $this->jsonSuccess($menuBuilder->getModuleTree($forceRefresh));
    }
}
<file_sep>/app/crm/middleware/CheckRoleRule.php
<?php
namespace app\crm\middleware;
use app\crm\model\ManagerRoleMap as ManagerRoleMapModel;
use app\crm\model\ManagerRoleMenus;
use Jwt\JwtToken;
use think\facade\Db;
/**
 * Middleware: role-based permission check for CRM routes.
 *
 * Parses the JWT from the Authorization header, loads the manager's roles,
 * and verifies the requested route is granted to one of them. Role id 1
 * (super admin) and routes flagged un_check=1 bypass the check. Any
 * failure — missing token, invalid token, or missing permission — is
 * answered with a 403 JSON payload.
 */
class CheckRoleRule
{
    //检查token的合法性 (validate the token, then the route permission)
    public function handle($request, \Closure $next)
    {
        try{
            if(!$request->header('Authorization')) throw new \Exception('Token not found');
            $tokenStr = $request->header('Authorization');
            // Verify the token; parseToken() yields the payload (incl. uid).
            $info = JwtToken::parseToken($tokenStr);
            $manger_id = ($info['uid']);
            $role = ManagerRoleMapModel::where('manager_id',$manger_id)->column('role_id');
            // Super admin (role id 1) passes unconditionally.
            if(in_array(1,$role)) return $next($request);
            // Routes granted to any of the manager's roles (un_check routes
            // are excluded here and handled separately below).
            $menus = Db::name('manager_role_rule')->alias('r')
                ->leftJoin('xfb_manager_role_menus m','r.menu_id = m.id')
                ->whereIn('r.role_id',$role)
                ->where('m.un_check',0)
                ->column('m.request_route');
            // Compare on the path only — strip the query string.
            $request_url = explode('?',$request->url())[0];
            // Routes flagged un_check=1 pass without a role grant.
            $un_check = ManagerRoleMenus::where('request_route',$request_url)->value('un_check');
            if($un_check && $un_check == 1) return $next($request);
            if(!in_array($request_url,$menus)) throw new \Exception('没有权限进行该操作');
            return $next($request);
        }catch (\Exception $e){
            // Uniform 403 response for every rejection path.
            return json([
                'code' => 0,
                'msg' => 'no permission : ' . $e->getMessage(),
                'data' => [],
            ],403);
        }
    }
}
<file_sep>/app/crm/config/upload.php
<?php
// Upyun (又拍云) upload configuration for the CRM module.
return [
    //又拍云配置 — bucket and operator credentials
    'upyun_bucketname' => "xiaofeibao",
    'upyun_operator_name' => "xiaofeibao",
    'upyun_operator_pwd' =>"<PASSWORD>",
    // Legacy CDN domains kept for rewriting old stored URLs.
    'upyun_domain_old_01' => "http://xiaofeibao.b0.upaiyun.com",
    'upyun_domain_old_02' => "https://xiaofeibao.b0.upaiyun.com",
    'upyun_domain_old_03' => "http://img.xfb315.com",
    // Current public CDN domain.
    'upyun_domain' => "https://img.xfb315.com",
    // date() format used when composing uploaded file names.
    'upyun_file_suffix_format' => 'Y_m_d_H_i_s' ,
    // Remote directory prefix for CRM uploads.
    'upyun_upload_path' => '/company_crm/'
];<file_sep>/app/crm/middleware/CheckRoleRuleOnlyAdmin.php
<?php
namespace app\crm\middleware;
use app\crm\model\ManagerRoleMap as ManagerRoleMapModel;
use Jwt\JwtToken;
use think\facade\Db;
/**
 * Middleware: super-admin-only gate.
 *
 * Parses the JWT from the Authorization header and only lets the request
 * through when the manager holds role id 1 (super admin). Everything else
 * receives a 403 JSON response.
 */
class CheckRoleRuleOnlyAdmin
{
    //检查token的合法性 (validate the token, then require super admin)
    public function handle($request, \Closure $next)
    {
        try{
            if(!$request->header('Authorization')) throw new \Exception('Token not found');
            $tokenStr = $request->header('Authorization');
            // Verify the token; parseToken() throws on invalid/expired input.
            $info = JwtToken::parseToken($tokenStr);
            $manger_id = ($info['uid']);
            $role = ManagerRoleMapModel::where('manager_id',$manger_id)->column('role_id');
            // Super admin (role id 1) passes; everyone else is rejected.
            if(in_array(1,$role)) return $next($request);
            // FIX: the original had an unreachable `return $next($request);`
            // after this unconditional throw — removed.
            throw new \Exception('没有权限访问该页面');
        }catch (\Exception $e){
            return json([
                'code' => 0,
                'msg' => 'no permission : ' . $e->getMessage(),
                'data' => [],
            ],403);
        }
    }
}
<file_sep>/app/admin/model/CustomForm.php
<?php
namespace app\admin\model;
use think\Model;
/**
 * 自动化模型的model模板文件 — model for the custom_form table.
 *
 * Pairs with CustomFormComponent: _find/_save/_edit/_del below manage a
 * form row together with its component rows transactionally.
 */
class CustomForm extends Model
{
    protected $autoWriteTimestamp = 'datetime';//开启自动写入时间字段 (auto-write timestamps, datetime format)
    protected $createTime = 'add_time'; //定义创建时间字段 (creation-time column)
    protected $updateTime = 'update_time'; //定义更新时间字段 (update-time column)
public function _find($id){
$id = intval($id);
$data = $this->alias('a')->join('custom_form_component b','b.form_id = a.id')->where([
['a.id' ,'=',$id] ,
['b.form_id' ,'=',$id]
])->order('sorts asc')->select()->toArray();
return $data;
}
    /**
     * Create a new form plus all of its components in one transaction.
     *
     * Each entry of $post['setting'] is a cache key whose cached JSON holds
     * the component's configuration (including action_name).
     *
     * @param array $post posted form data (form_title, setting[], component_name[])
     * @return bool true on success; false after rollback on any failure
     */
    public function _save($post){
        $data['form_title'] = $post['form_title'];
        $this->startTrans();
        $this->isUpdate(false)->save($data);
        $form_id = $this->getLastInsID();
        // No insert id means the form row was not created — abort.
        if(!$form_id){
            $this->rollback();
            return false;
        }
        $component_data = [];
        foreach($post['setting'] as $k => $v){
            // $v is a cache key; the cached value is the component's JSON config.
            $setting = json_decode(cache($v) ,true);
            $component_data[$k]['component_name'] = $post['component_name'][$k];
            $component_data[$k]['action_name'] = $setting['action_name'];
            // Posted order doubles as the sort position.
            $component_data[$k]['sorts'] = $k;
            $component_data[$k]['form_id'] = $form_id;
            $component_data[$k]['setting'] = cache($v);
        }
        $custom_form_component_db = new CustomFormComponent();
        $flag = $custom_form_component_db->isUpdate(false)->saveAll($component_data);
        if(!$flag){
            $this->rollback();
            return false;
        }
        $this->commit();
        return true;
    }
    /**
     * Update an existing form: diff the posted components against the stored
     * ones, then apply updates / inserts / deletes in one transaction.
     *
     * Components carrying a component_id are updated; those without one are
     * inserted; stored components missing from the post are deleted together
     * with their entries in the setting table.
     *
     * @param array $post posted form data (form_id, form_title, setting[],
     *                    component_name[], component_id[], form_order[])
     * @return bool true on success; false after rollback on any failure
     */
    public function _edit($post){
        $update = $del = $add = [];
        foreach($post['setting'] as $k => $v){
            // $v is a cache key; the cached value is the component's JSON config.
            $setting = json_decode(cache($v) ,true);
            if(isset($post['component_id'][$k]) && intval($post['component_id'][$k])){
                // Existing component -> update in place.
                $update[$k]['action_name'] = $setting['action_name'];
                $update[$k]['component_name'] = $post['component_name'][$k];
                $update[$k]['setting'] = cache($v);
                $update[$k]['sorts'] = $post['form_order'][$k];
                $update[$k]['id'] = $post['component_id'][$k];
            }else{
                // New component -> insert.
                $add[$k]['action_name'] = $setting['action_name'];
                $add[$k]['component_name'] = $post['component_name'][$k];
                $add[$k]['setting'] = cache($v);
                $add[$k]['form_id'] = $post['form_id'];
                $add[$k]['sorts'] = $post['form_order'][$k];
            }
        }
        $custom_form_component_db = new CustomFormComponent();
        // Stored ids minus posted ids = components to delete.
        $old = $custom_form_component_db->where('form_id','=',intval($post['form_id']))->column('id');
        $del = array_diff($old ,$post['component_id']);
        $this->startTrans();
        $flag = $this->isUpdate(true)->save(['form_title' => $post['form_title'] ,'id' => intval($post['form_id'])]);
        if(!$flag){
            $this->rollback();
            return false;
        }
        if(count($update)){
            $flag = $custom_form_component_db->saveAll($update);
            if(!$flag){
                $this->rollback();
                return false;
            }
        }
        if(count($del)){
            sort($del);
            // Also remove the deleted components' rows from the setting table.
            $keys = $custom_form_component_db->where('id','in',$del)->column('action_name');
            $settingDb = new Setting();
            $flag = $settingDb->where('keys','in',$keys)->delete();
            if(!$flag){
                $this->rollback();
                return false;
            }
            $flag = $custom_form_component_db->where('id','in',$del)->delete();
            if(!$flag){
                $this->rollback();
                return false;
            }
        }
        if(count($add)){
            $flag = $custom_form_component_db->saveAll($add);
            if(!$flag){
                $this->rollback();
                return false;
            }
        }
        $this->commit();
        return true;
    }
public function _del($id){
$ids = [];
if(is_array($id) && count($id)){
$ids = $id;
}else{
array_push($ids,intval($id));
}
$where = [
['id' ,'in',$ids]
];
$this->startTrans();
showSql();
$flag = $this->where($where)->delete();
if(!$flag){
$this->rollback();
return false;
}
$custom_form_component_db = new CustomFormComponent();
$component_db_where = [
['form_id' ,'in' ,$ids]
];
$flag = $custom_form_component_db->where($component_db_where)->delete();
if(!$flag){
$this->rollback();
return false;
}
$this->commit();
return true;
}
} | 6c27fa5965cf370c14aa70073bd37fc0a2026429 | [
"HTML",
"PHP"
] | 47 | PHP | jyolo/atcmf | f2759cc753fd037f8f720adae74d95a6951232d0 | ba8f4fd50c414be439660d3e6b943c9247f49c70 |
refs/heads/master | <file_sep>angular.module('starter.directive', [])
.directive('hideTabs', function ($rootScope) { //隐藏底部tabs指令
return {
restrict: 'AE',
link: function ($scope) {
$rootScope.hideTabs = 'tabs-item-hide';
//监听$destory事件,这个事件会在页面发生跳转的时候触发
$scope.$on('$destroy', function () {
$rootScope.hideTabs = ' ';
})
}
}
})
.directive('hideShow', function () { //点击触发显示隐藏元素指令
return {
restrict: 'A',
link: function (scope, element, attrs) {
scope.showme = true;
scope.toggle = function (arg) {//每次点击调用此方法都让scope.showme值反转1次
if (arg == 0) {
scope.showme = !scope.showme;
}
}
}
}
})
.directive('scrollTop', function ($ionicScrollDelegate) {//返回顶部指令
return {
restrict: 'AE',
link: function (scope, element, attrs) {
scope.scrollTop = function () {
$ionicScrollDelegate.scrollTop(500);
};
}
}
})
.directive('toolTip', [function () { //提示框tooltip
return {
restrict: 'EA',
templateUrl: 'html/popover/tooltip.html',
scope: {
message: "=",
type: "="
},
link: function (scope, element, attrs) {
}
};
}])
.directive('checkForm', function ($rootScope, CommonService) {
    // Form validation directive. Each directive instance registers itself in
    // $rootScope.verifyarray under its scope.$id; $rootScope.verify is true
    // only while every registered field validates. Also exposes
    // checkAtLeastOne / checkAtLeastOneIsSame group checks on the scope.
    $rootScope.verifyarray = [];
    return {
        restrict: 'A',
        link: function (scope, element, attrs) {
            $rootScope.commonService = CommonService;
            $rootScope.verifyarray = [];
            $rootScope.verify = true;
            $rootScope.verifyarray[scope.$id] = true;
            // Shared validation core: `regular` is the boolean outcome of the
            // type-specific test, `content` the tooltip message, `isshowtip`
            // whether to surface it.
            scope.publicCheckForm = function (regular, value, content,isshowtip) {
                if (regular) {
                    // Passed the pattern, but a bare 0 still counts as invalid.
                    if(value==0){
                        $rootScope.verifyarray[scope.$id] = false;
                        $rootScope.verify = false;
                        return;
                    }
                    $rootScope.verifyarray[scope.$id] = true;
                    $rootScope.verify = true;
                    // Recompute the global flag from every registered field.
                    angular.forEach($rootScope.verifyarray, function (item, index) {
                        if (!item) {
                            $rootScope.verify = false;
                        }
                    })
                } else {
                    // Failed the pattern with a non-empty value: flag + tooltip.
                    if (value || value == 0) {
                        if(isshowtip){
                            $rootScope.commonService.toolTip(content, '');
                        }
                        $rootScope.verifyarray[scope.$id] = false;
                        $rootScope.verify = false;
                    }
                    // Optional field cleared: treat as valid and recompute.
                    if (!attrs.required && value == null) {
                        $rootScope.verifyarray[scope.$id] = true;
                        $rootScope.verify = true;
                        angular.forEach($rootScope.verifyarray, function (item) {
                            if (!item) {
                                $rootScope.verify = false;
                            }
                        });
                    }
                }
            }
            // Type dispatcher called from templates.
            scope.checkForm = function (value, content, type, regular, isShow, maxvalue) {
                // NOTE(review): `isShow || true` is always true, so the
                // "don't show tip" option can never take effect — confirm intent.
                var isShow = isShow || true;
                if (type == 'regular') {
                    // NOTE(review): eval() of the attribute-supplied pattern —
                    // safe only if `regular` always comes from templates, never
                    // from user input; verify call sites.
                    scope.publicCheckForm(eval(regular).test(value), value, content, isShow)
                }
                if (type == 'mobilephone') { // CN mobile number
                    scope.publicCheckForm(/^1(3|4|5|7|8)\d{9}$/.test(value), value, content, isShow)
                }
                if (type == 'maxvalue') { // 0 < value <= maxvalue
                    scope.publicCheckForm(value > 0 && value <= maxvalue, value, content, isShow);
                }
                if (type == 'positivenumber') { // non-negative number, <=2 decimals (e.g. price)
                    scope.publicCheckForm(/^(0|[1-9][0-9]{0,9})(\.[0-9]{1,2})?$/.test(value), value, content, isShow)
                }
                if (type == 'positiveinteger') { // positive integer
                    scope.publicCheckForm(/^[1-9]\d*$/.test(value), value, content, isShow);
                }
                if (type == 'identitycard') { // CN identity card number
                    scope.publicCheckForm(/^[1-9]\d{5}[1-9]\d{3}((0\d)|(1[0-2]))(([0|1|2]\d)|3[0-1])\d{3}([0-9]|X)$/.test(value), value, content, isShow)
                }
            };
            // Group check: at least one item in `array` has a truthy [keystr].
            scope.checkAtLeastOne = function (array,keystr){
                $rootScope.verifyLeastOne = false;
                angular.forEach(array,function (item,index) {
                    if(item[keystr]){
                        $rootScope.verifyLeastOne = true;
                    }
                })
            }
            // Group check: every checked category must have at least one
            // detail row filling keystr1 or keystr2.
            scope.checkAtLeastOneIsSame = function (array, keystr1, keystr2) {
                var checkAtLeastOneIsSame = []; // per-record true/false
                angular.forEach(array, function (item, index) {
                    if (item.checked) {
                        checkAtLeastOneIsSame[index] = false;
                        angular.forEach(item.details, function (items) {
                            if (items[keystr1] || items[keystr2]) {
                                checkAtLeastOneIsSame[index] = true;
                            }
                        })
                    }
                })
                // Any false category makes the whole check false.
                $rootScope.checkAtLeastOneIsSame = checkAtLeastOneIsSame.indexOf(false) == -1 ? true : false;
            }
        }
    }
})
.directive('repeatFinish', function() { //利用angular指令监听ng-repeat渲染完成后执行脚本
return {
link: function(scope, element, attrs) {
if (scope.$last) { // 这个判断意味着最后一个 OK
scope.$eval(attrs.repeatFinish) // 执行绑定的表达式
}
}
}
})
<file_sep>angular.module('starter.services', [])
//service在使用this指针,而factory直接返回一个对象
.service('CommonService', function ($ionicPopup, $ionicPopover, $rootScope, $ionicPlatform, $state, $ionicHistory, $timeout, $ionicViewSwitcher, $ionicModal) {
    // Shared UI helpers: alerts, confirms, modals, popovers, tooltips and
    // back-navigation. Several methods publish themselves onto $rootScope.
    return {
        // Show a success tooltip, then navigate: empty stateurl -> go back,
        // 'close' -> stay, anything else -> $state.go with reload.
        platformPrompt: function (msg, stateurl, stateparams) {
            CommonService = this;
            $rootScope.commonService = CommonService;
            $rootScope.commonService.toolTip(msg, "tool-tip-message-success");
            if (stateurl == null || stateurl == '') {
                $ionicHistory.goBack();
            } else if (stateurl == 'close') { // no-op: stay on the page
            } else {
                $state.go(stateurl, stateparams, {reload: true});
            }
        },
        // Alert dialog; after OK, same navigation convention as platformPrompt.
        showAlert: function (title, template, stateurl, stateparams) {
            var alertPopup = $ionicPopup.alert({
                cssClass: "show-alert",
                title: title,
                template: template,
                okText: '确定',
                okType: 'button-energized'
            });
            alertPopup.then(function (res) {
                if (stateurl == null || stateurl == '') {
                    $ionicHistory.goBack();
                } else if (stateurl == 'close') { // no-op
                } else {
                    $state.go(stateurl, stateparams, {reload: true});
                }
            });
        },
        // Confirm dialog. OK: navigate to stateurl (forward animation) or,
        // when stateurl is '', run confirmfunction(). Cancel: 'close' -> stay,
        // otherwise navigate to closeurl (default 'tab.main') with back animation.
        showConfirm: function (title, template, okText, cancelText, stateurl, closeurl, confirmfunction, stateparams, stateparams2) {
            var confirmPopup = $ionicPopup.confirm({
                cssClass: "show-confirm",
                title: '<strong>' + title + '</strong>',
                template: template,
                okText: okText,
                cancelText: cancelText,
                okType: 'button-energized',
                cancelType: 'button-stable'
            });
            confirmPopup.then(function (res) {
                if (res) {
                    if (stateurl != '') {
                        $state.go(stateurl, stateparams, {reload: true});
                        $ionicViewSwitcher.nextDirection("forward"); // forward animation
                    } else {
                        confirmfunction();
                    }
                } else {
                    if (closeurl == 'close') { // no-op: just dismiss
                        return;
                    }
                    $state.go((closeurl == null || closeurl == '') ? 'tab.main' : closeurl, stateparams2, {reload: true})
                    $ionicViewSwitcher.nextDirection("back"); // back animation
                }
            });
        },
        // Build a modal from a template and wire open/close helpers onto the
        // caller's scope. `index` (e.g. 1/2/3) disambiguates several modals on
        // one page: the instance is stored as $scope["modal" + index].
        // NOTE(review): the `animation` parameter is accepted but unused —
        // 'slide-in-up' is hard-coded; confirm whether that is intended.
        customModal: function ($scope, templateurl, index, animation) {
            index = index == undefined ? "" : index;
            $ionicModal.fromTemplateUrl(templateurl, {
                scope: $scope,
                animation: 'slide-in-up'
            }).then(function (modal) {
                $scope["modal" + index] = modal;
            });
            $scope.openModal = function () {
                $scope["modal" + index].show();
            };
            $scope.closeModal = function () {
                $scope["modal" + index].hide();
            };
            // Release the modal when the scope is destroyed.
            $scope.$on('$destroy', function () {
                $scope["modal" + index].remove();
            });
            // When the modal hides, clear the recorded modal name.
            $scope.$on('modal.hidden', function () {
                $scope.modalName = '';
            });
            // Hook kept for when the modal is removed.
            $scope.$on('modal.removed', function () {
            });
        }
        ,
        // Build a popover from html/popover/<templateUrl> and wire helpers on
        // the caller's scope; `index` disambiguates multiple popovers per page.
        ionicPopover: function ($scope, templateUrl, index) {
            index = index == undefined ? "" : index;
            $ionicPopover.fromTemplateUrl('html/popover/' + templateUrl, {
                scope: $scope,
            }).then(function (popover) {
                $scope["popover" + index] = popover;
            });
            $scope.openPopover = function ($event) {
                $scope["popover" + index].show($event);
                // Size the popover from its item count: 55px per entry.
                $rootScope.popoversize = document.querySelectorAll("#mypopover a").length * 55 + 'px';
            };
            $scope.closePopover = function () {
                $scope["popover" + index].hide();
            };
            // Cleanup the popover when the scope goes away.
            $scope.$on('$destroy', function () {
                $scope["popover" + index].remove();
            });
            // Hide it whenever the view is left.
            $scope.$on('$ionicView.leave', function () {
                $scope["popover" + index].hide();
            });
            // Hooks kept for hide/remove events.
            $scope.$on('popover' + index + '.hidden', function () {
            });
            $scope.$on('popover' + index + '.removed', function () {
            });
        },
        // Global tooltip: set message/type on the service (read by the
        // toolTip directive) and auto-clear after 3 seconds.
        toolTip: function (msg, type) {
            this.message = msg;
            this.type = type;
            var _self = this;
            $timeout(function () {
                _self.message = null;
                _self.type = null;
            }, 3000);
        },
        // Back navigation that prefers the native bridge (iOS/Android
        // webview hooks) and falls back to Ionic history on any failure.
        goBack: function () {
            try {
                if (ionic.Platform.is('ios')) {
                    ios.goBack();
                } else if (ionic.Platform.is('android')) {
                    window.AndroidJSInterface.goback();
                } else {
                    $ionicHistory.goBack();
                }
            } catch (e) {
                $ionicHistory.goBack();
            }
        }
    }
})
.service('MainService', function ($q, $http, CallCenter) { //首页服务定义
return {
getAdvList: function (params) { //获取轮播图
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/GetTopBanner",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
gtHeadHelpInfoList: function (params) { //获取帮助页面的接口列表
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/GetHeadHelpInfoList",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
setAfterSalesInfo: function (params) { //提交售后信息申请
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/SetAfterSalesInfo",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
setTrainOrganInfo: function (params) { //提交培训机构信息申请
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/SetTrainOrganInfo",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
}
,
getUserProfile: function (params) { //获取用户信息
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/GetUserProfile",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
getAppOperatorHelpList: function (params) { //返回app操作帮助说明
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/GetAppOperatorHelpList",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
}
}
})
.service('DetailsService', function ($q, $http, CallCenter) { //帮助详情服务
return {
getHelpInfoDetails: function (params) { //获取帮助信息详情接口
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/GetHelpInfoDetails",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
}
}
})
.service('SigninService', function ($q, $http, CallCenter) { //签到服务
return {
getSignin: function (params) { //获取个人签到信息
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/GetSignin",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
setSignin: function (params) { //提交个人签到信息
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/SetSignin",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
getScoreRecord: function (params) { //获取签到记录
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/GetScoreRecord",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
getSigninHeadLines: function (params) { //获取签到头条
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/GetSigninHeadLines",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
getStuSignInfoList: function (params) { //获取当天抢到红包人员
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/GetStuSignInfoList",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
getHFOrganAdList: function (params) { //返回广告列表
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/GetHFOrganAdList",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
signin: function (params) { //签到
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/Signin",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
getUserAutoSigninList: function (params) { //自动返回随机签到牌
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/GetUserAutoSigninList",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
setAutoSignin: function (params) { //提交翻牌子的信息
var deferred = $q.defer();// 声明延后执行,表示要去监控后面的执行
var promise = deferred.promise
promise = $http({
method: 'GET',
url: CallCenter.api + "/SetAutoSignin",
params: params
}).success(function (data) {
deferred.resolve(data);// 声明执行成功,即http请求数据成功,可以返回数据了
}).error(function (data) {
deferred.reject(data);// 声明执行失败,即服务器返回错误
});
return promise; // 返回承诺,这里并不是最终数据,而是访问最终数据的API
},
}
})
.service('CompareService', function ($q, $http, CallCenter, $ionicScrollDelegate, $ionicLoading) { // institution-comparison service
    return {
        GetAllTrainClassList: function (params) { // fetch all class information for an institution
            var deferred = $q.defer(); // deferred so the data payload can be exposed directly
            var promise = deferred.promise
            // NOTE(review): `promise` is overwritten below — the $http promise
            // (with .success/.error) is what callers actually receive.
            promise = $http({
                method: 'GET',
                url: CallCenter.api + "/GetAllTrainClassList",
                params: params
            }).success(function (data) {
                deferred.resolve(data); // request succeeded: expose the data
            }).error(function (data) {
                deferred.reject(data); // server reported an error
            });
            return promise; // a promise for the eventual data, not the data itself
        },
        getTrainDetailInfo: function (params) { // fetch details of the institution endorsed by a top scorer
            var deferred = $q.defer();
            var promise = deferred.promise
            promise = $http({
                method: 'GET',
                url: CallCenter.api + "/GetTrainDetailInfo",
                params: params
            }).success(function (data) {
                deferred.resolve(data); // request succeeded
            }).error(function (data) {
                deferred.reject(data); // server reported an error
            });
            return promise; // promise for the eventual data
        },
        getTopMasterClassList: function (params) { // fetch the list of endorsing top scorers for a class
            var deferred = $q.defer();
            var promise = deferred.promise
            promise = $http({
                method: 'GET',
                url: CallCenter.api + "/GetTopMasterClassList",
                params: params
            }).success(function (data) {
                deferred.resolve(data); // request succeeded
            }).error(function (data) {
                deferred.reject(data); // server reported an error
            });
            return promise; // promise for the eventual data
        },
        setFollowExam: function (params) { // follow / unfollow an exam
            var deferred = $q.defer();
            var promise = deferred.promise
            promise = $http({
                method: 'GET',
                url: CallCenter.api + "/SetFollowExam",
                params: params
            }).success(function (data) {
                deferred.resolve(data); // request succeeded
            }).error(function (data) {
                deferred.reject(data); // server reported an error
            });
            return promise; // promise for the eventual data
        },
        selectCity: function ($scope) { // city picker: loads cities and wires up the A–Z index bar
            $ionicLoading.show({
                template: '<p><ion-spinner icon="spiral" class="spinner-light"></ion-spinner></p>',
                noBackdrop: true
            });
            // request the city data
            var d = "";
            $http({
                method: 'GET',
                url: CallCenter.api + "/GetAllTrainClassList",
                params: {
                    praviteKey: '<KEY>'
                },
                cache: true
            }).success(function (data) {
                console.log(data);
                d = data.Data;
            }).error(function (data, header, config, status) {
            }).then(function () {
                var newCities = []
                // initialise the A–Z city index buckets
                var cities = []
                var chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';
                chars.split('').forEach(function (c) {
                    // NOTE(review): this inner `var c` shadows the forEach parameter `c`
                    var c = {
                        index: c,
                        cities: [],
                    }
                    cities.push(c)
                }, this);
                // distribute each city into the bucket matching its ItemIndex letter
                cities.forEach(function (c) {
                    d.forEach(function (city) {
                        if (c.index == city.ItemIndex) {
                            c.cities.push(city);
                        }
                    }, this)
                }, this);
                // keep only letters that actually have cities
                cities.forEach(function (c) {
                    if (c.cities.length > 0) {
                        newCities.push(c);
                    }
                }, this);
                $ionicLoading.hide();
                // city data for the view
                $scope.cities = newCities;
                // runs every time the modal is shown
                $scope.$on('modal.shown', function () {
                    if ($scope.modalName == 'comparemodal') {
                        function alphabetMove(pPositionY) {
                            var currentItem, targetItem;
                            var d = document;
                            // find the element under the finger (right edge of the screen)
                            currentItem = d.elementFromPoint(d.body.clientWidth - 1, pPositionY);
                            // ignore touches that are not on the index bar
                            if (!currentItem || currentItem.className.indexOf('index-bar') < 0) return;
                            // find the list section matching the touched letter
                            targetItem = document.getElementById(currentItem.innerText);
                            document.getElementById('indexs-title').style.display = 'block'
                            document.getElementById('indexs-title').innerText = currentItem.innerText;
                            $ionicScrollDelegate.$getByHandle('cityScroll').scrollBy(0, targetItem.getBoundingClientRect().top - 88, false)
                        }
                        // wire up the touch events on the index bar
                        var indexsBar = document.getElementById('indexs-bar');
                        indexsBar.addEventListener('touchstart', function (e) {
                            alphabetMove(e.changedTouches[0].clientY);
                        });
                        indexsBar.addEventListener('touchmove', function (e) {
                            e.preventDefault();
                            alphabetMove(e.changedTouches[0].clientY);
                        });
                        indexsBar.addEventListener('touchend', function () {
                            document.getElementById('indexs-title').style.display = 'none';
                        });
                    }
                });
            })
        }
    }
})
.factory('MyInterceptor', function ($injector) { // $http interceptor (registered via $httpProvider.interceptors); hooks every request/response
    return {
        request: function (config) { // runs before $http sends a request to the backend
            if (config.url.toString().indexOf('http://') === 0) {
                // show the loading spinner for real HTTP calls only (template requests are skipped)
                $injector.get('$ionicLoading').show({
                    template: '<p><ion-spinner icon="spiral" class="spinner-light"></ion-spinner></p>',
                    noBackdrop: true
                });
                // authorization: attach the stored token, if any
                config.headers = config.headers || {};
                var token = localStorage.getItem('token');
                if (token) {
                    config.headers.authorization = token;
                }
            }
            return config;
        },
        requestError: function (config) { // request failed to send or was rejected by an earlier interceptor
            if (config.url.toString().indexOf('http://') === 0) {
                $injector.get('$ionicLoading').hide();
            }
            return config;
        },
        response: function (response) { // runs after $http receives a response from the backend
            if (response.config.url.toString().indexOf('http://') === 0) {
                $injector.get('$ionicLoading').hide();
            }
            return response;
        },
        responseError: function (response) { // backend call failed; report well-known failures to the user
            if (response.config.url.toString().indexOf('http://') === 0) {
                $injector.get('$ionicLoading').hide();
                if (response.status == 401) {
                    $injector.get('CommonService').platformPrompt("访问授权失败"); // authorization failed
                } else if (response.status == 404) {
                    $injector.get('CommonService').platformPrompt("访问连接404"); // endpoint not found
                } else if (response.status == -1) {
                    $injector.get('CommonService').platformPrompt("网络请求超时"); // network timeout / aborted request
                }
            }
            return response;
        }
    };
})
| f502b92028dbc4c955e98e58584cff83c4e40fc8 | [
"JavaScript"
] | 2 | JavaScript | DreamPWJ/callcenter | 9bc85966c40a9286bd784d7f2b75769b1e7cd2d2 | c1c66f0c3dac2b02540654a2243427b5b570d7f7 |
refs/heads/master | <repo_name>EBrown8534/BrainfuckSharp<file_sep>/Brainfuck Interpreter/Transpiler/TokenSymbol.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace BrainfuckSharp.Transpiler
{
    /// <summary>
    /// The eight Brainfuck instructions plus <see cref="None"/>, which marks
    /// "no previous token" when the interpreter scans for repeated runs.
    /// </summary>
    public enum TokenSymbol
    {
        None,            // sentinel: no token seen yet
        MoveLeft,        // '<' : move the data pointer left
        MoveRight,       // '>' : move the data pointer right
        LoopBegin,       // '[' : enter/skip a loop depending on the current cell
        LoopEnd,         // ']' : close the innermost loop
        Increment,       // '+' : increment the current cell
        Decrement,       // '-' : decrement the current cell
        OutputValue,     // '.' : output the current cell as a character
        InputCharacter   // ',' : read one character into the current cell
    }
}
<file_sep>/Brainfuck Interpreter/Transpiler/Interpreter.cs
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace BrainfuckSharp.Transpiler
{
public class Interpreter
{
public OptimizationLevel OptimizationLevel { get; set; } = OptimizationLevel.None;
public List<string> Lines { get; } = new List<string>();
private List<TokenSymbol> _symbols = new List<TokenSymbol>();
private static List<char> _validTokens = new List<char> { '<', '>', ',', '.', '+', '-', '[', ']' };
private static List<TokenSymbol> _compressionOptimizableTokens = new List<TokenSymbol> { TokenSymbol.Decrement, TokenSymbol.Increment, TokenSymbol.MoveLeft, TokenSymbol.MoveRight };
private static Dictionary<char, TokenSymbol> _tokenMap = new Dictionary<char, TokenSymbol>
{
{ '<', TokenSymbol.MoveLeft },
{ '>', TokenSymbol.MoveRight },
{ ',', TokenSymbol.InputCharacter },
{ '.', TokenSymbol.OutputValue },
{ '[', TokenSymbol.LoopBegin },
{ ']', TokenSymbol.LoopEnd },
{ '+', TokenSymbol.Increment },
{ '-', TokenSymbol.Decrement }
};
private void Tokenize(string brainfuck)
{
foreach (char c in brainfuck)
{
if (_validTokens.Contains(c))
{
_symbols.Add(_tokenMap[c]);
}
}
}
public void WriteToFile(string filename)
{
using (var sw = new StreamWriter(filename))
{
foreach (string line in Lines)
{
sw.WriteLine(line);
}
}
}
private static bool IsRepeatedSymbolSequenceEnding(TokenSymbol previous, TokenSymbol current) => previous != current && previous != TokenSymbol.None && _compressionOptimizableTokens.Contains(previous);
public void Interpret(string brainfuck)
{
Tokenize(brainfuck);
var lastSymbol = TokenSymbol.None;
var repetitions = 0;
var indents = 3;
foreach (var symbol in _symbols)
{
if (OptimizationLevel >= OptimizationLevel.Level1)
{
var isOptimizableSymbol = _compressionOptimizableTokens.Contains(symbol);
if (IsRepeatedSymbolSequenceEnding(lastSymbol, symbol))
{
Lines.AddRange(SymbolToLines(lastSymbol, repetitions, indents));
repetitions = isOptimizableSymbol ? 1 : 0;
}
else
{
if (isOptimizableSymbol)
{
repetitions++;
}
else
{
repetitions = 0;
}
}
}
switch (symbol)
{
case TokenSymbol.LoopBegin:
Lines.AddRange(SymbolToLines(symbol, repetitions, indents));
indents += 1;
break;
case TokenSymbol.LoopEnd:
indents -= 1;
Lines.AddRange(SymbolToLines(symbol, repetitions, indents));
break;
case TokenSymbol.InputCharacter:
case TokenSymbol.OutputValue:
Lines.AddRange(SymbolToLines(symbol, repetitions, indents));
break;
}
lastSymbol = symbol;
}
}
private string[] SymbolToLines(TokenSymbol symbol, int repeated, int indentLevels)
{
switch (symbol)
{
case TokenSymbol.Decrement:
if (repeated == 1)
{
return new[] { $"{GetIndents(indentLevels)}buffer[index]--;" };
}
else
{
return new[] { $"{GetIndents(indentLevels)}buffer[index] -= {repeated};" };
}
case TokenSymbol.Increment:
if (repeated == 1)
{
return new[] { $"{GetIndents(indentLevels)}buffer[index]++;" };
}
else
{
return new[] { $"{GetIndents(indentLevels)}buffer[index] += {repeated};" };
}
case TokenSymbol.InputCharacter:
return new[] { $"{GetIndents(indentLevels)}buffer[index] = ReadChar();" };
case TokenSymbol.LoopBegin:
return new[] { "", $"{GetIndents(indentLevels)}while (buffer[index] != 0)", $"{GetIndents(indentLevels)}{{" };
case TokenSymbol.LoopEnd:
return new[] { $"{GetIndents(indentLevels)}}}", "" };
case TokenSymbol.MoveLeft:
if (repeated == 1)
{
return new[] { $"{GetIndents(indentLevels)}index--;" };
}
else
{
return new[] { $"{GetIndents(indentLevels)}index -= {repeated};" };
}
case TokenSymbol.MoveRight:
if (repeated == 1)
{
return new[] { $"{GetIndents(indentLevels)}index++;" };
}
else
{
return new[] { $"{GetIndents(indentLevels)}index += {repeated};" };
}
case TokenSymbol.OutputValue:
return new[] { $"{GetIndents(indentLevels)}Console.Write((char)buffer[index]);" };
}
return null;
}
public static string GetIndents(int levels) => new string(' ', levels * 4);
}
}
<file_sep>/Brainfuck Interpreter/Program.cs
using Evbpc.Framework.Utilities.Logging;
using Evbpc.Framework.Utilities.Prompting;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace BrainfuckSharp
{
    class Program
    {
        /// <summary>
        /// Entry point: prompts for the Brainfuck source and output locations,
        /// transpiles + compiles the program, then launches the produced executable.
        /// </summary>
        static void Main(string[] args)
        {
            // Log everything to the console; the prompter reuses the same logger.
            var logger = new ConsoleLogger(LoggingType.All);
            var transpiler = new Transpiler.Transpiler(logger);
            // Level1 merges runs of +/-/</> into single combined statements.
            transpiler.OptimizationLevel = Transpiler.OptimizationLevel.Level1;
            var prompter = new ConsolePrompt(logger);
            transpiler.AcquireInput(prompter);
            transpiler.Transpile();
            // Run the freshly compiled Brainfuck program.
            Process.Start(transpiler.ExecutableFile);
            Console.WriteLine("Press enter key to exit...");
            Console.ReadLine();
        }
    }
}
<file_sep>/README.md
# BrainfuckSharp
A C# brainfuck interpreter/compiler.
This uses classes from [`Evbpc.Framework`](https://github.com/EBrown8534/Framework) version 0.2. You can create replacement classes if you do not wish to add that project.
# FizzBuzz Compiler Test Programme
FizzBuzz by <NAME>. http://codereview.stackexchange.com/q/57382/73844<file_sep>/Brainfuck Interpreter/Transpiler/Transpiler.cs
using Evbpc.Framework.Utilities.Logging;
using Evbpc.Framework.Utilities.Prompting;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace BrainfuckSharp.Transpiler
{
    /// <summary>
    /// Drives the full pipeline: prompt for inputs, interpret the Brainfuck
    /// source into C# lines, splice them into the template, and invoke csc.exe.
    /// </summary>
    public class Transpiler
    {
        public ILogger Logger { get; }
        public string SourceFile { get; set; }        // input .bf file
        public string ProgrammeName { get; set; }     // source filename without extension
        public string DestinationFile { get; set; }   // generated .cs file
        public string ExecutableFile { get; set; }    // compiled .exe output
        public int BufferSize { get; set; }           // size of the Brainfuck tape
        public string DotNetVersion { get; set; }     // key into _dotNetFolders
        public OptimizationLevel OptimizationLevel { get; set; }
        // Maps a user-facing .NET version to its framework folder under C:\Windows\Microsoft.NET\Framework.
        private static Dictionary<string, string> _dotNetFolders = new Dictionary<string, string> { { "2.0", "v2.0.50727" }, { "3.5", "v3.5" }, { "4.0", "v4.0.30319" } };
        public Transpiler(ILogger logger)
        {
            Logger = logger;
        }
        /// <summary>
        /// Interactively collects every setting needed by <see cref="Transpile"/>.
        /// Must be called before <see cref="Transpile"/>.
        /// </summary>
        public void AcquireInput(IPrompt prompt)
        {
            SourceFile = AddExtension(prompt.Prompt<string>("Enter the source filename (.bf)", PromptOptions.Required, validationMethod: x => !string.IsNullOrWhiteSpace(x)), ".bf");
            ProgrammeName = SourceFile.Substring(0, SourceFile.LastIndexOf('.'));
            DestinationFile = AddExtension(prompt.Prompt("Enter the destination filename (.cs)", PromptOptions.Optional, ProgrammeName + ".cs", validationMethod: x => !string.IsNullOrWhiteSpace(x)), ".cs");
            ExecutableFile = AddExtension(prompt.Prompt("Enter the destination executable (.exe)", PromptOptions.Optional, ProgrammeName + ".exe", validationMethod: x => !string.IsNullOrWhiteSpace(x)), ".exe");
            BufferSize = prompt.Prompt("Enter buffer size", PromptOptions.Optional, 2048);
            DotNetVersion = prompt.Prompt("Enter .NET version to compile with", PromptOptions.Optional, "4.0", "Version can be any of: 2.0, 3.5, 4.0", x => _dotNetFolders.ContainsKey(x));
        }
        // Appends the default extension only when the name has no '.' at all.
        private static string AddExtension(string data, string extension)
        {
            if (data.IndexOf('.') == -1)
            {
                data += extension;
            }
            return data;
        }
        /// <summary>Reads the Brainfuck source file as text.</summary>
        public string LoadBrainfuckCode(string filename) => File.ReadAllText(filename);
        /// <summary>
        /// Interprets the Brainfuck source, substitutes it into the template,
        /// writes the .cs file, and compiles it to the executable.
        /// </summary>
        /// <exception cref="ArgumentException">When settings were never acquired.</exception>
        public void Transpile()
        {
            if (string.IsNullOrWhiteSpace(SourceFile)
                || string.IsNullOrWhiteSpace(ProgrammeName)
                || string.IsNullOrWhiteSpace(DestinationFile)
                || string.IsNullOrWhiteSpace(ExecutableFile)
                || string.IsNullOrWhiteSpace(DotNetVersion)
                || BufferSize <= 0)
            {
                throw new ArgumentException($"The values for {nameof(SourceFile)}, {nameof(ProgrammeName)}, {nameof(DestinationFile)}, {nameof(ExecutableFile)}, {nameof(DotNetVersion)} cannot be null or whitespace, and {nameof(BufferSize)} cannot be zero or negative. Did you forget to call {nameof(AcquireInput)}?");
            }
            // Now that that's over.
            var interpreter = new Interpreter();
            interpreter.OptimizationLevel = OptimizationLevel;
            var brainfuck = LoadBrainfuckCode(SourceFile);
            interpreter.Interpret(brainfuck);
            var template = AcquireTemplate("CompiledTemplate.cs");
            var sb = new StringBuilder();
            foreach (var line in interpreter.Lines)
            {
                sb.AppendLine(line);
            }
            // Token substitution: the template file contains two placeholder
            // identifiers which are replaced textually before compilation.
            var result = template;
            result = result.Replace("BufferSize", BufferSize.ToString());
            result = result.Replace("BrainfuckCode;", sb.ToString().TrimStart());
            WriteToCsFile(DestinationFile, result);
            CompileCsFile(_dotNetFolders[DotNetVersion], DestinationFile, ExecutableFile);
        }
        /// <summary>Writes the generated C# source to disk.</summary>
        public void WriteToCsFile(string filename, string data)
        {
            using (StreamWriter sw = new StreamWriter(filename))
            {
                sw.WriteLine(data);
            }
        }
        /// <summary>Reads the template file as text.</summary>
        public string AcquireTemplate(string filename) => File.ReadAllText(filename);
        /// <summary>
        /// Invokes csc.exe from the selected framework folder and blocks until it
        /// finishes. NOTE(review): assumes a Windows host with the 32-bit
        /// Framework folder present — verify on 64-bit-only installs.
        /// </summary>
        public void CompileCsFile(string dotNetPath, string sourceFile, string executableFile)
        {
            Logger.LogImportant($"C:\\Windows\\Microsoft.NET\\Framework\\{dotNetPath}\\csc.exe /t:exe /out:\"{Directory.GetCurrentDirectory()}\\{executableFile}\" \"{Directory.GetCurrentDirectory()}\\{sourceFile}\"");
            ProcessStartInfo info = new ProcessStartInfo($@"C:\Windows\Microsoft.NET\Framework\{dotNetPath}\csc.exe", $"/t:exe /out:\"{Directory.GetCurrentDirectory()}\\{executableFile}\" \"{Directory.GetCurrentDirectory()}\\{sourceFile}\"");
            var process = Process.Start(info);
            process.WaitForExit();
        }
    }
}
<file_sep>/Brainfuck Interpreter/Transpiler/OptimizationLevel.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace BrainfuckSharp.Transpiler
{
    /// <summary>
    /// Determines how the <see cref="Interpreter"/> will optimize Brainfuck code.
    /// </summary>
    /// <remarks>
    /// Members are ordered by aggressiveness; callers rely on ordinal
    /// comparisons such as <c>level &gt;= OptimizationLevel.Level1</c>.
    /// </remarks>
    public enum OptimizationLevel
    {
        /// <summary>
        /// Do not apply any optimizations.
        /// </summary>
        None,
        /// <summary>
        /// Apply first level optimizations (combining sequences of increment/decrement characters into one increment/decrement per sequence).
        /// </summary>
        Level1,
    }
}
<file_sep>/Brainfuck Interpreter/CompiledTemplate.cs
using System;
namespace Brainfuck_Interpreter
{
    // NOTE(review): this class is never compiled as part of BrainfuckSharp
    // itself — the Transpiler loads this file as TEXT and textually substitutes
    // the buffer-size token and the placeholder statement before invoking csc
    // on the result, so the identifiers below must stay exactly as written.
    class CompiledTemplate
    {
        // Reads one key press and returns it as a byte (Brainfuck ',' command).
        static byte ReadChar()
        {
            return (byte)Console.ReadKey().KeyChar;
        }
        static void Main(string[] args)
        {
            // Tape for the generated program; size is substituted by the Transpiler.
            byte[] buffer = new byte[BufferSize];
            int index = 0;
            BrainfuckCode;
            Console.WriteLine("Program terminated successfully...");
            Console.ReadLine();
        }
    }
}
| 478f8e6ccdd85f484bd592b5c5244f5d796f0e1a | [
"Markdown",
"C#"
] | 7 | C# | EBrown8534/BrainfuckSharp | 2ed85b3610d9c903ac66e39c3f4f281cfec00754 | 400e4f84654ce78f3f90abacb5f0619ec8372844 |
refs/heads/master | <repo_name>toalamin/crm<file_sep>/app/Models/Expense.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class Expense extends Model {
    /** Guarded columns; everything else is mass assignable. */
    protected $guarded = [''];

    /** Eloquent should not maintain created_at/updated_at for this table. */
    public $timestamps = false;

    /** Columns treated as dates. */
    protected $dates = ['created_at'];

    /**
     * Expense head (category) this expense belongs to.
     *
     * Bug fix: the relation was built but never returned, so `$expense->head`
     * always resolved to null instead of the related ExpenseHead model.
     *
     * @return \Illuminate\Database\Eloquent\Relations\BelongsTo
     */
    public function head() {
        return $this->belongsTo(ExpenseHead::class);
    }

    /**
     * Sales person (user) who recorded this expense.
     *
     * Bug fix: same missing `return` as head().
     *
     * @return \Illuminate\Database\Eloquent\Relations\BelongsTo
     */
    public function sale_person() {
        return $this->belongsTo(User::class);
    }
}
<file_sep>/app/Models/Customer.php
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
class Customer extends Model {
    /**
     * Attributes that are NOT mass assignable; everything else may be filled
     * via create()/fill().
     */
    protected $guarded = ['remember_token'];
    /**
     * Attributes hidden when the model is serialized to arrays/JSON.
     */
    protected $hidden = ['password', 'remember_token'];
    /**
     * Attributes cast to native types on access.
     */
    protected $casts = [
        'email_verified_at' => 'datetime',
        'active' => 'bool',
    ];
    /**
     * Owning user account for this customer.
     *
     * @return \Illuminate\Database\Eloquent\Relations\BelongsTo
     */
    public function user() {
        return $this->belongsTo(User::class);
    }
    /**
     * Meetings scheduled with this customer.
     *
     * @return \Illuminate\Database\Eloquent\Relations\HasMany
     */
    public function meetings() {
        return $this->hasMany(Meeting::class);
    }
}
<file_sep>/app/Models/Lead.php
<?php
namespace App\models;
use Illuminate\Database\Eloquent\Model;
class Lead extends Model {
    // All columns are effectively mass assignable (guard list holds only '').
    // NOTE(review): this file declares namespace `App\models` (lowercase m)
    // while sibling models use `App\Models` — verify autoloading on
    // case-sensitive filesystems.
    protected $guarded = [''];
    /**
     * Meeting this lead originated from.
     *
     * @return \Illuminate\Database\Eloquent\Relations\BelongsTo
     */
    public function meeting() {
        return $this->belongsTo(Meeting::class);
    }
    /**
     * All lead meetings held for this lead.
     *
     * @return \Illuminate\Database\Eloquent\Relations\HasMany
     */
    public function meetings() {
        return $this->hasMany(LeadMeeting::class);
    }
}
| 93b3c50b364f4b0c8ab381a459372ba3645cf92f | [
"PHP"
] | 3 | PHP | toalamin/crm | 2e4aa38e97ff4d6a94ce20077446dbb3d8b14b1b | 3a1d0f1f63384ae56a0e0cc224e32667375b92e0 |
refs/heads/master | <file_sep>import React, {Component} from 'react';
import '../style/App.css';
import axios from 'axios';
import MonthList from './MonthList.js';
class App extends Component {
constructor(props) {
super(props);
axios.get(`xxx/feriados`).then((resp) => {
this.setState((prevState) => (
{
calendarInfo: {
year: prevState.calendarInfo.year,
holidays: resp.data
}
}
));
});
}
state = {
calendarInfo: {
year: 2018,
holidays: {}
}
};
render() {
return (
<div>
<div className=""> Calendario de pontes:</div>
<div className="mb-5">
<MonthList {...this.state.calendarInfo}/>
</div>
</div>
);
}
}
export default App;
<file_sep>import React from 'react';
import Month from './Month.js';
import _ from 'lodash';
const MonthList = (props) => {
return (
<div>
<div className="" style={{"backgroundColor": "#ddd"}}>
Calendars for {props.year}
</div>
<div className="row">
{
_.range(1,13).map( nr =>
<div key={"month"+nr} className="col-lg-4 mt-3">
<Month {...props} month={nr} />
</div>
)
}
</div>
</div>
);
};
export default MonthList; | 88aca8634f82c0e7e8da7cf98d2bed6550c48b8a | [
"JavaScript"
] | 2 | JavaScript | AWRyder/uptest | 1967ec40191167a7875bf2efdeac0e421d7dfa2a | 666294de540571fc195123c9145cb81f61e4acf6 |
refs/heads/master | <repo_name>eddy-anderson/hw04<file_sep>/Makefile
# Tools
EDITOR = /usr/bin/gedit
INDENT = /usr/bin/indent
CC = clang
LDFLAGS = -o
Libs = -lm
CFLAGS = -Weverything $(LDFLAGS)
# Bug fix: these previously named "indented_circle", a file that does not
# exist in this repository.
SOURCES = inscribed_circle.c
EXECUTABLE = inscribed_circle
OBJECTS = $(SOURCES:.c=.o)
.SUFFIXES:
.SUFFIXES: .c .o .h
.PHONY: all edit clean veryclean
target = inscribed_circle
all: $(target)
$(target): $(target).c
	$(CC) $(target).c $(CFLAGS) $(target) $(Libs)
# Bug fix: the compile recipe above previously ended with a stray backslash,
# which continued the command onto the following line.
edit : $(target).c
	$(EDITOR) $<
	$(INDENT) $<
clean :
	rm -f *.o
	rm -f *.*~
veryclean : clean
	rm -f $(target)
<file_sep>/inscribed_circle.c
#include <stdio.h>
#include <math.h>
struct point
{
double x;
double y;
};
struct triangle
{
struct point a;
struct point b;
struct point c;
};
double area (struct triangle t);
double area (struct triangle t)
{
double s;
s = 0.5 * fabs ((t.b.x - t.a.x) * (t.c.y - t.a.y) - (t.c.x -
t.a.x) * (t.b.y - t.a.y));
return s;
}
double line_length (struct point a, struct point b);
double line_length (struct point a, struct point b)
{
double l =
sqrt (fabs ((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y)));
return l;
}
double perimeter (struct triangle t);
double perimeter (struct triangle t)
{
double p =
line_length (t.a, t.b) + line_length (t.b, t.c) + line_length (t.c,
t.a);
return p;
}
int main (void)
{
  /* Sample triangle with vertices (2,2), (-2,1) and (0,-3). */
  struct triangle t = (struct triangle) { {2., 2.}, {-2., 1.}, {0., -3.} };
  double a = area (t);
  double p = perimeter (t);
  /* Inradius formula: r = Area / s = 2*Area / perimeter,
     where s is the semi-perimeter. */
  double r = ((2 * a) / p);
  printf ("Radius of inscribed circle = %f\n", r);
  return 0;
}
<file_sep>/README.md
# hw04
Program to find the radius of a circle inscribed in a given triangle.
| a71f8e72c0fc3e74bbd99c757ce514fa4395d4aa | [
"Markdown",
"C",
"Makefile"
] | 3 | Makefile | eddy-anderson/hw04 | c31c8a75cfb4622f1a43a9a36198be60c9222387 | 9769effb2f0ba014f76247ed19facef9e419135b |
refs/heads/master | <repo_name>kishankunal/CSE316Assingment<file_sep>/Question_no_17.c
#include<stdio.h>
#include<stdio.h>
#include<stdlib.h>
/* Per-process bookkeeping. Queue1 feeds the level-1 (fixed priority,
   preemptive) scheduler; Queue2 the level-2 (round robin) scheduler. */
struct process
{
    int Procees_number; /* process id, as entered by the user */
    /* arrival time, computed waiting/turnaround times, priority (0 = highest),
       a copy of the original burst time, and the remaining burst time */
    int Arrrival_Time, Waiting_Time, TurnTime, Priority, BurstTimeCopy,BurstTime;
}Queue1[10],Queue2[10];
/* Two-level scheduler:
   level 1 = fixed-priority preemptive (time quantum t1),
   level 2 = round robin (time quantum t2).
   A running level-1 process preempted by a higher-priority arrival is
   demoted to Queue2; Queue2 is only serviced after Queue1 drains. */
int main(void)
{
    struct process temp;
    int i,time=0,t1,t2,bu_t=0,largest,totalProcess,count=0,k,pf2=0,totalProcess2,n,pos,j,flag=0,y;
    float wait_time=0,turnaround_time= 0,average_waiting_time,average_turnaround_time;
    /* ---- read the process table ---- */
    printf("\n Enter Total Number of Processes:\t");
    scanf("%d", &totalProcess);
    n=totalProcess; /* remember the original count for the averages */
    for(i=0;i<totalProcess;i++)
    {
        printf("\nEnter Process name:-");
        //fflush(stdin);//to flush the buffer
        scanf("%d",&Queue1[i].Procees_number);
        printf("\nEnter Details For processor %d:\n",Queue1[i].Procees_number);
        printf("Enter Arrival Time:-");
        scanf("%d",&Queue1[i].Arrrival_Time);
        printf("Enter Burst Time:-");
        scanf("%d",&Queue1[i].BurstTime);
        Queue1[i].BurstTimeCopy=Queue1[i].BurstTime; /* keep the original burst time */
        printf("Enter Priority:\t");
        scanf("%d",&Queue1[i].Priority);
    }
    printf("\nEnter Time Quantum for Fixed Priority queue:-");
    scanf("%d",&t1);
    printf("\nEnter Time Quantum for Round Robin queue:-");
    scanf("%d",&t2);
    printf("\n\nProcess\t|Turnaround Time|Waiting Time\n\n");
    /* ---- selection sort Queue1 by arrival time ---- */
    for(i=0;i<totalProcess;i++)
    {
        pos=i;
        for(j=i+1;j<totalProcess;j++)
        {
            if(Queue1[j].Arrrival_Time<Queue1[pos].Arrrival_Time)
                pos=j;
        }
        temp=Queue1[i];
        Queue1[i]=Queue1[pos];
        Queue1[pos]=temp;
    }
    /* ---- level 1: fixed-priority preemptive, quantum t1 ---- */
    time=Queue1[0].Arrrival_Time; /* clock starts at the earliest arrival */
    for(i=0;totalProcess!=0;i++)
    {
        while(count!=t1) /* run the current process for at most t1 ticks */
        {
            count++;
            if(Queue1[i].Arrrival_Time<=time)
            {
                /* a same-time arrival with a numerically smaller (= higher)
                   priority preempts: demote the running process to Queue2 */
                for(j=i+1;j<totalProcess;j++)
                {
                    if(Queue1[j].Arrrival_Time==time && Queue1[j].Priority<Queue1[i].Priority)//pr<
                    {
                        Queue2[pf2]=Queue1[i];
                        pf2++;
                        for(k=i; k<totalProcess-1;k++)
                            Queue1[k]=Queue1[k+1];
                        totalProcess--;
                        count=0;
                        i=j-1;
                        j--;
                    }
                }
            }
            time++;
            Queue1[i].BurstTime--;
            if(Queue1[i].BurstTime==0) /* finished inside its quantum */
            {
                Queue1[i].TurnTime=time-Queue1[i].Arrrival_Time;
                Queue1[i].Waiting_Time=Queue1[i].TurnTime-Queue1[i].BurstTimeCopy;
                printf("%d\t|\t%d\t|\t%d\n",Queue1[i].Procees_number,Queue1[i].TurnTime,Queue1[i].Waiting_Time);
                wait_time+=time-Queue1[i].Waiting_Time;
                turnaround_time+=time-Queue1[i].TurnTime;
                for(k=i;k<totalProcess-1;k++)
                    Queue1[k]=Queue1[k+1];i--;
                totalProcess--;
                count=t1;break;
            }
        }
        count=0;
        if(Queue1[i].BurstTime!=0) /* quantum expired: demote to Queue2 */
        {
            Queue2[pf2]=Queue1[i];
            pf2++;
            for(k=i;k<totalProcess-1;k++)
                Queue1[k]=Queue1[k+1];
            totalProcess--;
        }
        if(i==totalProcess-1) /* wrap around to the front of Queue1 */
            i=-1;
    }
    /* ---- level 2: round robin over the demoted processes, quantum t2 ---- */
    totalProcess2=pf2;
    for(count=0;totalProcess2!=0;)
    {
        if(Queue2[count].BurstTime<=t2&&Queue2[count].BurstTime>0)
        {
            /* finishes within this quantum */
            time+=Queue2[count].BurstTime;
            Queue2[count].BurstTime=0;
            flag=1;
        }
        else if(Queue2[count].BurstTime>0)
        {
            /* consume a full quantum and keep the remainder */
            Queue2[count].BurstTime-=t2;
            time+=t2;
        }
        if(Queue2[count].BurstTime==0&&flag==1) /* just completed: report it */
        {
            totalProcess2--;
            Queue2[count].TurnTime=time-Queue2[count].Arrrival_Time;
            Queue2[count].Waiting_Time=Queue2[count].TurnTime-Queue2[count].BurstTimeCopy;
            printf("%d\t|\t%d\t|\t%d\n",Queue2[count].Procees_number,Queue2[count].TurnTime,Queue2[count].Waiting_Time);
            turnaround_time+=time-Queue2[count].Arrrival_Time;
            wait_time+=time-Queue2[count].Arrrival_Time-Queue2[count].BurstTimeCopy;
            for(k=count; k<totalProcess2;k++)
                Queue2[k]=Queue2[k+1];count--;
            flag=0;
        }
        if(count==totalProcess2-1)
            count=0;
        else
            count++;
    }
    /* ---- averages over the original n processes ---- */
    printf("\n Average Waiting Time= %f\n", wait_time/n);
    printf("Avg Turnaround Time = %f" ,turnaround_time/n);
}
<file_sep>/Question_No_10.c
#include<stdio.h>
#include<conio.h>
#include<stdlib.h>
/* FCFS disk scheduling: reads a queue of cylinder addresses plus the current
   head position, then prints the per-request seek distance, the total head
   movement, and the average seek distance. */
int main(void)
{
    int A[100],Size,Begin,Seek=0,Difference;
    float Average;
    /* Read the FIFO request queue into A[1..Size]. */
    printf("Enter the total number of Cylinders :");
    scanf("%d",&Size);
    printf("\nTotal number of Cylinder You Entered is :%d",Size);
    for(int X=1; X<=Size; X++)
    {
        printf("\nEnter the %d Cylinder Address ",X);
        scanf("\n%d",&A[X]);
    }
    /* The current head position becomes the starting point A[0]. */
    printf("Enter the Adress of Header Adress :");
    scanf("%d",&Begin);
    A[0]=Begin;
    /* Service the requests in arrival order, accumulating head movement. */
    printf("X\tY\tSeek Difference\n");
    for(int Y=0; Y<Size; Y++)
    {
        Difference=abs(A[Y+1]-A[Y]);
        Seek+=Difference;
        printf("%d\t%d\t%d\n",A[Y],A[Y+1],Difference);
    }
    printf("Total distance travelled is %d\n :",Seek);
    /* Bug fix: Seek/Size was integer division, which truncated the average
       before it was stored in the float. Cast so the fraction is kept. */
    Average=(float)Seek/Size;
    printf("The avergae would come as %f",Average);
    getch();
}
<file_sep>/README.md
# CSE316Assingment
Assignment 2019, CSE 316
Name-<NAME> || Section -K17SJ || Registration Number-11712595
Course Instructor-<NAME>
Github url:-github.com/kishankunal/CSE316Assingment
Question N0-10
Write a C program to solve the following problem:
Suppose that a disk drive has 5 ,000 cylinders, numbered 0 to 4999. The drive is currently serving a request at cylinder 143 , and the previous request was at cylinder 125. The queue of pending requests, in FIFO Order, is: 86 , 1470, 913 , 1774, 948, 1509, 1022, 175 0, 130 Starting from the current head position, what is the total distance (in cylinders) that the disk arm moves to satisfy all the pending requests for each of the SCAN disk-scheduling algorithms?
Solution : First come, first served (FCFS) is an operating system process scheduling algorithm
and a network routing management mechanism that automatically executes queued requests and processes by the order of their arrival. With first come, first served, what comes first is handled first; the next request in line will be executed once the one before it is complete.
FCFS provides an efficient, simple and error-free process scheduling algorithm that saves valuable CPU resources. It uses non-preemptive scheduling in which a process is automatically queued and processing occurs according to an incoming request or process order. FCFS derives its concept from real-life customer service.
Let's take a look at how FCFS process scheduling works. Suppose there are three processes in a
queue: P1, P2 and P3. P1 is placed in the processing register with a waiting time of zero seconds and 10 seconds for complete processing. The next process, P2 , must wait 10 seconds and is placed in the processing cycle until P1 is processed. Assuming that P2 will take 15 seconds to complete, the final process, P3, must wait 25 seconds to be processed. FCFS may not be the fastest process scheduling algorithm, as it does not check for priorities associated with processes. These priorities may depend on the processes' individual execution times.
Question no-17. Design a scheduling program to implements a Queue with two levels:
Level 1 : Fixed priority preemptive Scheduling
Level 2: Round Robin Scheduling
For a Fixed priority preemptive Scheduling (Queue1), the Priority 0 is highest priority. If one process P1 is scheduled and running, another process P2 with higher priority comes. The New process (high priority) process P2 preempts currently running process P1 and process P1 will go to second level queue. Time for which process will strictly execute must be considered in the multiples of 2. All the processes in second level queue will complete their execution according to round robin scheduling.
Consider: 1. Queue 2 will be processed after Queue 1 becomes empty.
solution:
This is a scheduling program to implement a Queue with two levels:
Level 1 : Fixed priority preemptive Scheduling.
Level 2 : Round Robin Scheduling
For a Fixed priority pre-emptive scheduling if one process P1 is scheduled and running and another process P2 with higher priority comes. The New process with high priority process P2 preempts currently running process P1 and process P1 will go to second level queue. Time for which process will strictly execute must be considered in the multiples of 2.
All the processes in second level queue will complete their execution according to round robin scheduling.
In this program Queue 2 will be processed after Queue 1 becomes empty and Priority of Queue 2 has lower priority than in Queue 1.
Algorithm:
In this problem algorithm for round robin scheduling and multilevel queue scheduling is used.
Algorithm for round robin scheduling:
1- Create an array Rem_Bt[] to keep track of remaining
burst time of processes. This array is initially a
copy of Bt[] (burst times array)
2- Create another array Wt[] to store waiting times
of processes. Initialize this array as 0.
3- Initialize time : t = 0
4- Keep traversing the all processes while all processes
are not done. Do following for i'th process if it is
not done yet.
a- If Rem_Bt[i] > quantum
(i) t = t + quantum
(ii) Bt_Rem[i] -= quantum;
c- Else // Last cycle for this process
(i) t = t + Bt_Rem[i];
(ii) Wt[i] = t - bt[i]
(ii) Bt_Rem[i] = 0; // This process is over
Algorithm For Multilevel Queue:
1. When a process starts executing then it first enters queue 1.
2. In queue 1 process executes for 4 unit and if it completes in this 4 unit or it gives CPU for I/O operation in this 4 unit than the priority of this process does not change and if it again comes in the ready queue than it again starts its execution in Queue 1.
3. If a process in queue 1 does not complete in 4 unit then its priority gets reduced and it shifted to queue 2.
4. Above points 2 and 3 are also true for queue 2 processes but the time quantum is 8 unit.In a general case if a process does not complete in a time quantum than it is shifted to the lower priority queue.
5. In the last queue, processes are scheduled in FCFS manner.
6. A process in lower priority queue can only execute only when higher priority queues are empty.
7. A process running in the lower priority queue is interrupted by a process arriving in the higher priority queue.
Complexity: O(n3)
Boundary Conditions:
• Level 1 : Fixed priority preemptive Scheduling
• Level 2 : Round Robin Scheduling
• Consider: 1. Queue 2 will be processed after Queue 1 becomes empty.
• Consider 2. Priority of Queue 2 has lower priority than in Queue 1.
| 1aaecb44e6102a0661e1cd94e56f52ab57ed5ef3 | [
"Markdown",
"C"
] | 3 | C | kishankunal/CSE316Assingment | d5c489f826fd3f3cf5df4be1dd73d19cc297f834 | db0ee4a37c6900b98bfbba72010158c3481f07af |
refs/heads/master | <file_sep># Pictroid
## About
This is a school project. It is intended to teach programming beginners the basics of programming by using pictures as
the programming instructions. It looks as follows:

## Try it
[Here](http://alainhorner.ch/experiments/pictroid) you go
## Setup
- Check out the git repo
- Open 'index.html' in your browser
- Have fun
## Usage
- Your goal is to let the pictroid reach the delicious oil
- Drag the commands on the left and drop them into the 'code area' on their left
- You can reorder and remove commands that are already in the 'code area' (again with drag'n'drop)
- Let the pictroid run the code by clicking on 'run code'
- You can adapt the commands and let the pictroid rerun the code - but first you need to reset him
- If you have syntax errors in your code, pictroid will give you hints on how you can fix them<file_sep>/*global YUI */
YUI.add('pictroid-parser', function (Y) {
'use strict';
/****************************************************************************************/
/************************************* constructor **************************************/
/****************************************************************************************/
function Parser(config) {
Parser.superclass.constructor.apply(this, arguments);
}
Parser.NAME = 'parser';
/****************************************************************************************/
/************************************ public members ************************************/
/****************************************************************************************/
Parser.ATTRS = {
placeholder: {
value: 'placeholder'
},
command: {
value: 'command'
},
repeat: {
value: 'repeat'
},
condition: {
value: 'condition'
},
counter: {
value: 'counter'
},
conditional: {
value: 'conditional'
},
left: {
value: 'left'
},
right: {
value: 'right'
},
up: {
value: 'up'
},
down: {
value: 'down'
},
// note: commands need to be read reverse because the will directly land in the stack
// command, placeholder needs to be ['placeholder', 'command'], because 'command must be on top of the stack'
replacementRules: {
value: {
command: [
['left'],
['right'],
['up'],
['down']
],
conditional: [
['stone'],
['field']
],
counter: [
['two'],
['three'],
['four'],
['five'],
['six'],
['seven'],
['eight'],
['nine'],
['ten'],
['infinite']
],
placeholder: [
[],
['placeholder', 'command'],
['placeholder', 'endrepeat', 'placeholder', 'counter', 'repeat'],
['placeholder', 'endcondition', 'placeholder', 'conditional', 'condition']
]
}
}
};
Y.extend(Parser, Y.Base, {
/****************************************************************************************/
/*********************************** private members ************************************/
/****************************************************************************************/
/****************************************************************************************/
/*********************************** private methods ************************************/
/****************************************************************************************/
_getReplacement: function (availableReplacements, currentInstruction) {
var i;
for (i = 0; i < availableReplacements.length; i++) {
if (availableReplacements[i][0] === currentInstruction) {
return availableReplacements[i];
}
}
return null;
},
_getPlaceholderReplacement: function (instructions, currentIndex) {
switch (instructions[currentIndex]) {
case this.get('repeat'):
return this.get('replacementRules.placeholder')[2];// repeat
break;
case this.get('condition'):
return this.get('replacementRules.placeholder')[3];// condition
break;
case this.get('left'):
case this.get('right'):
case this.get('up'):
case this.get('down'):
return this.get('replacementRules.placeholder')[1];// command and placeholder
break;
default:
return this.get('replacementRules.placeholder')[0];// empty
}
},
_getCommandReplacement: function (instructions, currentIndex) {
var replacement = this._getReplacement(this.get('replacementRules.command'), instructions[currentIndex]);
return replacement || ['e_command_expected'];// TODO: this can actually never happen - think a bit about it
},
_getCounterReplacement: function (instructions, currentIndex) {
var replacement = this._getReplacement(this.get('replacementRules.counter'), instructions[currentIndex]);
return replacement || ['e_counter_expected'];
},
_getConditionalReplacement: function (instructions, currentIndex) {
var replacement = this._getReplacement(this.get('replacementRules.conditional'), instructions[currentIndex]);
return replacement || ['e_conditional_expected'];
},
_removePlaceholders: function (instructions) {
return instructions.filter(function (instruction) {
return instruction !== this.get('placeholder');
}, this);
},
_getSymbolLabel: function (classes, body) {
return '<span class="label ' + classes + '">' + body + '</span>';
},
_getErrorMessage: function (nextStackElement) {
var message;
switch (nextStackElement) {
case 'endrepeat':
message = 'Pictroid expects a ' + this._getSymbolLabel('green', 'green symbol with cross') + ' at this position.';
break;
case 'endcondition':
message = 'Pictroid expects a ' + this._getSymbolLabel('red', 'red symbol with cross') + ' at this position.';
break;
case 'conditional':
case 'e_conditional_expected':
message = 'Pictroid expects an ' + this._getSymbolLabel('orange', 'orange symbol') + ' at this position.';
break;
case 'counter':
case 'e_counter_expected':
message = 'Pictroid expects a ' + this._getSymbolLabel('blue', 'blue symbol') + ' at this position.';
break;
case undefined:
message =
'Pictroid expects a ' +
this._getSymbolLabel('yellow', 'yellow symbol') +
', a ' +
this._getSymbolLabel('red', 'red question mark') +
' or ' +
this._getSymbolLabel('green', 'a green circular arrow') +
' at this position.';
break;
default:
message = 'Pictroid is not sure what\'s wrong.';
break;
}
return 'Pictroid doesn\'t understand your program: ' + message;
},
/****************************************************************************************/
/************************************ event handlers ************************************/
/****************************************************************************************/
/****************************************************************************************/
/************************************ public methods ************************************/
/****************************************************************************************/
isValid: function (instructions) {
var i,
numInstructions = instructions.length,
stack = [this.get('placeholder')],
replacement,
nextStackElement,
protocol = {
success: true
};
for (i = 0; i < numInstructions; i++) {
nextStackElement = stack.pop();
switch (nextStackElement) {
case this.get('placeholder'):
replacement = this._getPlaceholderReplacement(instructions, i);
stack = stack.concat(replacement);
i -= 1;
break;
case this.get('command'):
replacement = this._getCommandReplacement(instructions, i);
stack = stack.concat(replacement);
i -= 1;
break;
case this.get('counter'):
replacement = this._getCounterReplacement(instructions, i);
stack = stack.concat(replacement);
i -= 1;
break;
case this.get('conditional'):
replacement = this._getConditionalReplacement(instructions, i);
stack = stack.concat(replacement);
i -= 1;
break;
default:
if (nextStackElement !== instructions[i]) {
protocol.success = false;
protocol.err = {
description: this._getErrorMessage(nextStackElement),
index: i
};
return protocol;
}
}
}
stack = this._removePlaceholders(stack);
if (stack.length > 0) {
protocol.success = false;
protocol.err = {
description: this._getErrorMessage(stack.pop()),
index: instructions.length
};
}
return protocol;
},
/****************************************************************************************/
/****************************** extended methods / members ******************************/
/****************************************************************************************/
initializer: function (cfg) {
},
destructor: function () {
}
});
Y.namespace('Pictroid').Parser = Parser;
}, '0.1', { requires: ['base']});
| 4fbc026d5e4734f342049cd650324dda12efea9c | [
"Markdown",
"JavaScript"
] | 2 | Markdown | elHornair/pictroid | 24be093b453e923a8d88052123a8269dbc2ebb12 | d9a281d8eb9c29da7bd111ea47178c760d8ca30c |
refs/heads/master | <repo_name>heremyas/file_organizer<file_sep>/main.py
# OBJECTIVES
# list files in a directory
# check files (folder or not)
# check extension name
# create directories based on their ext name
# move files to corresponding directory
from os import listdir, path, mkdir
import shutil
TARGET_DIR = "path/of/target/directory"
# list files and directories in a folder
def listFiles(target_dir):
files = listdir(target_dir)
if("File Organizer.bat" in files):
files.remove("File Organizer.bat")
return files
else:
return files
# check files (folder or not)
def checkFile(target_dir):
folders = []
files = []
files_list = listFiles(target_dir)
for i in files_list:
try:
listFiles(path.join(target_dir, i))
folders.append(i)
except:
files.append(i)
return {"folders": folders, "files": files}
# check extension name (make them unique)
def checkExt(target_dir):
ext_list = []
files_list = checkFile(target_dir)["files"]
for i in files_list:
ext_list.append(i.split('.')[-1])
filtered_ext = list(dict.fromkeys(ext_list))
return filtered_ext
# create directory
def createDirectories(target_dir):
for i in checkExt(target_dir):
try:
mkdir(path.join(target_dir, i))
print(f"Folder {i} Created")
except:
print(f"Folder {i} exist")
# modve files to target directory
def moveFilesToDir(target_dir):
file_list = checkFile(target_dir)['files']
if file_list:
try:
for i in range(len(file_list)):
ext_name = file_list[i].split('.')[-1]
source = path.join(target_dir, file_list[i])
target = path.join(target_dir, ext_name)
shutil.move(source, target)
print(f"File {file_list[i]} moved to {ext_name}")
except Exception as e:
print("Error moving files:", e)
print("File Organization Successful!")
else:
print("Files organized")
def main(target_dir):
createDirectories(target_dir)
moveFilesToDir(target_dir)
if (input("Press Enter to Exit... ") == ""):
quit()
if __name__ == '__main__':
main(TARGET_DIR)
| 16aa4418b1d858cc21297179218f0454a38e7e3f | [
"Python"
] | 1 | Python | heremyas/file_organizer | 474b511233f00cd4a4931a9e604ed57358d442f7 | df898c3645aef6ce02ff95fda53e5661a2bcfeb8 |
refs/heads/master | <file_sep># Crawler
爬虫实例
<file_sep>// Crawler project main.go
package main
import (
"fmt"
"github.com/henrylee2cn/pholcus/exec"
_ "github.com/henrylee2cn/pholcus_lib"
)
func main() {
fmt.Println("Hello World!")
exec.DefaultRun("gui")
}
| 28e6d35922132c605cfcb4a493ef76f840acf895 | [
"Markdown",
"Go"
] | 2 | Markdown | liyoung1992/Crawler | 18186439b6b6a56459895de9abb25dc48f2f1ce9 | 37bce708f1822ceafde86f0e962b94ec4c1fbf0f |
refs/heads/master | <repo_name>QuantumThings/TPE-2019-site<file_sep>/app.js
if ( WEBGL.isWebGLAvailable() === false ) {
document.body.appendChild( WEBGL.getWebGLErrorMessage() );
}
var container, stats;
var camera, cameraTarget, scene, renderer;
init();
animate();
function init() {
container = document.createElement( 'div' );
document.body.appendChild( container );
camera = new THREE.PerspectiveCamera( 35, window.innerWidth / window.innerHeight, 1, 10 );
camera.position.set( 0, 3, 0 );
cameraTarget = new THREE.Vector3( 0, 0, 0 );
scene = new THREE.Scene();
// ASCII file
var loader = new THREE.STLLoader();
loader.load( 'models/maison2.stl', function ( geometry ) {
var material = new THREE.MeshPhongMaterial( { color: 0x1111ff, specular: 0x010101, shininess: 50 } );
var mesh = new THREE.Mesh( geometry, material );
mesh.position.set( -Math.PI/10, 0, 0 );
mesh.rotation.set( Math.PI/2, Math.PI, 0);
mesh.scale.set( 0.01, 0.01, 0.01 );
mesh.castShadow = true;
mesh.receiveShadow = true;
scene.add( mesh );
} );
// Lights
scene.add( new THREE.HemisphereLight( 0xffffff, 0xffffff ) );
addShadowedLight( 1, 1, 1, 0xffffff, 1.35 );
addShadowedLight( 0.5, 1, - 1, 0xffaa00, 1 );
// renderer
renderer = new THREE.WebGLRenderer( { antialias: true } );
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( window.innerWidth, window.innerHeight );
renderer.gammaInput = true;
renderer.gammaOutput = true;
renderer.shadowMap.enabled = true;
container.appendChild( renderer.domElement );
// stats
stats = new Stats();
container.appendChild( stats.dom );
//
window.addEventListener( 'resize', onWindowResize, false );
}
function addShadowedLight( x, y, z, color, intensity ) {
var directionalLight = new THREE.DirectionalLight( color, intensity );
directionalLight.position.set( x, y, z );
scene.add( directionalLight );
directionalLight.castShadow = true;
var d = 1;
directionalLight.shadow.camera.left = - d;
directionalLight.shadow.camera.right = d;
directionalLight.shadow.camera.top = d;
directionalLight.shadow.camera.bottom = - d;
directionalLight.shadow.camera.near = 1;
directionalLight.shadow.camera.far = 4;
directionalLight.shadow.mapSize.width = 1024;
directionalLight.shadow.mapSize.height = 1024;
directionalLight.shadow.bias = - 0.002;
}
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
}
function animate() {
requestAnimationFrame( animate );
render();
stats.update();
}
function render() {
var timer = Date.now() * 0.0005
camera.position.x = Math.cos( timer ) * 3;
camera.position.z = Math.sin( timer ) * 3;
camera.lookAt( cameraTarget );
renderer.render( scene, camera );
} | 70f4dec5310a733d1397eab4a05162b3e447b387 | [
"JavaScript"
] | 1 | JavaScript | QuantumThings/TPE-2019-site | bd7c2a8f3a089654913cef3d518ae844a1d7c66d | 1fb9122bae92df2233e38efc81b296000276c47c |
refs/heads/master | <file_sep>if(location.host == '192.168.1.188'){/*如果是测试连接*/
//项目名称
var workspace= 'maotai-backstage';
var getAjaxOrigin = function getAjaxOrigin(){//返回请求接口的 origin
return 'http://maotai.hmsh.com';
};
var loginPage = function loginPage(){
//通过项目名称变量 拼接正则
var x = workspace.replace(/\-/ig,'\\-').replace(/\_/ig,'\\_');
var r = RegExp(x,'ig');
//截取拼接href直到根目录
var h = location.href;
var q = h.split(r)[0] + workspace + '/login.html';
parent.location.href = q;
}
}else{
var workspace= '';
var getAjaxOrigin = function getAjaxOrigin(){//返回请求接口的 origin
return '';
};
var loginPage = function loginPage(){//跳转登录页
location.href = '/admin/login';
}
}
function getOrigin() {//返回项目所在的 origin
return location.origin||location.host;
}
/*
 Login/session bootstrap: cookie names, defaults, and the initial auth check.
* */
if (!Cookies.enabled) {
	alert('浏览器不支持cookies,请检查是否禁用!');
}
// default cookie options; expires: 60*30 - presumably seconds (cookies-js
// convention), i.e. a 30-minute session - TODO confirm against the Cookies lib used
var cookiesOptions = {
	expires: 60*30
};
var tokenname = 'tokenPC';
var tokenAdminName = 'adminNamePC';
// NOTE(review): this value looks like an anonymization placeholder left by a
// scrubbing tool; the real cookie name was presumably something like 'powerNamePC' - confirm.
var tokenPowerName = '<PASSWORD>';
// true on pages (login.html) that must be reachable without a session
var isLoginPage = /(login)\.html/ig.test(location.pathname);
// session token read once at page load (cached; reused by delayedToken)
var isToken = Cookies.get(tokenname);
judgeLoginType()
/**
 * Auth gate, run on page load and before ajax actions.
 * Returns true when the page may proceed, false when the user was
 * redirected to the login page.
 */
function judgeLoginType() {
	// Pages that do not require authentication always pass.
	if (isLoginPage) {
		return true;
	}
	if (!isToken) {
		// No session cookie: bounce to the login page.
		toLoginPage('init');
		return false;
	}
	// Valid session: push the cookie expiry forward.
	delayedToken();
	return true;
}
function toLoginPage(info) {// session expired: show a modal, then auto-redirect to login after 1s
	// layer.closeAll();
	layer.open({
		type: 1
		,title: false // no title bar
		,closeBtn: false
		,area: '150px;'
		,shade: 0.8
		,id: 'logout' // fixed id prevents the dialog from stacking up
		,btnAlign: 'c'
		,moveType: 1 // drag mode, 0 or 1
		,content: '<div style="padding: 20px 10px; text-align:center;width:auto;line-height: 22px; background-color: #393D49; color: #fff; font-weight: 300;"><i class="layui-icon layui-anim layui-anim-loop" style="font-size: 36px;"></i><br>登录过期...</div>'
	});
	// clear the session cookie immediately
	Cookies.set(tokenname,'',{expires:0});
	setTimeout(function() {
		parent.window.loginPage();
	}, 1000);
	console.log('toLoginPage【yes】:',info);
	// Self-replace with a no-op logger so that subsequent calls (e.g. several
	// failing ajax responses in a row) do not open the dialog / redirect again.
	toLoginPage = function(info){
		console.log('toLoginPage【no】:',info);
	}
}
/**
 * Extend the session: re-write each auth cookie with a fresh expiry.
 * The token value itself comes from `isToken`, cached at page load.
 */
function delayedToken() {
	Cookies.set(tokenname, isToken, cookiesOptions);
	var keys = [tokenAdminName, tokenPowerName];
	for (var i = 0; i < keys.length; i++) {
		Cookies.set(keys[i], Cookies.get(keys[i]), cookiesOptions);
	}
}
/**
 * Persist the login response into session cookies.
 * `data` is the login API payload; the fields live under data.data.
 */
function loginSaveToken(data) {
	var payload = data.data;
	Cookies.set(tokenname, payload.token, cookiesOptions);
	Cookies.set(tokenAdminName, payload.adminName, cookiesOptions);
	Cookies.set(tokenPowerName, payload.powerName, cookiesOptions);
}
// Main navigation menu - full admin role.
// Each entry: { name: group label, childs: [{ name, url }] }.
var navs = [
	// {
	// 	name:'系统',
	// 	childs:[
	// 		{
	// 			name:'后台系统菜单',
	// 			url:'pages/system/menu.html?__hbt=1517217704191'
	// 		}
	// 	]
	// },
	{
		name:'会员管理',
		childs:[
			{
				name:'注册新会员',
				url:'pages/member/register.html'
			},
			// {
			// 	name:'等待审核',
			// 	url:'pages/member/audit-waiting.html'
			// },
			{
				name:'会员列表',
				url:'pages/member/member-list.html'
			},
			{
				name:'会员升级管理',
				url:'pages/member/upgrade-management.html'
			},
			{
				name:'会员晋升明细',
				url:'pages/member/promotion.html'
			}
		]
	},
	{
		name:'财务管理',
		childs:[
			{
				name:'会员充值管理',
				url:'pages/financialManagement/menber-prepaid.html'
			},
			{
				name:'会员提现管理',
				url:'pages/financialManagement/menber-withdrawal.html'
			},
			{
				name:'财务明细管理',
				url:'pages/financialManagement/financial-details.html'
			}
		]
	},
	{
		name:'奖金管理',
		childs:[
			{
				name:'会员奖金明细',
				url:'pages/bonusManagement/member-bonus-details.html'
			},
			{
				name:'会员奖金汇总',
				url:'pages/bonusManagement/member-bonus-summary.html'
			},
			// {
			// 	name:'奖金总统计',
			// 	url:'pages/bonusManagement/bonus-total-statistics.html'
			// },
			{
				name:'奖金参数设置',
				url:'pages/bonusManagement/bonus-parameter-settings.html'
			}
		]
	},
	{
		name:'商品管理',
		childs:[
			{
				name:'分类管理',
				url:'pages/goodsManagement/classify-new.html'
			},
			{
				name:'商品列表',
				url:'pages/goodsManagement/goods-list.html'
			},
			{
				name:'新增商品',
				url:'pages/goodsManagement/goods-edit.html'
			},
			{
				name:'订单管理',
				url:'pages/goodsManagement/order-list.html'
			}
		]
	}
]
// Finance role menu (caiwu = 财务)
var navs_caiwu = [
	{
		name:'财务管理',
		childs:[
			{
				name:'会员充值管理',
				url:'pages/financialManagement/menber-prepaid.html'
			},
			{
				name:'会员提现管理',
				url:'pages/financialManagement/menber-withdrawal.html'
			},
			{
				name:'财务明细管理',
				url:'pages/financialManagement/financial-details.html'
			}
		]
	},
	{
		name:'奖金管理',
		childs:[
			{
				name:'会员奖金明细',
				url:'pages/bonusManagement/member-bonus-details.html'
			},
			{
				name:'会员奖金汇总',
				url:'pages/bonusManagement/member-bonus-summary.html'
			}
		]
	},
	{
		name:'商品管理',
		childs:[
			{
				name:'订单管理',
				url:'pages/goodsManagement/order-list.html'
			}
		]
	}
]
// Purchasing role menu (caigou = 采购)
var navs_caigou = [
	{
		name:'商品管理',
		childs:[
			{
				name:'分类管理',
				url:'pages/goodsManagement/classify-new.html'
			},
			{
				name:'商品列表',
				url:'pages/goodsManagement/goods-list.html'
			},
			{
				name:'新增商品',
				url:'pages/goodsManagement/goods-edit.html'
			}
		]
	}
]
// Logistics role menu (wuliu = 物流; the original comment said "purchasing" - copy/paste slip)
var navs_wuliu = [
	{
		name:'商品管理',
		childs:[
			{
				name:'订单管理',
				url:'pages/goodsManagement/order-list.html'
			}
		]
	}
]
/*
 Display dictionaries: integer status codes from the API mapped to labels.
* */
// payment method, indexed by pay-type code (sparse above index 3)
var arr_pay_type = ['未支付','余额支付', '积分支付','粮票支付'];
arr_pay_type[11]='支付宝';
arr_pay_type[13]='快钱支付';
// order shipment status
var shipments_type = ['待付款', '待发货', '待收货', '完成', '订单已取消', '退货'];
// audit status
var auditStatus = ["未审核","审核通过","拒绝"];
// frozen state (note: identifier misspells "freeze"; kept for existing callers)
var frezzeState =["已冻结","未冻结"]
// wallet/currency type for recharges
var moneyType = ["余额","积分","粮票"];
// recharge channel
var rechargeType = ["系统充值","会员充值","微信充值","支付宝充值"];
// income/expense transaction types, sparse array indexed by bonus-type code
var bonusType = [];
bonusType[0] = '全部';
bonusType[1] = '余额提现';
bonusType[2] = '余额奖金';
bonusType[3] = '余额充值';
bonusType[4] = '余额购物';
bonusType[5] = '提现拒绝';
bonusType[12] = '积分奖金';
bonusType[13] = '积分充值';
bonusType[14] = '积分购物';
bonusType[20] = '购物赠送粮票';
bonusType[21] = '推荐赠送粮票';
bonusType[23] = '粮票充值';
// visibility flag
var isShow = ['隐藏','显示'];
// audit status (disabled duplicate of auditStatus above)
//var status = ["未审核","审核通过","拒绝"];
// Menu management
// menu node type: 0 = directory, 1 = menu page, 2 = button
var menuType = ['目录','菜单','按钮'];
// (a "menu visibility" dictionary was apparently planned here but never defined)
/**
 * MenuTree - renders a layui-style tree menu from a flat server list.
 *
 * @param {Object} data - overrides for any default option in this.opt
 *                        (merged via jQuery.extend).
 */
function MenuTree (data) {
	// object: overridable default options
	this.opt = $.extend({},{
		view:$('#treeMenu'),
		url:'data.json',
		openButton:false, // show add / edit / delete buttons on each node
		openSelect:false, // enable the check-all selection checkboxes
		hasRecordDown:true,// remember expand/collapse state between refreshes
		nowOpenStatus:[],// menuIds of currently expanded folders, used to restore state on refresh
		clickCallback:function(e,$btn,data,type){// delegated handler fired on .layui-tree-item clicks
			if(type == 'add'){
				console.log('add',$btn,data);
				if(typeof newOrder == 'function'){
					newOrder({
						parentName: data.name,
						parentId: data.menuId,
						type: data.menuId ===0 ? 0:1
					},$btn);
				}
			}else if(type == 'edit'){
				console.log('edit',$btn,data);
				if(typeof newOrder == 'function'){
					newOrder(data,$btn);
				}
			}else if(type == 'delete'){
				console.log('delete',$btn,data);
				if(typeof deleteMenu == 'function'){
					deleteMenu(data,$btn);
				}
			}
		},
		createLi:function(view,data) {
			/*
			 * Build the HTML string (or jQuery object) for one <li> menu node.
			 * Note: do NOT create the child <ul> here even when children exist;
			 * createEachMenuLi appends it while recursing.
			 * */
			var li = "<li>";
			//1. expand/collapse arrow
			li += this.createOpenArror(data);
			li += "<a class='layui-tree-item'>";
			//2. folder or file icon
			li += this.createFileIcon(data);
			//3. selection checkbox (only when openSelect is on)
			li += this.createSelect(data) ;
			//4. menu name
			li += this.createName(data) ;
			//5. add/edit/delete buttons (only when openButton is on)
			li += this.createButton(data) ;
			return li + '</a></li>';
		},
		createOpenArror:function createOpen(data){ // expand/collapse arrow icon
			if(typeof data.children == 'undefined' || data.children.length<1) {
				// leaf node: no children, plain (inactive) arrow
				return '<i class="layui-icon layui-tree-spread"></i>';
			} else if( ( typeof data.open != 'undefined' && !!data.open ) || ( !!this.hasRecordDown && this.nowOpenStatus instanceof Array && $.inArray(data.menuId,this.nowOpenStatus)>=0 ) ) {
				// has children and should start expanded (.active)
				return '<i class="layui-icon layui-tree-spread active"></i>';
			} else {
				// has children, starts collapsed
				return '<i class="layui-icon layui-tree-spread"></i>';
			}
			// BUGFIX: removed an unreachable duplicate return that followed the
			// exhaustive if/else-if/else above.
		},
		createFileIcon:function (data) { // folder vs file icon
			if(data.parentId < 0 || (typeof data.type != 'undefined' && data.type === 0)) { // folder icon
				return '<i class="layui-icon layui-tree-branch"></i>';
			} else { // file icon
				return '<i class="layui-icon "></i>';
			}
		},
		createName:function (data) { // menu label
			if(data.parentId < 0 || (typeof data.type != 'undefined' && data.type === 0)) { // folder
				return '<cite>' + data.name + '</cite>';
			} else {
				if(typeof data.type != 'undefined' && data.type === 1) {
					// type 1 = page: show the target url as a tooltip
					return '<cite title="' + data.url + '" class="link-line">' + data.name + '</cite>';
				} else {
					return '<cite>' + data.name + '</cite>';
				}
			}
		},
		createSelect:function (data){ // selection checkbox
			if(typeof this.openSelect == 'boolean' && this.openSelect){
				return '<i class="layui-icon layui-icon-select " title="勾选"></i>';
			}
			return '';
		},
		createButton:function (data){ // add / edit / delete action buttons
			if( !(typeof this.openButton == 'boolean' && this.openButton) ){
				return '';
			}
			var str = '';
			if(data.parentId === -1 || (typeof data.type != 'undefined' && data.type === 0)) {
				// only folders can receive a new child
				str += '<i class="layui-icon layui-icon-function layui-icon-add" title="新增子级"></i>';
			}
			str += '<i class="layui-icon layui-icon-function layui-icon-edit" title="编辑"></i>';
			str += '<i class="layui-icon layui-icon-function layui-icon-delete" title="删除">ဆ</i>';
			return str;
		},
		alterSelectIcon:function($btn,status){ // status: 0 unselected, 1 selected, 2 half-selected
			// BUGFIX: the guard previously tested $btn.length<0, which can never be
			// true (jQuery length is >= 0), so empty selections slipped through.
			if(typeof $btn != 'object' || $btn.length<1){
				console.error('$btn data types is error!');
				return false;
			}
			$btn.removeClass('active half');
			if(typeof status != 'number' || status === 0){
				// unselected
				$btn.html('');
			}else if(status === 1){
				// selected
				$btn.addClass('active').html('');
			}else if(status === 2){
				// half-selected (some but not all descendants checked)
				$btn.addClass('half').html('');
			}
		}
	},data);
	// flat list of menu records as returned by the server
	this.list = [];
	// nested tree built from this.list, ordered by orderNum
	this.menu = [];
	// sanity-check the container option
	typeof this.opt.view == 'object' ? '' : console.error("opt.view not object");
}
/*
 * Initialize the widget:
 * 1. delegate DOM events on the container
 * 2. full refresh (fetch > sort > render)
 * Returns this for chaining.
 * */
MenuTree.prototype.init = function () {
	// (removed an unused `_this` local that was never referenced)
	this.addEvent();
	this.refresh();
	return this;
}
/*
 * Wire up all delegated handlers on the tree container:
 * expand/collapse arrows first, then item clicks (select / add / edit / delete).
 * The `fun` parameter is unused but kept for interface compatibility.
 * */
MenuTree.prototype.addEvent = function (fun) {
	this.addClickUpDown().addClickEvent();
}
/*
 * Event delegation: clicks inside .layui-tree-item
 * (hosts check-all, add, edit, delete and any custom clickCallback logic).
 * */
MenuTree.prototype.addClickEvent = function (fun) {
	var _this = this;
	var view = this.opt.view;
	if(view.hasClass('add-event-click-item')){
		// handler already delegated on this container; bind only once
		return this;
	}
	view.on('click','.layui-tree-item',function (e) {
		// e.toElement is the old-IE fallback for e.target
		var $target = $(e.target || e.toElement || this);
		if(_this.opt.openSelect && $target.length>0 && $target.hasClass('layui-icon-select') ){
			// selection checkbox clicked: toggle select state (cascades to children/parents)
			$target.hasClass('active') ? _this.cancelSelect($target): _this.addSelect($target);
			e.stopPropagation();
			e.preventDefault();
		}else if(typeof _this.opt.clickCallback == 'function'){
			// work out which action button (if any) was hit, then invoke the callback
			var type;
			if(_this.opt.openButton && $target.length>0){
				if($target.hasClass('layui-icon-add') ){
					// add child
					type = 'add';
				}else if($target.hasClass('layui-icon-edit') ){
					// edit node
					type = 'edit';
				}else if($target.hasClass('layui-icon-delete') ){
					// delete node
					type = 'delete';
				}
			}
			// callback receives (event, clicked element, node data, action type|undefined)
			_this.opt.clickCallback.call(this,e,$target,$(this).parent('li').data('data'),type);
		}else{
			e.stopPropagation();
			e.preventDefault();
		}
		$target = null;
	}).addClass('add-event-click-item');
	view = null;
	return this;
};
/*
 * Event delegation: toggle expand/collapse when a spread arrow is clicked.
 * Binds exactly once per container; returns this for chaining.
 * */
MenuTree.prototype.addClickUpDown = function (fun) {
	var _this = this;
	var view = this.opt.view;
	// Guard: only delegate the handler once per container.
	if(view.hasClass('add-event-up-down')){
		return this;
	}
	view.on('click','.layui-tree-spread',function (e) {
		// stop bubbling and the default action
		e.stopPropagation();
		e.preventDefault();
		var $btn = $(this);
		// leaf nodes (no sibling <ul>) have nothing to toggle
		if($btn.siblings('ul').length<1){return false;}
		var data = $btn.parent('li').data('data');
		if($btn.hasClass('active')){
			// collapse
			$btn.html('');
			$btn.removeClass('active');
			if(_this.opt.hasRecordDown){
				// record the collapsed state
				_this.alterOpenStatus(data);
			}
		}else{
			// expand
			$btn.html('');
			$btn.addClass('active');
			if(_this.opt.hasRecordDown){
				// record the expanded state
				_this.alterOpenStatus(data,true);
			}
		}
	// BUGFIX: the marker class was misspelled 'add-evnet-up-down', so the
	// hasClass guard above never matched and repeated addEvent() calls bound
	// duplicate handlers (the sibling addClickEvent spells its marker correctly).
	}).addClass('add-event-up-down');
	view = null;
	return this;
};
/*
 * Selection - un-check a node.
 * Cascades down (all descendants cleared) and up (parent recomputed).
 * `isSon` is truthy when called recursively from a parent, which stops
 * the upward pass and prevents infinite recursion.
 * */
MenuTree.prototype.cancelSelect = function ($btn,isSon) {
	if( typeof $btn != 'object' || $btn.length<1 ){return this;}
	//1. clear this node's own state
	this.opt.alterSelectIcon($btn,0);
	//2. clear selected / half-selected state on all direct children;
	//   pass isSon so the recursive call skips the parent update
	var $son = $btn.parent('a').siblings('ul').children('li').children('a').children('.layui-icon-select');
	this.cancelSelect($son,'isSon');
	if(!!isSon){
		// called from a parent: do not walk back up (would loop forever)
		return this;
	}
	//3. recompute the parent from the siblings that are still (half-)selected
	var $parent = $btn.parents('ul').not('.layui-tree').eq(0).siblings('a').children('.layui-icon-select');
	var $brothers = $btn.parents('li').eq(0).siblings().children('a').children('.layui-icon-select').filter('.active,.half');
	if($brothers.length>0){
		// some siblings remain selected: parent becomes half-selected
		this.addHalfSelect($parent);
	}else{
		// nothing else selected: clear the parent too
		this.cancelSelect($parent);
	}
	return this;
}
/*
 * Selection - check a node.
 * Cascades down (all descendants checked) and up (parent recomputed).
 * `isSon` is truthy when called recursively from a parent, which stops
 * the upward pass and prevents infinite recursion.
 * */
MenuTree.prototype.addSelect = function ($btn,isSon) {
	if( typeof $btn != 'object' || $btn.length<1 ){return this;}
	//1. mark this node's own state as selected
	this.opt.alterSelectIcon($btn,1);
	//2. select all direct children; pass isSon so the recursive call
	//   skips the parent update
	var $son = $btn.parent('a').siblings('ul').children('li').children('a').children('.layui-icon-select');
	this.addSelect($son,'isSon');
	if(!!isSon){
		// called from a parent: do not walk back up (would loop forever)
		return this;
	}
	//3. recompute the parent: fully selected only if every sibling is selected
	var $parent = $btn.parents('ul').not('.layui-tree').eq(0).siblings('a').children('.layui-icon-select');
	var $brothers = $btn.parents('li').eq(0).siblings().children('a').children('.layui-icon-select');
	if( $brothers.length > 0 ){
		// this node has siblings
		if($brothers.length == $brothers.filter('.active').length){
			// every sibling selected: parent fully selected
			this.addSelect($parent);
		}else{
			// mixed: parent half-selected
			this.addHalfSelect($parent);
		}
	}else{
		// no siblings: parent follows this node
		this.addSelect($parent);
	}
	return this;
}
/*
 * Selection - mark a node half-selected (some but not all descendants checked).
 * A half-selected child always makes its ancestors half-selected too, so this
 * walks one level up and recurses until the recursion hits an empty selection.
 * */
MenuTree.prototype.addHalfSelect = function ($btn) {
	// ignore calls without a matched jQuery element
	if (typeof $btn !== 'object' || $btn.length < 1) { return this; }
	// paint this node's checkbox in the "partially selected" state
	this.opt.alterSelectIcon($btn, 2);
	// find the parent node's checkbox and propagate upwards
	var $parentIcon = $btn.parents('ul').not('.layui-tree').eq(0)
		.siblings('a').children('.layui-icon-select');
	this.addHalfSelect($parentIcon);
	return this;
}
/*
 * Full refresh: re-fetch the menu list from the server, then
 * re-sort the data and rebuild the DOM view.
 * */
MenuTree.prototype.refresh = function () {
	var self = this;
	this.requireList(function () {
		self.refreshSortAddView();
	});
	return this;
}
/*
 * Data + view refresh: after this.list / this.menu were updated by hand,
 * re-sort the in-memory data and rebuild the DOM from it.
 * */
MenuTree.prototype.refreshSortAddView = function () {
	return this.sort().refreshView();
}
/*
 * View refresh: reset the container's classes to the bare layui tree skin,
 * then rebuild all nodes from this.menu.
 * */
MenuTree.prototype.refreshView = function () {
	this.opt.view.removeClass().addClass('layui-box layui-tree');
	return this.createView();
}
/*
 * Fetch data: GET the full menu list from this.opt.url and store it in
 * this.list, then invoke `fun(list)` if provided. Existing data is cleared
 * first via emptyData().
 * */
MenuTree.prototype.requireList = function (fun) {
	var _this = this;
	ajax({
		type:'get',
		info:'获取所有菜单列表',
		url:_this.opt.url,
		data:{
			// timestamp cache-buster
			hash:new Date().getTime()
		},
		success:function(data,status,xhr) {
			// console.log('ajax成功',data);
			_this.emptyData();
			_this.list = data.data;
			if(typeof fun == 'function'){
				fun(_this.list);
			}
		}
	})
	return this;
};
/*
 * Clear data: reset both this.list (flat) and this.menu (tree).
 * The `lis`/`view` parameters are unused; kept for interface compatibility.
 * */
MenuTree.prototype.emptyData = function (lis,view) {
	this.list = [];
	this.menu = [];
	return this;
}
/*
 * Aggregate sort: classify the flat list into a nested tree (listClassify),
 * then order each level by orderNum (menuSort). Logs the results for debugging.
 * */
MenuTree.prototype.sort = function () {
	this.listClassify();
	this.menuSort();
	console.log('排序后 list:',this.list);
	console.log('排序后 menu:',this.menu);
	return this;
};
/*
 * List classification:
 * 1. sort this.list ascending by parentId (then menuId)
 * 2. rebuild this.menu: entries whose parentId matches another entry's menuId
 *    are pushed into that parent's `children` array; the rest become top-level.
 * Also assigns a depth marker `lv` (1 for top level, parent.lv+1 otherwise).
 * */
MenuTree.prototype.listClassify = function () {
	var _this = this;
	var list = _this.list,
		menu = _this.menu = [],
		parent ={};
	// ascending by parentId so parents are processed before their children
	list.sort(function (a,b) {
		if( a.parentId == b.parentId){
			return a.menuId - b.menuId;
		}
		return a.parentId - b.parentId;
	});
	$.each(list,function (i,e) {
		// For each item, decide whether its parentId matches some menuId in the
		// list (child) or not (top-level entry).
		var judge = false;
		// drop any children array left over from a previous classification run
		if(e.children){
			delete e.children;
		}
		$.each(list,function (j,v) {
			if(e.parentId === v.menuId){
				// found the parent; keep a reference (not a circular child->parent link)
				judge = true ;
				parent = v;
				return false;
			}
		});
		// file the item: `lv` is the depth level (1 = top)
		var lv = 1;
		if(judge){
			// child: append to parent.children
			e.lv = parent.lv ? parent.lv+1 : lv+1;
			if( parent.children instanceof Array ){
				parent.children.push(e);
			}else{
				parent.children = [e];
			}
		}else{
			// top-level entry
			e.lv = lv;
			menu.push(e);
		}
	})
	parent = null ;
	list = null ;
	menu = null ;
	// NOTE: `list` was just nulled above, so this always returns null;
	// callers (sort) ignore the return value.
	return list;
};
/*
 * Tree ordering: after classification, recursively sort every children array
 * in this.menu by orderNum (ties broken by menuId).
 * Called without arguments initially; recursion passes each children array.
 * */
MenuTree.prototype.menuSort = function (arrs) {
	var _this = this;
	if( !(arrs instanceof Array) ){
		// first (external) call: start from the root level
		var arrs = this.menu;
	}
	//1. sort this level
	arrs.sort(function (a,b) {
		if(a.orderNum == b.orderNum){
			return a.menuId - b.menuId;
		}
		return a.orderNum - b.orderNum;
	});
	//2. recurse into any child collections
	$.each(arrs,function (i,e) {
		if(e.children instanceof Array){
			_this.menuSort(e.children);
		}
	});
};
/*
 * DOM - rebuild the whole menu view: empty the container, then render
 * every node of this.menu into it recursively.
 * */
MenuTree.prototype.createView = function () {
	var container = this.opt.view.empty();
	this.createEachMenuLi(container, this.menu);
	return this;
};
/*
 * DOM - render one level of menu items into `view`, recursing into children.
 * `view` is a jQuery container (a <ul> is created inside it if needed);
 * `list` is the array of node data for this level.
 * */
MenuTree.prototype.createEachMenuLi = function (view,list) {
	if( !( typeof view == 'object' && view.length>0 && list instanceof Array && list.length>0) ){
		console.warn(' list or view is not undefined or null ! ');
		return this;
	}
	// tagName is uppercase in HTML documents; the lowercase check presumably
	// covers XML/XHTML parsing - confirm whether it is still needed
	if( !( view.get(0).tagName == 'ul' || view.get(0).tagName == 'UL' ) ){
		// current view is not a <ul>: create one inside it and render into that
		view.append('<ul></ul>');
		view = view.children('ul').last();
	}
	var _this = this;
	$.each(list, function(i,e) {
		// opt.createLi returns either an HTML string or a jQuery element for the node
		var li = _this.opt.createLi(view,e);
		if(typeof li == 'string'){
			li = $(li);
		}
		if(typeof li == 'object' && li.length>0 && li.get(0).nodeType === 1 ){
			view.append(li);
			// stash the node data on the element for the delegated click handlers
			li.data('data',e);
			// recurse into children, rendering them inside this <li>
			if(e.children instanceof Array && e.children.length>0){
				_this.createEachMenuLi(li,e.children);
			}
		}else{
			console.warn(e.name+'返回的<li>不是正确的Html字符串或jQuery对象!');
		}
	});
	_this = null;
	view = null;
	list = null;
	return this;
}
/*
 * State recording: remember a folder's expand/collapse state in
 * opt.nowOpenStatus (array of menuIds). `data` is a node object or a menuId;
 * `status` true = record as expanded, falsy = record as collapsed.
 * */
MenuTree.prototype.alterOpenStatus = function (data,status) {
	if(typeof data == 'object'){
		var id = data.menuId;
	}else if(typeof data == 'string'){
		var id = data;
	}else{
		console.error('parameter data types is not object or string !');
		return this;
	}
	var arr = this.opt.nowOpenStatus;
	// index of this id in the open list, or -1
	var n = $.inArray(id,arr);
	if(typeof status == 'boolean' && status){
		// record expanded (add once)
		if(n<0){
			arr.push(id);
		}
	}else{
		// record collapsed (remove if present)
		if(n>=0){
			arr.splice(n,1);
		}
	}
	id = null;
	arr = null;
	n = null;
	return this;
}
// API endpoint: province / city / district area lookup
var url_getArea = getAjaxOrigin() + "/api/admin/config/queryArea";
// API endpoint: member level (grade) dictionary
var url_menberGrade = getAjaxOrigin() + '/api/admin/config/queryMemberLevel';
// API endpoint: member identity / rank dictionary
var url_MenberPosition = getAjaxOrigin() + '/api/admin/config/queryIdentity';
/**
 * Load the province list (level 2 under root pid 1) and pass it to `fun`.
 * `fun(list, rawResponse)` is invoked only when it is a function.
 */
function getProvince(fun) {
	ajax({
		type: 'get',
		info: '获取省份',
		url: url_getArea,
		data: { pid: 1, level: 2 },
		success: function (res) {
			if (typeof fun === 'function') {
				fun(res.data, res);
			}
		}
	})
}
function getProvinceOption(parent, val) { // fill `parent` <select> with province options; preselect `val` if given
	getProvince(function(data) {
		var str = '<option value="">请选择省份</option>';
		if(val) {
			$.each(data, function(i, e) {
				str += '<option value="' + e.id + '" ' + (e.id == val ? 'selected' : '') + '>' + e.areaName + '</option>'
			})
		} else {
			// no preselection: slightly different markup (no trailing space), kept as-is
			$.each(data, function(i, e) {
				str += '<option value="' + e.id + '">' + e.areaName + '</option>'
			})
		}
		setOption(parent, str);
	});
}
/**
 * Load the city list (level 3) under province `pid` and pass it to `fun`.
 * `fun(list, rawResponse)` is invoked only when it is a function.
 */
function getXCity(pid, fun) {
	ajax({
		type: 'get',
		info: '获取城市',
		url: url_getArea,
		data: { pid: pid, level: 3 },
		success: function (res) {
			if (typeof fun === 'function') {
				fun(res.data, res);
			}
		}
	})
}
/**
 * Fill `parent` <select> with the cities under province `pid`;
 * preselect `val` when given. (The original comment said "province" - it sets cities.)
 */
function getCityOption(parent, pid, val) {
	getXCity(pid, function (list) {
		var html = '<option value="">请选择城市</option>';
		var i, e;
		if (val) {
			for (i = 0; i < list.length; i++) {
				e = list[i];
				html += '<option value="' + e.id + '" ' + (e.id == val ? 'selected' : '') + '>' + e.areaName + '</option>'
			}
		} else {
			for (i = 0; i < list.length; i++) {
				e = list[i];
				html += '<option value="' + e.id + '">' + e.areaName + '</option>'
			}
		}
		setOption(parent, html);
	});
}
function getArea(pid, fun) { // load the district list (level 4) under city `pid`
	ajax({
		type: 'get',
		info: '获取县区',
		url: url_getArea,
		data: {
			pid: pid,
			level: 4
		},
		success: function(data) {
			// pass (list, rawResponse) to the callback when provided
			if(fun && typeof fun == 'function') {
				fun(data.data, data);
			}
		}
	})
}
function getAreaOption(parent, pid, val) { // fill `parent` <select> with districts under city `pid`; preselect `val` if given
	getArea(pid, function(data) {
		var str = '<option value="">请选择区县</option>';
		if(val) {
			$.each(data, function(i, e) {
				str += '<option value="' + e.id + '" ' + (e.id == val ? 'selected' : '') + '>' + e.areaName + '</option>'
			})
		} else {
			// no preselection: slightly different markup (no trailing space), kept as-is
			$.each(data, function(i, e) {
				str += '<option value="' + e.id + '">' + e.areaName + '</option>'
			})
		}
		setOption(parent, str);
	});
}
/**
 * Shipping-address variant of the cascading province/city/district selects:
 * same behavior as ProvinceSan but targeting the add* select classes.
 * `val` may override defaults, e.g. {p, c, a} preselected ids.
 */
function ProvinceSanAdd(val) {
	var opts = $.extend(true, {
		pname: 'addProvince',
		cname: 'addCity',
		aname: 'addArea'
	}, val);
	ProvinceSan(opts);
}
/**
 * Cascading province / city / district selects.
 *
 * Expected markup: three <select> elements whose CSS classes and lay-filter
 * attributes match `pname` / `cname` / `aname` (defaults: province / city / area).
 *
 * `val` options:
 *   pname / cname / aname - class + lay-filter name for each select
 *   p / c / a             - preselected area ids (p required before c, c before a)
 *   noEvent               - render read-only: disable selects, skip change listeners
 */
function ProvinceSan(val) {
	var val = val || {};
	var pname = val.pname || 'province';
	var cname = val.cname || 'city';
	var aname = val.aname || 'area';
	var p = $('select.' + pname);
	var c = $('select.' + cname);
	var a = $('select.' + aname);
	// initial population: cascade only as deep as preselected ids are supplied
	if(val.p) {
		getProvinceOption(p, val.p);
		if(val.c) {
			getCityOption(c, val.p, val.c);
			if(val.a) {
				getAreaOption(a, val.c, val.a);
			}
		}
	} else {
		getProvinceOption(p);
	}
	if(val.noEvent) {
		// read-only mode: no cascade listeners, selects disabled
		p.attr('disabled', 'disabled');
		c.attr('disabled', 'disabled');
		a.attr('disabled', 'disabled');
		return false;
	}
	// province changed: reload the city list, reset the district list
	form.on('select(' + pname + ')', function(data) {
		if(c.length > 0) {
			if(data.value) {
				getCityOption(c, data.value);
			} else {
				setOption(c, '<option value="">请选择市</option>');
			}
			setOption(a, '<option value="">请选择县/区</option>');
		}
	});
	// city changed: reload the district list
	form.on('select(' + cname + ')', function(data) {
		// BUGFIX: the guard previously tested c.length (the city select) before
		// writing into the district select; it must test the district select.
		if(a.length > 0) {
			if(data.value) {
				getAreaOption(a, data.value);
			} else {
				setOption(a, '<option value="">请选择县/区</option>');
			}
		}
	});
}
function getMenberGrade(fun) { //获取会员等级
ajax({
type: 'get',
info: '获取会员等级列表',
url: url_menberGrade,
data:{
methodType:1
},
success: function(data) {
if(fun && typeof fun == 'function') {
fun(data.data, data);
}
}
})
}
function getMenberGradeOption(parent, val) {
getMenberGrade(function(list) {
var str = '';
if(val) {
$.each(list, function(i, e) {
str += '<option value="' + i + '" ' + (i == val ? 'selected' : '') + '>' + e + '</option>'
})
} else {
$.each(list, function(i, e) {
str += '<option value="' + i + '">' + e + '</option>'
})
}
parent.html(str);
form.render('select');
});
}
function getMenberPosition(fun) { //获取职级列表
ajax({
type: 'get',
info: '获取会员职级列表',
url: url_MenberPosition,
success: function(data) {
if(fun && typeof fun == 'function') {
fun(data.data, data);
}
}
})
}
function getMenberPositionOption(parent, val) {
getMenberPosition(function(list) {
var str = '';
if(val) {
$.each(list, function(i, e) {
str += '<option value="' + i + '" ' + (i == val ? 'selected' : '') + '>' + e + '</option>'
})
} else {
$.each(list, function(i, e) {
str += '<option value="' + i + '">' + e + '</option>'
})
}
parent.html(str);
form.render('select');
});
}
function setOption(parent, str) {
parent.html(str);
form.render('select');
}
/*
获取seek 表单参数
* */
function getSeekForm(form) {
var o = {};
var f = form || $('form').eq(0);
if(f.Length < 0) {
return o;
}
var keys = $('select[name="seekType"]'),
vals = $('input[name="seekVal"]')
if(keys.length > 0) {
if(vals.length > 0) {
o[keys.val()] = vals.val();
} else {
o[keys.val()] = '';
}
}
f.find('input,select').not(keys).not(vals).each(function(i, e) {
// if($(e).attr('name')){
o[$(e).attr('name')] = $(e).val();
// }
})
return o;
}
function eachNullEmpty(o) {
if(!!!o) {
return false;
}
if(typeof o != 'object') {
return false;
}
$.each(o, function(i, e) {
if(!!!e || i == 'seekVal' || i == 'seekType') {
delete o[i];
}
});
console.log(o);
}
/*
录单
*/ | 9391820083d99cbbb0cefc2aff89c3eef4f8da74 | [
"JavaScript"
] | 3 | JavaScript | r0o0en/maotai-backstage | ecad742170d118fcb1fc419f209d3c94d0d0667f | 2f9b850935ba61c2a4e2340913a9855cde4e6808 |
refs/heads/master | <file_sep>import {Component} from '@angular/core';
@Component({
templateUrl: './app/products/products.html',
styleUrls: ['./app/products/products.css']
})
export class ProductTemplate { }<file_sep>import {Component} from '@angular/core';
@Component({
templateUrl: './app/template/template.html',
styleUrls: ['./app/template/template.css']
})
export class TemplateComponent { }<file_sep>import {UsersList} from './usersList';
export const UsersRoutes = [
{
path: '/Users',
component: UsersList
}
];<file_sep>import {Component} from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { UserService } from './userService'
import { User } from './user'
@Component({
templateUrl: './app/users/users.html',
styleUrls: ['./app/users/users.css'],
providers: [UserService]
})
export class UsersList {
constructor(private _userService: UserService) { }
users: Observable<User[]>;
ngOnInit() {
this.users = this._userService.getUsers(0);
}
}<file_sep>import {TemplateComponent} from './template';
export const TemplateRoutes = [
{
path: '/Template',
component: TemplateComponent
}
];<file_sep>//angular imports
import { bootstrap } from '@angular/platform-browser-dynamic';
import { Location, LocationStrategy, HashLocationStrategy } from '@angular/common';
import { APP_ROUTER_PROVIDERS } from './app.routes';
import { HTTP_PROVIDERS } from '@angular/http';
import { Title } from '@angular/platform-browser';
import { enableProdMode } from '@angular/core';
// project imports
import { AppComponent } from "./app";
// Extend Observable throughout the app
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/mergeMap';
import 'rxjs/add/observable/of';
import 'rxjs/add/operator/do';
import 'rxjs/add/operator/toPromise';
enableProdMode();
bootstrap(AppComponent, [
APP_ROUTER_PROVIDERS,
HTTP_PROVIDERS,
{ provide: LocationStrategy, useClass: HashLocationStrategy }
])
.catch(err => console.error(err));<file_sep>import { Component } from '@angular/core';
import { ROUTER_DIRECTIVES } from '@angular/router';
@Component({
selector: 'app-main',
templateUrl:'./app/app.html',
directives: [ROUTER_DIRECTIVES]
})
export class AppComponent {
activateTab(elem: Element) {
elem.className = 'active';
}
deactivateTab(elem: Element) {
elem.className = '';
}
}<file_sep>import { provideRouter, RouterConfig } from '@angular/router';
import { UsersRoutes } from './users/user.routes';
import { ProductRoutes } from './products/products.routes';
import { WelcomeComponent } from './welcome';
export const routes: RouterConfig = [
{ path: '/Welcome', component: WelcomeComponent },
...UsersRoutes,
...ProductRoutes
];
export const APP_ROUTER_PROVIDERS = [
provideRouter(routes)
];<file_sep>import {Injectable} from '@angular/core'
import {Http} from '@angular/http'
import {Observable} from 'rxjs/Observable'
import {User} from './user'
import 'rxjs/add/operator/map';
@Injectable()
export /**
* UserService
*/
class UserService {
constructor(private _http: Http) { }
getUsers(sinceValue: number): Observable<User[]> {
return this._http.get('https://api.github.com/users?per_page=10&since=' + sinceValue)
.map(res => <User[]>res.json());
}
getUserDetail(login: string): Observable<User> {
return this._http.get('https://api.github.com/users/' + login)
.map(res => <User>res.json());
}
getUserRepos(login: string): Observable<User> {
return this._http.get('https://api.github.com/users/' + login + '/repos')
.map(res => <User>res.json());
}
getUserFollowers(login: string, perPage: number, pageNumber: number): Observable<User> {
return this._http.get('https://api.github.com/users/' + login + '/followers?per_page=' + perPage + '&page=' + pageNumber)
.map(res => <User>res.json());
}
getUserFollowing(login: string, perPage: number, pageNumber: number): Observable<User> {
return this._http.get('https://api.github.com/users/' + login + '/following?per_page=' + perPage + '&page=' + pageNumber)
.map(res => <User>res.json());
}
}<file_sep>import {ProductTemplate} from './products';
export const ProductRoutes = [
{
path: '/Products',
component: ProductTemplate
}
]; | 64c5cfc6b1958d16f23b557ecb85879f5a0bb7f0 | [
"TypeScript"
] | 10 | TypeScript | karthikchintala64/Angular2SharePointAppWorking | cc8f22256af561f8304520df4e30f2b365270be8 | 282f875f67723a6e4913155515fd0de7045f6f5b |
refs/heads/master | <repo_name>Hari-55/My-Cat<file_sep>/app/src/main/java/com/hari/mycat/presentation/cat/CatViewModel.kt
package com.hari.mycat.presentation.cat
import androidx.lifecycle.ViewModel
import androidx.lifecycle.viewModelScope
import com.hari.mycat.domain.cat.entities.CatEntity
import com.hari.mycat.domain.cat.usecases.GetCatImageUseCase
import com.hari.mycat.utils.NetworkHelper
import com.hari.mycat.utils.Result
import com.hari.mycat.utils.resolveError
import dagger.hilt.android.lifecycle.HiltViewModel
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.flow.*
import kotlinx.coroutines.launch
import javax.inject.Inject
@HiltViewModel
class CatViewModel @Inject constructor(
private val getCatImageUseCase: GetCatImageUseCase,
private val networkHelper: NetworkHelper
) : ViewModel() {
private val _catImage = MutableStateFlow<Result<CatEntity>>(Result.Loading)
val catImage: StateFlow<Result<CatEntity>> get() = _catImage
//If use Live Data
// private val _catImageLive= MutableLiveData<Result<CatEntity>>()
// val catImageLive : LiveData<Result<CatEntity>> get() = _catImageLive
fun getCatImage() {
viewModelScope.launch(Dispatchers.IO) {
if (networkHelper.isNetworkConnected()) {
getCatImageUseCase()
.onStart {
_catImage.value = Result.Loading
//_catImageLive.value = Result.Loading
}
.catch { e ->
if (e is Exception) _catImage.value = Result.Error(resolveError(e))
else _catImage.value = Result.Error("Maaf, mohon coba lagi")
// if (e is Exception) _catImageLive.value = Result.Error(resolveError(e))
// else _catImageLive.value = Result.Error("Maaf, mohon coba lagi")
}
.collect {
_catImage.value = it
// _catImageLive.value = it
}
} else {
_catImage.value = Result.Error("Internet Anda Mati")
}
}
}
}<file_sep>/app/src/main/java/com/hari/mycat/utils/Utils.kt
package com.hari.mycat.utils
import android.accounts.NetworkErrorException
import android.util.Log
import java.net.ConnectException
import java.net.SocketTimeoutException
import java.net.UnknownHostException
fun Int.errorCodeNetwork(moreMessage: String?) =
when (this) {
400 -> "Masalah validasi pengiriman, $moreMessage"
401 -> "Terdapat Masalah Kredensial, $moreMessage"
403 -> "Tidak berhak mengakses data, $moreMessage"
404 -> "Tidak dapat menemukan data, $moreMessage"
500 -> "Terdapat Masalah pada Server, $moreMessage"
503 -> "Maaf server sedang Maintenace, $moreMessage"
else -> "Maaf, mohon coba lagi"
}
fun resolveError(e: Exception): String =
when (e) {
is SocketTimeoutException -> "internet lambat, coba lagi"
is ConnectException -> "internet akses tidak diizinkan, coba lagi"
is UnknownHostException -> "Server sibuk, coba lagi nanti"
is NetworkErrorException -> "Masalah jaringan, coba lagi"
else -> {
Log.i("networkError", e.message ?: "")
"Maaf, mohon coba lagi"
}
}<file_sep>/app/src/main/java/com/hari/mycat/data/cat/models/mapper/CatMapper.kt
package com.hari.mycat.data.cat.models.mapper
import com.hari.mycat.data.cat.models.CatModel
import com.hari.mycat.domain.cat.entities.CatEntity
import com.hari.mycat.utils.EntityMapper
import javax.inject.Inject
import javax.inject.Singleton
@Singleton
class CatMapper @Inject constructor() : EntityMapper<CatEntity, CatModel> {
override fun mapToEntity(domainModel: CatModel) = CatEntity(
imageCat = domainModel.link
)
}<file_sep>/app/src/main/java/com/hari/mycat/domain/cat/CatRepository.kt
package com.hari.mycat.domain.cat
import com.hari.mycat.domain.cat.entities.CatEntity
import com.hari.mycat.utils.Result
import kotlinx.coroutines.flow.Flow
interface CatRepository {
fun getImageCat(): Flow<Result<CatEntity>>
}<file_sep>/app/src/main/java/com/hari/mycat/data/cat/repository/CatRepositoryImpl.kt
package com.hari.mycat.data.cat.repository
import com.hari.mycat.data.cat.datasources.netwoks.CatApi
import com.hari.mycat.data.cat.models.mapper.CatMapper
import com.hari.mycat.domain.cat.CatRepository
import com.hari.mycat.domain.cat.entities.CatEntity
import com.hari.mycat.utils.Result
import com.hari.mycat.utils.errorCodeNetwork
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.flow
import kotlinx.coroutines.flow.flowOn
import javax.inject.Inject
class CatRepositoryImpl @Inject constructor(
private val catApi: CatApi,
private val catMapper: CatMapper
) : CatRepository {
override fun getImageCat(): Flow<Result<CatEntity>> = flow {
val response = catApi.getImageCat()
if (response.isSuccessful) {
emit(Result.Data(catMapper.mapToEntity(response.body()!!)))
}
if (response.code() == 400) Result.Error(response.code().errorCodeNetwork(response.errorBody()?.string()))
else Result.Error(response.code().errorCodeNetwork(""))
}.flowOn(Dispatchers.IO)
}<file_sep>/app/src/main/java/com/hari/mycat/presentation/common/extension/ViewExt.kt
package com.hari.mycat.presentation.common.extension
import android.view.View
fun View.gone() {
visibility = View.GONE
}
fun View.visible() {
visibility = View.VISIBLE
}
fun View.invisible() {
visibility = View.INVISIBLE
}
fun View.isEnable(check: Boolean) {
isEnabled = check
}<file_sep>/app/src/main/java/com/hari/mycat/presentation/common/extension/ContextExt.kt
package com.hari.mycat.presentation.common.extension
import android.app.AlertDialog
import android.content.Context
import android.widget.Toast
import com.hari.mycat.R
fun Context.showToastShort(message: String) {
Toast.makeText(this, message, Toast.LENGTH_SHORT).show()
}
fun Context.showToastLong(message: String) {
Toast.makeText(this, message, Toast.LENGTH_LONG).show()
}
fun Context.showGenericAlertDialog(message: String) {
AlertDialog.Builder(this).apply {
setMessage(message)
setPositiveButton(getString(R.string.title_ok)) { dialog, _ -> dialog.dismiss() }
}.show()
}<file_sep>/app/src/main/java/com/hari/mycat/data/cat/datasources/netwoks/CatApi.kt
package com.hari.mycat.data.cat.datasources.netwoks
import com.hari.mycat.data.cat.models.CatModel
import retrofit2.Response
import retrofit2.http.GET
interface CatApi {
@GET("img/cat")
suspend fun getImageCat(): Response<CatModel>
}<file_sep>/app/src/main/java/com/hari/mycat/data/common/modules/NetworkModule.kt
package com.hari.mycat.data.common.modules
import android.content.Context
import com.hari.mycat.BuildConfig
import dagger.Module
import dagger.Provides
import dagger.hilt.InstallIn
import dagger.hilt.android.qualifiers.ApplicationContext
import dagger.hilt.components.SingletonComponent
import okhttp3.Cache
import okhttp3.Interceptor
import okhttp3.OkHttpClient
import okhttp3.logging.HttpLoggingInterceptor
import retrofit2.Retrofit
import retrofit2.converter.gson.GsonConverterFactory
import java.io.File
import java.util.concurrent.TimeUnit
import javax.inject.Singleton
@InstallIn(SingletonComponent::class)
@Module
object NetworkModule {
@Singleton
@Provides
fun provideRetrofit(okHttpClient: OkHttpClient): Retrofit =
Retrofit.Builder().apply {
addConverterFactory(GsonConverterFactory.create())
client(okHttpClient)
baseUrl(BuildConfig.BASE_URL_CAT)
}.build()
private val READ_TIMEOUT = 30
private val WRITE_TIMEOUT = 30
private val CONNECTION_TIMEOUT = 10
private val CACHE_SIZE_BYTES = 10 * 1024 * 1024L // 10 MB
@Singleton
@Provides
fun provideOkHttpClient(
headerInterceptor: Interceptor,
cache: Cache
): OkHttpClient = OkHttpClient().newBuilder().apply {
connectTimeout(CONNECTION_TIMEOUT.toLong(), TimeUnit.SECONDS)
readTimeout(READ_TIMEOUT.toLong(), TimeUnit.SECONDS)
writeTimeout(WRITE_TIMEOUT.toLong(), TimeUnit.SECONDS)
cache(cache)
addInterceptor(headerInterceptor)
}.build()
@Singleton
@Provides
fun provideHeaderInterceptor(): Interceptor = HttpLoggingInterceptor().setLevel(HttpLoggingInterceptor.Level.BODY)
@Singleton
@Provides
internal fun provideCache(@ApplicationContext context: Context): Cache {
val httpCacheDirectory = File(context.cacheDir.absolutePath, "HttpCache")
return Cache(httpCacheDirectory, CACHE_SIZE_BYTES)
}
}<file_sep>/app/src/main/java/com/hari/mycat/presentation/cat/CatActivity.kt
package com.hari.mycat.presentation.cat
import android.os.Bundle
import android.text.TextUtils
import androidx.activity.viewModels
import androidx.appcompat.app.AppCompatActivity
import androidx.lifecycle.Lifecycle
import androidx.lifecycle.lifecycleScope
import androidx.lifecycle.repeatOnLifecycle
import com.bumptech.glide.Glide
import com.hari.mycat.R
import com.hari.mycat.databinding.ActivityCatBinding
import com.hari.mycat.presentation.common.extension.*
import com.hari.mycat.utils.Result
import dagger.hilt.android.AndroidEntryPoint
import kotlinx.coroutines.flow.collect
import kotlinx.coroutines.launch
@AndroidEntryPoint
class CatActivity : AppCompatActivity() {
private lateinit var binding: ActivityCatBinding
private val viewModel by viewModels<CatViewModel>()
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
binding = ActivityCatBinding.inflate(layoutInflater)
setContentView(binding.root)
initViews()
observeImageCat()
}
private fun initViews() {
binding.btnChangeCat.setOnClickListener {
viewModel.getCatImage()
}
}
private fun observeImageCat() {
lifecycleScope.launch {
lifecycle.repeatOnLifecycle(Lifecycle.State.RESUMED) {
viewModel.getCatImage()
viewModel.catImage.collect {
when (it) {
is Result.Loading -> renderLoading(true)
is Result.Data -> renderCatImage(it.data.imageCat)
is Result.Error -> {
renderLoading(false)
showToastLong(it.message)
}
}
}
}
}
}
//if use LiveData
// private fun observeLiveData(){
// viewModel.getCatImage()
// viewModel.catImageLive.observe(this) { result ->
// when (result) {
// is Result.Loading -> renderLoading(true)
// is Result.Data -> renderCatImage(result.data.imageCat)
// is Result.Error -> {
// renderLoading(false)
// showToastLong(result.message)
// }
// }
// }
// }
private fun renderCatImage(pathImgCat: String?) {
showToastShort(getString(R.string.title_loading_image_cat))
if (!TextUtils.isEmpty(pathImgCat)) {
Glide
.with(this)
.load(pathImgCat)
.placeholder(getDrawable(R.drawable.bg_cat_waiting))
.fallback(getDrawable(R.drawable.ic_not_found))
.error(getDrawable(R.drawable.ic_not_found))
.into(binding.ivCat)
renderLoading(false)
} else {
showToastShort(getString(R.string.title_empty_url))
}
}
private fun renderLoading(isLoading: Boolean) {
with(binding) {
if (isLoading) {
loading.visible()
btnChangeCat.isEnable(false)
} else {
loading.gone()
btnChangeCat.isEnable(true)
}
}
}
}<file_sep>/app/src/main/java/com/hari/mycat/domain/cat/usecases/GetCatImageUseCase.kt
package com.hari.mycat.domain.cat.usecases
import com.hari.mycat.domain.cat.CatRepository
import com.hari.mycat.domain.cat.entities.CatEntity
import com.hari.mycat.utils.Result
import kotlinx.coroutines.flow.Flow
import javax.inject.Inject
class GetCatImageUseCase @Inject constructor(private val catRepository: CatRepository) {
operator fun invoke(): Flow<Result<CatEntity>> = catRepository.getImageCat()
}<file_sep>/app/src/main/java/com/hari/mycat/domain/cat/entities/CatEntity.kt
package com.hari.mycat.domain.cat.entities
data class CatEntity(val imageCat: String? = null)
<file_sep>/app/src/main/java/com/hari/mycat/data/cat/models/CatModel.kt
package com.hari.mycat.data.cat.models
import com.google.gson.annotations.SerializedName
data class CatModel(
@field:SerializedName("link")
val link: String? = null
)<file_sep>/settings.gradle
rootProject.name = "MyCatStart"
include ':app'
<file_sep>/app/src/main/java/com/hari/mycat/utils/EntityMapper.kt
package com.hari.mycat.utils
interface EntityMapper<Entity, DataModel> {
fun mapToEntity(domainModel: DataModel): Entity
}<file_sep>/app/src/main/java/com/hari/mycat/utils/NetworkHelper.kt
package com.hari.mycat.utils
import android.content.Context
import android.net.ConnectivityManager
import android.net.NetworkCapabilities
import android.os.Build
import dagger.hilt.android.qualifiers.ApplicationContext
import javax.inject.Inject
import javax.inject.Singleton
@Singleton
class NetworkHelper @Inject constructor(@ApplicationContext private val context: Context) {
fun isNetworkConnected(): Boolean {
val cm = context.getSystemService(Context.CONNECTIVITY_SERVICE) as ConnectivityManager
return if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
val network = cm.activeNetwork ?: return false
val networkCapabilities = cm.getNetworkCapabilities(network) ?: return false
networkCapabilities.hasCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET) &&
networkCapabilities.hasCapability(NetworkCapabilities.NET_CAPABILITY_VALIDATED)
} else {
val status = cm.activeNetworkInfo ?: return false
status.isConnected
}
}
}<file_sep>/app/src/main/java/com/hari/mycat/data/cat/CatModule.kt
package com.hari.mycat.data.cat
import com.hari.mycat.data.cat.datasources.netwoks.CatApi
import com.hari.mycat.data.cat.models.mapper.CatMapper
import com.hari.mycat.data.cat.repository.CatRepositoryImpl
import com.hari.mycat.data.common.modules.NetworkModule
import com.hari.mycat.domain.cat.CatRepository
import dagger.Module
import dagger.Provides
import dagger.hilt.InstallIn
import dagger.hilt.components.SingletonComponent
import retrofit2.Retrofit
import javax.inject.Singleton
@InstallIn(SingletonComponent::class)
@Module(includes = [NetworkModule::class])
object CatModule {
@Singleton
@Provides
fun provideCatApi(retrofit: Retrofit): CatApi = retrofit.create(CatApi::class.java)
@Singleton
@Provides
fun provideCatRepository(
catApi: CatApi,
catMapper: CatMapper
): CatRepository = CatRepositoryImpl(catApi, catMapper)
}<file_sep>/README.md
# My-Cat
App For Learning DI using Hilt
the app was made just to show random image cat
# Hilt
check documentation hilt in here https://dagger.dev/hilt/
<file_sep>/app/src/main/java/com/hari/mycat/utils/Result.kt
package com.hari.mycat.utils
sealed class Result<out T> {
object Loading : Result<Nothing>()
data class Error(var message: String) : Result<Nothing>()
data class Data<T>(var data: T) : Result<T>()
}
| ac6b6b223e0c56c9a0e3fd4e7133a03e6f8fa7a0 | [
"Markdown",
"Kotlin",
"Gradle"
] | 19 | Kotlin | Hari-55/My-Cat | c4c9c1540d905c4e39f946516840ef3667e9ee9b | b6ee14ed1ae8c33f240943dfc4da043c4a77730c |
refs/heads/main | <file_sep>package contract
type HabrDownloaderService interface {
DownloadBestTitles() ([]string, error)
}
<file_sep>module github.com/kshvyryaev/cyber-meower-habr-worker
go 1.17
require (
github.com/pkg/errors v0.9.1
go.uber.org/zap v1.19.1
)
require (
github.com/google/wire v0.5.0
github.com/kshvyryaev/cyber-meower-proto v1.0.0
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
google.golang.org/grpc v1.42.0
)
require (
github.com/golang/protobuf v1.5.0 // indirect
github.com/kelseyhightower/envconfig v1.4.0
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect
golang.org/x/text v0.3.3 // indirect
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect
google.golang.org/protobuf v1.27.1 // indirect
)
<file_sep>package service
import (
"encoding/xml"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
)
const _rssUrl = "https://habrahabr.ru/rss/best/"
type habrRss struct {
Items []habrItem `xml:"channel>item"`
}
type habrItem struct {
Title string `xml:"title"`
}
type HabrDownloaderService struct {
}
func ProvideHabrDownloaderService() *HabrDownloaderService {
return &HabrDownloaderService{}
}
func (service *HabrDownloaderService) DownloadBestTitles() ([]string, error) {
response, err := http.Get(_rssUrl)
if err != nil {
return nil, errors.Wrap(err, "habr downloader service")
}
defer response.Body.Close()
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, errors.Wrap(err, "habr downloader service")
}
rss := &habrRss{}
if err = xml.Unmarshal(body, rss); err != nil {
return nil, errors.Wrap(err, "habr downloader service")
}
titles := make([]string, 0, len(rss.Items))
for _, item := range rss.Items {
titles = append(titles, item.Title)
}
return titles, nil
}
<file_sep>package pkg
import (
"github.com/kelseyhightower/envconfig"
"github.com/pkg/errors"
)
type Config struct {
MeowerServiceAddress string `envconfig:"MEOWER_SERVICE_ADDRESS"`
}
func ProvideConfig() (*Config, error) {
var config Config
err := envconfig.Process("", &config)
if err != nil {
return nil, errors.Wrap(err, "config")
}
return &config, nil
}
<file_sep>FROM golang:1.17
WORKDIR /app
COPY go.mod ./
COPY go.sum ./
RUN go mod download
COPY /cmd ./cmd
COPY /pkg ./pkg
RUN go build -o ./cyber-meower-habr-worker ./cmd
CMD [ "./cyber-meower-habr-worker" ]<file_sep>//go:build wireinject
// +build wireinject
package di
import (
"github.com/google/wire"
"github.com/kshvyryaev/cyber-meower-habr-worker/pkg"
"github.com/kshvyryaev/cyber-meower-habr-worker/pkg/client"
"github.com/kshvyryaev/cyber-meower-habr-worker/pkg/contract"
"github.com/kshvyryaev/cyber-meower-habr-worker/pkg/service"
"github.com/kshvyryaev/cyber-meower-habr-worker/pkg/worker"
"go.uber.org/zap"
)
func InitializeHabrDownloaderWorker(logger *zap.Logger) (*worker.HabrDownloaderWorker, func()) {
panic(wire.Build(
service.ProvideHabrDownloaderService,
wire.Bind(new(contract.HabrDownloaderService), new(*service.HabrDownloaderService)),
worker.ProvideHabrDownloaderWorker,
))
}
func InitializeHabrUploaderWorker(config *pkg.Config, logger *zap.Logger) (*worker.HabrUploaderWorker, func(), error) {
panic(wire.Build(
client.ProvideMeowerServiceGrpcConnection,
client.ProvideGrpcMeowClient,
wire.Bind(new(contract.MeowClient), new(*client.GrpcMeowClient)),
worker.ProvideHabrUploaderWorker,
))
}
<file_sep>package contract
type MeowClient interface {
Create(body string) error
}
<file_sep>package worker
import (
"sync"
"github.com/kshvyryaev/cyber-meower-habr-worker/pkg/contract"
"go.uber.org/zap"
)
type HabrUploaderWorker struct {
meowClient contract.MeowClient
logger *zap.Logger
}
func ProvideHabrUploaderWorker(meowClient contract.MeowClient, logger *zap.Logger) *HabrUploaderWorker {
return &HabrUploaderWorker{
meowClient: meowClient,
logger: logger,
}
}
func (worker *HabrUploaderWorker) Run(channel chan []string, wg *sync.WaitGroup) {
defer wg.Done()
for titles := range channel {
worker.logger.Info("uploading best titles started")
for _, title := range titles {
err := worker.meowClient.Create(title)
if err != nil {
worker.logger.Error("title didn't create", zap.String("body", title), zap.Error(err))
}
worker.logger.Info("title created", zap.String("body", title))
}
worker.logger.Info("uploading best titles finished")
}
}
<file_sep>package client
import (
"github.com/kshvyryaev/cyber-meower-habr-worker/pkg"
"github.com/pkg/errors"
"google.golang.org/grpc"
)
func ProvideMeowerServiceGrpcConnection(config *pkg.Config) (*grpc.ClientConn, func(), error) {
connection, err := grpc.Dial(config.MeowerServiceAddress, grpc.WithInsecure())
if err != nil {
return nil, nil, errors.Wrap(err, "meower service grpc connection")
}
cleanup := func() {
connection.Close()
}
return connection, cleanup, nil
}
<file_sep>package main
import (
"sync"
"github.com/kshvyryaev/cyber-meower-habr-worker/pkg"
"github.com/kshvyryaev/cyber-meower-habr-worker/pkg/di"
)
func main() {
config, err := pkg.ProvideConfig()
if err != nil {
panic("cannot initialize config: " + err.Error())
}
logger, loggerCleanup, err := pkg.ProvideZap()
if err != nil {
panic("cannot initialize zap logger: " + err.Error())
}
defer loggerCleanup()
downloader, downloaderCleanup := di.InitializeHabrDownloaderWorker(logger)
defer downloaderCleanup()
uploader, uploaderCleanup, err := di.InitializeHabrUploaderWorker(config, logger)
if err != nil {
panic("cannot uploader: " + err.Error())
}
defer uploaderCleanup()
wg := &sync.WaitGroup{}
wg.Add(1)
go downloader.Run(wg)
wg.Add(1)
go uploader.Run(downloader.GetChannel(), wg)
wg.Wait()
}
<file_sep># Habr worker
Worker for uploading meow messages from habr
## Build docker
`docker build --tag cyber-meower-habr-worker .`
<file_sep>package client
import (
"context"
"time"
"github.com/kshvyryaev/cyber-meower-proto/pkg/proto"
"github.com/pkg/errors"
"google.golang.org/grpc"
)
type GrpcMeowClient struct {
client proto.MeowServiceClient
}
func ProvideGrpcMeowClient(meowerServiceConnection *grpc.ClientConn) *GrpcMeowClient {
client := proto.NewMeowServiceClient(meowerServiceConnection)
return &GrpcMeowClient{
client: client,
}
}
func (client *GrpcMeowClient) Create(body string) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
request := &proto.CreateMeowRequest{
Body: body,
}
_, err := client.client.Create(ctx, request)
if err != nil {
return errors.Wrap(err, "grpc meow client")
}
return nil
}
<file_sep>package worker
import (
"sync"
"time"
"github.com/kshvyryaev/cyber-meower-habr-worker/pkg/contract"
"go.uber.org/zap"
)
type HabrDownloaderWorker struct {
habrDownloader contract.HabrDownloaderService
logger *zap.Logger
channel chan []string
}
func ProvideHabrDownloaderWorker(
habrDownloader contract.HabrDownloaderService,
logger *zap.Logger) (*HabrDownloaderWorker, func()) {
channel := make(chan []string)
cleanup := func() {
close(channel)
}
return &HabrDownloaderWorker{
habrDownloader: habrDownloader,
logger: logger,
channel: channel,
}, cleanup
}
func (worker *HabrDownloaderWorker) Run(wg *sync.WaitGroup) {
defer wg.Done()
// TODO: Move it to configuration
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for range ticker.C {
worker.logger.Info("downloading best titles started")
titles, err := worker.habrDownloader.DownloadBestTitles()
if err != nil {
worker.logger.Error("downloading best titles finished with errors: ", zap.Error(err))
}
worker.logger.Info("downloading best titles finished succesfuly")
worker.channel <- titles
worker.logger.Info("best titles sent to channel")
}
}
func (worker *HabrDownloaderWorker) GetChannel() chan []string {
return worker.channel
}
| 8955371ea759e3df01b0896db60fb7792fa5b524 | [
"Markdown",
"Go Module",
"Go",
"Dockerfile"
] | 13 | Go | kshvyryaev/cyber-meower-habr-worker | 283a182097d6e57417f8a9f4019a7284f011627e | 5557fdfed63d51c7f986a81d9cd4df8176679516 |
refs/heads/master | <file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
// Global audio helper: a scene-persistent singleton exposing three
// AudioSource channels (looping SFX, one-shot SFX, background music)
// so any script can trigger sounds via SoundManager.soundManager.
public class SoundManager : MonoBehaviour
{
    // Singleton handle; assigned in Awake and kept alive across scene loads.
    public static SoundManager soundManager = null;

    public AudioSource loopSfx;   // channel for continuously looping effects
    public AudioSource singleSfx; // channel for one-shot effects
    public AudioSource bgSfx;     // channel for background music

    void Awake()
    {
        // Standard Unity singleton: keep the first instance, discard extras.
        if (soundManager == null)
        {
            // If none exists yet, this instance becomes the singleton.
            soundManager = this;
        }
        else if (soundManager != this)
        {
            // A SoundManager already exists: destroy this duplicate and
            // return immediately.  (The original fell through and called
            // DontDestroyOnLoad on the object it had just scheduled for
            // destruction; harmless in practice but misleading.)
            Destroy(gameObject);
            return;
        }

        // Survive scene transitions so audio keeps playing uninterrupted.
        DontDestroyOnLoad(gameObject);
    }

    // Replaces whatever is on the looping channel with <clip> and starts it.
    public void PlayLoopingSfx(AudioClip clip)
    {
        loopSfx.clip = clip;
        loopSfx.Play();
    }

    // Plays <clip> once on the one-shot channel, cutting off any prior clip.
    public void PlaySingleSfx(AudioClip clip)
    {
        singleSfx.clip = clip;
        singleSfx.Play();
    }
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.UI;
// Root in-game menu.  Intended as a persistent singleton; opens/closes
// sub-menus (inventory, save confirmation) and enables/disables its own
// buttons while a sub-menu is showing.
public class MainMenu : MonoBehaviour
{
    // BUG FIX: the original singleton guard compared this *instance* field
    // against null, which is trivially true for every freshly created
    // MainMenu, so duplicate menus were never destroyed.  Track the live
    // singleton in a private static field instead, while keeping the public
    // instance field so any existing callers still see it populated.
    private static MainMenu instance = null;
    public MainMenu menuInstance = null;

    public GameObject[] subMenus;   // [0] = inventory panel, [1] = save-confirmation panel
    public Component[] buttons;     // child buttons cached in OnEnable
    public GameObject currentMenu;  // panel currently shown
    public GameObject previousMenu; // panel to restore when a sub-menu closes

    private void Awake()
    {
        if (instance == null)
        {
            instance = this;
        }
        else if (instance != this)
        {
            // A menu already exists; discard this duplicate and stop here so
            // we don't mark a doomed object DontDestroyOnLoad.
            Destroy(gameObject);
            return;
        }

        menuInstance = this;
        DontDestroyOnLoad(gameObject);
    }

    private void OnEnable()
    {
        // Cache all child buttons so sub-menus can toggle them as a group.
        buttons = GetComponentsInChildren<Button>();
        currentMenu = this.gameObject;
        previousMenu = this.gameObject;
    }

    // Opens the inventory sub-menu, disables the main buttons, and fills the
    // panel's Text with one "- <item>" line per non-null inventory entry.
    public void ShowInventory()
    {
        subMenus[0].SetActive(true);
        currentMenu = subMenus[0];
        foreach (Button b in buttons)
        {
            b.interactable = false;
        }

        // Pull the item names from the scene's Inventory component.
        Inventory inventory = FindObjectOfType<Inventory>();
        List<string> items = inventory.GetItems();
        subMenus[0].GetComponentInChildren<Text>().text = "";
        foreach (string i in items)
        {
            if (i != null)
                subMenus[0].GetComponentInChildren<Text>().text = subMenus[0].GetComponentInChildren<Text>().text + "- " + i + "\n";
        }
    }

    // Exits the application (no effect in the Unity editor).
    public void QuitGame()
    {
        Application.Quit();
    }

    // Asks the GameManager to save; on success shows the confirmation panel.
    public void SaveGame()
    {
        if (GameManager.instance.SaveGame())
        {
            subMenus[1].SetActive(true);
            currentMenu = subMenus[1];
        }
    }

    // Closes the current sub-menu, restores the previous one as current, and
    // re-enables the main buttons.
    // NOTE(review): with more future sub-menus that own buttons, this would be
    // easier with a base Menu class holding the button group.
    public void DisableSubmenu()
    {
        currentMenu.SetActive(false);
        currentMenu = previousMenu;
        foreach (Button b in buttons)
        {
            b.interactable = true;
        }
    }
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using Yarn.Unity;
/// <summary>
/// Serializable snapshot of a play session, written to disk by GameManager.
/// </summary>
[System.Serializable]
public class Save
{
    // Name of the scene that was active when the game was saved.
    public string currentScene = "";
    // Item names held by the player (string-based inventory).
    public List<string> currentInventory = new List<string>();
    // Player world position at save time.
    public float playerX = 0;
    public float playerY = 0;
    // Yarn dialogue variables, flattened into a serializable form.
    public List<ExampleVariableStorage.SaveVariable> dialogueVars = new List<ExampleVariableStorage.SaveVariable>();
    // Yarn nodes the player has already visited.
    public IEnumerable<string> nodesVisited;
    // Per-scene load-condition statuses (e.g. whether pickups should reappear).
    public List<SceneItemManager.LoadCondition> sceneVars = new List<SceneItemManager.LoadCondition>();
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using Yarn.Unity;
namespace Yarn.Unity.Example
{
/// <summary>
/// Pickup item that records itself in Yarn dialogue state ("$taken_veg"),
/// marks its scene-load condition false so it won't reappear, and plays a
/// pickup sound.
/// </summary>
public class Vegetable : IInventoryItem
{
    // Sound effect played when the item is collected.
    public AudioClip pickupSfx;

    // Display/inventory name for this item.
    public string Name
    {
        get
        {
            return "Vegetable";
        }
    }

    public override void OnPickup()
    {
        // Base behaviour hides the scene object.
        base.OnPickup();
        // Let conversations react to the pickup.
        FindObjectOfType<DialogueRunner>().GetComponent<ExampleVariableStorage>().SetValue("$taken_veg", new Yarn.Value(true));
        // Prevent this item's additive sub-scene from loading again.
        SceneItemManager.sceneItems.loadConditionStatuses[loadConditionVar] = false;
        SoundManager.soundManager.PlaySingleSfx(pickupSfx);
    }
}
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
namespace Yarn.Unity.Example
{
/// <summary>
/// Makes an NPC wander left/right within +/-2 units of its start position,
/// pausing between randomly timed direction decisions. Wandering is suspended
/// during dialogue or while colliding with something (e.g. the player).
/// </summary>
public class NPCWander : MonoBehaviour
{
    [SerializeField]
    private Animator animator;

    private Transform thisTransform;   // cached transform
    private Vector3 startPos;          // anchor point for the wander range
    private Rigidbody2D rb;

    // Movement speed of the object
    public float moveSpeed = 0.2f;

    // Min and max time for taking a decision
    public Vector2 decisionTime = new Vector2(1, 4);
    private float decisionTimeCount = 0;

    private bool colliding = false;

    // Possible directions to move in; the two zero entries make "stand still"
    // twice as likely as either walk direction.
    public Vector3[] moveDirections = new Vector3[] { Vector3.right, Vector3.left, Vector3.zero, Vector3.zero };
    private int currentMoveDirection;

    // Start is called before the first frame update
    void Start()
    {
        // Cache the transform for quicker access
        thisTransform = this.transform;
        startPos = thisTransform.position;
        rb = gameObject.GetComponent<Rigidbody2D>();

        // Set random time delay for taking a decision
        decisionTimeCount = Random.Range(decisionTime.x, decisionTime.y);
        ChooseMoveDirection();
    }

    // Prevent the NPC continuing to try and wander if for example the player is standing in the way.
    private void OnCollisionEnter2D(Collision2D collision)
    {
        colliding = true;
    }

    private void OnCollisionExit2D(Collision2D collision)
    {
        colliding = false;
    }

    // Update is called once per frame
    void Update()
    {
        // Freeze while in dialogue or while touching something.
        // NOTE(review): FindObjectOfType every frame is relatively expensive --
        // consider caching the DialogueRunner reference.
        if (FindObjectOfType<DialogueRunner>().isDialogueRunning == true || colliding)
        {
            return;
        }

        // Walk in the chosen direction while inside the wander range; hitting
        // the edge of the range forces an immediate new decision.
        switch(currentMoveDirection)
        {
            case 0: // right
                if (thisTransform.position.x <= startPos.x + 2.0f)
                {
                    animator.SetInteger("DirectionX", 1);
                    animator.SetInteger("DirectionY", 0);
                    thisTransform.position += moveDirections[currentMoveDirection] * Time.deltaTime * moveSpeed;
                }
                else
                    decisionTimeCount = 0;
                break;
            case 1: // left
                if (thisTransform.position.x >= startPos.x - 2.0f)
                {
                    animator.SetInteger("DirectionX", -1);
                    animator.SetInteger("DirectionY", 0);
                    thisTransform.position += moveDirections[currentMoveDirection] * Time.deltaTime * moveSpeed;
                }
                else
                    decisionTimeCount = 0;
                break;
            default:
                break;
        }

        if (decisionTimeCount > 0)
            decisionTimeCount -= Time.deltaTime;
        else
        {
            // Face idle/down while deciding.
            animator.SetInteger("DirectionX", 0);
            animator.SetInteger("DirectionY", 1);

            // Choose a random time delay for taking a decision
            decisionTimeCount = Random.Range(decisionTime.x, decisionTime.y);

            // Choose a movement direction or stay in place
            ChooseMoveDirection();
        }
    }

    void ChooseMoveDirection()
    {
        // Integer-style Random.Range is max-exclusive, so this indexes the whole array.
        currentMoveDirection = Mathf.FloorToInt(Random.Range(0, moveDirections.Length));
    }
}
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.SceneManagement;
/// <summary>
/// Additively loads per-scene sub-scenes (e.g. pickup scenes) whose load
/// condition is still true, and tracks those conditions for saving.
/// </summary>
public class SceneItemManager : MonoBehaviour
{
    // Singleton handle; reassigned by whichever manager awakes in the current scene.
    public static SceneItemManager sceneItems = null;

    // Maps an additive scene to the condition key gating its load.
    [System.Serializable]
    public class ItemCondition
    {
        public string sceneName;
        public string loadIfThisVarTrue;
    }

    // Serializable key/value pair used when persisting loadConditionStatuses.
    [System.Serializable]
    public class LoadCondition
    {
        public string loadIfThisVarTrue;
        public bool varStatus;
    }

    public ItemCondition[] loadWith;

    // Current status of each load condition; persisted in Save.sceneVars.
    public Dictionary<string, bool> loadConditionStatuses = new Dictionary<string, bool>();

    private void Awake()
    {
        sceneItems = this;
        // Every condition starts true; pickups flip theirs to false.
        foreach (ItemCondition ic in loadWith)
        {
            loadConditionStatuses.Add(ic.loadIfThisVarTrue, true);
        }
    }

    void Start()
    {
        // Additively load each sub-scene whose condition is still true.
        foreach (ItemCondition ic in loadWith)
        {
            if (loadConditionStatuses[ic.loadIfThisVarTrue])
                SceneManager.LoadScene(ic.sceneName, LoadSceneMode.Additive);
        }
    }

    // The empty Update() was removed: Unity invokes it via reflection every
    // frame even when empty, which is pure overhead.
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
/// <summary>
/// Base class for pickup items. (Named like an interface, but it is a
/// MonoBehaviour base class so items can live on scene objects.)
/// </summary>
public class IInventoryItem : MonoBehaviour
{
    // NOTE(review): private auto-property, never assigned or read here;
    // subclasses (e.g. Vegetable) declare their own public Name instead.
    string Name { get; }

    // Key into SceneItemManager.loadConditionStatuses for this item.
    public string loadConditionVar;

    public virtual void OnPickup()
    {
        // Default pickup behaviour: hide the item from the scene.
        gameObject.SetActive(false);
    }
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using Yarn.Unity;
/// <summary>
/// Singleton, string-based player inventory with a fixed number of slots.
/// </summary>
public class Inventory : MonoBehaviour
{
    public static Inventory invInstance = null;

    // Maximum number of items the inventory can hold.
    private const int SLOTS = 9;

    private List<string> mItems = new List<string>();

    private void Awake()
    {
        // Enforce the singleton pattern.
        if (invInstance == null)
        {
            invInstance = this;
        }
        else if (invInstance != this)
        {
            // Duplicate from a scene load: destroy it and don't mark the
            // doomed object persistent (the old code fell through and did).
            Destroy(gameObject);
            return;
        }
        DontDestroyOnLoad(gameObject);
    }

    /// <summary>Add a scene item (by name) if a slot is free, and trigger its pickup.</summary>
    public void AddItem(IInventoryItem item)
    {
        if (mItems.Count < SLOTS)
        {
            mItems.Add(item.name);
            item.OnPickup();
        }
    }

    /// <summary>Add an item name directly (used when restoring a save).</summary>
    public void LoadItem(string item)
    {
        mItems.Add(item);
    }

    /// <summary>
    /// Remove the first item with the given name; exposed to Yarn scripts as
    /// the "giveitem" command.
    /// </summary>
    [YarnCommand("giveitem")]
    public void RemoveItem(string itemToRemove)
    {
        // List<string>.Remove already removes the first matching element and
        // reports whether anything was removed -- no manual scan needed.
        if (mItems.Remove(itemToRemove))
            return;
        Debug.Log(string.Format("Cannot find item {0} to remove!", itemToRemove));
    }

    public void EmptyInventory()
    {
        mItems.Clear();
    }

    public List<string> GetItems()
    {
        return mItems;
    }
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using Yarn.Unity;
//namespace Yarn.Unity.Example
//{
/// <summary>
/// Player movement (Rigidbody2D velocity driven by axis input) plus space-bar
/// interaction with nearby NPCs and pickup items. All control is suspended
/// while a Yarn dialogue is running.
/// </summary>
public class Player : MonoBehaviour
{
    public static Player playerInstance;

    public float speed = 1.0f;

    [SerializeField]
    private Animator animator;

    // Radius within which NPCs/items can be interacted with.
    public float interactionRadius = 2.0f;

    public Inventory inventory;

    //private void Awake()
    //{
    //    if (playerInstance == null)
    //        playerInstance = this;
    //    else if (playerInstance != this)
    //        Destroy(gameObject);
    //    DontDestroyOnLoad(gameObject);
    //}

    private void Start()
    {
        inventory = FindObjectOfType<Inventory>();
    }

    /// Draw the range at which we'll start talking to people.
    void OnDrawGizmosSelected()
    {
        Gizmos.color = Color.blue;

        // Flatten the sphere into a disk, which looks nicer in 2D games
        Gizmos.matrix = Matrix4x4.TRS(transform.position, Quaternion.identity, new Vector3(1, 1, 0));

        // Need to draw at position zero because we set position in the line above
        Gizmos.DrawWireSphere(Vector3.zero, interactionRadius);
    }

    void FixedUpdate()
    {
        // Remove all player control when we're in dialogue.
        // NOTE(review): FindObjectOfType every physics step is costly -- consider caching.
        if (FindObjectOfType<DialogueRunner>().isDialogueRunning == true)
        {
            animator.SetInteger("DirectionX", 0);
            animator.SetInteger("DirectionY", 0);
            return;
        }

        float moveHorizontal = Input.GetAxis("Horizontal");
        float moveVertical = Input.GetAxis("Vertical");

        Vector2 currentVelocity = gameObject.GetComponent<Rigidbody2D>().velocity;

        // Accept input in a direction only when not already moving the opposite
        // way; drives both the new velocity and the facing animation.
        float newVelocityX = 0f;
        if (moveHorizontal < 0 && currentVelocity.x <= 0)
        {
            newVelocityX = -speed;
            animator.SetInteger("DirectionX", -1);
        }
        else if (moveHorizontal > 0 && currentVelocity.x >= 0)
        {
            newVelocityX = speed;
            animator.SetInteger("DirectionX", 1);
        }
        else
        {
            animator.SetInteger("DirectionX", 0);
        }

        float newVelocityY = 0f;
        if (moveVertical < 0 && currentVelocity.y <= 0)
        {
            newVelocityY = -speed;
            animator.SetInteger("DirectionY", -1);
        }
        else if (moveVertical > 0 && currentVelocity.y >= 0)
        {
            newVelocityY = speed;
            animator.SetInteger("DirectionY", 1);
        }
        else
        {
            animator.SetInteger("DirectionY", 0);
        }

        gameObject.GetComponent<Rigidbody2D>().velocity = new Vector2(newVelocityX, newVelocityY);

        // Detect if we want to start a conversation
        if (Input.GetKeyDown(KeyCode.Space))
        {
            CheckForNearbyNPC();
            // Only try to pick an item up if the press didn't start a dialogue.
            if (FindObjectOfType<DialogueRunner>().isDialogueRunning == false)
                CheckForNearbyItem();
        }
    }

    /// Find all DialogueParticipants
    /** Filter them to those that have a Yarn start node and are in range;
     * then start a conversation with the first one
     */
    public void CheckForNearbyNPC()
    {
        var allParticipants = new List<NPC>(FindObjectsOfType<NPC>());
        var target = allParticipants.Find(delegate (NPC p) {
            return string.IsNullOrEmpty(p.talkToNode) == false && // has a conversation node?
            (p.transform.position - this.transform.position)// is in range?
            .magnitude <= interactionRadius;
        });

        if (target != null)
        {
            // Kick off the dialogue at this node.
            FindObjectOfType<DialogueRunner>().StartDialogue(target.talkToNode);
        }
    }

    /// <summary>Pick up the first inventory item within interaction range, if any.</summary>
    public void CheckForNearbyItem()
    {
        var allItems = new List<IInventoryItem>(FindObjectsOfType<IInventoryItem>());
        var target = allItems.Find(delegate (IInventoryItem i)
        {
            return (i.transform.position - this.transform.position)// is in range?
            .magnitude <= interactionRadius;
        });

        if (target != null)
        {
            inventory.AddItem(target);
        }
    }
}
//}
<file_sep># ChattingTheRPG
A small RPG overworld in Unity testing the Yarn Spinner dialogue engine, inventory system, and save system.
The folders include the YarnSpinner dialogue engine used for conversations within the project, featuring my own extensions. The extended versions of the files can be found within the Examples/DemoScripts as tutorial extensions, including the sprite holder addition in the UI for character portraits, the skipping of text and sound effects within the UI, and making stored dialogue variables serializable.
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using Yarn.Unity;
using System.Runtime.Serialization.Formatters.Binary;
using System.IO;
using UnityEngine.SceneManagement;
/// <summary>
/// Top-level game controller: singleton lifecycle, menu toggling, and binary
/// save/load of scene, inventory, player position and Yarn dialogue state.
/// </summary>
public class GameManager : MonoBehaviour
{
    public static GameManager instance = null;

    public GameObject mainMenu;

    private Inventory inventory;
    private Player player;
    private ExampleVariableStorage dialogueVars;
    private AsyncOperation asyncLoadLevel;

    // True while a saved game is being restored; consumed by OnSceneLoaded.
    private bool loadingGame = false;

    public void Awake()
    {
        // Enforce the singleton pattern.
        if (instance == null)
        {
            instance = this;
        }
        else if (instance != this)
        {
            // Duplicate from a scene load: destroy it and skip the rest so the
            // doomed object neither persists nor subscribes to sceneLoaded.
            Destroy(gameObject);
            return;
        }
        DontDestroyOnLoad(gameObject);

        inventory = FindObjectOfType<Inventory>();
        player = FindObjectOfType<Player>();
        dialogueVars = FindObjectOfType<ExampleVariableStorage>();
        SceneManager.sceneLoaded += OnSceneLoaded;
    }

    /// <summary>Serialize the current session to gamesave.save. Returns true on success.</summary>
    public bool SaveGame()
    {
        // Create a save instance with all data for the current session saved into it.
        Save save = CreateSaveGameObject();

        // Write it to persistent storage with the .save file extension.
        BinaryFormatter bf = new BinaryFormatter();
        FileStream file = File.Create(Application.persistentDataPath + "/gamesave.save");
        bf.Serialize(file, save);
        file.Close();
        return true;
    }

    /// <summary>Restore the last saved session, if a save file exists.</summary>
    public void LoadGame()
    {
        // Only one save file ever exists at a time.
        if (!File.Exists(Application.persistentDataPath + "/gamesave.save"))
        {
            Debug.Log("No game saved!");
            return;
        }

        inventory.EmptyInventory();
        dialogueVars.ResetToDefaults();
        mainMenu.SetActive(false);

        Save save = ReadSaveFile();

        // Scene-dependent state (player position, dialogue vars, scene vars)
        // is applied in OnSceneLoaded once the saved scene finishes loading.
        loadingGame = true;
        StartCoroutine(LoadScene(save));

        for (int i = 0; i < save.currentInventory.Count; i++)
        {
            inventory.LoadItem(save.currentInventory[i]);
        }
    }

    // Deserialize the single save file from persistent storage.
    private Save ReadSaveFile()
    {
        BinaryFormatter bf = new BinaryFormatter();
        FileStream file = File.Open(Application.persistentDataPath + "/gamesave.save", FileMode.Open);
        Save save = (Save)bf.Deserialize(file);
        file.Close();
        return save;
    }

    // Asynchronously load the scene recorded in the save.
    IEnumerator LoadScene(Save save)
    {
        asyncLoadLevel = SceneManager.LoadSceneAsync(save.currentScene);
        while (!asyncLoadLevel.isDone)
        {
            yield return null;
        }
    }

    // Capture the whole session (inventory, dialogue state, scene conditions,
    // player position) into a serializable Save object.
    private Save CreateSaveGameObject()
    {
        Save save = new Save();

        foreach (string item in inventory.GetItems())
        {
            save.currentInventory.Add(item);
        }

        // Flatten Yarn variables into a serializable form.
        foreach (KeyValuePair<string, Yarn.Value> entry in dialogueVars.GetAllVariables())
        {
            ExampleVariableStorage.SaveVariable var = new ExampleVariableStorage.SaveVariable();
            var.name = entry.Key;
            var.type = entry.Value.type;
            var.valueAsBool = entry.Value.AsBool;
            var.valueAsFloat = entry.Value.AsNumber;
            var.valueAsString = entry.Value.AsString;
            save.dialogueVars.Add(var);
        }

        foreach (KeyValuePair<string, bool> condition in SceneItemManager.sceneItems.loadConditionStatuses)
        {
            SceneItemManager.LoadCondition var = new SceneItemManager.LoadCondition();
            var.loadIfThisVarTrue = condition.Key;
            var.varStatus = condition.Value;
            save.sceneVars.Add(var);
        }

        save.currentScene = SceneManager.GetActiveScene().name;
        save.playerX = player.transform.position.x;
        save.playerY = player.transform.position.y;
        save.nodesVisited = FindObjectOfType<DialogueRunner>().dialogue.visitedNodes;
        return save;
    }

    void Update()
    {
        // Don't allow menuing for now if in dialogue.
        if (FindObjectOfType<DialogueRunner>().isDialogueRunning == true)
        {
            return;
        }

        if (Input.GetKeyDown(KeyCode.Escape))
        {
            if (!mainMenu.activeInHierarchy)
            {
                // Menu hidden: show it.
                mainMenu.SetActive(true);
            }
            else if (mainMenu.activeInHierarchy && mainMenu.GetComponent<MainMenu>().currentMenu == mainMenu)
            {
                // Main menu is the only menu showing: hide it.
                mainMenu.SetActive(false);
            }
            else
            {
                // A sub-menu is showing: close just the sub-menu.
                mainMenu.GetComponent<MainMenu>().DisableSubmenu();
            }
        }
    }

    // Applies the scene-dependent parts of a save after the scene finishes loading.
    void OnSceneLoaded(Scene scene, LoadSceneMode mode)
    {
        if (!loadingGame)
        {
            return;
        }

        Save save = ReadSaveFile();

        foreach (var condition in save.sceneVars)
        {
            SceneItemManager.sceneItems.loadConditionStatuses[condition.loadIfThisVarTrue] = condition.varStatus;
        }

        // Scene objects were recreated, so re-resolve references.
        player = FindObjectOfType<Player>();
        player.transform.position = new Vector3(save.playerX, save.playerY);

        dialogueVars = FindObjectOfType<ExampleVariableStorage>();
        for (int i = 0; i < save.dialogueVars.Count; i++)
        {
            object value;
            ExampleVariableStorage.SaveVariable variable = save.dialogueVars[i];
            switch (variable.type)
            {
                case Yarn.Value.Type.Number:
                    value = variable.valueAsFloat;
                    break;
                case Yarn.Value.Type.String:
                    value = variable.valueAsString;
                    break;
                case Yarn.Value.Type.Bool:
                    value = variable.valueAsBool;
                    break;
                case Yarn.Value.Type.Null:
                    value = null;
                    break;
                default:
                    throw new System.ArgumentOutOfRangeException();
            }
            var v = new Yarn.Value(value);
            dialogueVars.SetValue(variable.name, v);
        }

        FindObjectOfType<DialogueRunner>().dialogue.visitedNodes = save.nodesVisited;
        loadingGame = false;
    }
}
| 5007325a420712034d42ea8487b7572600e0d590 | [
"Markdown",
"C#"
] | 11 | C# | eleanoot/ChattingTheRPG | ba07dd2eaf07e0dbe2c7a53ec11f6c2e7ae2cc43 | 9173def3480a3eae652d0c78ceed56d4c5b38402 |
refs/heads/master | <repo_name>sayyar-source/ASP.NET-Core-Health-Checks-Exampels<file_sep>/HealthCheckexample/Health/TestHealthCheckWithArgs.cs
using Microsoft.Extensions.Diagnostics.HealthChecks;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
namespace HealthCheckexample.Health
{
public class TestHealthCheckWithArgs : IHealthCheck
{
public TestHealthCheckWithArgs(int a,string s,bool b)
{
A = a;
S = s;
B = b;
}
public int A { get; set; }
public string S { get; set; }
public bool B { get; set; }
public Task<HealthCheckResult> CheckHealthAsync(HealthCheckContext context, CancellationToken cancellationToken = default)
{
if(A>10 && S!=string.Empty && B==true)
{
return Task.FromResult(HealthCheckResult.Healthy("Healthy"));
}
return Task.FromResult(HealthCheckResult.Unhealthy("Unhealthy"));
}
}
}
<file_sep>/README.md
# ASP.NET-Core-Health-Checks-Exampels
Health Checks to the rescue! Before trying out the code yourself, note the following:
:point_right: After the app is running, check the health status by making a request to the /health endpoint in a browser.
for example:
:house: https://localhost:44398/health
<file_sep>/HealthCheckexample/CatalogContext.cs
using Microsoft.EntityFrameworkCore;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
namespace HealthCheckexample
{
/// <summary>
/// Entity Framework Core database context for the catalog; entity sets
/// (DbSet properties) have not been added yet.
/// </summary>
public class CatalogContext:DbContext
{
    // Options (provider, connection string) are supplied via dependency injection.
    public CatalogContext(DbContextOptions<CatalogContext> options)
        : base(options)
    { }
}
}
| abd3cd5bfd1c6f331a4d64c8dbd88633b2fc0dfa | [
"Markdown",
"C#"
] | 3 | C# | sayyar-source/ASP.NET-Core-Health-Checks-Exampels | cdf37d6b7f3e53b1c3ae29682b28996cfab01751 | a3e32a69682814fa4a1573ab3562ac35806bc709 |
refs/heads/master | <repo_name>liuq123/repos3<file_sep>/spring/src/main/java/com/liu/spring/model/MutablePropertyValues.java
package com.liu.spring.model;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Mutable implementation of {@link PropertyValues}: records the names of a
 * bean's properties, the literal values assigned to them, and which of the
 * properties are references to other beans.
 */
public class MutablePropertyValues implements PropertyValues {

    private List<String> propertyName = new ArrayList<String>();
    private Map<String, Object> valueMap = new HashMap<String, Object>();
    private List<String> refPropertyName = new ArrayList<String>();

    public MutablePropertyValues() {
    }

    public List<String> getPropertyName() {
        return propertyName;
    }

    public Map<String, Object> getValueMap() {
        return valueMap;
    }

    /** Record a property that is set from a literal value. */
    public void addProName(String name) {
        propertyName.add(name);
    }

    /** Store the literal value for the given property name. */
    public void addValue(String name, String value) {
        valueMap.put(name, value);
    }

    /** Record a property that references another bean. */
    public void addRefName(String name) {
        refPropertyName.add(name);
    }

    /** Whether the given property was registered as a bean reference. */
    public boolean isContainRefName(String name) {
        return refPropertyName.contains(name);
    }

    public List<String> getRefPropertyName() {
        return refPropertyName;
    }
}
<file_sep>/spring/src/main/java/com/liu/mvc/support/ConfigurablePropertyAccessor.java
package com.liu.mvc.support;
/**
 * Configuration-file parsing interface (currently a marker with no methods).
 */
public interface ConfigurablePropertyAccessor {

}
<file_sep>/spring/src/main/java/com/liu/spring/autowired/AutowiredSupport.java
package com.liu.spring.autowired;
import com.liu.spring.context.AbstractApplicationContext;
/**
 * Strategy for performing dependency injection on annotated beans.
 */
public interface AutowiredSupport {
    /** Inject dependencies into beans registered with the given context. */
    public void injection(AbstractApplicationContext abstractApplicationContext);
}
<file_sep>/spring/src/main/java/com/liu/spring/context/AbstractApplicationContext.java
package com.liu.spring.context;
import javax.servlet.ServletContext;
import com.liu.mvc.context.WebApplicationContext;
import com.liu.spring.autowired.AutowiredSupport;
import com.liu.spring.autowired.DefaultAutowiredSupport;
import com.liu.spring.factory.AbstractBeanFactory;
import com.liu.spring.factory.BeanFactory;
import com.liu.spring.factory.DefaultBeanFactory;
import com.liu.spring.factory.RealyAbstractBeanFactory;
import com.liu.spring.model.Resource;
import com.liu.spring.parser.DefualtXmlReader;
import com.liu.spring.parser.DefultNamespaceHandlerResolver;
import com.liu.spring.parser.NamespaceHandlerResolver;
import com.liu.spring.reflex.ReflexImpl;
import com.liu.spring.register.BeanDefinitionRegisterWare;
import com.liu.spring.register.SingleRegist;
import com.liu.spring.register.SingleRegistImpl;
import com.liu.spring.util.ClassUtil;
import com.liu.spring.util.ProdecObjectFactory;
import com.liu.spring.ware.ResourceLocation;
import com.liu.spring.ware.XmlResourceLocation;
/**
 * Skeletal ApplicationContext implementation: wires together the resource
 * loader, the BeanDefinition factory, the XML parsers and the autowiring
 * support, and drives container start-up via {@link #refash()}.
 */
public abstract class AbstractApplicationContext implements WebApplicationContext {

    // NOTE(review): unused constant whose name shadows the NamespaceHandlerResolver type.
    private static final String NamespaceHandlerResolver = null;

    /**
     * Parent container, if any.
     */
    private ApplicationContext parentContext;

    /**
     * Resolver turning location strings into Resource objects.
     */
    private ResourceLocation xmlResourceLocation;

    /**
     * Configuration file locations.
     */
    private String[] configs;

    /**
     * Factory holding the registered BeanDefinitions.
     */
    private BeanFactory beanFactory;

    /**
     * Factory holding the actual bean instances.
     */
    private BeanFactory realybeanFactory;

    private NamespaceHandlerResolver namespaceHandlerResolver;

    private AutowiredSupport autowiredSupport;

    private ServletContext servletContext;

    // Whether the container has already been started (makes refash() idempotent).
    protected boolean isStart = false;

    /** Resolve a single location string into a Resource. */
    public Resource resolve(String locations) {
        return xmlResourceLocation.resolve(locations);
    }

    /** Copy the given configuration locations for later loading. */
    public void setConfigLocations(String... configLocation) {
        this.configs = new String[configLocation.length];
        for (int i = 0; i < configLocation.length; i++) {
            this.configs[i] = configLocation[i];
        }
    }

    /** Start the container; a second call is a no-op. */
    public void refash() {
        if (isStart) {
            return;
        }
        prepare();
        prepareBeanFactory();
        isStart = true;
    }

    public void prepareBeanFactory() {
        BeanFactory beanDefinition = getBeanDefinition();
        initParse(new DefultNamespaceHandlerResolver());
        /*
         * Start registering BeanDefinition objects.
         */
        loadBeanDefinitions(beanDefinition);
    }

    /** Parse every configured resource and register its bean definitions. */
    public void loadBeanDefinitions(BeanFactory beanFactory) {
        ResourceLocation resourceLocation = getResourceLocation();
        Resource[] configResources = getConfigResources(resourceLocation);
        /*
         * Set up the XML reader and its collaborators.
         */
        DefualtXmlReader parse = new DefualtXmlReader(beanFactory);
        initReader(parse);
        for (Resource re : configResources) {
            parse.loadBeanDefinition(re);
        }
        System.out.println("注册完成!");
        /*
         * Perform dependency injection for annotated classes.
         */
        autowiredSupport.injection(this);
    }

    public void initReader(DefualtXmlReader reader) {
        reader.setReflex(new ReflexImpl());
        reader.setRegisterWare(new BeanDefinitionRegisterWare());
    }

    /**
     * Initialize the collaborators the container needs.
     */
    public void prepare() {
        this.beanFactory = this.CreateBeanFactory(DefaultBeanFactory.class);
        this.realybeanFactory = this.CreateBeanFactory(RealyAbstractBeanFactory.class);
        this.xmlResourceLocation = new XmlResourceLocation();
        this.autowiredSupport = new DefaultAutowiredSupport();
    }

    public void initParse(NamespaceHandlerResolver namespaceHandlerResolver) {
        namespaceHandlerResolver.loadParser(beanFactory);
    }

    // Instantiate a bean factory reflectively and back-link it to this context.
    private BeanFactory CreateBeanFactory(Class<?> cls) {
        AbstractBeanFactory fco = (AbstractBeanFactory) ProdecObjectFactory.prodecObject(cls);
        fco.setApplicationContext(this);
        return fco;
    }

    public BeanFactory getBeanDefinition() {
        return this.beanFactory;
    }

    public ResourceLocation getResourceLocation() {
        return this.xmlResourceLocation;
    }

    /** Resolve every configured location into a Resource array. */
    public Resource[] getConfigResources(ResourceLocation resourceLocation) {
        Resource[] resou = new Resource[this.configs.length];
        for (int i = 0; i < this.configs.length; i++) {
            resou[i] = resourceLocation.resolve(this.configs[i]);
        }
        return resou;
    }

    public Object getBean(String name) {
        return getSingleRegistWare(null).getBean(name, beanFactory, realybeanFactory);
    }

    public <T> T getBean(String name, Class<T> cls) {
        // NOTE(review): the name parameter is ignored; lookup is by type only.
        return (T) getSingleRegistWare(null).getBean(cls, beanFactory, realybeanFactory);
    }

    public SingleRegist getSingleRegistWare(ApplicationContext applicationContext) {
        // NOTE(review): both branches construct the same object.
        if (applicationContext == null) {
            return new SingleRegistImpl(applicationContext);
        } else {
            return new SingleRegistImpl(applicationContext);
        }
    }

    public BeanFactory getBeanFactory() {
        return beanFactory;
    }

    public BeanFactory getRealybeanFactory() {
        return realybeanFactory;
    }

    public ServletContext getServletContext() {
        return servletContext;
    }

    public void setServletContext(ServletContext servletContext) {
        this.servletContext = servletContext;
    }

    @Override
    public void setParentContext(ApplicationContext applicationContext) {
        this.parentContext = applicationContext;
    }

    @Override
    public ApplicationContext getParentApplicationContext() {
        return this.parentContext;
    }
}
<file_sep>/spring/src/main/java/com/liu/mvc/utils/CollectionUtis.java
package com.liu.mvc.utils;
import java.util.List;
/**
 * Small collection helpers.
 */
public class CollectionUtis {

    /**
     * Copy the given list into a new Object array.
     *
     * <p>Fixed: the previous version unconditionally wrote to indices 0 and 1
     * before the copy loop, throwing ArrayIndexOutOfBoundsException for lists
     * with fewer than two elements (and being dead code otherwise).
     *
     * @param list source list (must not be null)
     * @return a new array containing the list's elements in order
     */
    public static Object[] listToArray(List<Object> list) {
        return list.toArray();
    }
}
<file_sep>/spring/src/main/java/com/liu/spring/util/StringUtil.java
package com.liu.spring.util;
/**
 * String helpers for bean-name handling.
 */
public class StringUtil {

    /** Return {@code s} with its first character lower-cased. */
    public static String toLowerCaseFirstOne(String s) {
        if (Character.isLowerCase(s.charAt(0))) {
            return s;
        }
        return Character.toLowerCase(s.charAt(0)) + s.substring(1);
    }

    /** Return {@code s} with its first character upper-cased. */
    public static String toUpperCaseFirstOne(String s) {
        if (Character.isUpperCase(s.charAt(0))) {
            return s;
        }
        return Character.toUpperCase(s.charAt(0)) + s.substring(1);
    }

    /**
     * Derive a bean name from a fully-qualified class name: the simple class
     * name with its first letter lower-cased.
     */
    public static String getLowerCaseFirstOneBeanName(String clssName) {
        String simpleName = clssName.substring(clssName.lastIndexOf(".") + 1);
        return toLowerCaseFirstOne(simpleName);
    }

    /**
     * True when the value is null, blank, or spells "null" in any letter case.
     */
    public static boolean isNullOrEmpty(Object obj) {
        if (null == obj) {
            return true;
        }
        String text = obj.toString();
        return "".equals(text.trim()) || text.toUpperCase().equals("NULL");
    }
}
<file_sep>/spring/src/main/java/com/liu/mvc/resolver/HandlerAdapter.java
package com.liu.mvc.resolver;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.liu.mvc.beans.ModelAndView;
/**
 * Adapts a resolved handler object so the dispatcher can invoke it uniformly.
 */
public interface HandlerAdapter {
    /** Whether this adapter knows how to invoke the given handler. */
    boolean supports(Object handler);
    /** Invoke the handler for the request and return the resulting model/view. */
    ModelAndView handle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception;
    /** Last-modified timestamp for the handler, for HTTP caching. */
    long getLastModified(HttpServletRequest request, Object handler);
}
<file_sep>/spring/src/main/java/com/liu/mvc/beans/ModelMap.java
package com.liu.mvc.beans;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Model holder passed between controller and view: a {@link LinkedHashMap}
 * with chainable attribute-adding methods (modelled on Spring's ModelMap).
 */
public class ModelMap extends LinkedHashMap<String, Object> {

	/**
	 * Construct a new, empty {@code ModelMap}.
	 */
	public ModelMap() {
	}

	/**
	 * Construct a new {@code ModelMap} containing the supplied attribute
	 * under the supplied name.
	 * @see #addAttribute(String, Object)
	 */
	public ModelMap(String attributeName, Object attributeValue) {
		addAttribute(attributeName, attributeValue);
	}

	/**
	 * Construct a new {@code ModelMap} containing the supplied attribute.
	 * @see #addAttribute(Object)
	 */
	public ModelMap(Object attributeValue) {
		addAttribute(attributeValue);
	}

	/**
	 * Add the supplied attribute under the supplied name.
	 * @param attributeName the name of the model attribute (never {@code null})
	 * @param attributeValue the model attribute value (can be {@code null})
	 */
	public ModelMap addAttribute(String attributeName, Object attributeValue) {
		put(attributeName, attributeValue);
		return this;
	}

	/**
	 * Add the supplied attribute to this {@code Map}.
	 * <p><emphasis>Note: Empty {@link Collection Collections} are not added to
	 * the model when using this method because we cannot correctly determine
	 * the true convention name. View code should check for {@code null} rather
	 * than for empty collections as is already done by JSTL tags.</emphasis>
	 * <p>NOTE(review): unlike Spring's ModelMap, no conventional attribute name
	 * is generated -- the value itself is cast to String and used as the key,
	 * so any non-String value throws ClassCastException here.
	 * @param attributeValue the model attribute value (never {@code null})
	 */
	public ModelMap addAttribute(Object attributeValue) {
		if (attributeValue instanceof Collection && ((Collection<?>) attributeValue).isEmpty()) {
			return this;
		}
		return addAttribute((String)attributeValue, attributeValue);
	}

	/**
	 * Copy all attributes in the supplied {@code Collection} into this
	 * {@code Map}, using attribute name generation for each element.
	 * @see #addAttribute(Object)
	 */
	public ModelMap addAllAttributes( Collection<?> attributeValues) {
		if (attributeValues != null) {
			for (Object attributeValue : attributeValues) {
				addAttribute(attributeValue);
			}
		}
		return this;
	}

	/**
	 * Copy all attributes in the supplied {@code Map} into this {@code Map}.
	 * @see #addAttribute(String, Object)
	 */
	public ModelMap addAllAttributes( Map<String, ?> attributes) {
		if (attributes != null) {
			putAll(attributes);
		}
		return this;
	}

	/**
	 * Copy all attributes in the supplied {@code Map} into this {@code Map},
	 * with existing objects of the same name taking precedence (i.e. not getting
	 * replaced).
	 */
	public ModelMap mergeAttributes(Map<String, ?> attributes) {
		if (attributes != null) {
			attributes.forEach((key, value) -> {
				if (!containsKey(key)) {
					put(key, value);
				}
			});
		}
		return this;
	}

	/**
	 * Does this model contain an attribute of the given name?
	 * @param attributeName the name of the model attribute (never {@code null})
	 * @return whether this model contains a corresponding attribute
	 */
	public boolean containsAttribute(String attributeName) {
		return containsKey(attributeName);
	}
}
<file_sep>/spring/src/main/java/com/liu/spring/parser/XmlReader.java
package com.liu.spring.parser;
import com.liu.spring.model.Resource;
/**
 * Reads bean definitions from an XML resource.
 */
public interface XmlReader extends SourceReader{
    /** Parse the resource and register the bean definitions it declares. */
    public void loadBeanDefinition(Resource re) ;
}
<file_sep>/spring/src/main/java/com/liu/spring/parser/NamespaceHandlerSupport.java
package com.liu.spring.parser;
import java.util.HashMap;
import java.util.Map;
import com.liu.spring.factory.BeanFactory;
/**
 * Base class for namespace handlers: keeps a registry mapping element names
 * to the parser responsible for them.
 */
public abstract class NamespaceHandlerSupport implements NamespaceHandler {

    // Parsers keyed by the element name they handle.
    public final Map<String, XmlParser> parsers = new HashMap<String, XmlParser>();

    /** Register the parser responsible for the given element name. */
    public void registerBeanDefinitionParser(String parseName, XmlParser pareser) {
        parsers.put(parseName, pareser);
    }

    public Map<String, XmlParser> getParsers() {
        return parsers;
    }
}
<file_sep>/spring/src/main/java/com/liu/mvc/beans/BeansUtil.java
package com.liu.mvc.beans;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import com.liu.mvc.context.GenericWebApplicationContext;
import com.liu.mvc.mapping.HandlerMapping;
import com.liu.mvc.mapping.RequestMappingHandlerMapping;
import com.liu.spring.context.ApplicationContext;
import com.liu.spring.factory.DefaultBeanFactory;
import com.liu.spring.factory.RealyAbstractBeanFactory;
import com.liu.spring.model.BeanDefinition;
import com.liu.spring.model.GenericBeanDefinition;
import com.liu.spring.util.GetListFactory;
/**
 * Helpers for pulling beans out of an ApplicationContext.
 */
public class BeansUtil {

    /**
     * Load all HandlerMapping beans whose definition class matches the given type.
     *
     * @param context the application context to search
     * @param cls the bean class to match against each BeanDefinition
     * @return bean-id to handler mapping, or {@code null} when the context is
     *         not a GenericWebApplicationContext
     */
    public static Map<String, HandlerMapping> beansOfType(ApplicationContext context,Class<?> cls) {
        if (context instanceof GenericWebApplicationContext) {
            Map<String, HandlerMapping> hdmp = GetListFactory.buildHashMap();
            GenericWebApplicationContext ge = (GenericWebApplicationContext) context;
            DefaultBeanFactory beanFactory = (DefaultBeanFactory) ge.getBeanFactory();
            RealyAbstractBeanFactory realybeanFactory = (RealyAbstractBeanFactory) ge.getRealybeanFactory();
            Set<Entry<String,BeanDefinition>> beans = beanFactory.getBeanMap().entrySet();
            for (Entry<String,BeanDefinition> be:beans) {
                GenericBeanDefinition gs = (GenericBeanDefinition) be.getValue();
                // NOTE(review): getBeanClass is assumed to hold the class name
                // as a String -- confirm against GenericBeanDefinition.
                if (((String)gs.getBeanClass()).equals(cls.getName())) {
                    /*
                     * Fetch the bean id and its instantiated mapping.
                     */
                    String beanId = gs.getBeanId();
                    RequestMappingHandlerMapping object = (RequestMappingHandlerMapping) realybeanFactory.getRentyMap().get(beanId);
                    Map<String, HandlerMethod> mappingLookup = object.getMappingLookup();
                    // Give each handler method access to the live bean factory.
                    for (HandlerMethod hd:mappingLookup.values()) {
                        hd.setBeanFactory(realybeanFactory);
                    }
                    hdmp.put( beanId , object);
                }
            }
            return hdmp;
        }
        return null;
    }
}
<file_sep>/spring/src/main/java/com/liu/model/TestController.java
package com.liu.model;
import com.liu.mvc.ancocation.RequestMapping;
import com.liu.mvc.ancocation.RequestMethod;
import com.liu.mvc.ancocation.ResponseBody;
import com.liu.spring.annocation.Controller;
/**
 * Sample controller used to exercise the home-made MVC dispatching:
 * GET /test/hello resolves the view "test"; GET /test/json writes the
 * returned string directly to the response body (@ResponseBody).
 */
@Controller
@RequestMapping("/test")
public class TestController {
@RequestMapping(value="/hello",method = RequestMethod.GET)
public String test(String name,int id) {
return "test";
}
@RequestMapping(value="/json",method = RequestMethod.GET)
@ResponseBody
public String json(String name,int id) {
return "hello json";
}
}
<file_sep>/spring/src/main/java/com/liu/mvc/support/ServletBeanWrapper.java
package com.liu.mvc.support;
import javax.servlet.ServletContext;
import com.liu.mvc.beans.ServletProperty;
/**
 * BeanWrapper that supplies the dispatcher servlet's configuration with
 * hard-coded values, standing in for what would normally be read from
 * web.xml.
 */
public class ServletBeanWrapper implements BeanWrapper {

    @Override
    public ServletProperty loadServletProperty(ServletContext context) {
        ServletProperty property = new ServletProperty();
        property.setServletName("dispatcher");
        property.setServletClass("org.springframework.web.servlet.DispatcherServlet");
        property.setLoadOnStartup(0);
        // url-pattern and init-param entries, keyed exactly as in web.xml.
        property.getMapping().put("dispatcher", "/");
        property.getParam().put("contextConfigLocation", "classpath:springConfig/dispatcher-servlet.xml");
        return property;
    }
}
<file_sep>/spring/src/main/java/com/liu/mvc/resolver/HandlerExceptionResolver.java
package com.liu.mvc.resolver;
/**
 * Marker interface for strategies that resolve exceptions thrown during
 * handler execution (no methods declared yet).
 */
public interface HandlerExceptionResolver {
}
<file_sep>/spring/src/main/java/com/liu/mvc/context/GenericWebApplicationContext.java
package com.liu.mvc.context;
import javax.servlet.ServletContext;
import com.liu.spring.context.AbstractApplicationContext;
import com.liu.spring.context.ApplicationContext;
/**
 * Web-aware application context: an AbstractApplicationContext that also
 * carries the hosting ServletContext.
 */
public class GenericWebApplicationContext extends AbstractApplicationContext{
// The servlet container context this application context runs in.
private ServletContext servletContext;
public GenericWebApplicationContext() {
}
/**
 * Apply the default configuration location and refresh the context.
 * NOTE(review): "refash" is the inherited refresh operation; the name typo
 * lives in AbstractApplicationContext, outside this file.
 */
public void startContext() {
setConfigLocations( this.getLocation());
refash();
}
public GenericWebApplicationContext(String... locs) {
this(true,locs);
}
/**
 * @param isfalsh when true, apply the locations and refresh immediately;
 *        when false the caller must trigger startContext() itself
 * @param locs bean definition XML locations
 */
public GenericWebApplicationContext(boolean isfalsh,String... locs) {
if (isfalsh) {
setConfigLocations( locs);
refash();
}
}
public ServletContext getServletContext() {
return servletContext;
}
public void setServletContext(ServletContext servletContext) {
this.servletContext = servletContext;
}
// Default configuration file looked up on the classpath.
private String[] getLocation() {
return new String[]{"spring-bean.xml"};
}
}
<file_sep>/spring/src/main/java/com/liu/mvc/support/AnnotationdrivenXmlParser.java
package com.liu.mvc.support;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import com.liu.mvc.ancocation.Mapping;
import com.liu.mvc.ancocation.RequestMapping;
import com.liu.mvc.ancocation.RequestMethod;
import com.liu.mvc.beans.HandlerMethod;
import com.liu.mvc.mapping.HandlerMapping;
import com.liu.mvc.mapping.RequestMappingHandlerMapping;
import com.liu.spring.annocation.Controller;
import com.liu.spring.factory.BeanFactory;
import com.liu.spring.model.GenericBeanDefinition;
import com.liu.spring.model.MutablePropertyValues;
import com.liu.spring.model.Node;
import com.liu.spring.parser.ParserContext;
import com.liu.spring.parser.XmlParser;
import com.liu.spring.util.ClassUtil;
import com.liu.spring.util.GetListFactory;
import com.liu.spring.util.StringUtil;
/**
 * XmlParser that drives annotation scanning for the MVC layer: it finds
 * every @Controller class on the classpath and registers one
 * RequestMappingHandlerMapping bean definition per controller-level URL.
 */
public class AnnotationdrivenXmlParser implements XmlParser{

    private final Class<Controller> cls = Controller.class;

    private final Class<RequestMapping> reqMp = RequestMapping.class;

    // NOTE(review): never assigned or read in this class; kept so any
    // reflective wiring elsewhere keeps working.
    private BeanFactory beanFactory;

    // NOTE(review): never read in this class; kept for the same reason.
    private final Class<RequestMappingHandlerMapping> reqClass = RequestMappingHandlerMapping.class;

    /**
     * Scan every known package for @Controller classes and register a
     * handler mapping for each. Registration goes through the parser
     * context, so this always returns {@code null}.
     * @param e2 the XML node being parsed (unused by this parser)
     * @param parserContext carries the bean factory and register ware
     * @return always {@code null}
     */
    public <T> GenericBeanDefinition parserElement(Node<T> e2, ParserContext parserContext) {
        List<Class<?>> cos = new ArrayList<Class<?>>();
        List<String> allPageNames = ClassUtil.getAllPageNames();
        for (String name : allPageNames) {
            cos.addAll(ClassUtil.annotationClass(name, cls));
        }
        // Register one handler mapping per discovered controller.
        for (Class<?> s : cos) {
            registBeanDefinition(parserContext, s);
        }
        return null;
    }

    /**
     * Register bean definitions for one controller class. A controller with
     * a class-level @RequestMapping gets one mapping per declared URL; one
     * without it falls back to the default controller URL.
     */
    private void registBeanDefinition(ParserContext parserContext, Class<?> cls) {
        if (ClassUtil.isContainAnnotation(cls, reqMp)) {
            // URLs declared on the class-level @RequestMapping.
            List<String> annotationValues = this.getClassMapiAnnotationValues(cls);
            if (annotationValues.size() > 0) {
                for (String v : annotationValues) {
                    // Build, convert and register one mapping per URL.
                    RequestMappingHandlerMapping createReqMapping = createReqMapping(cls, v);
                    GenericBeanDefinition beanDefinition = generateBeanDefinition(createReqMapping);
                    parserContext.getRegisterWare().registBention(parserContext.getBeanFactory(), beanDefinition);
                }
            }
        } else {
            // No class-level mapping: register under the default controller URL.
            RequestMappingHandlerMapping createReqMapping = createReqMapping(cls, HandlerMapping.DEFULT_CONTROLLER_URL);
            GenericBeanDefinition beanDefinition = generateBeanDefinition(createReqMapping);
            parserContext.getRegisterWare().registBention(parserContext.getBeanFactory(), beanDefinition);
        }
    }

    /**
     * Build the RequestMappingHandlerMapping for one controller URL,
     * registering a HandlerMethod for every @RequestMapping method.
     */
    private RequestMappingHandlerMapping createReqMapping(Class<?> cls, String url) {
        RequestMappingHandlerMapping maping = new RequestMappingHandlerMapping();
        List<Method> methods = ClassUtil.getContainAnnotationMethods(cls, reqMp);
        for (Method me : methods) {
            if (ClassUtil.MethodIsContainAnnotation(me, reqMp)) {
                // URLs declared on the method-level @RequestMapping.
                List<String> annotationValues = this.getMethodMapiAnnotationValues(me);
                if (annotationValues.size() > 0) {
                    for (String value : annotationValues) {
                        // Full handler key = controller URL + method URL.
                        String hadnlerKey = url + value;
                        maping.registHandlerMethod(hadnlerKey, this.createHandlerMethod(value, me, cls));
                        maping.getUrlPathHelper().addAllUrl(url + value);
                    }
                }
            }
        }
        maping.getUrlPathHelper().setControllerUrl(url);
        maping.setMappingNameByController(cls, url);
        maping.setControllerBeanId(getControllerBeanId(cls));
        return maping;
    }

    /** Derive the controller's bean id: class name with a lower-cased first letter. */
    private String getControllerBeanId(Class<?> cls) {
        return StringUtil.getLowerCaseFirstOneBeanName(cls.getName());
    }

    /**
     * Collect the non-empty URL values of the method-level @RequestMapping.
     * @param me the handler method
     * @return the declared URLs; possibly empty, never null
     */
    private List<String> getMethodMapiAnnotationValues(Method me) {
        List<String> buildArrayList = GetListFactory.buildArrayList();
        if (ClassUtil.MethodIsContainAnnotation(me, reqMp)) {
            RequestMapping requestMapping = me.getAnnotation(reqMp);
            String[] value = requestMapping.value();
            if (value != null) {
                for (int i = 0; i < value.length; i++) {
                    // Skip blank entries.
                    if (!StringUtil.isNullOrEmpty(value[i])) {
                        buildArrayList.add(value[i]);
                    }
                }
            }
        }
        return buildArrayList;
    }

    /**
     * Collect the non-empty URL values of the class-level @RequestMapping.
     * @param cls the controller class
     * @return the declared URLs; possibly empty, never null
     */
    private List<String> getClassMapiAnnotationValues(Class<?> cls) {
        List<String> buildArrayList = GetListFactory.buildArrayList();
        if (ClassUtil.isContainAnnotation(cls, reqMp)) {
            RequestMapping requestMapping = cls.getAnnotation(reqMp);
            String[] value = requestMapping.value();
            if (value != null) {
                for (int i = 0; i < value.length; i++) {
                    // Skip blank entries.
                    if (!StringUtil.isNullOrEmpty(value[i])) {
                        buildArrayList.add(value[i]);
                    }
                }
            }
        }
        return buildArrayList;
    }

    /**
     * Build a HandlerMethod describing one controller method: its name,
     * URL, HTTP methods, parameter names/types and owning class.
     */
    private HandlerMethod createHandlerMethod(String methodUrl, Method me, Class<?> cls) {
        HandlerMethod meh = new HandlerMethod();
        String methodName = me.getName();
        List<RequestMethod> methods = this.getRequestMethods(me);
        Map<String, Class<?>> parameterNames = ClassUtil.getParameterNameAndvalue(me, cls);
        List<String> parameterName = ClassUtil.getParameterName(me, cls);
        meh.setMethodName(methodName);
        meh.setRequestMethod(methods);
        meh.setParameterName(parameterNames);
        meh.setMethodUrl(methodUrl);
        meh.setControllerClass(cls);
        meh.setMethodName(me.getName());
        meh.setParameterName(parameterName);
        meh.setMethod(me);
        return meh;
    }

    /**
     * Turn a built mapping into a generic bean definition whose property
     * values mirror the mapping's fields, ready for registration.
     */
    private GenericBeanDefinition generateBeanDefinition(RequestMappingHandlerMapping maping) {
        GenericBeanDefinition ge = new GenericBeanDefinition();
        ge.setBeanClass(this.getGenericBeanClassName(maping.getClass()));
        MutablePropertyValues mo = new MutablePropertyValues();
        Map<String, Object> keyAndValue = ClassUtil.getKeyAndValue(maping);
        List<String> allFieldName = ClassUtil.getAllFieldName(RequestMappingHandlerMapping.class);
        mo.getValueMap().putAll(keyAndValue);
        mo.getPropertyName().addAll(allFieldName);
        ge.setPropertyValues(mo);
        ge.setBeanId(maping.getMappingName());
        return ge;
    }

    /** Bean class name for generated mappings: always RequestMappingHandlerMapping. */
    private String getGenericBeanClassName(Class<?> cs) {
        return RequestMappingHandlerMapping.class.getName();
    }

    /**
     * Read the HTTP methods declared on the method-level @RequestMapping.
     * Fix: the original fetched the annotation into an unused local before
     * checking for its presence; the dead local is removed.
     */
    private List<RequestMethod> getRequestMethods(Method me) {
        List<RequestMethod> requestMethods = GetListFactory.buildArrayList();
        if (ClassUtil.MethodIsContainAnnotation(me, reqMp)) {
            RequestMethod[] value = me.getAnnotation(reqMp).method();
            for (int i = 0; i < value.length; i++) {
                if (!StringUtil.isNullOrEmpty(value[i])) {
                    requestMethods.add(value[i]);
                }
            }
        }
        return requestMethods;
    }

    /** Smoke test only. NOTE(review): passing nulls NPEs once a controller is found. */
    public static void main(String[] args) {
        AnnotationdrivenXmlParser s = new AnnotationdrivenXmlParser();
        s.parserElement(null, null);
    }
}
<file_sep>/spring/src/main/java/com/liu/mvc/beans/AbstractView.java
package com.liu.mvc.beans;
/**
 * Base View implementation that stores the response type (e.g. JSON body
 * vs. view rendering) on behalf of subclasses.
 */
public abstract class AbstractView implements View {
// How the handler result should be written to the response.
protected ResponseType responseType;
@Override
public ResponseType getResponseType() {
return responseType;
}
@Override
public void setResponseType(ResponseType responseType) {
this.responseType = responseType;
}
}
<file_sep>/spring/src/main/java/com/liu/spring/reflex/ReflexImpl.java
package com.liu.spring.reflex;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.liu.model.Studens;
import com.liu.spring.util.ClassUtil;
/**
 * Reflection helper: instantiates classes by name and assigns field
 * values, converting raw string values for primitive-typed fields.
 */
public class ReflexImpl implements Reflex {

    /**
     * Instantiate the class with the given fully qualified name using its
     * no-arg constructor.
     * @param className fully qualified class name
     * @return the new instance, or null when reflection fails (the error
     *         is printed to stderr)
     */
    public Object newInstance(String className) {
        try {
            Class<?> forName = Class.forName(className);
            return forName.newInstance();
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        } catch (InstantiationException e) {
            e.printStackTrace();
        } catch (IllegalAccessException e) {
            e.printStackTrace();
        }
        return null;
    }

    /**
     * Assign {@code value} to the field named {@code name} on {@code obj},
     * searching all fields reported by ClassUtil.getAllField and converting
     * the value for primitive-typed fields.
     * Fix: the original dereferenced a null Field when no field matched
     * (bare NPE); it now fails fast with a descriptive exception.
     * @param obj target object
     * @param value value to assign
     * @param name field name to assign
     */
    public void assignment(Object obj, Object value, String name) {
        try {
            List<Field> allField = ClassUtil.getAllField(obj.getClass());
            Field f = null;
            for (Field fs : allField) {
                if (name.equals(fs.getName())) {
                    f = fs;
                }
            }
            if (f == null) {
                // Propagates like the original NPE did, but with context.
                throw new IllegalStateException(
                        "No field named '" + name + "' on " + obj.getClass().getName());
            }
            Class<?> className = f.getType();
            f.setAccessible(true);
            if (this.isBaseType(className)) {
                // Primitive/wrapper field: convert the raw value first.
                f.set(obj, typeChange(className, value));
            } else {
                f.set(obj, value);
            }
        } catch (SecurityException e) {
            e.printStackTrace();
        } catch (IllegalArgumentException e) {
            e.printStackTrace();
        } catch (IllegalAccessException e) {
            e.printStackTrace();
        }
    }

    /**
     * Convert a raw (usually String) value to the given primitive type.
     * Only int is handled today; other types pass through unchanged.
     * Fix: the original cast the value to String, so an Integer input threw
     * ClassCastException; toString() now handles both.
     * @param className target field type
     * @param value raw value (may be null)
     * @return the converted value
     */
    public Object typeChange(Class<?> className, Object value) {
        if ("int".equals(className.toString())) {
            if (value == null) {
                return 0;
            }
            if (value instanceof Integer) {
                return value;
            }
            return Integer.parseInt(value.toString());
        }
        return value;
    }

    /**
     * Whether the given type is a primitive int or one of the standard
     * wrapper classes.
     */
    public boolean isBaseType(Class<?> className) {
        if (className.equals(java.lang.Integer.class) || "int".equals(className.toString())
                || className.equals(java.lang.Byte.class) || className.equals(java.lang.Long.class)
                || className.equals(java.lang.Double.class) || className.equals(java.lang.Float.class)
                || className.equals(java.lang.Character.class) || className.equals(java.lang.Short.class)
                || className.equals(java.lang.Boolean.class)) {
            return true;
        }
        return false;
    }

    /** Smoke test: assigns the string "1" to Studens.id via reflection. */
    public static void main(String[] args) {
        ReflexImpl s = new ReflexImpl();
        Studens so = new Studens();
        s.assignment(so, "1", "id");
    }

    /**
     * Collect the declared fields of the direct superclass of the named
     * class.
     * @param smallClassName fully qualified name of the subclass
     * @return the superclass's declared fields, or null when the class is
     *         missing
     */
    public List<Field> parentClassAssignment(String smallClassName) {
        try {
            Class<?> sc = Class.forName(smallClassName);
            Class<?> superclass = sc.getSuperclass();
            Field[] fields = superclass.getDeclaredFields();
            List<Field> type = new ArrayList<Field>();
            for (Field fi : fields) {
                type.add(fi);
            }
            return type;
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        }
        return null;
    }

    /** No-op; required by the Reflex interface. */
    public void assignmentField(Field f, Object value) {
    }

    /**
     * Set a field declared on {@code className} directly on {@code obj},
     * regardless of visibility.
     * Fix: the original converted with the OWNING CLASS as the target type
     * (typeChange(cla, value)), so primitive fields never got converted and
     * field.set threw; conversion now uses the field's own type.
     */
    public void modifyFiledsPower(String fieldName, String className, Object obj, Object value) {
        Class<?> cla;
        try {
            cla = Class.forName(className);
            Field field = cla.getDeclaredField(fieldName);
            field.setAccessible(true);
            if (this.isBaseType(field.getType())) {
                // Convert against the FIELD type, not the declaring class.
                field.set(obj, typeChange(field.getType(), value));
            } else {
                field.set(obj, value);
            }
        } catch (ClassNotFoundException e1) {
            e1.printStackTrace();
        } catch (NoSuchFieldException e) {
            e.printStackTrace();
        } catch (SecurityException e) {
            e.printStackTrace();
        } catch (IllegalArgumentException e) {
            e.printStackTrace();
        } catch (IllegalAccessException e) {
            e.printStackTrace();
        }
    }
}
<file_sep>/spring/src/main/java/com/liu/mvc/support/FlashMapManager.java
package com.liu.mvc.support;
/**
 * Marker interface for flash-attribute management (no methods declared yet).
 */
public interface FlashMapManager {
}
<file_sep>/spring/src/main/java/com/liu/spring/parser/AbstractXmlReader.java
package com.liu.spring.parser;
import java.util.List;
import org.dom4j.Element;
import com.liu.spring.factory.AbstractBeanFactory;
import com.liu.spring.factory.BeanFactory;
import com.liu.spring.factory.DefaultBeanFactory;
import com.liu.spring.model.BeanDefinition;
import com.liu.spring.model.GenericBeanDefinition;
import com.liu.spring.model.Node;
import com.liu.spring.model.Resource;
import com.liu.spring.reflex.Reflex;
import com.liu.spring.reflex.ReflexImpl;
import com.liu.spring.register.BeanDefinitionRegisterWare;
import com.liu.spring.register.RegisterWare;
/**
 * Skeleton XmlReader: resolves the parser registered for each XML element
 * name via the bean factory and delegates bean-definition loading to it.
 */
public abstract class AbstractXmlReader implements XmlReader {
// Registers parsed bean definitions with the factory.
private RegisterWare registerWare;
private Reflex reflex;
private ParserContext parserContext;
private BeanFactory beanFactory;
public AbstractXmlReader(BeanFactory beanFactory) {
this.parserContext = new ParserContext();
this.beanFactory = beanFactory;
}
/**
 * Parse one XML element: look up the parser registered under the element
 * name and register the definition it yields (if any).
 * NOTE(review): elements without a registered parser are silently skipped
 * (empty else branch).
 */
public <T> void loadBeanDefinition(Node<T> e2) {
String name = this.getElementName(e2);
DefaultBeanFactory defaultBeanFactory = (DefaultBeanFactory) beanFactory;
XmlParser xmlParser = defaultBeanFactory.getSpaceHandlerSupport(name);
if (xmlParser != null) {
GenericBeanDefinition genericBeanDefinition = xmlParser.parserElement(e2,parserContext);
if (genericBeanDefinition!=null) {
defaultBeanFactory.addCount(registerWare.registBention(defaultBeanFactory, genericBeanDefinition));
}
} else {
}
}
// Extract the tag name when the wrapped node is a dom4j Element.
private <T> String getElementName(Node<T> e2) {
String name = null;
if (e2.getNode() instanceof Element) {
Element e = (Element) e2.getNode();
name = e.getName();
}
return name;
}
/** Factory hook: create the concrete BeanDefinition implementation. */
public abstract BeanDefinition CrateBeanDefinitionObject();
/**
 * Return all element nodes found under the document root of the resource.
 *
 * @param re the XML resource to read
 * @return the root's child element nodes
 */
@SuppressWarnings("rawtypes")
public abstract List<Node> getListElement(Resource re);
/** Load every root-level element of the resource as a bean definition. */
@SuppressWarnings("unchecked")
public void loadBeanDefinition(Resource re) {
initParseContext();
List<Node> listElement = this.getListElement(re);
/**
 * Original note: "validate" — no validation currently happens here.
 */
for (Node no : listElement) {
loadBeanDefinition(no);
}
}
// Push the factory and register ware into the shared parser context.
public void initParseContext() {
parserContext.setBeanFactory(this.beanFactory);
parserContext.setRegisterWare(this.registerWare);
}
public RegisterWare getRegisterWare() {
return registerWare;
}
public void setRegisterWare(RegisterWare registerWare) {
this.registerWare = registerWare;
}
public Reflex getReflex() {
return reflex;
}
public void setReflex(Reflex reflex) {
this.reflex = reflex;
}
}
<file_sep>/spring/src/main/java/com/liu/mvc/support/RequestToViewNameTranslator.java
package com.liu.mvc.support;
/**
 * Marker interface for translating a request into a default view name
 * (no methods declared yet).
 */
public interface RequestToViewNameTranslator {
}
<file_sep>/spring/src/main/java/com/liu/spring/context/ClasspathXmlApplicationContext.java
package com.liu.spring.context;
/**
 * Stand-alone application context loading bean XML from the classpath.
 */
public class ClasspathXmlApplicationContext extends AbstractApplicationContext {
public ClasspathXmlApplicationContext() {
}
public ClasspathXmlApplicationContext(String... locations) {
this(true, locations);
}
/**
 * NOTE(review): unlike GenericWebApplicationContext, the isfalsh flag is
 * ignored here — the constructor never refreshes, so callers must invoke
 * startContext() themselves. Confirm whether that asymmetry is intended.
 */
public ClasspathXmlApplicationContext(boolean isfalsh, String... locations) {
super.setConfigLocations(locations);
}
public ClasspathXmlApplicationContext(String locations) {
this(true, new String[] { locations });
}
/** Apply the default location and refresh ("refash") the context. */
public void startContext() {
setConfigLocations(getLocation());
refash();
}
// Default classpath configuration file.
private String[] getLocation() {
return new String[] { "spring-bean.xml" };
}
}
<file_sep>/spring/src/main/java/com/liu/mvc/resolver/LocaleResolver.java
package com.liu.mvc.resolver;
/**
 * Marker interface for locale-resolution strategies (no methods declared yet).
 */
public interface LocaleResolver {
}
<file_sep>/spring/src/main/java/com/liu/mvc/mapping/AbstractHandlerMethodMapping.java
package com.liu.mvc.mapping;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import com.liu.mvc.ancocation.ResponseBody;
import com.liu.mvc.beans.HandlerMethod;
/**
 * Handler-mapping base class keeping two registries of HandlerMethods:
 * a URL lookup in registration order, and a name lookup that is safe for
 * concurrent access at dispatch time.
 */
public abstract class AbstractHandlerMethodMapping extends AbstractHandlerMapping{

    /** URL -> handler method, in registration order. */
    private final Map<String, HandlerMethod> mappingLookup = new LinkedHashMap<String, HandlerMethod>();

    /** Mapping name -> handler methods; concurrent for dispatch-time reads. */
    private final Map<String, List<HandlerMethod>> nameLookup = new ConcurrentHashMap<>();

    /** Look up the handler method registered under the given relative URL. */
    public HandlerMethod getHandlerMethodRelativeUrl(String url) {
        return this.mappingLookup.get(url);
    }

    /** Register a handler method under its URL. */
    public void registHandlerMethod(String url, HandlerMethod handlerMethod) {
        this.mappingLookup.put(url, handlerMethod);
    }

    /** Look up all handler methods registered under a mapping name. */
    public List<HandlerMethod> getHandlerMethodListByName(String name) {
        return this.nameLookup.get(name);
    }

    /** Register the handler methods that belong to a mapping name. */
    public void registHandlerMethodList(String name, List<HandlerMethod> HandlerMethods) {
        this.nameLookup.put(name, HandlerMethods);
    }

    /** @return the full URL -> handler method registry. */
    public Map<String, HandlerMethod> getMappingLookup() {
        return this.mappingLookup;
    }
}
<file_sep>/spring/src/main/java/com/liu/mvc/support/ServletWebXmlReade.java
package com.liu.mvc.support;
import javax.servlet.ServletContext;
import com.liu.mvc.beans.ServletProperty;
import com.liu.mvc.servlet.DispatcherServlet;
import com.liu.mvc.servlet.HttpServletBean;
/**
 * ServletReader that obtains the servlet configuration through the
 * configured BeanWrapper and installs it on the dispatcher servlet.
 */
public class ServletWebXmlReade implements ServletReader {

    private BeanWrapper beanWrapper;

    public ServletWebXmlReade(BeanWrapper beanWrapper) {
        this.beanWrapper = beanWrapper;
    }

    @Override
    public void initServletPrConfigure(HttpServletBean httpServletBean, ServletContext context) {
        // In this framework the servlet bean handed in is always the dispatcher.
        DispatcherServlet dispatcher = (DispatcherServlet) httpServletBean;
        ServletProperty property = this.beanWrapper.loadServletProperty(context);
        dispatcher.setServletProperty(property);
    }
}
<file_sep>/spring/src/main/java/com/liu/mvc/beans/ServletProperty.java
package com.liu.mvc.beans;
import java.util.HashMap;
import java.util.Map;
/**
 * Value object for a web.xml {@code <servlet>} tag: name, class,
 * load-on-startup order, init-params and servlet-mappings.
 * (Original header comment was mojibake for "servlet tag attributes".)
 */
public class ServletProperty extends ServletConfigPropertyValues{
private String servletName;
private String servletClass;
private int loadOnStartup;
/**
 * Init parameters: param-name -> param-value.
 */
private Map<String,String> param = new HashMap<String,String>();
/**
 * servlet-mapping
 * key:servlet-name
 * value:url-pattern
 */
private Map<String,String> mapping = new HashMap<String,String>();
public String getServletName() {
return servletName;
}
public void setServletName(String servletName) {
this.servletName = servletName;
}
public String getServletClass() {
return servletClass;
}
public void setServletClass(String servletClass) {
this.servletClass = servletClass;
}
public int getLoadOnStartup() {
return loadOnStartup;
}
public void setLoadOnStartup(int loadOnStartup) {
this.loadOnStartup = loadOnStartup;
}
public Map<String, String> getParam() {
return param;
}
public void setParam(Map<String, String> param) {
this.param = param;
}
public Map<String, String> getMapping() {
return mapping;
}
public void setMapping(Map<String, String> mapping) {
this.mapping = mapping;
}
@Override
public String toString() {
return "ServletSource [servletName=" + servletName + ", servletClass=" + servletClass + ", loadOnStartup="
+ loadOnStartup + ", param=" + param + ", mapping=" + mapping + "]";
}
}
<file_sep>/spring/src/main/java/com/liu/mvc/beans/AnnotationConstant.java
package com.liu.mvc.beans;
import com.liu.spring.annocation.Controller;
import com.liu.spring.annocation.Service;
/**
 * Central constants for the stereotype annotations recognized by the
 * component scan.
 */
public class AnnotationConstant {
public static final Class<Service> SERVICE = Service.class;
// NOTE(review): breaks the UPPER_CASE constant convention, but callers
// (e.g. IXmlParser) reference it by this name, so it is kept as-is.
public static final Class<Controller> Controller = Controller.class;
}
<file_sep>/spring/src/main/java/com/liu/spring/register/SingleRegist.java
package com.liu.spring.register;
import com.liu.spring.factory.BeanFactory;
import com.liu.spring.model.GenericBeanDefinition;
/**
 * Contract for creating fully wired singleton beans out of registered
 * bean definitions.
 */
public interface SingleRegist {
/**
 * Create (or fetch) the bean with the given id, resolving its definition
 * from {@code beanFactory} and caching the instance in {@code realybeanFactory}.
 */
public Object getBean(String name,BeanFactory beanFactory,
BeanFactory realybeanFactory);
/** Populate {@code obj}'s properties from the given definition. */
public void assignment(Object obj,GenericBeanDefinition gebd);
/** Type-based variant of the string-id lookup above. */
public Object getBean(Class<?> name,BeanFactory beanFactory,
BeanFactory realybeanFactory);
}
<file_sep>/spring/src/main/java/com/liu/spring/parser/DefualtXmlReader.java
package com.liu.spring.parser;
import java.io.File;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.dom4j.Attribute;
import org.dom4j.Document;
import org.dom4j.DocumentException;
import org.dom4j.Element;
import org.dom4j.io.SAXReader;
import com.liu.spring.factory.BeanFactory;
import com.liu.spring.model.BeanDefinition;
import com.liu.spring.model.GenericBeanDefinition;
import com.liu.spring.model.MutablePropertyValues;
import com.liu.spring.model.Node;
import com.liu.spring.model.Resource;
import com.liu.spring.reflex.Reflex;
/**
 * dom4j-based XmlReader: reads the XML document root and exposes its child
 * elements as Nodes for the abstract loading pipeline.
 */
public class DefualtXmlReader extends AbstractXmlReader {
public DefualtXmlReader(BeanFactory beanFactory) {
super(beanFactory);
}
/**
 * Parse the XML file at the resource's location and return its root
 * element, or null when parsing fails (error printed to stderr).
 */
public Element getRootElement(Resource re) {
SAXReader reader = new SAXReader();
Element root = null;
try {
Document doc = reader.read(new File(re.getLocations()));
root = doc.getRootElement();
} catch (DocumentException e) {
e.printStackTrace();
}
return root;
}
@Override
public BeanDefinition CrateBeanDefinitionObject() {
return new GenericBeanDefinition();
}
/** Wrap every child element of the root in a Node for the loader. */
@Override
public List<Node> getListElement(Resource re) {
Element rootElement = this.getRootElement(re);
Iterator<Node<Element>> iterator = rootElement.elementIterator();
List<Node> lie = new ArrayList<Node>();
while (iterator.hasNext()) {
lie.add(new Node(iterator.next()));
}
return lie;
}
/**
 * Intended to check element ids for duplicates (original comment was
 * mojibake for "check whether ids repeat"); currently a no-op stub.
 */
public <T> void checkElement(Reflex reflex, List<Node> lie) {
for (Node el : lie) {
Element e = (Element) el;
}
}
}
<file_sep>/spring/src/main/java/com/liu/spring/model/ConstructorArgumentValues.java
package com.liu.spring.model;
import java.util.ArrayList;
import java.util.List;
/**
 * Constructor-argument metadata for a bean definition: argument names and
 * their raw values, kept as parallel lists in declaration order.
 */
public class ConstructorArgumentValues implements PropertyValues{

    private List<String> constructor;

    private List<String> values;

    /** Start with empty name and value lists. */
    public ConstructorArgumentValues() {
        constructor = new ArrayList<String>();
        values = new ArrayList<String>();
    }

    /** Record one constructor-argument name. */
    public void addConstructor(String name) {
        constructor.add(name);
    }

    /** Record one constructor-argument value. */
    public void addValue(String value) {
        values.add(value);
    }

    /** @return the argument names, in declaration order. */
    public List<String> getConstructor() {
        return constructor;
    }

    /** @return the argument values, in declaration order. */
    public List<String> getValues() {
        return values;
    }
}
<file_sep>/spring/src/main/java/com/liu/spring/parser/IXmlParser.java
package com.liu.spring.parser;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import com.liu.mvc.beans.AnnotationConstant;
import com.liu.spring.annocation.Autowired;
import com.liu.spring.annocation.Service;
import com.liu.spring.factory.BeanFactory;
import com.liu.spring.factory.DefaultBeanFactory;
import com.liu.spring.model.GenericBeanDefinition;
import com.liu.spring.model.MutablePropertyValues;
import com.liu.spring.model.Node;
import com.liu.spring.util.ClassUtil;
import com.liu.spring.util.StringUtil;
/**
 * XmlParser that simulates component scanning: finds every @Service and
 * @Controller class under the base package and registers a bean definition
 * for each, wiring @Autowired fields by name.
 */
public class IXmlParser implements XmlParser {

    /**
     * Scan the base package, build one definition per annotated class and
     * register them all through the parser context.
     * NOTE(review): the base package "com.liu" is hard-coded.
     * @return always {@code null} — registration is a side effect
     */
    public <T> GenericBeanDefinition parserElement(Node<T> e2, ParserContext parserContext) {
        String pageName = "com.liu";
        List<Class<?>> annotationClass = this.getlConstainAnClass(pageName);
        Set<GenericBeanDefinition> generateBeanDefinition = this.generateBeanDefinition(annotationClass);
        for (GenericBeanDefinition si : generateBeanDefinition) {
            parserContext.getRegisterWare().registBention(parserContext.getBeanFactory(), si);
        }
        return null;
    }

    /**
     * Find all classes in the package annotated with @Service or @Controller.
     * @param pageName base package to scan
     * @return matching classes (possibly empty)
     */
    private List<Class<?>> getlConstainAnClass(String pageName) {
        List<Class<?>> annotationClass = new ArrayList<>();
        annotationClass.addAll(ClassUtil.annotationClass(pageName, AnnotationConstant.SERVICE));
        annotationClass.addAll(ClassUtil.annotationClass(pageName, AnnotationConstant.Controller));
        return annotationClass;
    }

    /** Build one bean definition per annotated class. */
    private Set<GenericBeanDefinition> generateBeanDefinition(List<Class<?>> annotationClass) {
        Set<GenericBeanDefinition> ges = new HashSet<GenericBeanDefinition>();
        for (Class<?> cs : annotationClass) {
            GenericBeanDefinition generateBeanDefinition = this.generateBeanDefinition(cs);
            ges.add(generateBeanDefinition);
        }
        return ges;
    }

    /**
     * Build the definition for one annotated class. Each @Autowired field
     * is registered as a by-name reference: by convention the field name
     * doubles as the referenced bean id.
     * Fix: dropped an unused local ({@code cs}) from the original.
     */
    private GenericBeanDefinition generateBeanDefinition(Class<?> annotationClas) {
        GenericBeanDefinition ge = new GenericBeanDefinition();
        // Names of the fields carrying @Autowired.
        List<String> allFieldName = ClassUtil.appointAnnotationFieldName(annotationClas, Autowired.class);
        MutablePropertyValues mu = new MutablePropertyValues();
        // Field name == id of the bean it references.
        for (String si : allFieldName) {
            mu.getValueMap().put(si, si);
        }
        mu.getRefPropertyName().addAll(allFieldName);
        mu.getPropertyName().addAll(allFieldName);
        ge.setBeanClass(annotationClas.getName());
        ge.setPropertyValues(mu);
        ge.setBeanId(StringUtil.getLowerCaseFirstOneBeanName(annotationClas.getName()));
        return ge;
    }
}
<file_sep>/spring/src/main/java/com/liu/spring/parser/NamespaceHandlerResolver.java
package com.liu.spring.parser;
import com.liu.spring.factory.BeanFactory;
/**
 * Loads the namespace parsers into a bean factory.
 */
public interface NamespaceHandlerResolver {
/** Register all known XML parsers with the given factory. */
public void loadParser(BeanFactory beanFactory);
}
<file_sep>/spring/src/main/java/com/liu/spring/model/Resource.java
package com.liu.spring.model;
/**
 * Descriptor for a configuration resource: its file name and the location
 * path it is read from.
 */
public class Resource {
private String fileName;
private String locations;
public String getFileName() {
return fileName;
}
public void setFileName(String fileName) {
this.fileName = fileName;
}
public String getLocations() {
return locations;
}
public void setLocations(String locations) {
this.locations = locations;
}
}
<file_sep>/spring/src/main/java/com/liu/mvc/beans/ServletConfigPropertyValues.java
package com.liu.mvc.beans;
import com.liu.spring.model.PropertyValues;
/**
 * Marker PropertyValues implementation for servlet configuration values.
 */
public class ServletConfigPropertyValues implements PropertyValues {
}
<file_sep>/spring/src/main/java/com/liu/mvc/context/WebApplicationContext.java
package com.liu.mvc.context;
import com.liu.spring.context.ApplicationContext;
/**
 * Web-layer application context contract: adds servlet-context awareness
 * and a parent-context link to the core ApplicationContext.
 */
public interface WebApplicationContext extends ApplicationContext{

    /**
     * Attribute key under which the web application context is exposed.
     * Fix: the original concatenated the Class object itself
     * ({@code WebApplicationContext.class + ".mvc"}), yielding
     * "interface com.liu….WebApplicationContext.mvc"; the class NAME was
     * clearly intended.
     */
    public static final String SERVLET_CONTEXT_NAME = WebApplicationContext.class.getName() + ".mvc";

    /** Link this web context to its parent (root) context. */
    void setParentContext(ApplicationContext applicationContext);

    /** @return the parent (root) context, or null when none was set. */
    ApplicationContext getParentApplicationContext();
}
<file_sep>/spring/src/main/java/com/liu/model/Teacher.java
package com.liu.model;
import java.lang.reflect.Field;
/**
 * Simple bean pairing a teacher name with a Studens instance; the main
 * method is a reflection smoke test.
 */
public class Teacher {

    private String name;

    private Studens st;

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public Studens getSt() {
        return st;
    }

    public void setSt(Studens st) {
        this.st = st;
    }

    @Override
    public String toString() {
        return "Teacher [name=" + name + ", st=" + st + "]";
    }

    /** Reflection smoke test: make Studens.name accessible. */
    public static void main(String[] args) {
        try {
            // Fix: use the parameterized Class<?> instead of a raw type.
            Class<?> cls = Class.forName("com.liu.model.Studens");
            // Fix: getField only sees PUBLIC fields, so it threw
            // NoSuchFieldException for the (presumably private — confirm in
            // Studens) bean field; getDeclaredField finds it regardless of
            // visibility.
            Field field = cls.getDeclaredField("name");
            field.setAccessible(true);
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        } catch (NoSuchFieldException e) {
            e.printStackTrace();
        } catch (SecurityException e) {
            e.printStackTrace();
        }
    }
}
<file_sep>/spring/src/main/java/com/liu/mvc/resolver/AbstractCachingViewResolver.java
package com.liu.mvc.resolver;
/**
 * ViewResolver base class reserved for view caching (not yet implemented).
 */
public abstract class AbstractCachingViewResolver implements ViewResolver{
}
<file_sep>/spring/src/main/java/com/liu/mvc/servlet/HttpServletBean.java
package com.liu.mvc.servlet;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import com.liu.mvc.beans.ServletProperty;
import com.liu.mvc.support.ServletBeanWrapper;
import com.liu.mvc.support.ServletReader;
import com.liu.mvc.support.ServletWebXmlReade;
/**
 * HttpServlet base class that, on init, reads the servlet configuration
 * (normally declared in web.xml) into the servlet instance before handing
 * off to the subclass hook.
 */
public abstract class HttpServletBean extends HttpServlet{
@Override
public void init() throws ServletException {
/**
 * Build the reader that wraps the servlet configuration.
 */
ServletReader reader = new ServletWebXmlReade(new ServletBeanWrapper());
/**
 * Copy the servlet tag content into this (dispatcher) servlet.
 */
reader.initServletPrConfigure(this,super.getServletContext());
initServletBean();
}
/** Subclass hook invoked once the servlet configuration is in place. */
public abstract void initServletBean();
}
<file_sep>/spring/src/main/java/com/liu/mvc/resolver/HttpRequestParameterResolvet.java
package com.liu.mvc.resolver;
import java.lang.reflect.Type;
import java.util.List;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import com.liu.mvc.beans.HandlerMethod;
import com.liu.mvc.beans.ResponseType;
import com.liu.mvc.beans.RquestCheckInfo;
import com.liu.spring.reflex.Reflex;
import com.liu.spring.reflex.ReflexImpl;
import com.liu.spring.util.GetListFactory;
/**
 * ParameterResolvet that pulls handler-method arguments out of an
 * HttpServletRequest's parameters, converting primitives via Reflex.
 */
public class HttpRequestParameterResolvet implements ParameterResolvet {
private Reflex reflex;
public HttpRequestParameterResolvet() {
this.reflex = new ReflexImpl();
}
/**
 * Extract and convert the request's parameter values for the given
 * handler method.
 * <p>Supported parameter types are primitives/wrappers (converted with
 * Reflex.typeChange) and String; any other type is silently skipped
 * (empty else branch below).
 * NOTE(review): the response type is hard-coded to JSON and
 * parameterCondition is always true — confirm whether real validation
 * was meant to happen here.
 * @param request the incoming HTTP request
 * @param method the matched handler method
 * @return a populated RquestCheckInfo (never null)
 */
@Override
public RquestCheckInfo checkRequst(HttpServletRequest request, HandlerMethod method) {
RquestCheckInfo check = new RquestCheckInfo();
List<Object> parValue = GetListFactory.buildArrayList();
Map<String, Class<?>> parameterNameAndType = method.getParameterNameAndType();
/**
 * Declared return type of the handler method.
 */
Type returnType = method.getMethod().getAnnotatedReturnType().getType();
for (String parName:method.getParameterName()) {
Class<?> type = parameterNameAndType.get(parName);
/**
 * Primitive / wrapper parameter: convert the raw request string.
 */
if (reflex.isBaseType(type)) {
String parameterValue = request.getParameter(parName);
parValue.add( reflex.typeChange(type, parameterValue));
check.setParameterNamAndValue(parName, reflex.typeChange(type, parameterValue));
}else if (type==String.class) {
parValue.add( request.getParameter(parName));
check.setParameterNamAndValue(parName, request.getParameter(parName));
} else {
}
}
check.setExceptionMessage(null);
check.setMethodReturnType(returnType);
check.setResponseType(ResponseType.JSON);
check.setParameterValues(parValue);
check.setParameterCondition(true);
return check ;
}
}
<file_sep>/spring/src/main/java/com/liu/spring/model/PropertyValues.java
package com.liu.spring.model;
/**
 * Marker interface for a bean definition's property values.
 */
public interface PropertyValues {
}
<file_sep>/spring/src/main/java/com/liu/spring/register/SingleRegistImpl.java
package com.liu.spring.register;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import com.liu.spring.context.ApplicationContext;
import com.liu.spring.factory.BeanFactory;
import com.liu.spring.factory.DefaultBeanFactory;
import com.liu.spring.factory.RealyAbstractBeanFactory;
import com.liu.spring.model.BeanDefinition;
import com.liu.spring.model.GenericBeanDefinition;
import com.liu.spring.reflex.Reflex;
import com.liu.spring.reflex.ReflexImpl;
/**
* 依赖注入实现并创建真正 的bean实体
*
* @author Administrator
*
*/
public class SingleRegistImpl implements SingleRegist {
private Reflex reflex;
public SingleRegistImpl(ApplicationContext applicationContext) {
if (applicationContext == null) {
reflex = new ReflexImpl();
} else {
reflex = new ReflexImpl();
}
}
public Object getBean(String name, BeanFactory beanFactory, BeanFactory realybeanFactory) {
if (beanFactory instanceof DefaultBeanFactory && realybeanFactory instanceof RealyAbstractBeanFactory) {
DefaultBeanFactory deful = (DefaultBeanFactory) beanFactory;
RealyAbstractBeanFactory real = (RealyAbstractBeanFactory) realybeanFactory;
if (getBean(real, name) != null) {
return getBean(real, name);
}
Map<String, BeanDefinition> beanMap = deful.getBeanMap();
GenericBeanDefinition gebd = (GenericBeanDefinition) deful.getByName(name);
String className = (String) gebd.getBeanClass();
Object object = null;
/**
* 是抽象的不创建该实体
*/
if (gebd.isAbstractFlag()) {
} else {
/**
* 创建实体对象
*/
/**
* 无参数构造
*/
if (gebd.getConstructorArgumentValues() == null) {
object = reflex.newInstance(className);
} else {
}
/**
* 给porporty赋值
*/
Set<Entry<String, Object>> entrySet = gebd.getPropertyValues().getValueMap().entrySet();
for (Entry<String, Object> si : entrySet) {
if (gebd.getPropertyValues().isContainRefName(si.getKey())) {
/**
* 从容器中取值
*/
Object bean = getBean((String) si.getValue(), beanFactory, realybeanFactory);
this.assignment(object, bean, si.getKey());
} else {
this.assignment(object, si.getValue(), si.getKey());
}
}
/**
* 父类处理
*/
if (gebd.getParentId() != null || "".equals(gebd.getParentId())) {
String parId = gebd.getParentId();
this.getBean(parId, beanFactory, realybeanFactory);
/**
* 注册parent
*/
GenericBeanDefinition gebdParent = (GenericBeanDefinition) deful.getByName(parId );
/**
* 给porporty赋值
*/
Set<Entry<String, Object>> entrySet2 =gebdParent.getPropertyValues().getValueMap().entrySet();
for (Entry<String, Object> si : entrySet2) {
if (gebdParent.getPropertyValues().isContainRefName(si.getKey())) {
/**
* 从容器中取值
*/
Object bean = getBean((String) si.getValue(), beanFactory, realybeanFactory);
reflex.modifyFiledsPower(si.getKey(), (String)gebdParent.getBeanClass(),object,bean);
} else {
reflex.modifyFiledsPower(si.getKey(), (String)gebdParent.getBeanClass(),object,si.getValue());
}
}
}
}
/**
* 注册实体
*/
real.addObj(name, object);
return object;
}
return null;
}
/**
* 先从缓存中获取
*
* @param real
* @return
*/
private Object getBean(RealyAbstractBeanFactory real, String id) {
return real.getRentyMap().get(id);
}
/**
* 给对象赋值
*
* @param obj
* @param value
* @param fildname
*/
public void assignment(Object obj, Object value, String fildname) {
reflex.assignment(obj, value, fildname);
}
public void assignment(Object obj, GenericBeanDefinition gebd) {
// TODO Auto-generated method stub
}
public Object getBean(Class<?> cla, BeanFactory beanFactory, BeanFactory realybeanFactory) {
if (beanFactory instanceof DefaultBeanFactory && realybeanFactory instanceof RealyAbstractBeanFactory) {
DefaultBeanFactory deful = (DefaultBeanFactory) beanFactory;
RealyAbstractBeanFactory real = (RealyAbstractBeanFactory) realybeanFactory;
Map<Class<?>, String[]> singletonBeanNamesByType = deful.getSingletonBeanNamesByType();
String[] beanName = singletonBeanNamesByType.get(cla);
if (singletonBeanNamesByType.get(cla)==null) {
return null;
}
return getBean(beanName[0], beanFactory, realybeanFactory);
}
return null;
}
}
| ab8429df9ea2a18e4334484564641b52926ede97 | [
"Java"
] | 41 | Java | liuq123/repos3 | 3134df10dbe8d7aa2ebd96560251d81ddb7ffa3d | 1a3ab6521493d1489419542ca17465a5d6f6124c |
refs/heads/master | <repo_name>nguahoangxq/project<file_sep>/scr/forms.js
////////////////////////////////////////////////////////////
String.prototype.escape=function(){return(this.replace(/"/g,'\\"'))};
const safeJSON=function(keys,vals,i=0,a=[]){if(keys.length!=vals.length)throw(null);for(;i<keys.length;i++)a.push(`"${keys[i]}":"${vals[i].toString().escape()}"`);return('{'+a.join(',')+'}');};
////////////////////////////////////////////////////////////
const trim=function(s){if(s)return(s.replace(/^\s+|\s+$/g,BLANK));return(BLANK);};
const errCode=function(e){if(e!=null){e=e.toString();if(e.indexOf(']')>0)return(hi_alert_data);if(e.indexOf(OxOO)>0)return(hi_prompt_err);if(e.indexOf(RECEIPT)>0)return(hi_prompt_rct);e=(e.substring(e.lastIndexOf(HASH)));if(e){return(e);}else{return(0);}}return(null);};
const funcName=function(){return(funcName.caller.name);};
////////////////////////////////////////////////////////////
const hvalid=function(h){return(hashRegex.test(h));};
const avalid=function(a){return(web3.utils.isAddress(a));};
const nvalid=function(n,b){n=s2n(n);b=s2n(b);return(n>0&&n<=b);};
////////////////////////////////////////////////////////////
const nsmall=function(n,b){return(nvalid(n,b));};
const nmidle=function(n,b){n=s2n(n);b=s2n(b);return(n>0&&n<b);};
const nlarge=function(n,s){n=s2n(n);s=s2n(s);return(n>s&&s>=0);};
////////////////////////////////////////////////////////////
const wrd=function(o,v){return(Object.keys(o).find(key=>o[key]==v));};
const n2s=function(n,d){if(!n)return(ZERO);if(!d)d=0;n=n.toString().split(DOT);n[0]=n[0].replace(/\B(?=(\d{3})+(?!\d))/g,COMMA);n[1]=n[1]?DOT+n[1].substr(0,d):EMPTY;return(n[0]+n[1]);};
const s2n=function(s){s=parseFloat(s.toString().replace(/[^\d\.\-]/g,EMPTY));if(isNaN(s))return(0);return(s);};
const w2s=function(n,dec=5,len=22){n=n2s(fromWei(n),dec);return(n.length<len?n:ASK);};
const s2w=function(s){return(toWei(s2n(s).toString()));};
const toHex=function(s){return(web3.utils.toHex(s));};
const toHash=function(s){return(web3.utils.keccak256(s,{encoding:HEX}));};
const solHash=function(...args){return(web3.utils.soliditySha3(...args));};
const jtoHash=function(j){return(toHash(JSON.stringify(j)));};
const toWei=function(n){return(web3.utils.toWei(n.toString(),ETHER));};
const gtoWei=function(n){return(web3.utils.toWei(n.toString(),GWEI));};
const fromWei=function(w){return(web3.utils.fromWei(w.toString(),ETHER));};
const fromGwei=function(g){return(fromWei(gtoWei(g)));};
const gfromWei=function(w){return(web3.utils.fromWei(w.toString(),GWEI));};
const fromHex=function(h){return(web3.utils.hexToNumberString(h));};
const fromNum=function(n){return(web3.utils.numberToHex(n));};
const fromWHex=function(h){return(fromWei(fromHex(h)));};
const s2wHex=function(s){return(toHex(s2w(s)));};
const g2wHex=function(g){return(toHex(gtoWei(g)));};
const n2Hex=function(n,dec=18){return(toHex(toDec(n,dec)));};
const toDec=function(n,dec=18,a,s,u){n=n.toString();dec=parseInt(dec);if(isNaN(n)||isNaN(dec)||n<=0)return(ZERO);a=n.split(DOT);s=a[0];u=a[1];if(s||(s=ZERO),u||(u=ZERO),u.length>dec)return(ZERO);for(;u.length<dec;)u+=ZERO;return(big(s).mul(big(10).pow(big(dec))).add(big(u)).toString());};
////////////////////////////////////////////////////////////
const toSat=function(n){return(_Decimal(n).absoluteValue().mul(_Decimal(100000000)).toInteger().toString());};
const fromSat=function(s){return(_Decimal(s).absoluteValue().toInteger().div(_Decimal(100000000)).toString());};
const satChange=function(satBal,satAmt,satFee){satBal=big(satBal);satAmt=big(satAmt);satFee=big(satFee);if(satBal.lt(satAmt.add(satFee)))throw(ERROR);return(satBal.sub(satAmt).sub(satFee).toString());};
const bitChange=function(bal,amt,fee){return(satChange(toSat(bal),toSat(amt),toSat(fee)));};
////////////////////////////////////////////////////////////
const setInput=function(obj){return(JSON.stringify({obj:obj}));};
const getInput=function(tx,cbf=console.log){if(!hvalid(tx))return(cbf(ERROR,null));web3.eth.getTransaction(tx,function(err,result){if(err||!result||!result.input)return(cbf(err,null));cbf(null,hexObj(result.input).obj);});};
////////////////////////////////////////////////////////////
const toDate=function(y,m,d){return(parseInt(_Date(Date.UTC(y,m-1,d,0,0,0,0)).getTime()/1000,10));};
const nowDate=function(){return(parseInt(_Date(0).getTime()/1000,10));};
const fromDate=function(n){return(_Date(n*1000).toString());};
////////////////////////////////////////////////////////////
const hiRegex=_Regex('^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[!@#\$%\^&\*])(?=.{8,})');
const loRegex=_Regex('^(((?=.*[a-z])(?=.*[A-Z]))|((?=.*[a-z])(?=.*[0-9]))|((?=.*[A-Z])(?=.*[0-9])))(?=.{6,})');
const hashRegex=_Regex('^0x([A-Fa-f0-9]{64})$');
////////////////////////////////////////////////////////////<file_sep>/scr/base-raw.js
////////////////////////////////////////////////////////////
const BSCSCAN={
api:{
mainnet:EXCHAINS.binance.api},
push:{
mainnet:EXCHAINS.binance.push}};
////////////////////////////////////////////////////////////
const ETHERSCAN={
api:{
rinkeby:EXCHAINS.rinkeby.api,
ropsten:EXCHAINS.ropsten.api,
mainnet:EXCHAINS.mainnet.api},
push:{
rinkeby:EXCHAINS.rinkeby.push,
ropsten:EXCHAINS.ropsten.push,
mainnet:EXCHAINS.mainnet.push}};
////////////////////////////////////////////////////////////
const TXDECODERS=[
'https://www.ethereumdecoder.com/',
'https://live.blockcypher.com/eth/decodetx/'];
////////////////////////////////////////////////////////////
const PROXIES=[
{/*https://etherscan.io/apis#proxy*/
getTransactionCount:function(addr,ncid=MAINNET){return(`${EXCHAINS[ncid].api}module=proxy&action=eth_getTransactionCount&address=${addr}&apikey=${EXCHAINS[ncid].token?EXCHAINS[ncid].token:BLANK}`)},
getContractDecimals:function(addr,ncid=MAINNET){return(`${EXCHAINS[ncid].api}module=proxy&action=eth_call&to=${addr}&data=0x313ce567&tag=latest&apikey=${EXCHAINS[ncid].token?EXCHAINS[ncid].token:BLANK}`)},
getTokenTotalSupply:function(addr,ncid=MAINNET){return(`${EXCHAINS[ncid].api}module=proxy&action=eth_call&to=${addr}&data=0x18160ddd&tag=latest&apikey=${EXCHAINS[ncid].token?EXCHAINS[ncid].token:BLANK}`)},
getUserTokenBalance:function(addr,acc,ncid=MAINNET){return(`${EXCHAINS[ncid].api}module=account&action=tokenbalance&contractaddress=${addr}&address=${acc}&tag=latest&apikey=${EXCHAINS[ncid].token?EXCHAINS[ncid].token:BLANK}`)},
getUserEtherBalance:function(acc,ncid=MAINNET){return(`${EXCHAINS[ncid].api}module=account&action=balance&address=${acc}&tag=latest&apikey=${EXCHAINS[ncid].token?EXCHAINS[ncid].token:BLANK}`)},
sendToSmartContract:function(addr,data,ncid=MAINNET){return(`${EXCHAINS[ncid].api}module=proxy&action=eth_call&to=${addr}&data=${data}&tag=latest&apikey=${EXCHAINS[ncid].token?EXCHAINS[ncid].token:BLANK}`)},
sendRawTransaction:function(hex,ncid=MAINNET){return(`${EXCHAINS[ncid].api}module=proxy&action=eth_sendRawTransaction&hex=${hex}&apikey=${EXCHAINS[ncid].token?EXCHAINS[ncid].token:BLANK}`)},
getGasPrice:function(ncid=MAINNET){return(`${EXCHAINS[ncid].api}module=proxy&action=eth_gasPrice&apikey=${EXCHAINS[ncid].token?EXCHAINS[ncid].token:BLANK}`)},
setApiKey:function(key,ncid=MAINNET){EXCHAINS[ncid].token=key}}];
////////////////////////////////////////////////////////////
const BROXIES=[
{/*https://www.blockcypher.com/dev/bitcoin/#introduction*/
getTransactionList:function(addr,ncid=BITCOIN){return(`${BXCHAINS[ncid].api}addrs/${addr}?token=${BXCHAINS[ncid].token?BXCHAINS[ncid].token:BLANK}`)},
sendRawTransaction:function(hex,ncid=BITCOIN){return(`curl\u0020-d\u0020"{\\"tx\\":\\"${hex}\\"}"\u0020${BXCHAINS[ncid].api}txs/push?token=${BXCHAINS[ncid].token?BXCHAINS[ncid].token:BLANK}`)},
setApiKey:function(key,ncid=BITCOIN){BXCHAINS[ncid].token=key}}];
//////////////////////////////////////////////////////////// | bf2d61cd26f5c96641fafe31cdd971c5cc0c5803 | [
"JavaScript"
] | 2 | JavaScript | nguahoangxq/project | fcfc4888d90a5a379e0d90f7698af5e9c5aa83a1 | dbb378fa12a7f7ccc37bdb71543c8ed4ad43506e |
refs/heads/master | <file_sep>imparse
=======
Parser generator that can be used to quickly and succinctly define a parser definition, and to deploy an automatically-generated implementations thereof in multiple languages and on multiple platforms.
Directory Structure
-------------------
The directory structure is as follows:
* `/Python/`
The Python version of the library.
- `/Python/working/`
The working version of the code, spread across several modules.
- `/Python/release/`
The single-module release version, automatically generated.
<file_sep>## Release script for HackageDB.
echo Building Haskell package.
cd Haskell
release.sh
cd ..
echo Done.
echo
echo Building JavaScript library from Imparse source.
informl -js Informl/Imparse.iml
mv Informl/Imparse.js JavaScript/Imparse.js
echo Done.
echo
echo Release script for imparse finished.
echo
##eof
| 08ea09b5a5bab2a3b476e19ad4895e51716f750b | [
"Markdown",
"Shell"
] | 2 | Markdown | wevial/imparse | c3736774a33ab80240160e7f57ef7e2876b9b121 | 75b4757008c728f40cb14a99da3594ec437c7f55 |
refs/heads/master | <file_sep>import Vue from 'vue'
import Vuex from 'vuex'
Vue.use(Vuex)
/**
* vuex数据流
* vue组件(发出dispath请求) ——> actions(commit提交)——> mutations(改变state) ——> render更新组件状态
*/
const store = new Vuex.Store({
/**
* state:
* 这里面主要定义了需要的属性,只有在这里定义的属性,在使用时才能获取的到
* 存储数据,存储状态;在根实例中注册了store 后,用 this.$store.state 来访问;
* 对应vue里面的data;
* 存放数据方式为响应式,vue组件从store中读取数据,如数据发生变化,组件也会对应的更新。
* mapState把全局的 state , mapGetters getters 映射到当前组件的 computed 计算属性中
*/
state: {
count: 0,
age: 18,
name:'wangjuan',
school:'ccnu',
flag:false,
msg: true,
users: [],
stus: [
{
name: '张三21',
age: 18,
sex: '女'
}, {
name: '张三42',
age: 14,
sex: '女'
}, {
name: '张三42',
age: 54,
sex: '女'
}, {
name: '张三2',
age: 34,
sex: '女'
}, {
name: '张三4',
age: 13,
sex: '男'
}, {
name: '张三52',
age: 53,
sex: '男'
}]
},
/**
* getters:
* 这里是将我们定义的数据进行输出:多组件之间复用
* getters 和 组件的 computed 类似,方便直接生成一些可以直接用的数据
* 当组装的数据要在多个页面使用时,就可以使用 getters 来做
*
* 可以认为是 store 的计算属性,它的返回值会根据它的依赖被缓存起来,
* 且只有当它的依赖值发生了改变才会被重新计算
*/
getters: {
boys (state) {
return state.stus.filter(stu => stu.sex === '男')
},
boysLength (state, getters) {
return getters.boys.length
},
ageStu (state) {
return state.stus.filter(stu => stu.age > 20)
}
},
/**
* actions:
* 组件中调用:dispatch、或者使用赋值函数 mapActions
* 这里主要是与后台进行交互,并获取数据,然后传给mutations里面进行操作
* 包含任意异步操作,通过提交 mutation 间接更变状态
* 当组件进行数据修改的时候我们需要调用dispatch来触发actions里面的方法。
* actions里面的每个方法中都会有一个commit方法,
* 当方法执行的时候会通过commit调用mutations里面的方法,来触发mutations里面的方法进行数据的修改
*/
actions: {
// getUser({ commit }){
// return new Promise((resolve,reject)=>{
// setTimeout(()=>{
// // console.log('setUsers')
// commit('setUsers')
// resolve();
// },1000)
// },()=>{
// reject();
// })
// }
setUsers(context){
return new Promise((resolve) => {
context.commit('setUsers');
resolve();
},(reject)=>{
reject();
})
}
},
/**
* mutations:
* 组件中调用:使用赋值函数 mapMutations
* 这里是对后台传来的数据进行操作,处理数据 修改state
* 更改 Vuex 的 store 中的状态的唯一方法是提交 mutation
* 把我们需要的数据赋值给在state里面定义那个数组里
* mutations里面的每个函数都会有一个state参数,
* 这样就可以在mutations里面进行state的数据修改,
* 当数据修改完毕后,会传导给页面。页面的数据也会发生改变
*
*/
mutations: {
changeMsg (state, payload) {
// 在这里改变state即可
state.msg = payload.msg
},
setUsers (state, users) {
// console.log("mutations:"+users)
state.users = users
},
increment (state) {
state.count ++;
// state.age = state.age + state.count;
}
},
/**
* 将 store 分割成模块,每个模块都具有state、mutation、action、getter、甚至是嵌套子模块
*/
modules:{
}
})
export default store<file_sep>##### 运行报错 Error: getaddrinfo ENOTFOUND localhost 时 是因为本地localhost没有绑定到127.0.0.1
### 在本地配置host即可
### 127.0.0.1 localhost<file_sep>/**
* 配置vue-router
*/
import Vue from 'vue'
import VueRouter from 'vue-router'
import Hello from '../components/hello.vue'
import Home from '../components/home.vue'
import Count from '../components/count.vue'
// 使用插件, 用use
Vue.use(VueRouter); // 调用一个这个方法
// 路由的数组
export default new VueRouter({
routes:[
{
path:'/',
name:'count',
component: (resolve) => require(['../components/count.vue'], resolve)
},
{
path:'/hello',
name:'hello',
component:Hello
},
{
path:'/home',
name:'home',
component:Home,
beforeEnter:(to,from,next)=>{
console.log('我进入了Home模板');
console.log(to);
console.log(from);
next();
},
beforeRouteLeave(to, from, next) {
console.log("准备离开路由模板");
next();
}
}
]
}) | 13883bd71a2402662672fd1be9da857adbeed8ff | [
"JavaScript",
"Markdown"
] | 3 | JavaScript | wangjuaneisr/webpack4-vuex | eefb707d64daf49ae136bcd197e0eec5b3161c6f | 9f85f753cdecf910c880dc4a4df15fbd36e984cb |
refs/heads/master | <file_sep>import { Component, OnInit } from '@angular/core';
import { AgmCoreModule } from '@agm/core';
import { MapService } from '../map.service';
import { Marker } from '../marker';
import { Promise } from 'q';
import { LogService } from '../shared/log.service';
import { HttpClient } from '@angular/common/http';
import * as _ from 'lodash';
import Map from 'ol/map';
import Tile from 'ol/layer/tile';
import XYZ from 'ol/source/xyz';
import View from 'ol/view';
import proj from 'ol/proj';
import Feature from 'ol/feature';
import Point from 'ol/geom/point';
import VectorLayer from 'ol/layer/vector';
import VectorSource from 'ol/source/vector';
import Icon from 'ol/style/icon';
import Style from 'ol/style/style';
import Circle from 'ol/style/circle'
import Fill from 'ol/style/fill';
@Component({
selector: 'app-map-view',
templateUrl: './map-view.component.html',
styleUrls: ['./map-view.component.css']
})
export class MapViewComponent implements OnInit {
title: string = 'Streetlights';
lat: number = 39.090265;
lng: number = -94.576062;
zoom: number = 6;
minZoom: number = 8;
mapDraggable: boolean = false;
streetlightMarkers: Marker[];
filteredStreetlightMarkers: Marker[];
nema: boolean = false;
wireless: boolean = false;
fixtureMfg: string;
filters = {};
constructor(private logger: LogService, private mapService: MapService, private http: HttpClient) {
this.streetlightMarkers = [];
this.filteredStreetlightMarkers = [];
this.http = http;
}
ngOnInit() {
var self = this;
this.http.get('https://my.api.mockaroo.com/streetlights.json?key=08931ac0').subscribe(function(data){
self.renderMap(data);
});
//this.getStreetlights();
//this.applyFilters(this.renderMap);
}
getStreetlights() {
this.mapService.getStreetlights().subscribe(function(data){
console.log(data);
});
}
renderMap(streetlightMarkers) {
var features = [];
var vectorSource = new VectorSource();
var vectorLayer = new VectorLayer({
source: vectorSource,
style: new Style({
image: new Circle({
radius: 8,
fill: new Fill({
color: 'yellow'
})
})
})
});
let map = new Map({
target: 'map',
layers: [
new Tile({
source: new XYZ({
url: 'http://basemaps.cartocdn.com/dark_all/{z}/{x}/{y}@2x.png'
})
}),
vectorLayer
],
view: new View({
center: proj.fromLonLat([-94.576062, 39.090265]),
zoom: 12
})
});
console.log(streetlightMarkers);
if(streetlightMarkers){
streetlightMarkers.map(function(item){
vectorSource.addFeature(
new Feature({
geometry: new Point(proj.fromLonLat([item.lon, item.lat]))
}));
})
}
}
private applyFilters(callback) {
console.log(this.filters);
this.filteredStreetlightMarkers = _.filter(this.streetlightMarkers, _.conforms(this.filters) );
callback(this.filteredStreetlightMarkers);
}
/// filter property by equality to rule
filterExact(property: string, rule: any) {
if (rule === '' || !rule) {
this.removeFilter(property);
this.applyFilters(this.renderMap);
} else {
this.filters[property] = val => val == rule
this.applyFilters(this.renderMap);
}
}
/// filter numbers greater than rule
filterGreaterThan(property: string, rule: number) {
this.filters[property] = val => val > rule;
this.applyFilters(this.renderMap);
}
/// filter properties that resolve to true
filterBoolean(property: string, rule: boolean) {
if (!rule) this.removeFilter(property)
else {
this.filters[property] = val => val;
this.applyFilters(this.renderMap);
}
}
/// removes filter
removeFilter(property: string) {
delete this.filters[property]
this[property] = null;
this.applyFilters(this.renderMap);
}
}
| 895f90a56205bed0fd175108b2747d73a90cd50c | [
"TypeScript"
] | 1 | TypeScript | zachflanders/streetlights-openlayers | 70bb65f3fc8a89987798b2bf7424529852907fdd | 82f566a191462c98bd646b671b5627ba31994d78 |
refs/heads/master | <file_sep>package com.hoanghiep.webtravel.repo;
import com.hoanghiep.webtravel.model.Role;
import org.springframework.data.repository.CrudRepository;
/**
* Created by hoanghiep on 6/18/17.
*/
public interface RoleRepository extends CrudRepository<Role, Integer> {
Role findByName(String name);
}
| 5289371018284b18fed0c2181fa7938d39acaaeb | [
"Java"
] | 1 | Java | TranHoangHiep/Project | 1f1b6dc6720c272e6798b819a442bf3335b66a27 | 61c764cfcfef8280273b8ac4c04f1533556537b2 |
refs/heads/master | <file_sep>mod lib;
use lib::*;
use std::time::{Duration, SystemTime};
fn main() {
let mut input = vec![[4, 1], [2, 3], [1, 2], [5, 3], [4, 4]];
let now = SystemTime::now();
for _ in 0..1000000 {
let mut to_sort = input.clone();
sort(&mut to_sort);
}
match now.elapsed() {
Ok(elapsed) => {
println!("CSorting: {}", elapsed.as_nanos());
}
Err(e) => {
// an error occurred!
println!("Error: {:?}", e);
}
}
let now = SystemTime::now();
for _ in 0..1000000 {
let mut to_sort = input.clone();
to_sort.sort_by(|p1, p2| {
if p1[0] < p2[0] {
std::cmp::Ordering::Less
} else if p1[0] == p2[0] {
if p1[1] < p2[1] {
std::cmp::Ordering::Less
} else if p1[1] == p2[1] {
std::cmp::Ordering::Equal
} else {
std::cmp::Ordering::Greater
}
} else {
std::cmp::Ordering::Greater
}
});
}
match now.elapsed() {
Ok(elapsed) => {
// it prints '2'
println!("Primitive Sorting: {}", elapsed.as_nanos());
}
Err(e) => {
// an error occurred!
println!("Error: {:?}", e);
}
}
// println!("{:?} {:?}", input, input2);
}
<file_sep>pub fn sort(pairs: &mut Vec<[i64; 2]>) {
let len = pairs.len();
let min = pairs.iter().min_by_key(|p| p[0]).unwrap()[0];
// let max = pairs.iter().max_by_key(|p| p[0]).unwrap()[0];
let mut hist = hist(&pairs);
let hist_copy = hist.clone();
let start = cum(&hist);
let mut objects = Vec::new();
objects.resize(len, 0);
for i in 0..len {
let pos = start[(pairs[i][0] - min) as usize];
let remaining = hist[(pairs[i][0] - min) as usize];
hist[(pairs[i][0] - min) as usize] -= 1;
objects[(pos + remaining - 1) as usize] = pairs[i][1];
}
for i in 0..(len - 1) {
sort_with_index(&mut objects, start[i], start[i + 1]);
}
let mut j = 0;
let mut l = 0;
let mut last = -1;
for i in 0..len {
let val = hist_copy[i];
let s = min + i as i64;
for k in 0..val {
let o = objects[l];
l += 1;
if k == 0 || o != last {
pairs[j] = [s, o];
j += 1;
}
last = o;
}
}
pairs.resize(j as usize, [0, 0]);
}
fn sort_with_index(v: &mut Vec<i64>, from: i64, to: i64) {
for i in from..to {
let mut j = i;
let tmp = v[i as usize];
while j > from && v[(j - 1) as usize] > tmp {
v[j as usize] = v[(j - 1) as usize];
j -= 1;
}
v[j as usize] = tmp;
}
}
fn hist(pairs: &Vec<[i64; 2]>) -> Vec<i64> {
let mut hist = Vec::new();
hist.resize(pairs.len(), 0);
for pair in pairs {
hist[pair[0] as usize - 1] += 1;
}
hist
}
fn cum(hist: &Vec<i64>) -> Vec<i64> {
let mut cum = Vec::new();
cum.resize(hist.len(), 0);
for (i, _e) in hist.iter().enumerate() {
if i != 0 {
cum[i] = cum[i - 1] + hist[i - 1];
}
}
cum
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
let mut input = vec![[4, 1], [2, 3], [1, 2], [5, 3], [4, 4]];
sort(&mut input);
let output = vec![[1, 2], [2, 3], [4, 1], [4, 4], [5, 3]];
assert_eq!(input, output);
}
#[test]
fn dupplicate() {
let mut input = vec![[4, 4], [2, 3], [1, 2], [5, 3], [4, 4]];
let output = vec![[1, 2], [2, 3], [4, 4], [5, 3]];
sort(&mut input);
assert_eq!(input, output);
}
#[test]
fn already_sorted() {
let mut input = vec![[1, 2], [2, 3], [4, 1], [4, 4], [5, 3]];
let output = vec![[1, 2], [2, 3], [4, 1], [4, 4], [5, 3]];
sort(&mut input);
assert_eq!(input, output);
}
#[test]
fn test_hist() {
assert_eq!(
hist(&vec![[4, 1], [2, 3], [1, 2], [5, 3], [4, 4]]),
vec![1, 1, 0, 2, 1]
)
}
#[test]
fn test_cum() {
assert_eq!(cum(&vec![1, 1, 0, 2, 1]), vec![0, 1, 2, 2, 4])
}
}
| 93f42b2fe4e8e1314acdcc954bedcce8d6bb0129 | [
"Rust"
] | 2 | Rust | tbourg/sorting_algo | 90497d20cb61e5d7b4a2d55bdda90b1be047e574 | 6478e2a08aba04952fc392755f6781db58f3d224 |
refs/heads/master | <file_sep><?php
class Database {
protected $db_name = "mysql:host=localhost;dbname=arsip;";
protected $username = "root";
protected $password = "pwd";
public $db;
public function __construct(){
$this->db= new PDO(
$this->db_name,
$this->username,
$this->password
);
}
public function getAll(){
$data = $this->db->prepare("SELECT * FROM ?");
$data->execute((array)$this->table);
while ( $row = $data->fetch(PDO::FETCH_OBJ)){
echo $row;
}
return $row;
}
}
<file_sep><?php
echo "anu";
require_once(__DIR__.'/database.php');
require_once(__DIR__.'/sms_gate.php');
class Arsip extends Database{
protected $table='karpeg';
protected $findParam='id';
protected $findParamD='kode = :kode';
protected $fill = array('nip','nama','pangkat','phone');
public $request=array();
public function __construct($request=null){
parent::__construct();
$this->setRequest($request);
}
public function add(){
$add = $this->db->prepare("INSERT INTO ".$this->table."(nip,nama,pangkat,phone) VALUES(:nip,:nama,:pangkat,:phone)");
foreach ($this->fill as $key => $value) {
if ( $value =='phone'){
$phone = $this->request[$value];
}
$add->bindParam(":".$value, $this->request[$value],PDO::PARAM_STR);
}
$add->execute();
$arsip = $add->fetch(PDO::FETCH_OBJ);
$msg = "Berkas Anda Telah Dikirim, Terima Kasih";
if ($add->rowCount() > 0){
$sms = new SMS($phone, $msg);
$sms->send();
}
}
public function setRequest($request){
$this->request=$request;
}
public function update(){
$edit = $this->db->prepare("UPDATE ".$this->table." set nip=:nip, nama=:nama, pangkat=:pangkat, phone=:phone WHERE id=:oldid");
foreach ($this->fill as $key => $value) {
$edit->bindParam(":".$value, $this->request[$value],PDO::PARAM_STR);
}
$edit->bindParam(":oldid", $this->request['oldid'],PDO::PARAM_INT);
$edit->execute();
return $edit->fetch(PDO::FETCH_OBJ);
}
}
<file_sep><?php
error_reporting(1);
echo 1;
require_once('database.php');
/**
 * Minimal insert-only model for the `karpeg` table.
 * NOTE(review): overlaps with the Arsip class, which targets the same table —
 * confirm whether this duplicate is still needed.
 */
class Karpeg extends Database{
    protected $table = "karpeg";
    protected $fill = ['nip','pangkat','phone','nama'];
    // Declared explicitly: assigning an undeclared property is deprecated
    // since PHP 8.2 and was previously created dynamically.
    protected $request;

    public function __construct($request){
        $this->request = $request;
        // fix: was parent::construct() — no such method exists; the missing
        // underscores caused a fatal "undefined method" error, so the PDO
        // handle in $this->db was never created.
        parent::__construct();
    }

    /**
     * Insert one record taken from the request array.
     *
     * @return bool true when the row was inserted. (fetch() after an INSERT
     *         has no result set and always yielded false.)
     */
    public function add(){
        $d = $this->db->prepare("
            INSERT INTO $this->table(nama,nip,pangkat,phone)
            VALUES(:nama,:nip,:pangkat,:phone)
        ");
        foreach($this->fill as $v){
            $d->bindParam(':'.$v, $this->request[$v], PDO::PARAM_STR);
        }
        $d->execute();
        return $d->rowCount() > 0;
    }
}
$c = new Karpeg($_REQUEST);
// var_dump() prints directly and returns null, so the previous
// `echo var_dump($c)` just echoed nothing on top of the dump.
var_dump($c);
<file_sep>-- phpMyAdmin SQL Dump
-- version 4.5.4.1deb2ubuntu2
-- http://www.phpmyadmin.net
--
-- Host: localhost
-- Generation Time: Oct 13, 2017 at 12:43 AM
-- Server version: 5.7.19-0ubuntu0.16.04.1
-- PHP Version: 5.6.31-4+ubuntu16.04.1+deb.sury.org+4
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
--
-- Database: `gammu2`
--
-- --------------------------------------------------------
--
-- Table structure for table `gammu`
--
CREATE TABLE `gammu` (
`Version` int(11) NOT NULL DEFAULT '0'
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
--
-- Dumping data for table `gammu`
--
INSERT INTO `gammu` (`Version`) VALUES
(16);
-- --------------------------------------------------------
--
-- Table structure for table `inbox`
--
CREATE TABLE `inbox` (
`UpdatedInDB` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`ReceivingDateTime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`Text` text NOT NULL,
`SenderNumber` varchar(20) NOT NULL DEFAULT '',
`Coding` enum('Default_No_Compression','Unicode_No_Compression','8bit','Default_Compression','Unicode_Compression') NOT NULL DEFAULT 'Default_No_Compression',
`UDH` text NOT NULL,
`SMSCNumber` varchar(20) NOT NULL DEFAULT '',
`Class` int(11) NOT NULL DEFAULT '-1',
`TextDecoded` text NOT NULL,
`ID` int(10) UNSIGNED NOT NULL,
`RecipientID` text NOT NULL,
`Processed` enum('false','true') NOT NULL DEFAULT 'false'
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
--
-- Dumping data for table `inbox`
--
INSERT INTO `inbox` (`UpdatedInDB`, `ReceivingDateTime`, `Text`, `SenderNumber`, `Coding`, `UDH`, `SMSCNumber`, `Class`, `TextDecoded`, `ID`, `RecipientID`, `Processed`) VALUES
('2017-08-18 16:05:51', '2017-07-21 22:27:36', '004800750062002E0020004F00750074006C00650074002F0047007200610070006100720069002F00500065006E006A00750061006C002000750074006B002000640061007000610074006B0061006E0020003100300020004400690067006900740020004B004F00440045005F00490044002000640061006E00200061006B0074006900660061007300690020006C006100790061006E0061006E00200070007200610062006100790061007200200041006E00640061002C002000640067006E0020006B006900720069006D00200053004D00530020006B006500740069006B00200050004F005300490044003C00730070006100730069003E003100300020004400690067006900740020004B004F00440045005F004900440020006B006900720069006D0020006B', '1818', 'Default_No_Compression', '0500033A0201', '+6281100000', 1, 'Hub. Outlet/Grapari/Penjual utk dapatkan 10 Digit KODE_ID dan aktifasi layanan prabayar Anda, dgn kirim SMS ketik POSID<spasi>10 Digit KODE_ID kirim k', 1, '', 'false'),
('2017-08-18 16:05:51', '2017-07-21 22:27:37', '00650020003100380031003800200028004300740068003A00200050004F005300490044002000310032003300340035003600370038003900300029', '1818', 'Default_No_Compression', '0500033A0202', '+6281100000', 1, 'e 1818 (Cth: POSID 1234567890)', 2, '', 'false'),
('2017-08-18 16:05:51', '2017-07-22 02:24:21', '004E006F006D006F007200200041006E00640061002000740065006C00610068002000640069006900730069002000640065006E00670061006E0020006E006F006D006F007200200053004E00200037003000370032003200310030003200340032003000300031003100320035003100350031002E', 'MKIOS', 'Default_No_Compression', '', '+6281100000', -1, 'Nomor Anda telah diisi dengan nomor SN 7072210242001125151.', 3, '', 'false'),
('2017-08-18 16:05:51', '2017-07-22 02:25:46', '0054006500720069006D00610020006B0061007300690068002000740065006C006100680020006D0065006C0061006B0075006B0061006E002000690073006900200075006C0061006E0067002E002000420065006C0069002000500061006B0065007400200043006F006D0062006F0020004D0061006E006900610020003100300030006D006E0074002B003500300053004D0053002B0035004D0042002000640067002000630061007200610020006B006500740069006B00200043004D0020004F004E0020006B006900720069006D0020006B006500200038003900390039002E0020004800610072006700610020006D0075006C00610069002000520070002000320037003500300020006B00650063002E0020005000610070007500610020', 'TELKOMSEL', 'Default_No_Compression', '0807010300032A0201', '+6281100000', 1, 'Terima kasih telah melakukan isi ulang. Beli Paket Combo Mania 100mnt+50SMS+5MB dg cara ketik CM ON kirim ke 8999. Harga mulai Rp 2750 kec. Papua ', 4, '', 'false'),
('2017-08-18 16:05:51', '2017-07-22 02:25:46', '004D0061006C0075006B0075002E00200049006E0066006F003A003100380038', 'TELKOMSEL', 'Default_No_Compression', '0807010300032A0202', '+6281100000', 1, 'Maluku. Info:188', 5, '', 'false'),
('2017-08-18 16:46:20', '2017-08-18 15:46:14', '005000650072006D0069006E007400610061006E00200043006F006C006C00650063007400200053004D005300200041006E006400610020006400690074006F006C0061006B0020002B0036003200380031003300350035003600390037003500350030002E', '88330', 'Default_No_Compression', '', '+6281100000', -1, 'Permintaan Collect SMS Anda ditolak +6281355697550.', 6, '', 'false');
-- --------------------------------------------------------
--
-- Table structure for table `karpeg`
--
CREATE TABLE `karpeg` (
`id` int(10) UNSIGNED NOT NULL,
`nama` varchar(50) NOT NULL,
`nip` varchar(20) NOT NULL,
`pangkat` varchar(50) NOT NULL,
`phone` varchar(20) NOT NULL,
`status` enum('dikirim','diproses','dijawab','') NOT NULL DEFAULT 'dikirim',
`dibuat` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
--
-- Dumping data for table `karpeg`
--
INSERT INTO `karpeg` (`id`, `nama`, `nip`, `pangkat`, `phone`, `status`, `dibuat`) VALUES
(1, '23212', '1213', '1231', '234', 'dikirim', '2017-08-18 20:15:20'),
(2, '23212', '1213', '1231', '234', 'dikirim', '2017-08-18 20:30:43'),
(3, 'Akram', '08821', 'GOLONGAN 2', '081355697550', 'dikirim', '2017-08-18 20:40:59'),
(4, 'Akram EditC', '08821', 'GOLONGAN 2', '081355697550', 'dikirim', '2017-08-18 20:50:52'),
(5, 'adysurya', '19283198', 'i', '081355697550', 'dikirim', '2017-10-12 15:10:28'),
(6, 'Akram', '123', 'asdf', '+6282191971283', 'dikirim', '2017-10-12 17:35:44'),
(7, 'Akram', '123', 'asdf', '+6282191971283', 'dikirim', '2017-10-12 17:36:41'),
(8, 'Akram', '123', 'asdf', '+6282191971283', 'dikirim', '2017-10-12 17:37:14'),
(9, 'surya', 'surya', 'surya', '+6281355697550', 'dikirim', '2017-10-12 17:37:57');
-- --------------------------------------------------------
--
-- Table structure for table `outbox`
--
CREATE TABLE `outbox` (
`UpdatedInDB` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`InsertIntoDB` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`SendingDateTime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`SendBefore` time NOT NULL DEFAULT '23:59:59',
`SendAfter` time NOT NULL DEFAULT '00:00:00',
`Text` text,
`DestinationNumber` varchar(20) NOT NULL DEFAULT '',
`Coding` enum('Default_No_Compression','Unicode_No_Compression','8bit','Default_Compression','Unicode_Compression') NOT NULL DEFAULT 'Default_No_Compression',
`UDH` text,
`Class` int(11) DEFAULT '-1',
`TextDecoded` text NOT NULL,
`ID` int(10) UNSIGNED NOT NULL,
`MultiPart` enum('false','true') DEFAULT 'false',
`RelativeValidity` int(11) DEFAULT '-1',
`SenderID` varchar(255) DEFAULT NULL,
`SendingTimeOut` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`DeliveryReport` enum('default','yes','no') DEFAULT 'default',
`CreatorID` text NOT NULL,
`Retries` int(3) DEFAULT '0',
`Priority` int(11) DEFAULT '0'
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
-- --------------------------------------------------------
--
-- Table structure for table `outbox_multipart`
--
CREATE TABLE `outbox_multipart` (
`Text` text,
`Coding` enum('Default_No_Compression','Unicode_No_Compression','8bit','Default_Compression','Unicode_Compression') NOT NULL DEFAULT 'Default_No_Compression',
`UDH` text,
`Class` int(11) DEFAULT '-1',
`TextDecoded` text,
`ID` int(10) UNSIGNED NOT NULL DEFAULT '0',
`SequencePosition` int(11) NOT NULL DEFAULT '1'
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
-- --------------------------------------------------------
--
-- Table structure for table `phones`
--
CREATE TABLE `phones` (
`ID` text NOT NULL,
`UpdatedInDB` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`InsertIntoDB` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`TimeOut` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`Send` enum('yes','no') NOT NULL DEFAULT 'no',
`Receive` enum('yes','no') NOT NULL DEFAULT 'no',
`IMEI` varchar(35) NOT NULL,
`IMSI` varchar(35) NOT NULL,
`NetCode` varchar(10) DEFAULT 'ERROR',
`NetName` varchar(35) DEFAULT 'ERROR',
`Client` text NOT NULL,
`Battery` int(11) NOT NULL DEFAULT '-1',
`Signal` int(11) NOT NULL DEFAULT '-1',
`Sent` int(11) NOT NULL DEFAULT '0',
`Received` int(11) NOT NULL DEFAULT '0'
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
--
-- Dumping data for table `phones`
--
INSERT INTO `phones` (`ID`, `UpdatedInDB`, `InsertIntoDB`, `TimeOut`, `Send`, `Receive`, `IMEI`, `IMSI`, `NetCode`, `NetName`, `Client`, `Battery`, `Signal`, `Sent`, `Received`) VALUES
('', '2017-08-18 19:19:27', '2017-08-18 19:18:26', '2017-08-18 19:19:37', 'yes', 'yes', '867989012656266', '510104332745971', '510 10', 'TELKOMSEL', 'Gammu 1.38.4, Linux, kernel 4.4.0-92-generic (#115-Ubuntu SMP Thu Aug 10 09:04:33 UTC 2017), GCC 5.4', 0, 51, 0, 0);
-- --------------------------------------------------------
--
-- Table structure for table `sentitems`
--
CREATE TABLE `sentitems` (
`UpdatedInDB` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`InsertIntoDB` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`SendingDateTime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`DeliveryDateTime` timestamp NULL DEFAULT NULL,
`Text` text NOT NULL,
`DestinationNumber` varchar(20) NOT NULL DEFAULT '',
`Coding` enum('Default_No_Compression','Unicode_No_Compression','8bit','Default_Compression','Unicode_Compression') NOT NULL DEFAULT 'Default_No_Compression',
`UDH` text NOT NULL,
`SMSCNumber` varchar(20) NOT NULL DEFAULT '',
`Class` int(11) NOT NULL DEFAULT '-1',
`TextDecoded` text NOT NULL,
`ID` int(10) UNSIGNED NOT NULL DEFAULT '0',
`SenderID` varchar(255) NOT NULL,
`SequencePosition` int(11) NOT NULL DEFAULT '1',
`Status` enum('SendingOK','SendingOKNoReport','SendingError','DeliveryOK','DeliveryFailed','DeliveryPending','DeliveryUnknown','Error') NOT NULL DEFAULT 'SendingOK',
`StatusError` int(11) NOT NULL DEFAULT '-1',
`TPMR` int(11) NOT NULL DEFAULT '-1',
`RelativeValidity` int(11) NOT NULL DEFAULT '-1',
`CreatorID` text NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
--
-- Dumping data for table `sentitems`
--
INSERT INTO `sentitems` (`UpdatedInDB`, `InsertIntoDB`, `SendingDateTime`, `DeliveryDateTime`, `Text`, `DestinationNumber`, `Coding`, `UDH`, `SMSCNumber`, `Class`, `TextDecoded`, `ID`, `SenderID`, `SequencePosition`, `Status`, `StatusError`, `TPMR`, `RelativeValidity`, `CreatorID`) VALUES
('2017-08-18 16:05:54', '2017-08-18 14:58:29', '2017-08-18 16:05:54', NULL, '006800610069', '+6281355697550', 'Default_No_Compression', '', '+6281100000', -1, 'hai', 1, '', 1, 'SendingOKNoReport', -1, 40, 255, 'Gammu 1.38.4'),
('2017-08-18 16:09:53', '2017-08-18 16:07:26', '2017-08-18 16:09:53', NULL, '006800610069', '+6281355697550', 'Default_No_Compression', '', '+6281100000', -1, 'hai', 3, '', 1, 'SendingOKNoReport', -1, 42, 255, 'Gammu 1.38.4'),
('2017-08-18 16:11:23', '2017-08-18 16:11:04', '2017-08-18 16:11:23', NULL, '006800610069', '+6281355697550', 'Default_No_Compression', '', '+6281100000', -1, 'hai', 4, '', 1, 'SendingOKNoReport', -1, 43, 255, 'Gammu 1.38.4'),
('2017-08-18 16:16:26', '2017-08-18 15:03:03', '2017-08-18 16:16:26', NULL, '006800610069', '+6281355697550', 'Default_No_Compression', '', '+6281100000', -1, 'hai', 2, '', 1, 'SendingOKNoReport', -1, 44, 255, 'Gammu 1.38.4');
-- --------------------------------------------------------
--
-- Table structure for table `users`
--
-- Admin accounts. `password` holds an md5 digest written by login.php.
-- NOTE(review): md5 is obsolete for password storage — migrate to
-- password_hash() and widen the column to VARCHAR(255).
CREATE TABLE `users` (
`id` int(10) UNSIGNED NOT NULL,
`username` varchar(50) NOT NULL,
`password` varchar(50) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
--
-- Dumping data for table `users`
--
INSERT INTO `users` (`id`, `username`, `password`) VALUES
(1, 'admin', '<PASSWORD>fc3');
--
-- Indexes for dumped tables
--
--
-- Indexes for table `gammu`
--
ALTER TABLE `gammu`
ADD PRIMARY KEY (`Version`);
--
-- Indexes for table `inbox`
--
ALTER TABLE `inbox`
ADD PRIMARY KEY (`ID`);
--
-- Indexes for table `karpeg`
--
ALTER TABLE `karpeg`
ADD PRIMARY KEY (`id`);
--
-- Indexes for table `outbox`
--
ALTER TABLE `outbox`
ADD PRIMARY KEY (`ID`),
ADD KEY `outbox_date` (`SendingDateTime`,`SendingTimeOut`),
ADD KEY `outbox_sender` (`SenderID`(250));
--
-- Indexes for table `outbox_multipart`
--
ALTER TABLE `outbox_multipart`
ADD PRIMARY KEY (`ID`,`SequencePosition`);
--
-- Indexes for table `phones`
--
ALTER TABLE `phones`
ADD PRIMARY KEY (`IMEI`);
--
-- Indexes for table `sentitems`
--
ALTER TABLE `sentitems`
ADD PRIMARY KEY (`ID`,`SequencePosition`),
ADD KEY `sentitems_date` (`DeliveryDateTime`),
ADD KEY `sentitems_tpmr` (`TPMR`),
ADD KEY `sentitems_dest` (`DestinationNumber`),
ADD KEY `sentitems_sender` (`SenderID`(250));
--
-- Indexes for table `users`
--
ALTER TABLE `users`
ADD PRIMARY KEY (`id`),
ADD UNIQUE KEY `username` (`username`);
--
-- AUTO_INCREMENT for dumped tables
--
--
-- AUTO_INCREMENT for table `inbox`
--
ALTER TABLE `inbox`
MODIFY `ID` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=7;
--
-- AUTO_INCREMENT for table `karpeg`
--
ALTER TABLE `karpeg`
MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=10;
--
-- AUTO_INCREMENT for table `outbox`
--
ALTER TABLE `outbox`
MODIFY `ID` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=5;
--
-- AUTO_INCREMENT for table `users`
--
ALTER TABLE `users`
MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=2;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
<file_sep><?php
require_once(__DIR__.'/../vendor/autoload.php');
use telesign\sdk\messaging\MessagingClient;
/**
 * Thin wrapper around the TeleSign MessagingClient for sending one SMS.
 */
class SMS {
    protected $phone;
    // SECURITY: API credentials are hardcoded in source — move them to an
    // environment variable or config file. "<KEY>" is a redacted placeholder.
    protected $customer_id = "6F1EF683-577F-49BD-95D3-E7F0D39CB5E4";
    protected $api_key = "<KEY>"; // fix: literal was missing its closing quote, a parse error that broke the whole file
    protected $msg_type = 'ARN';  // TeleSign message type (alerts/reminders/notifications)
    protected $message;

    public function __construct($phone, $message){
        $this->phone = $phone;
        $this->message = $message;
    }

    public function setPhone($phone){
        $this->phone = $phone;
    }

    public function setMessage($msg){
        $this->message = $msg;
    }

    public function setMsgType($type){
        $this->msg_type = $type;
    }

    /**
     * Send $this->message to $this->phone and return the TeleSign response.
     *
     * The previous print_r() of the response was debug output that corrupted
     * the page; callers that ignored the (implicit null) return value are
     * unaffected by returning the response instead.
     */
    public function send(){
        $client = new MessagingClient(
            $this->customer_id,
            $this->api_key
        );
        return $client->message($this->phone, $this->message, $this->msg_type);
    }
}
<file_sep><?php
/**
*
*/
session_start();
require_once('models/database.php');
/**
 * Authenticates an admin against the `users` table and populates $_SESSION.
 */
class Login extends Database
{
    protected $username;
    protected $password;
    protected $request;   // array('username' => ..., 'password' => md5(...)) or null
    protected $table = 'users';

    public function __construct($request)
    {
        parent::__construct();
        // setUser() was previously invoked twice in a row; once is enough.
        $this->request = $this->setUser($request);
    }

    /**
     * Validate and normalize the raw request.
     *
     * @return array|null execute()-ready parameter set, or null when either
     *         field is missing (previously fell through returning null
     *         implicitly, later crashing execute()).
     */
    public function setUser($request){
        if (!empty($request['username']) && !empty($request['password'])) {
            $this->username = $request['username'];
            $this->password = $request['password'];
            // SECURITY: md5 is not a password hash; kept only because the
            // existing users table stores md5 digests — migrate to
            // password_hash()/password_verify().
            return array(
                'username' => $request['username'],
                'password' => md5($request['password']));
        }
        return null;
    }

    /**
     * Check credentials; redirect to admin.php on success, alert on failure.
     */
    public function checkUser(){
        if ($this->request === null) {
            // Missing fields: fail the same way as bad credentials instead of
            // passing null to execute().
            echo "<script>alert('Login Gagal');</script>";
            return;
        }
        // fix: the second placeholder did not match the 'password' key in the
        // executed parameter array, so the query could never bind.
        $user = $this->db->prepare("SELECT * FROM ".$this->table." WHERE username=:username AND password=:password");
        $user->execute($this->request);
        if ($user->rowCount() > 0) {
            $this->makeSession();
            header('location:admin.php');
            exit; // header() does not stop the script — without exit the page kept rendering
        }
        else echo "<script>alert('Login Gagal');</script>";
    }

    /**
     * Copy the validated request into the session and mark it authenticated.
     * NOTE(review): this stores the md5 password digest in the session —
     * consider keeping only the username.
     */
    public function makeSession(){
        foreach ($this->request as $key => $value) {
            $_SESSION[$key] = $value;
        }
        $_SESSION['user_auth'] = true;
    }
}
// Handle a submitted login form.
if ($_SERVER['REQUEST_METHOD'] === 'POST') {
    $auth = new Login($_REQUEST);
    $auth->checkUser();
}
// Already authenticated: skip the form. empty() avoids the undefined-index
// notice the bare $_SESSION['user_auth'] read produced on first visit.
if (!empty($_SESSION['user_auth'])) {
    header('location:index.php');
    exit; // stop emitting the login page after the redirect header
}
?>
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>Login</title>
<!-- Bootstrap core CSS -->
<link href="css/bootstrap.min.css" rel="stylesheet">
<!-- Custom styles for this template -->
<link href="css/signin.css" rel="stylesheet">
</head>
<body>
<div class="container">
<form class="form-signin" method="post" name="login">
<h2 class="form-signin-heading">Please sign in</h2>
<label for="inputusername" class="sr-only">Username</label>
<input type="text" id="inputusername" class="form-control" placeholder="username" name="username" required autofocus>
<label for="inputPassword" class="sr-only">Password</label>
<input type="password" id="inputPassword" class="form-control" placeholder="<PASSWORD>" name="password" required>
<div class="checkbox">
<label>
<input type="checkbox" value="remember-me"> Remember me
</label>
</div>
<button class="btn btn-lg btn-primary btn-block" type="submit">Sign in</button>
</form>
</div> <!-- /container -->
<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
<!-- IE10 viewport hack for Surface/desktop Windows 8 bug -->
<script src="js/ie10-viewport-bug-workaround.js"></script>
</body>
</html>
| ca13970d02476d57aa47c628f89f6bdbfc120b14 | [
"SQL",
"PHP"
] | 6 | PHP | r17x/AKRAM | af52a68cf19c0fed76d9c74a068555fe137b3b6b | b5aab0bc3681aa66292d642bbe58cafe500f8ac7 |
refs/heads/master | <repo_name>ktoroshchin/tictac<file_sep>/README.md
Play [TIC-TAC-TOE](https://tictac-ef8e4.web.app/).
## Available Scripts
This is 2 player tic-tac-toe game, where 2 people can play from different computers or browser windows
## Stack
1. React
2. TypeScript
3. Node
4. Firebase
5. Semantic-UI
## Run locally
1. Clone the repo
2. run 'npm install'
3. run 'npm start'<file_sep>/src/constants.ts
export enum PlayerName {
PLAYER1 = 'player1',
PLAYER2 = 'player2',
}
export enum CellValue {
X = 'X',
O = 'O',
}<file_sep>/src/FirebaseApi.ts
import * as firebase from 'firebase'
import { Dispatch, SetStateAction } from 'react'
import { IPlayer } from './App'
import { PlayerName } from './constants'
import { firebaseConfig } from './firebaseConfig'
class FirebaseApi {
private app: firebase.app.App
private database: firebase.database.Database
constructor() {
this.app = firebase.initializeApp(firebaseConfig)
this.database = this.app.database()
}
public getBoard(updateBoard: React.Dispatch<React.SetStateAction<string[]>>): void {
this.database.ref('board').on('value', (snapshot) => {
updateBoard(Object.values(snapshot.val()))
})
}
public updateBoard(index: number, value: string): void {
this.database.ref('board').update({
[index]: value
})
}
public resetBoard(board: string[]): void {
let newBoard: { [key: number]: string } = {}
board.forEach((cell, index) => (newBoard[index] = ''))
this.database.ref('board').update({ ...newBoard })
}
public getPlayer(playerName: string, setPlayer: Dispatch<SetStateAction<IPlayer | undefined>>): void {
this.database.ref(playerName).on('value', (snapshot) => {
setPlayer(snapshot.val())
})
}
public isPlayerActive(playerName: string, setPlayerStatus: Dispatch<SetStateAction<boolean | undefined>>): void {
this.database.ref(playerName).on('value', (snapshot) => {
setPlayerStatus(snapshot.val().isActive)
})
}
public updatePlayerStatus(playerName: string, status: boolean): void {
this.database.ref(playerName).update({
isActive: status
})
}
public updatePlayerTurn(playerName: string, turn: boolean): void {
this.database.ref(playerName).update({
turn: turn,
})
}
public getPlayerTurn(playerName: string, setTurn: Dispatch<SetStateAction<boolean | undefined>>): void {
this.database.ref(playerName).on('value', (snapshot) => {
setTurn(snapshot.val().turn)
})
}
public resetPlayer(playerName: string): void {
const newPlayer = {
isActive: false,
gamesPlayed: 0,
losses: 0,
ties: 0,
turn: 0,
wins: 0,
}
this.database.ref(playerName).update({ ...newPlayer })
}
public emergencyReset(board: string[]): void {
this.updateGameFullStatus(false)
this.resetPlayer(PlayerName.PLAYER1)
this.resetPlayer(PlayerName.PLAYER2)
this.resetBoard(board)
}
public updateWins(playerName: string, wins: number): void {
this.database.ref(playerName).update({ wins: wins + 1 })
}
public getWins(playerName: string, setWins: Dispatch<SetStateAction<number>>): void {
this.database.ref(playerName).on('value', (snapshot) => {
setWins(snapshot.val().wins)
})
}
public updateTies(ties: number): void {
this.database.ref(PlayerName.PLAYER1).update({ ties: ties + 1 })
this.database.ref(PlayerName.PLAYER2).update({ ties: ties + 1 })
}
public getTies(playerName: string, setTies: Dispatch<SetStateAction<number>>): void {
this.database.ref(playerName).on('value', (snapshot) => {
setTies(snapshot.val().ties)
})
}
public updateLosses(playerName: string, losses: number): void {
this.database.ref(playerName).update({ losses: losses + 1 })
}
public getLosses(playerName: string, setLosses: Dispatch<SetStateAction<number>>): void {
this.database.ref(playerName).on('value', (snapshot) => {
setLosses(snapshot.val().losses)
})
}
public getGameFullStatus(setGameStatus: Dispatch<SetStateAction<boolean>> ): void {
this.database.ref('game').on('value', (snapshot) => {
setGameStatus(snapshot.val().isFull)
})
}
public updateGameFullStatus(status?: boolean): void {
this.database.ref('game').update({ isFull: status })
}
}
export const firebaseApi: FirebaseApi = new FirebaseApi()
| 6a639d1cde6336aff6b972a978d146bafa7caab2 | [
"Markdown",
"TypeScript"
] | 3 | Markdown | ktoroshchin/tictac | b376d351db6dc8ff5d6f097e36aae3c2b26b4d38 | 65204e5a83edc1f9d0743c8f1443aabb9551cb97 |
refs/heads/master | <file_sep>package tech.sree.com.listviewcomplex;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.view.View;
import android.widget.AdapterView;
import android.widget.ListView;
import android.widget.Switch;
import android.widget.TextView;
import android.widget.Toast;
public class MainActivity extends AppCompatActivity {
ListView listView ;
String[] Title={"SUNDAY","MONDAY","TUESDAY","WEDNESDAY","THRUSDAY","FRIDAY","SATURDAY","SUNDAY",
"SUNDAY","MONDAY","TUESDAY","WEDNESDAY","THRUSDAY","FRIDAY","SATURDAY","SUNDAY",
"SUNDAY","MONDAY","TUESDAY","WEDNESDAY","THRUSDAY","FRIDAY","SATURDAY","SUNDAY"};
String[] Descpription ={"sun-day","mon-day","tues-day","wednes-day","thurs-day","fri-day","satur-day","sun-day",
"sun-day","mon-day","tues-day","wednes-day","thurs-day","fri-day","satur-day","sun-day",
"sun-day","mon-day","tues-day","wednes-day","thurs-day","fri-day","satur-day","sun-day"};
int[] imagesId={R.drawable.images_001,R.drawable.images_002,R.drawable.images_003,R.drawable.images_004,
R.drawable.images_005,R.drawable.images_006,R.drawable.images_007,R.drawable.images_008,R.drawable.images_001,R.drawable.images_002,R.drawable.images_003,R.drawable.images_004,
R.drawable.images_005,R.drawable.images_006,R.drawable.images_007,R.drawable.images_008,R.drawable.images_001,R.drawable.images_002,R.drawable.images_003,R.drawable.images_004,
R.drawable.images_005,R.drawable.images_006,R.drawable.images_007,R.drawable.images_008};
boolean weekDay[]={true,false,false,true,false,false,true};
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
listView = (ListView) findViewById(R.id.listView);
//ListViewArrayAdaptor adaptor = new ListViewArrayAdaptor(this,R.layout.one_row,R.id.Title,Title,Descpription,imagesId);
ListViewBaseAdaptor baseAdaptor = new ListViewBaseAdaptor(this,imagesId,Title,Descpription,true,weekDay);
listView.setAdapter(baseAdaptor);
listView.setOnItemClickListener(new AdapterView.OnItemClickListener() {
@Override
public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
TextView title = (TextView) view.findViewById(R.id.Title);
TextView Description = (TextView) view.findViewById(R.id.description);
Switch onoff = (Switch) view.findViewById(R.id.oNoFF);
String string = "Title : "+title.getText().toString()+
"Description : +"+Description.getText().toString()
+"Switch : "+onoff.isChecked();
Toast.makeText(getApplicationContext(), string,Toast.LENGTH_LONG).show();
}
});
}
}
| 44fdacbaa3e3ada335acb51a30ac06ea66b8e390 | [
"Java"
] | 1 | Java | arunkk-git/ListViewComplex | 6c00085ebb5112892ce000b4ac3033792ff338a1 | 8636c8590a00ef1cb0fd852825a0daab1fda469a |
refs/heads/master | <file_sep>const button = document.querySelector('input[type=submit]');
const answer = document.getElementById('answer');
button.addEventListener('click', displayResult);
function displayResult(){
let n1 = Number(document.getElementById('n1').value);
let n2 = Number(document.getElementById('n2').value);
let action = document.querySelector('input[type=radio]:checked').value;
let result;
if (action === "multiply"){
result = multiply(n1, n2);
}
else if (action === "divide"){
result = divide(n1, n2);
}
else if (action === "add"){
result = add(n1, n2);
}
else{
result = subtract(n1, n2);
}
answer.innerHTML = result;
}
function multiply(n1, n2){
return n1 * n2;
}
function divide(n1, n2){
return n1 / n2;
}
function add(n1, n2){
return n1 + n2;
}
function subtract(n1, n2){
return n1 - n2;
}<file_sep>const base = 32;
const conversion = 9.0/5.0;
function myFunction(){
let min = document.getElementById('min').value;
let max = document.getElementById('max').value;
calc(min, max);
}
function calc(min, max){
let newTable = document.createElement("table");
document.body.appendChild(newTable);
let i = min;
while(i <= max){
let newTr = document.createElement('tr');
newTable.appendChild(newTr);
let newTd1 = document.createElement('td');
newTr.appendChild(newTd1);
let newContent = document.createTextNode(i);
newTd1.appendChild(newContent);
newTd2 = document.createElement('td');
newTr.appendChild(newTd2);
newContent = document.createTextNode((i*conversion+base).toFixed(1));
newTd2.appendChild(newContent);
i++;
}
}<file_sep>const price = 6.00;
const studentDiscount = 0.5;
const retiredDiscount = 0.3;
let displayPrice;
function myFunction(){
let age = document.getElementById('age').value;
if (age < 16){
displayPrice = price / 2;
}
else if (age > 60){
displayPrice = 2/3 * price;
}
else{
displayPrice = price;
}
document.getElementById('price').innerHTML = displayPrice;
}<file_sep>const app = document.getElementById('app');
const products = document.getElementById('products');
const addButton = document.querySelector('input[type=submit]');
var pizzaName;
addButton.addEventListener('click', addPizza);
function addPizza(){
pizzaName = document.getElementById('name').value;
products.insertAdjacentHTML('afterBegin', '<div class="product"><div>');
document.querySelector('.product').innerHTML = pizzaName;
}<file_sep>var cars = document.getElementById("cars");
var i = 1;
function addCar(){
// Getting all values
let brand = document.getElementById('brand').value;
let model = document.getElementById('model').value;
let doors = document.getElementById('doors').value;
let price = document.getElementById('price').value;
let row = cars.insertRow(-1);
let j = 'a' + i;
row.setAttribute("id", j);
let cell1 = row.insertCell(0);
cell1.innerHTML = i;
i++;
let cell2 = row.insertCell(1);
cell2.innerHTML = brand;
let cell3 = row.insertCell(2);
cell3.innerHTML = model;
let cell4 = row.insertCell(3);
cell4.innerHTML = doors;
let cell5 = row.insertCell(4);
cell5.innerHTML = price;
document.getElementById(j).addEventListener("click", function(){ alert(document.getElementById(j).textContent); });
}
function removeCar(){
let did = document.getElementById('did').value;
cars.deleteRow(did);
}<file_sep>'use strict';
const app = document.getElementById("app");
const clear = () => app.innerHTML = "";
const removeStorage = () => {localStorage.clear(); createTable()};
const button = document.querySelector("input[type=submit]");
button.addEventListener("click", fn);
const reset = document.querySelector("input[type=reset]");
reset.addEventListener("click", removeStorage);
var i, j;
createTable();
function Guest(id, name, surname){
this.id = id;
this.name = name;
this.surname = surname;
this.save = function(){localStorage.setItem(this.id, this.name.concat(",",this.surname))};
}
function getData(){
let guestList = Array();
for (let key in localStorage) {
if (localStorage.hasOwnProperty(key) && !isNaN(key)) {
let id = key;
let name = localStorage.getItem(key).split(',',1)[0];
let surname = localStorage.getItem(key).replace(name + ",","");
let fullNameObject = new Guest(id, name, surname);
guestList.push([key, fullNameObject]);
}
}
guestList.sort((a, b) => a[0] - b[0]);
if(guestList.length !== 0){
i = guestList[0][0];
j = guestList[(guestList.length - 1)][0];
}
else{
i = 1000;
j = 1000;
}
return guestList;
}
function fn(){
let fullName = document.getElementById("fullname").value;
let location = document.querySelectorAll("input[type=radio]");
let name = fullName.split(" ")[0];
let surname = fullName.replace(name, "").trim();
if(!location[0].checked && !location[1].checked){
app.innerHTML = "Please go back and fill out the fields";
}
else if(location[0].checked){
let id = --i;
let fullNameObject = new Guest(id, name, surname);
fullNameObject.save();
createTable();
}
else{
let id = ++j;
let fullNameObject = new Guest(id, name, surname);
fullNameObject.save();
createTable();
}
}
function createTable(){
clear();
let guestList = getData();
var table = app.appendChild(document.createElement("table"));
let tr = table.appendChild(document.createElement("tr"));
let th1 = tr.appendChild(document.createElement("th"));
th1.appendChild(document.createTextNode("ID"));
let th2 = tr.appendChild(document.createElement("th"));
th2.appendChild(document.createTextNode("Name"));
let th3 = tr.appendChild(document.createElement("th"));
th3.appendChild(document.createTextNode("Surname"));
let th4 = tr.appendChild(document.createElement("th"));
th4.appendChild(document.createTextNode("Remove Button"));
guestList.forEach(function(v, i){
let tr = table.appendChild(document.createElement("tr"));
let td1 = tr.appendChild(document.createElement("td"));
td1.appendChild(document.createTextNode(i));
let td2 = tr.appendChild(document.createElement("td"));
td2.appendChild(document.createTextNode(v[1].name));
let td3 = tr.appendChild(document.createElement("td"));
td3.appendChild(document.createTextNode(v[1].surname));
let td4 = tr.appendChild(document.createElement("td"));
td4.appendChild(document.createTextNode("Remove"));
td4.addEventListener("click", function(){
this.parentNode.parentNode.removeChild(this.parentNode);
localStorage.removeItem(v[0]);
});
});
}<file_sep>//Not in function, thus you can guess multiple times a single answer;
let number = Math.floor(Math.random() * 5);
function myFunction(){
let guess = document.getElementById('guess').value;
console.log(number);
document.getElementById('result').innerHTML = calc(guess, number);
}
// Compare guess (a string from the input) against the secret number; the
// loose `==` comparison is what makes "3" == 3 succeed.
function calc(guess, number){
let result;
if (guess == number){
result = 'correct';
}
else{
result = 'incorrect';
}
return result;
}<file_sep><!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<title>Page Title</title>
<link
rel="stylesheet"
type="text/css"
media="screen"
href="normalize.css"
/>
<link rel="stylesheet" type="text/css" media="screen" href="style.css" />
</head>
<body>
<div class="wrapper">
<header>
<img
src="https://conferences.recruitingdaily.com/wp-content/uploads/sites/12/2017/06/Microsoft-logo.png"
alt="Microsoft logo"
/>
<nav>
<a href="#">Home</a>
<a href="#about">About</a>
<a href="#contact">Contact</a>
</nav>
</header>
<main>
<section id="about">
<div class="slideshow">
<img
src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxMTEhUTExMVFhUVFx8aGRcYGBgZGhgaGxgbGhoYGBgaHSggGBolIB0XITUhJSkrLi4uFx8zODMtNygtLisBCgoKDg0OGhAQGi0lHSUtLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLf/AABEIALcBEwMBIgACEQEDEQH/xAAcAAABBQEBAQAAAAAAAAAAAAAEAAIDBQYBBwj/xABAEAACAQMCBAMGBAQFAwQDAQABAhEAAyESMQQFQVEiYXEGEzKBkaFCscHwB1LR4RQjM2LxJHKiFYKywkOSoxb/xAAYAQEBAQEBAAAAAAAAAAAAAAABAAIDBP/EACIRAQEAAgICAgMBAQAAAAAAAAABAhEhMRJBA1EiYXEyQv/aAAwDAQACEQMRAD8A8j4Zh<KEY>"
alt="forest"
/>
</div>
<h2>About</h2>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam ac
sem at massa ullamcorper mollis. Vivamus vel viverra nisl, ut
facilisis mauris. Praesent et ultricies sem, ut laoreet arcu. Morbi
et nulla efficitur, efficitur ligula ut, laoreet nulla. Morbi nec
odio posuere leo iaculis tempor. Curabitur mollis elit turpis, in
tempus libero auctor ac. In ut finibus purus. Nullam ut nulla
lobortis, commodo justo et, egestas nunc. Duis pulvinar bibendum
faucibus. Suspendisse in sagittis ante. Duis ut quam congue, dapibus
lacus ornare, rhoncus elit. Pellentesque id commodo urna. Donec
pretium id dolor id lacinia. Aenean condimentum velit id felis
venenatis viverra.
</p>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam ac
sem at massa ullamcorper mollis. Vivamus vel viverra nisl, ut
facilisis mauris. Praesent et ultricies sem, ut laoreet arcu. Morbi
et nulla efficitur, efficitur ligula ut, laoreet nulla. Morbi nec
odio posuere leo iaculis tempor. Curabitur mollis elit turpis, in
tempus libero auctor ac. In ut finibus purus. Nullam ut nulla
lobortis, commodo justo et, egestas nunc. Duis pulvinar bibendum
faucibus. Suspendisse in sagittis ante. Duis ut quam congue, dapibus
lacus ornare, rhoncus elit. Pellentesque id commodo urna. Donec
pretium id dolor id lacinia. Aenean condimentum velit id felis
venenatis viverra.
</p>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam ac
sem at massa ullamcorper mollis. Vivamus vel viverra nisl, ut
facilisis mauris. Praesent et ultricies sem, ut laoreet arcu. Morbi
et nulla efficitur, efficitur ligula ut, laoreet nulla. Morbi nec
odio posuere leo iaculis tempor. Curabitur mollis elit turpis, in
tempus libero auctor ac. In ut finibus purus. Nullam ut nulla
lobortis, commodo justo et, egestas nunc. Duis pulvinar bibendum
faucibus. Suspendisse in sagittis ante. Duis ut quam congue, dapibus
lacus ornare, rhoncus elit. Pellentesque id commodo urna. Donec
pretium id dolor id lacinia. Aenean condimentum velit id felis
venenatis viverra.
</p>
</section>
<section id="contact">
<h2>Contact</h2>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam ac
sem at massa ullamcorper mollis. Vivamus vel viverra nisl, ut
facilisis mauris. Praesent et ultricies sem, ut laoreet arcu. Morbi
et nulla efficitur, efficitur ligula ut, laoreet nulla. Morbi nec
odio posuere leo iaculis tempor. Curabitur mollis elit turpis, in
tempus libero auctor ac. In ut finibus purus. Nullam ut nulla
lobortis, commodo justo et, egestas nunc. Duis pulvinar bibendum
faucibus. Suspendisse in sagittis ante. Duis ut quam congue, dapibus
lacus ornare, rhoncus elit. Pellentesque id commodo urna. Donec
pretium id dolor id lacinia. Aenean condimentum velit id felis
venenatis viverra.
</p>
</section>
</main>
</div>
</body>
</html>
<file_sep>function myFunction(){
// Read the visitor's age and show the matching ticket price in #result.
let age = document.getElementById('age').value;
document.getElementById('result').innerHTML = calc(age);
}
// Age-tiered price: 20+ -> 150, 10-19 -> 100, under 10 -> 50.
// NOTE(review): `age` is a string; `>=` coerces it to a number, but an
// empty or non-numeric input falls through to the cheapest tier.
function calc(age){
let result;
if (age >= 20){
result = 150;
}
else if (age >= 10){
result = 100;
}
else{
result = 50;
}
return result;
}<file_sep>function myFunction(){
// Read the requested row count and draw the '+' pyramid (see calc below).
let rows = document.getElementById('rows').value;
calc(rows);
}
// Append a left-aligned '+' pyramid ("+", "++", ..., rows plus signs) to
// the document, one row per <br>.
function calc(rows){
    let node = document.createElement("div");
    document.body.appendChild(node);
    // FIX: `i` and `a` were assigned without let/var, creating implicit
    // globals (a ReferenceError under strict mode).
    // NOTE(review): the outer loop starts at i = 0, so the first iteration
    // emits an empty line (just a <br>) before the pyramid -- presumably
    // unintended; confirm before changing the start to 1.
    for(let i = 0; i <= rows; i++){
        for(let a = 0; a < i; a++){
            let content = document.createTextNode('+');
            node.appendChild(content);
        }
        let lineBreak = document.createElement("br");
        node.appendChild(lineBreak);
    }
}
"JavaScript",
"HTML"
] | 10 | JavaScript | linapilipa/CodeAcademyLT | b710c8859bc2e8ddbdbe0a13c875379ff4e7288b | e743ecca1ba0ec34fff3c4250d465de66a08bdf0 |
refs/heads/master | <repo_name>jessie110/codefairies-homework1<file_sep>/homework2.go
package main
import (
"fmt"
)
// The animals, in the order the old lady swallows them.
var fairyAnimals = []string{"fly", "spider", "bird", "cat", "dog", "cow", "horse"}

// Per-animal reaction line; the fly has no entry (its verse uses the refrain).
var swallowAnimal = map[string]string{
"spider": "That wriggled and wiggled and tickled inside her.",
"bird": "How absurd to swallow a bird.",
"cat": "Fancy that to swallow a cat!",
"dog": "What a hog, to swallow a dog!",
"cow": "I don't know how she swallowed a cow!",
"horse": "...She's dead, of course!",
}

// animal name -> swallowing order (0-based); inverse of animalsMap2.
var animalsMap = map[string]int{
"fly": 0,
"spider": 1,
"bird": 2,
"cat": 3,
"dog": 4,
"cow": 5,
"horse": 6,
}

// swallowing order -> animal name; inverse of animalsMap.
var animalsMap2 = map[int]string{
0: "fly",
1: "spider",
2: "bird",
3: "cat",
4: "dog",
5: "cow",
6: "horse",
}
// main prints every verse of "There Was an Old Lady Who Swallowed a Fly".
func main() {
	fmt.Println(getSongs(fairyAnimals))
}
// getSongs maps every animal to its verse, preserving input order.
func getSongs(animals []string) []string {
	var verses []string
	for _, beast := range animals {
		verses = append(verses, getSong(beast))
	}
	return verses
}
// catchAnimal builds the "She swallowed the X to catch the Y" chain for the
// given animal, walking backwards down the swallowing order. The fly (index 0,
// which also covers unknown animals via the map's zero value) and the horse
// (index 6) get no chain.
func catchAnimal(animal string) string {
	idx := animalsMap[animal]
	if idx == 0 || idx == 6 {
		return ""
	}
	chain := ""
	for i := idx; i > 0; i-- {
		chain += fmt.Sprintf("She swallowed the %s to catch the %s,\n", animalsMap2[i], animalsMap2[i-1])
	}
	return chain
}
// getSong assembles one verse: the intro line, the animal's reaction,
// the swallow chain, and the refrain. The fly and horse verses use a
// shorter two-line layout.
func getSong(animal string) string {
	intro := "There was an old lady who swallowed a " + animal + ". "
	reaction := swallowAnimal[animal]
	chain := catchAnimal(animal)
	refrain := "I don't know why she swallowed a fly - perhaps she'll die!"
	switch animal {
	case "horse":
		return fmt.Sprintf("%s\n %s\n", intro, reaction)
	case "fly":
		return fmt.Sprintf("%s\n %s\n", intro, refrain)
	default:
		return fmt.Sprintf("\n%s\n%s\n%s\n%s\n", intro, reaction, chain, refrain)
	}
}
<file_sep>/Jessie_test.go
package main
import (
"testing"
)
// TestGetCertainScoreReports expects exactly the three passing reports
// (score > 59) back, each with a score in the valid (59, 100] window.
func TestGetCertainScoreReports(t *testing.T) {
	got := getCertainScoreReports(schoolReports, certainScore)
	if len(got) != 3 {
		t.Error("test failed")
	}
	for _, r := range got {
		if r.score <= 59 || r.score > 100 || r.score < 0 {
			t.Error("test failed")
		}
	}
}
// TestGetCertainClassReports expects both class-"two" reports back and
// nothing from any other class.
func TestGetCertainClassReports(t *testing.T) {
	got := getCertainClassReports(schoolReports, certainClass)
	if len(got) != 2 {
		t.Error("test failed")
	}
	for _, r := range got {
		if r.class != "two" {
			t.Error("test failed")
		}
	}
}
// TestGetCertainNameReports expects exactly one report whose name is
// longer than 10 bytes.
func TestGetCertainNameReports(t *testing.T) {
	got := getCertainNameReports(schoolReports, certainNamelen)
	if len(got) != 1 {
		t.Error("test failed")
	}
	for _, r := range got {
		if len(r.name) <= 10 {
			t.Error("test failed")
		}
	}
}
<file_sep>/homework2_test.go
package main
import (
"testing"
)
// TestCatchAnimal: the first ("fly") and last ("horse") animals must
// produce an empty swallow chain.
func TestCatchAnimal(t *testing.T) {
	for _, animal := range []string{"fly", "horse"} {
		if chain := catchAnimal(animal); chain != "" {
			t.Error("test failed")
		}
	}
}
<file_sep>/Jessie.go
//Three filter functions over a []score: find reports above a given score, reports from a given class, and reports whose name is longer than 10 characters.
package main
import (
"fmt"
)
// score is one line of a school report card.
type score struct {
name string // student name
class string // class label, e.g. "one", "two"
score int // numeric grade
}
// Filter thresholds shared by main and the tests.
const (
certainScore = 59 // passing threshold (strictly greater wins)
certainClass = "two" // class to select
certainNamelen = 10 // minimum name length (strictly greater wins)
)

// Demo data set shared by main and the tests.
var schoolReports = []score{
{"tom", "one", 40},
{"jerry", "one", 50},
{"john", "two", 85},
{"jessie.xie", "two", 100},
{"teddddddddddd", "three", 60},
}
// main runs all three filters over the demo data and prints the results.
func main() {
	passing := getCertainScoreReports(schoolReports, certainScore)
	inClass := getCertainClassReports(schoolReports, certainClass)
	longNames := getCertainNameReports(schoolReports, certainNamelen)
	fmt.Println(passing, inClass, longNames)
}
//分数大于59
func getCertainScoreReports(reports []score, condition int) []score {
var returnReports []score
for _, report := range reports {
if report.score > condition {
returnReports = append(returnReports, report)
}
}
return returnReports
}
//某个特定班级的
func getCertainClassReports(reports []score, condition string) []score {
var returnReports []score
for _, report := range reports {
if report.class == condition {
returnReports = append(returnReports, report)
}
}
return returnReports
}
//找出名字长度超过10的
func getCertainNameReports(reports []score, condition int) []score {
var returnReports []score
for _, report := range reports {
if len(report.name) > condition {
returnReports = append(returnReports, report)
}
}
return returnReports
}
// func getCertainReport(reports []score, selector interface{}, condition interface{}) []score {
// var returnReports []score
// for _, report := range reports {
// temp := func(selector interface{}, condition interface{}) interface{} {
// return selector=condition
// }
// if temp {
// returnReports = append(returnReports, report)
// }
// }
// return returnReports
// }
// func getScoreSelector(reports []score) interface{} {
// var returnSelector interface{}
// for _, report := range reports {
// returnSelector = report.score
// }
// return returnSelector
// }
| 070685afed02569db643e80c3988a50e84f1aa34 | [
"Go"
] | 4 | Go | jessie110/codefairies-homework1 | 9114e264cb47e9346a20903e22f4c29cb296f81e | 8a46b3b4f2970cd7a15c10bd45cb05627430745d |
refs/heads/master | <repo_name>Andrei199541/Angular-Adminca<file_sep>/template/adminca_v2.0.1/tools/gulp/gulpfile.js
// Build entry point: loads the per-theme tasks from ./gulp-tasks and defines
// the shared asset-copy helpers below.
// NOTE(review): the ';' at the end of the first line terminates the `var`
// statement, so every name below (requireDir, browserSync, imagemin, ...)
// is assigned as an implicit GLOBAL via a comma-expression chain. Other task
// files appear to rely on this leak (watch.js uses `browserSync` without
// requiring it), so changing the ';' to ',' here in isolation would break
// them -- fix both files together.
var gulp = require('gulp');
requireDir = require('require-dir'),
browserSync = require('browser-sync'),
imagemin = require('gulp-imagemin'),
pngquant = require('imagemin-pngquant'),
rename = require('gulp-rename'),
tasks = requireDir('./gulp-tasks'),
readConfig = require('read-config'),
config = readConfig('./config.json');
/*
 * BUILD ALL THEMES
 * Aggregate task: running `gulp build` triggers every per-theme build below.
 */
gulp.task('build', [
    'build:admin_1',
    'build:admin_2',
    'build:admin_3',
    'build:admin_4',
    'build:admin_5',
    'build:admin_6',
    'build:admin_7',
    'build:admin_8',
]);

/*
 * BUILD ALL FILES FOR ONE THEME
 * All eight themes are built identically, so the `build:admin_N` tasks are
 * registered from a single template instead of repeating the same body
 * eight times. Each task:
 *   - runs <theme>:pug  (HTML pages)
 *   - runs <theme>:sass (CSS bundles and skins)
 *   - runs <theme>:js   (JS bundle)
 *   - copies compressed images, vendor plugins and demo data into the
 *     theme's assets folder (helpers defined below).
 */
['admin_1', 'admin_2', 'admin_3', 'admin_4',
 'admin_5', 'admin_6', 'admin_7', 'admin_8'].forEach(function(theme) {
    gulp.task('build:' + theme, function() {
        gulp.start(theme + ':pug');
        gulp.start(theme + ':sass');
        gulp.start(theme + ':js');
        build_img('/' + theme + '/html/assets/img');
        build_vendors('/' + theme + '/html/assets/vendors');
        build_demo_data('/' + theme + '/html/assets/demo');
    });
});
// Compress every source image and write the result into the given theme
// assets folder (output is theme-relative, e.g. '/admin_1/html/assets/img').
function build_img(output) {
    var compressOpts = {
        progressive: true,                      // compress .jpg
        interlaced: true,                       // compress .gif
        svgoPlugins: [{removeViewBox: false}],  // compress .svg
        use: [pngquant()],
        optimizationLevel: 3                    // compression level 0-7
    };
    return gulp.src(config.path.src + '/img/**/*.*')
        .pipe(imagemin(compressOpts))
        .pipe(gulp.dest(config.path.output + output));
}
// Copy vendor plugins (both bower components and bundled vendors) verbatim
// into the given theme assets folder.
function build_vendors(output) {
    [config.path.bower_components, config.path.vendors].forEach(function(srcDir) {
        gulp.src(srcDir + '/**/*.*')
            .pipe(gulp.dest(config.path.output + output));
    });
}
// Copy static demo fixtures verbatim into the given theme assets folder.
function build_demo_data(output) {
    var demoGlob = config.path.src + '/demo/**/*.*';
    gulp.src(demoGlob).pipe(gulp.dest(config.path.output + output));
}
<file_sep>/template/adminca_v2.0.1/tools/gulp/gulp-tasks/sass.js
var gulp = require('gulp'),
sass = require('gulp-sass'),
cleanCSS = require('gulp-clean-css'),
autoprefixer = require('gulp-autoprefixer'),
concat = require('gulp-concat'),
rename = require('gulp-rename'),
bourbon = require('node-bourbon'),
gulpif = require('gulp-if'),
readConfig = require('read-config'),
config = readConfig('./config.json');
/*
* BUILD ALL CSS FILES FOR ALL THEMES
*/
gulp.task('build:sass', [
'admin_1:sass',
'admin_2:sass',
'admin_3:sass',
'admin_4:sass',
'admin_5:sass',
'admin_6:sass',
'admin_7:sass',
'admin_8:sass',
]);
/*
 * BUILD ALL CSS FILES FOR ONE THEME
 * All eight themes run the exact same three pipelines, so the
 * `admin_N:sass` tasks are registered from a single template instead of
 * eight near-identical copies:
 *   1. bundle -- ui components (bootstrap_4/main.scss), the theme layout
 *      (layouts/admin_N/layout.scss) and vendor resets (vendors/vendors.scss)
 *      compiled, concatenated to main.css, autoprefixed, then written both
 *      plain and as a minified .min copy;
 *   2. themes -- color-skin files, minified only when config.compile.cssMinify
 *      is on;
 *   3. pages  -- page-level stylesheets (already CSS, no sass step),
 *      optionally minified.
 */
function registerSassTask(theme) {
    gulp.task(theme + ':sass', function() {
        var sassConfig = config.build[theme].sass;

        //== Main bundle: plain + .min copies.
        gulp.src(sassConfig.src.bundle)
            .pipe(sass({
                includePaths: bourbon.includePaths
            }))
            .pipe(concat('main.css'))
            .pipe(autoprefixer({
                browsers: ['last 16 versions'],
                cascade: false
            }))
            .pipe(gulp.dest(sassConfig.output.bundle))
            .pipe(rename({suffix: '.min'}))
            .pipe(cleanCSS({compatibility: 'ie8'}))
            .pipe(gulp.dest(sassConfig.output.bundle));

        //== Theme skin files.
        gulp.src(sassConfig.src.themes)
            .pipe(sass({
                includePaths: bourbon.includePaths
            }))
            .pipe(gulpif(config.compile.cssMinify, cleanCSS({compatibility: 'ie8'})))
            .pipe(autoprefixer({
                browsers: ['last 16 versions'],
                cascade: false
            }))
            .pipe(gulp.dest(sassConfig.output.themes));

        //== Page-level styles.
        gulp.src(sassConfig.src.pages)
            .pipe(gulpif(config.compile.cssMinify, cleanCSS({compatibility: 'ie8'})))
            .pipe(autoprefixer({
                browsers: ['last 16 versions'],
                cascade: false
            }))
            .pipe(gulp.dest(sassConfig.output.pages));
    });
}
['admin_1', 'admin_2', 'admin_3', 'admin_4',
 'admin_5', 'admin_6', 'admin_7', 'admin_8'].forEach(registerSassTask);
<file_sep>/template/adminca_v2.0.1/docs/assets/js/custom.js
// Docs-site behaviors: smooth-scroll for in-page links and the mobile
// sidebar toggle. The -90px offset keeps targets clear of the fixed header.
$(function(){
"use strict";
// Smooth-scroll for any anchor carrying a data-href attribute (1s animation).
$("a[data-href]").on("click", function(e) {
var t = $(this);
$("html, body").stop().animate({
scrollTop: $(t.attr("href")).offset().top - 90
}, 1e3), e.preventDefault()
});
// Smooth-scroll for sidebar table-of-contents hash links (0.5s animation).
$("#sidebarScroll").find(".docs-sidebar-list li a[href^='#']").on('click', function (e) {
e.preventDefault();
var hash = this.hash;
$('html, body').animate({
scrollTop: $(hash).offset().top - 90
}, 500, function () {
});
});
// NOTE(review): jQuery's .click(data, handler) treats the string
// '.sidebar-toggler' as eventData, NOT as a delegate selector, so this
// handler fires for a click anywhere inside <header>. If only the toggler
// button should toggle, this wants $('header').on('click',
// '.sidebar-toggler', ...) -- confirm intended behavior before changing.
$('header').click('.sidebar-toggler',function(){
$('.docs-sidebar').toggleClass('opened');
});
});<file_sep>/adminca_v2.0.1/adminca_v2.0.1/tools/gulp/gulp-tasks/watch.js
var gulp = require('gulp'),
readConfig = require('read-config'),
config = readConfig('./config.json');
/*
 * Watchers
 * `gulp serve` starts a BrowserSync static server over the project root and
 * re-runs the matching build task whenever a source file changes; the
 * watch:* tasks below then reload the browser.
 * NOTE(review): `browserSync` is never required in this file -- it only
 * resolves because gulpfile.js leaks it as an implicit global (its require
 * chain is missing a `var` comma). Confirm before tightening either file.
 */
gulp.task('serve', function () {
browserSync.init({
server: {
baseDir: "../"
},
host: 'localhost',
port: 3000,
});
// Rebuild (and reload, via the watch:* tasks) on any source change.
gulp.watch(config.path.src+'/js/**/*.js', ['watch:js']);
gulp.watch(config.path.src+'/sass/**/*.+(scss|sass)', ['watch:sass']);
gulp.watch(config.path.src+'/pug/**/*.pug', ['watch:pug']);
});
/*
 * Rebuild-and-reload tasks: each watcher task depends on the matching
 * build task, then tells BrowserSync to reload once the build is done.
 * The three tasks are identical apart from their names and dependencies,
 * so they are registered from a single [taskName, buildDep] table.
 */
[['watch:js', 'build:js'],
 ['watch:sass', 'build:sass'],
 ['watch:pug', 'build:pug']].forEach(function(pair) {
    gulp.task(pair[0], [pair[1]], function(done) {
        browserSync.reload();
        done();
    });
});
<file_sep>/template/adminca_v2.0.1/tools/gulp/gulp-tasks/js.js
var gulp = require('gulp'),
uglify = require('gulp-uglify'),
concat = require('gulp-concat'),
rename = require('gulp-rename'),
jshint = require('gulp-jshint'),
gulpif = require('gulp-if'),
readConfig = require('read-config'),
config = readConfig('./config.json'),
args = require('yargs').argv;
/*
 * BUILD ALL JS FILES FOR ALL THEMES
 * Aggregate task: delegates to the per-theme `admin_N:js` tasks below.
 */
gulp.task('build:js',[
'admin_1:js',
'admin_2:js',
'admin_3:js',
'admin_4:js',
'admin_5:js',
'admin_6:js',
'admin_7:js',
'admin_8:js',
]);
/*
 * BUILD JS FILES FOR ONE THEME
 * Each theme: lint (jshint) -> concat to app.js -> write plain copy ->
 * write uglified .min copy, plus a verbatim copy of the theme's
 * page-level scripts. All eight tasks are identical apart from the theme
 * name, so they are registered from a single template.
 */
function registerJsTask(theme) {
    gulp.task(theme + ':js', function() {
        var jsConfig = config.build[theme].js;
        gulp.src(jsConfig.src)
            .pipe(jshint())
            .pipe(jshint.reporter('jshint-stylish'))
            .pipe(jshint.reporter('fail'))      // abort the task on lint errors
            .pipe(concat('app.js'))
            .pipe(gulp.dest(jsConfig.output))
            .pipe(rename({suffix: '.min'}))
            .pipe(uglify())
            .pipe(gulp.dest(jsConfig.output));
        //== Page level scripts: copied as-is, not bundled.
        gulp.src(config.path.src + '/js/theme/' + theme + '/scripts/**/*.js')
            .pipe(gulp.dest(jsConfig.output + '/scripts'));
    });
}
['admin_1', 'admin_2', 'admin_3', 'admin_4',
 'admin_5', 'admin_6', 'admin_7', 'admin_8'].forEach(registerJsTask);
<file_sep>/adminca_v2.0.1/adminca_v2.0.1/src/js/theme/admin_3/layout.js
// Below 992px the mini sidebar makes no sense: force the off-canvas
// ("drawer") sidebar on small screens. Re-checked on load, resize and scroll.
$(window).on('load resize scroll', function () {
if ($(this).width() < 992) $('body').removeClass('sidebar-mini').addClass('drawer-sidebar');
});
// Layout settings for the admin_3 theme: sidebar toggling, the
// fixed-navbar / drawer-sidebar switches, and live color-skin switching.
$(function () {
    var $body = $('body');

    // Sidebar toggle: in drawer mode show the backdrop overlay,
    // otherwise collapse/expand the sidebar to its mini variant.
    $('.js-sidebar-toggler').click(function () {
        if ($body.hasClass('drawer-sidebar')) {
            $('#sidebar').backdrop();
        } else {
            $body.toggleClass('sidebar-mini');
        }
    });

    // Fixed-navbar checkbox: class follows the checked state.
    $('#_fixedNavbar').change(function () {
        $body.toggleClass('fixed-navbar', $(this).is(':checked'));
    });

    // Drawer-sidebar checkbox; mini mode is cleared shortly afterwards so
    // the two layout modes don't clash mid-transition.
    $('#_drawerSidebar').change(function () {
        $body.toggleClass('drawer-sidebar', $(this).is(':checked'));
        setTimeout(function () {
            $body.removeClass('sidebar-mini');
        }, 200);
    });

    // Color-skin radios: lazily inject the skin stylesheet, retarget its
    // href on later changes, and drop it entirely for the default skin.
    $('.color-skin-box input:radio').change(function () {
        var skin = $(this).val();
        var $link = $('#theme-style');
        if (skin == 'default') {
            $link.remove();
        } else if ($link.length) {
            $link.attr('href', 'assets/css/themes/' + skin + '.css');
        } else {
            $('head').append("<link href='assets/css/themes/" + skin + ".css' rel='stylesheet' id='theme-style' >");
        }
    });
});
<file_sep>/adminca_v2.0.1/adminca_v2.0.1/tools/gulp/gulp-tasks/pug.js
var gulp = require('gulp'),
	pug = require('gulp-pug'),
	data = require('gulp-data'),
	htmlbeautify = require('gulp-html-beautify'),
	fs = require('fs'),
	gulpif = require('gulp-if'),
	readConfig = require('read-config'),
	config = readConfig('./config.json');
/*
 * All eight theme variants share a byte-identical pug pipeline, so the
 * per-theme tasks are generated from this list instead of repeating the
 * task body eight times (the registered task names are unchanged).
 */
var THEMES = ['admin_1', 'admin_2', 'admin_3', 'admin_4',
              'admin_5', 'admin_6', 'admin_7', 'admin_8'];
/*
 * BUILD ALL HTML FILES FOR ALL THEMES
 */
gulp.task('build:pug', THEMES.map(function(theme) { return theme + ':pug'; }));
// html-beautify settings shared by every theme build.
var options = {
	indentSize: 2,
	unformatted: [
		// https://www.w3.org/TR/html5/dom.html#phrasing-content
		'abbr', 'area', 'b', 'bdi', 'bdo', 'br', 'cite',
		'code', 'data', 'datalist', 'del', 'dfn', 'em', 'embed', 'i', 'ins', 'kbd', 'keygen', 'map', 'mark', 'math', 'meter', 'noscript',
		'object', 'output', 'progress', 'q', 'ruby', 's', 'samp', 'small',
		'strong', 'sub', 'sup', 'template', 'time', 'u', 'var', 'wbr', 'text',
		'acronym', 'address', 'big', 'dt', 'ins', 'strike', 'tt'
	]
};
/*
 * Register one '<theme>:pug' task per variant: compile the theme's pug
 * sources (with the shared menu definition injected), beautify, and
 * write the HTML to the theme's output directory.
 */
THEMES.forEach(function(theme) {
	gulp.task(theme + ':pug', function() {
		return gulp.src(config.build[theme].pug.src)
			.pipe(data(function(file) {
				// Shared menu definition available to every template.
				return JSON.parse(fs.readFileSync(config.path.src+'/pug/menu.json'))
			}))
			.pipe(pug())
			.pipe(htmlbeautify(options))
			.pipe(gulp.dest(config.build[theme].pug.output));
	});
});
<file_sep>/adminca_v2.0.1/adminca_v2.0.1/src/js/theme/admin_4/layout.js
$(function () {
	// Fold any navbar tabs that overflow into the dropdown on startup.
	hideLastTab();
	// Boxed vs. fluid layout switch (radio value "1" => boxed).
	$("[name='layout-style']").change(function () {
		$('body').toggleClass('boxed-layout', Boolean(+$(this).val()));
	});
	// Theme color skins: swap (or remove) the extra stylesheet link.
	$('.color-skin-box input:radio').change(function () {
		var val = $(this).val();
		if (val == 'default') {
			$('#theme-style').remove();
			return;
		}
		if ($('#theme-style').length) {
			$('#theme-style').attr('href', 'assets/css/themes/'+val+'.css');
		} else {
			$('head').append("<link href='assets/css/themes/"+val+".css' rel='stylesheet' id='theme-style' >");
		}
	});
});
/**
 * Collapse overflowing navbar tabs into the "more" dropdown.
 * While the tab strip is wider than its wrapper, the second-to-last nav
 * item (the last item is the dropdown toggle itself) is moved into the
 * dropdown menu; recurses until everything fits.
 *
 * Fix: the original re-queried `tabsDropdown` a second time inside the
 * if-branch, shadowing the identical value computed above — dead code,
 * removed.
 */
function hideLastTab() {
	var boxWidth = $('.navbar-tabs-wrapper').outerWidth(),
		tabWidth = $('.top-navbar-tabs').outerWidth(),
		tabsDropdown = $('.top-navbar-tabs').find('.tabs-dropdown');
	if (tabWidth > boxWidth) {
		var lastChild = $('.top-navbar-tabs .nav-item:eq(-2)');
		$("<a/>", {
			class: "dropdown-item",
			href: lastChild.find('.nav-link').attr('href'),
			'data-toggle': 'tab',
			html: lastChild.find('.nav-link').html(),
		}).prependTo(tabsDropdown.find('.dropdown-menu'));
		lastChild.remove();
		hideLastTab();
	}
}
<file_sep>/template/adminca_v2.0.1/src/js/theme/admin_2/layout.js
$(function () {
	// Activate the MetisMenu plugin on the sidebar navigation.
	$(".metismenu").metisMenu();
	// Sidebar toggle: drawer mode shows the backdrop overlay, classic
	// mode slides the sidebar open/closed; the toggler mirrors the state.
	$('.js-sidebar-toggler').click(function () {
		if ($('body').hasClass('drawer-sidebar')) {
			$('.page-sidebar').backdrop();
		} else {
			$('.page-sidebar').toggleClass('opened');
		}
		$(this).toggleClass('active');
	});
	// Boxed vs. fluid layout switch (radio value "1" => boxed).
	$("[name='layout-style']").change(function () {
		$('body').toggleClass('boxed-layout', Boolean(+$(this).val()));
	});
	// Drawer sidebar on/off; always start from a closed sidebar.
	$('#_drawerSidebar').change(function () {
		$('.page-sidebar').removeClass('opened');
		$('body').toggleClass('drawer-sidebar', $(this).is(':checked'));
	});
	// Theme color skins: swap (or remove) the extra stylesheet link.
	$('.color-skin-box input:radio').change(function () {
		var val = $(this).val();
		if (val == 'default') {
			$('#theme-style').remove();
			return;
		}
		if ($('#theme-style').length) {
			$('#theme-style').attr('href', 'assets/css/themes/'+val+'.css');
		} else {
			$('head').append("<link href='assets/css/themes/"+val+".css' rel='stylesheet' id='theme-style' >");
		}
	});
});
| c75015bf8b3fba427058ff0216ceb47ec06b2177 | [
"JavaScript"
] | 9 | JavaScript | Andrei199541/Angular-Adminca | 4310392195608cb75aef9a77ae3cfa33e98a4f27 | 986209a5288c39d5442c42cfffae1379e81e3710 |
refs/heads/master | <repo_name>MrHacker499/illusion-music-bot<file_sep>/README.md
## Illusion Music Bot
### Created by Tetrabyte#4866
---
### If you need help, you can DM Tetrabyte#4866 on Discord or submit an issue describing what you're having trouble with.
---
### Todo List before public
- [x] Setup base bot project
- [x] Setup command handler
- [x] Add ping command
- [ ] Add play command
- [ ] Add stop command
- [ ] Add volume command
- [ ] Add pause/resume command
- [ ] Add queue command
- [ ] Add help command
- [ ] Add shuffle command
- [ ] Add remove command
- [ ] Add repeat command
<file_sep>/commands.js
const config = require('./config.js');
// Command registry keyed by command name.
// Fix: this was `const cmds = [];` — an Array misused as a string-keyed
// map; a plain object is the correct container.
const cmds = {};
cmds.ping = {
	name: `ping`,
	help: `Just a test command to see if the bot is online.`,
	trigger: ({ client, msg, params, raw, clean }) => {
		// Fix: measure the round trip with timestamps. The old version
		// counted setInterval(fn, 1) ticks, which the event loop clamps,
		// so the reported latency was wildly inaccurate.
		const started = Date.now();
		msg.channel.send('Pinging').then((m) => {
			m.edit('Pong : **' + (Date.now() - started) + 'ms**');
		});
	}
};
cmds.play = {
	name: `play`,
	help: `Play some music using a youtube link or search query.`,
	trigger: ({ client, msg, params, raw, clean }) => {
		// TODO: implement the play command.
	}
};
cmds.stop = {
	name: `stop`,
	help: `Make the bot stop playing music and disconnect from the channel.`,
	trigger: ({ client, msg, params, raw, clean }) => {
		// TODO: implement the stop command.
	}
};
cmds.volume = {
	name: `volume`,
	help: `Change the current volume that the music is playing on.`,
	trigger: ({ client, msg, params, raw, clean }) => {
		// TODO: implement the volume command.
	}
};
cmds.pause = {
	name: `pause`,
	help: `Pause the current playing song.`,
	trigger: ({ client, msg, params, raw, clean }) => {
		// TODO: implement the pause command.
	}
};
cmds.resume = {
	name: `resume`,
	help: `Resume the music playing if you paused it.`,
	trigger: ({ client, msg, params, raw, clean }) => {
		// TODO: implement the resume command.
	}
};
cmds.queue = {
	name: `queue`,
	help: `Get a list of the songs currently in the queue.`,
	trigger: ({ client, msg, params, raw, clean }) => {
		// TODO: implement the queue command.
	}
};
cmds.shuffle = {
	name: `shuffle`,
	help: `Shuffle the current queue.`,
	trigger: ({ client, msg, params, raw, clean }) => {
		// TODO: implement the shuffle command.
	}
};
cmds.remove = {
	name: `remove`,
	help: `Remove a song from the queue.`,
	trigger: ({ client, msg, params, raw, clean }) => {
		// TODO: implement the remove command.
	}
};
cmds.repeat = {
	name: `repeat`,
	help: `Make the bot repeat the current song until you turn it off.`,
	trigger: ({ client, msg, params, raw, clean }) => {
		// TODO: implement the repeat command.
	}
};
cmds.help = {
	name: `help`,
	help: `Returns with a list of commands for the bot.`,
	trigger: ({ client, msg, params, raw, clean }) => {
		// TODO: implement the help command.
	}
};
module.exports = cmds;
"Markdown",
"JavaScript"
] | 2 | Markdown | MrHacker499/illusion-music-bot | 0893e0368b00548313515bedf84b71a4462fb6e9 | e3db0bcc4f12bd13fbbfb5ff231f94e0fcdb5cfa |
refs/heads/master | <file_sep>module.exports = {
read: function(req, callback) {
var data = '';
req.on('data', function(chunk) {
data += chunk;
});
req.on('end', function() {
var bytes = [];
for (var i=0;i<data.length;i++) {
bytes.push(data.charCodeAt(i));
}
callback(bytes);
});
}
};<file_sep># document-exchange-force
Repository for Document Exchange Service for testing external file store integration with HTML DocMan
<file_sep>var fs = require('fs'),
sqlite = require('sqlite3'),
uuid = require('node-guid');
/**
 * Open the SQLite database at `dbFile`, creating the `documents` table
 * on first run (i.e. when the file did not exist yet). Returns the
 * sqlite3 Database handle.
 */
function init(dbFile) {
    var firstRun = !fs.existsSync(dbFile);
    var handle = new sqlite.Database(dbFile);
    if (firstRun) {
        handle.run(
            'CREATE TABLE documents (' +
                'id TEXT, ' +
                'business TEXT, ' +
                'category TEXT, ' +
                'subcategory TEXT, ' +
                'type TEXT, ' +
                'extension TEXT, ' +
                'relationship TEXT, ' +
                'tin TEXT, ' +
                'title TEXT, ' +
                'uuid TEXT, ' +
                'doc BLOB' +
            ')'
        );
    }
    return handle;
}
var db = init('docs.db');
module.exports = {
search: function(tin, callback) {
db.all('SELECT id, uuid, type, title FROM documents WHERE tin="' + tin + '"',
function(err, rows) {
if (callback) {
callback(err, rows);
}
}
);
},
put: function(meta, doc, callback) {
db.run(
'INSERT INTO documents (' +
'id, ' +
'business, ' +
'category, ' +
'subcategory, ' +
'type, ' +
'extension, ' +
'relationship, ' +
'tin, ' +
'title, ' +
'uuid, ' +
'doc' +
') VALUES(?,?,?,?,?,?,?,?,?,?,?)',
uuid.new(),
meta.business,
meta.category,
meta.subcategory,
meta.type,
meta.extension,
meta.relationship,
meta.tin,
meta.title,
meta.uuid,
doc,
function(err) {
if (callback) {
callback(err);
}
}
);
}
};<file_sep>var express = require('express'),
mapper = require('./mapper'),
reader = require('./reader'),
search = require('./search'),
store = require('./store'),
app = express();
/**
 * Read a query-string parameter off the request.
 * Returns null when the request, its query object, or the value itself
 * is missing/falsy.
 */
function getParam(req, name) {
    var value = req && req.query && req.query[name];
    return value ? value : null;
}
// Permissive CORS: reflect the caller's origin and requested headers,
// and answer preflight (OPTIONS) requests immediately.
app.use(function(req, res, next) {
    res.header('Access-Control-Allow-Credentials', 'true');
    res.header('Access-Control-Allow-Headers', req.get('Access-Control-Request-Headers'));
    res.header('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS');
    res.header('Access-Control-Allow-Origin', req.get('origin'));
    if (req.method === 'OPTIONS') {
        res.sendStatus(200);
        return;
    }
    next();
});
// GET /search?tin=... — look up stored document metadata by TIN.
app.get('/search', function(req, res) {
    search.exec(getParam(req, 'tin'), function(results) {
        res.end(results);
    });
});
// PUT /<segment>/<segment> — store an uploaded document plus metadata.
app.put('/*/*', function(req, res) {
    var meta = mapper.mapFrom(req);
    reader.read(req, function(doc) {
        // Fix: the gathered `doc` bytes were discarded — `null` was
        // stored instead. Persist the actual payload as the BLOB.
        store.put(meta, Buffer.from(doc), function(err) {
            if (err) {
                console.log(err);
                res.status(500).end();
            } else {
                res.status(200).end();
            }
        });
    });
});
app.listen(process.env.PORT || 8080); | 57310730e9008caee8378bb5cde54f17ea0726bc | [
"JavaScript",
"Markdown"
] | 4 | JavaScript | roycenobles/docexc | cf9edbc1f44a7fb8a4e31686025d93102d0ee66c | 8183bd3e06271555ec974db6df7022834adcb8a3 |
refs/heads/master | <file_sep>import argparse
import onnx
import os.path
import sys
from onnx_tf.backend import prepare
import numpy as np
import tensorflow as tf
# Test inputs
# Demo LSTM inputs used for the post-conversion sanity check:
# 5 time steps, batch size 1, 3 features per step -> shape (5, 1, 3).
# Values mirror the PyTorch example this model was exported from — TODO confirm.
TEST_INPUTS = np.array([[[-0.5525, 0.6355, -0.3968]],[[-0.6571, -1.6428, 0.9803]],[[-0.0421, -0.8206, 0.3133]],[[-1.1352, 0.3773, -0.2824]],[[-2.5667, -1.4303, 0.5009]]])
# Initial hidden and cell state, each of shape (1, 1, 3).
TEST_INITIAL_H = np.array([[[0.5438, -0.4057, 1.1341]]])
TEST_INITIAL_C = np.array([[[-1.1115, 0.3501, -0.7703]]])
def load_graph(frozen_graph_filename):
    """Deserialize a frozen TensorFlow GraphDef file and import it into a
    fresh Graph, returning that Graph.

    Every imported op is prefixed with "prefix" so its name in the new
    graph is "prefix/<original_name>".
    """
    with tf.gfile.GFile(frozen_graph_filename, "rb") as proto_file:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(proto_file.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name="prefix")
    return graph
## Parse command-line arguments
argparser = argparse.ArgumentParser(description='')
argparser.add_argument("onnx_model", type=str, help="Name of ONNX model file (mandatory)")
argparser.add_argument("tf_model", type=str, help="Name of Tensorflow model file (mandatory)")
args = argparser.parse_args()
## Load ONNX model and convert it to a TensorFlow representation.
model = onnx.load(args.onnx_model)
tf_rep = prepare(model)
'''
## Print some information
# tf_rep.uninitialized = names of input nodes
# tf_rep.input_dict = dict from nodename to tensor
# tf_rep.predict_net.external_output = names of output nodes
print("Input placeholders:")
for input_name in tf_rep.predict_net.external_input:
    it = tf_rep.input_dict[input_name]
    print(" %s, shape %s, %s" % (input_name, it.shape, it.dtype))
print("\nInput dictionary:")
for input_name in tf_rep.input_dict:
    it = tf_rep.input_dict[input_name]
    print(" %s, shape %s, %s" % (input_name, it.shape, it.dtype))
print("\nOutput tensors:")
for output_name in tf_rep.predict_net.external_output:
    ot = tf_rep.predict_net.output_dict[output_name]
    print(" %s, shape %s, %s" % (ot.name, ot.shape, ot.dtype))
sys.stdout.flush()
'''
## Write graph
absolute_tf_path = os.path.realpath(args.tf_model)
# NOTE: `dir` and `file` shadow builtins; kept for byte-compatibility.
dir, file = os.path.split(absolute_tf_path)
as_text = file.endswith(".pbtxt") # text = .pbtxt; binary = .pb + as_text=False
print("\nWriting Tensorflow model as %s proto ..." % ("text" if as_text else "binary"))
sys.stdout.flush()
tf.train.write_graph(tf_rep.predict_net.graph.as_graph_def(), dir, file, as_text=as_text) # NOTE: exports the graph WITHOUT Variable values — careful!
print("Wrote Tensorflow model to %s." % absolute_tf_path)
# Round-trip check: reload the written graph and run it on the demo inputs.
graph = load_graph(absolute_tf_path)
print("Loaded Tensorflow model from %s." % absolute_tf_path)
with tf.Session(graph=graph) as sess:
    # "prefix/Squeeze_3:0" is the LSTM output tensor of this exported model.
    out = sess.run("prefix/Squeeze_3:0", feed_dict={"prefix/0:0": TEST_INPUTS, "prefix/1:0": TEST_INITIAL_H, "prefix/2:0": TEST_INITIAL_C})
    print(out)
<file_sep>import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import onnx
import numpy as np
from onnx_tf.backend import prepare
from onnx import helper
# True: build an LSTM in PyTorch and export it to lstm.onnx.
# False: load lstm.onnx back via onnx-tensorflow and run it.
generate_onnx = True
# Fixed seed so the randomly initialized LSTM weights are reproducible.
torch.manual_seed(1)
# Test inputs
# Demo sequence: 5 steps, batch 1, 3 features -> shape (5, 1, 3).
TEST_INPUTS = autograd.Variable(torch.FloatTensor([[[-0.5525, 0.6355, -0.3968]],[[-0.6571, -1.6428, 0.9803]],[[-0.0421, -0.8206, 0.3133]],[[-1.1352, 0.3773, -0.2824]],[[-2.5667, -1.4303, 0.5009]]]))
# Same steps as a list, for feeding the LSTM one element at a time.
TEST_INPUTS_ASLIST = [torch.FloatTensor([[-0.5525, 0.6355, -0.3968]]),
                      torch.FloatTensor([[-0.6571, -1.6428, 0.9803]]),
                      torch.FloatTensor([[-0.0421, -0.8206, 0.3133]]),
                      torch.FloatTensor([[-1.1352, 0.3773, -0.2824]]),
                      torch.FloatTensor([[-2.5667, -1.4303, 0.5009]])]
TEST_INPUTS_2 = autograd.Variable(torch.FloatTensor([[[-0.1658, 0.0353, -0.7295]],[[0.2575, -0.2657, -1.7373]],[[0.7332, 1.1558, 0.6375]]]))
# Initial states (unidirectional)
TEST_INITIAL_H = autograd.Variable(torch.FloatTensor([[[0.5438, -0.4057, 1.1341]]]))
TEST_INITIAL_C = autograd.Variable(torch.FloatTensor([[[-1.1115, 0.3501, -0.7703]]]))
# Initial states (bidirectional)
TEST_INITIAL_H_2 = autograd.Variable(torch.FloatTensor([[[0.4975, 0.2355, -1.6301]],
                                                        [[-0.2330, 0.6485, -0.0955]]]))
TEST_INITIAL_C_2 = autograd.Variable(torch.FloatTensor([[[-0.7467, 0.3893, 1.3873]],
                                                        [[ 0.7035, -1.7967, -0.4481]]]))
# Alternatively, generate inputs on the fly:
# inputs = [autograd.Variable(torch.randn((1, 3))) for _ in range(5)]
# hidden = (autograd.Variable(torch.randn(1, 1, 3)), autograd.Variable(torch.randn((1, 1, 3))))
if generate_onnx:
    # generate an LSTM and save it to lstm.onnx
    lstm = nn.LSTM(3, 3) # Input dim is 3, hidden dim is 3
    #lstm = nn.LSTM(3, 3, bidirectional=True)
    # "Loop style":
    #hidden = (TEST_INITIAL_H, TEST_INITIAL_C) # initialize the hidden state.
    #for i in TEST_INPUTS_ASLIST:
    # Step through the sequence one element at a time.
    # after each step, hidden contains the hidden state.
    # out, hidden = lstm(i.view(1, 1, -1), hidden)
    # print(out)
    # "Unroll style":
    # alternatively, we can do the entire sequence all at once.
    # the first value returned by LSTM is all of the hidden states throughout
    # the sequence. the second is just the most recent hidden state
    # (compare the last slice of "out" with "hidden" below, they are the same)
    # The reason for this is that:
    # "out" will give you access to all hidden states in the sequence
    # "hidden" will allow you to continue the sequence and backpropagate,
    # by passing it as an argument to the lstm at a later time
    # Add the extra 2nd dimension
    out, hidden = lstm(TEST_INPUTS, (TEST_INITIAL_H, TEST_INITIAL_C))
    #out, hidden = lstm(TEST_INPUTS, (TEST_INITIAL_H_2, TEST_INITIAL_C_2))
    print(out)
    print(hidden)
    # Export the (randomly initialized, seeded) model by tracing it on
    # the demo inputs. The exported graph is what the conversion and
    # Java loader scripts in this project consume.
    torch.onnx.export(lstm, (TEST_INPUTS, (TEST_INITIAL_H, TEST_INITIAL_C)), "lstm.onnx", verbose=False)
else:
    # read the model from lstm.onnx with onnx-tensorflow
    # NOTE: requires a previous run with generate_onnx = True so that
    # lstm.onnx exists on disk.
    model = onnx.load("lstm.onnx")
    tf_rep = prepare(model)
    import tensorflow as tf
    print(tf_rep.run({"0": TEST_INPUTS, "1": TEST_INITIAL_H, "2": TEST_INITIAL_C}))
<file_sep>import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Iterator;
import org.tensorflow.Graph;
import org.tensorflow.Session;
import org.tensorflow.Tensor;
import org.tensorflow.Operation;
/**
 * Loads a frozen TensorFlow graph (converted from the ONNX LSTM example)
 * and runs it on fixed demo inputs, printing the output tensor.
 *
 * Fix: Graph was the only closed resource — Session and every Tensor
 * leaked native memory. All of them implement AutoCloseable, so they are
 * now managed with try-with-resources.
 */
public class OnnxLoader {

	public static void main(String[] args) {
		// put the actual directory and filename of your model here
		byte[] graphDef = readAllBytesOrExit(Paths.get("/home/stefan/Hiwi/NeuralIntegration/onnx_to_tensorflow", "test.pb"));
		// Demo inputs: 5 time steps, batch 1, 3 features — plus the
		// initial hidden/cell state, matching the exported LSTM.
		float[][][] inputs = {{{-0.5525f, 0.6355f, -0.3968f}},{{-0.6571f, -1.6428f, 0.9803f}},{{-0.0421f, -0.8206f, 0.3133f}},{{-1.1352f, 0.3773f, -0.2824f}},{{-2.5667f, -1.4303f, 0.5009f}}};
		float[][][] initial_h = {{{0.5438f, -0.4057f, 1.1341f}}};
		float[][][] initial_c = {{{-1.1115f, 0.3501f, -0.7703f}}};

		try (Graph g = new Graph()) {
			g.importGraphDef(graphDef);
			try (Session s = new Session(g);
				 Tensor tInputs = Tensor.create(inputs);
				 Tensor tInitialH = Tensor.create(initial_h);
				 Tensor tInitialC = Tensor.create(initial_c);
				 // feed the input placeholders, fetch the LSTM output
				 Tensor<Float> tResult = s.runner()
						 .feed("0:0", tInputs)
						 .feed("1:0", tInitialH)
						 .feed("2:0", tInitialC)
						 .fetch("Squeeze_3:0")
						 .run().get(0).expect(Float.class)) {
				// Copy the result out and print one row per time step.
				float[][][] result = new float[5][1][3];
				tResult.copyTo(result);
				for (int i = 0; i < result.length; i++) {
					for (int j = 0; j < result[i].length; j++) {
						String outp = "";
						for (int k = 0; k < result[i][j].length; k++) {
							outp += Float.toString(result[i][j][k]) + " ";
						}
						System.out.println(outp);
					}
				}
			}
		}
	}

	// from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/java/src/main/java/org/tensorflow/examples/LabelImage.java
	/**
	 * Read the whole file at {@code path}, or terminate the JVM with exit
	 * code 1 if it cannot be read (demo-tool behavior, kept as-is).
	 */
	private static byte[] readAllBytesOrExit(Path path) {
		try {
			return Files.readAllBytes(path);
		} catch (IOException e) {
			System.err.println("Failed to read [" + path + "]: " + e.getMessage());
			System.exit(1);
		}
		return null;
	}
}
<file_sep>FROM phusion/baseimage
# Base tooling: git/wget for fetching sources, python3 + pip for the builds.
RUN apt-get update
RUN apt-get install -y git wget bzip2 python python3 python3-pip unzip
RUN pip3 install protobuf numpy tensorflow pybind11 pyyaml mkl-devel setuptools cmake cffi typing
# compile Pytorch from source
RUN git clone --recursive https://github.com/pytorch/pytorch
WORKDIR /pytorch
RUN python3 setup.py install
# get current protoc
# (ONNX needs a newer protoc than the distro ships.)
WORKDIR /protoc
RUN wget https://github.com/google/protobuf/releases/download/v3.5.1/protobuf-all-3.5.1.zip
RUN unzip protobuf-all-3.5.1.zip
WORKDIR /protoc/protobuf-3.5.1
RUN ./configure
RUN make install
# Make the freshly installed protobuf shared libraries findable at runtime.
ENV LD_LIBRARY_PATH=/usr/local/lib
# compile ONNX from source
WORKDIR /
RUN git clone --recursive https://github.com/onnx/onnx.git
WORKDIR /onnx
RUN python3 setup.py install
WORKDIR /
# Example invocation, mounting the conversion scripts into the container:
# docker run -v ~/Documents/workspace/onnx_to_tensorflow/:/onnx_to_tensorflow -v ~/Documents/workspace/onnx-tensorflow:/onnx-tensorflow -it onnx bash
<file_sep># Convert ONNX models to Tensorflow
This is a simple Python 3 script which uses the [onnx-tensorflow backend](https://github.com/onnx/onnx-tensorflow) to convert an ONNX model into a Tensorflow model.
First, follow the installation instructions of onnx-tensorflow.
Then run the conversion script, `convert_onnx_tf.py`, as follows:
```
python convert_onnx_tf.py model.onnx model.pb
```
You can choose whether the Tensorflow is saved in binary or text format by using the filename extension `.pb` or `.pbtxt`, respectively.
## Technical note
The Tensorflow file is written using the [write_graph](https://www.tensorflow.org/api_docs/python/tf/train/write_graph) method. This saves the graph structure and any constant weights, but not the weights in `Variable` nodes of the computation graph. As far as I can tell, onnx-tensorflow does not produce `Variable` nodes, so this seems fine; but I have not tested it thoroughly.
## Usage from Java
One usecase for this script is to be able to train a neural network with any framework that exports ONNX (e.g., PyTorch), and then run the trained model from Java, using the [Tensorflow Java binding](https://www.tensorflow.org/install/install_java). The file `OnnxLoader.java` in this repository illustrates how to do this.
| a776cda11bf120e3479bc7a80d32e835e457f375 | [
"Markdown",
"Java",
"Python",
"Dockerfile"
] | 5 | Python | coli-saar/onnx_to_tensorflow | d988755a7d3664a5415f1b8a566a9e295e5f8808 | ba5d1a5f14b015b87d21614d05e3b5fce8c11225 |
refs/heads/master | <file_sep>var messages = {results:
[ {username: 'josephine', text: 'Hey Hey Hey', roomname: 'Pinnacles'},
{username: 'john', text: 'Hello Hello Hello', roomname: 'Valley'} ]
};
var headers = {
'access-control-allow-origin': '*',
'access-control-allow-methods': 'GET, POST, PUT, DELETE, OPTIONS',
'access-control-allow-headers': 'content-type, accept',
'access-control-max-age': 10 // Seconds.
};
/**
 * Read the full request body and parse it as JSON.
 * Calls `callback(msgObj, parseError)`; `msgObj` is null when the body is
 * not valid JSON (the error is a new, backward-compatible 2nd argument).
 */
var gatherMessage = function(request, callback) {
  var dataStream = '';
  request.on('data', function(chunk) {
    dataStream += chunk.toString();
  });
  request.on('end', function() {
    var msgObj = null;
    var parseError = null;
    try {
      msgObj = JSON.parse(dataStream);
    } catch (err) {
      // Fix: a malformed body used to throw here and crash the server.
      parseError = err;
    }
    callback(msgObj, parseError);
  });
};

/**
 * Node HTTP handler for the chatterbox message API:
 *   OPTIONS /classes/messages -> CORS preflight
 *   GET     /classes/messages -> full message list as JSON
 *   POST    /classes/messages -> prepend a message to the list (201)
 * Any other URL answers 404.
 */
var requestHandler = function(request, response) {
  console.log('Serving request type ' + request.method + ' for url ' + request.url);
  var url = request.url.split('?')[0];
  if (url === '/classes/messages') {
    if (request.method === 'OPTIONS') {
      response.writeHead(200, headers);
      response.end();
    } else if (request.method === 'GET') {
      headers['Content-Type'] = 'application/json';
      response.writeHead(200, headers);
      response.end(JSON.stringify(messages));
    } else if (request.method === 'POST') {
      gatherMessage(request, function(msgObj, parseError) {
        if (parseError) {
          // Reject malformed JSON instead of crashing the process.
          response.writeHead(400, headers);
          response.end();
          return;
        }
        messages.results.unshift(msgObj);
        response.writeHead(201, headers);
        response.end(JSON.stringify(null));
      });
    }
  } else {
    response.writeHead(404, headers);
    response.end();
  }
};
exports.requestHandler = requestHandler; | 7d150ec13fbd221e07b1a4675266a3df1c1f0287 | [
"JavaScript"
] | 1 | JavaScript | joeyzaozaoli/chatterbox-server | f170cae2afcf962e0180c8c23c3e3633f8176030 | f17074579f272758dfe1cc0ffeb39c11e20d35e8 |
refs/heads/master | <file_sep>const Product = require('../models/Product');
// ПОЛУЧЕНИЕ полного списка продуктов.
const getAll = (req, res) => {
Product.findAll()
.then((data) => {
res.render('index.hbs', {
products: data,
});
})
.catch((err) => console.log(err));
};
// ДОБАВЛЕНИЕ в список новых продуктов.
const getCreate = (req, res) => {
res.render('create.hbs');
};
const create = (req, res) => {
Product.create({
name: req.body.name,
price: req.body.price,
})
.then(() => {
res.redirect('/products/');
})
.catch((err) => console.log(err));
};
// РЕДАКТИРОВАНИЕ информации по ID.
const getUpdate = (req, res) => {
Product.findAll({
where: { id: req.params.id },
})
.then((data) => {
res.render('edit.hbs', {
product: data[0],
});
})
.catch((err) => console.log(err));
};
const update = (req, res) => {
Product.update({
name: req.body.name,
price: req.body.price,
},
{
where: { id: req.body.id },
})
.then(() => {
res.redirect('/products/');
})
.catch((err) => console.log(err));
};
// УДАЛЕНИЕ информации по ID.
const remove = (req, res) => {
Product.destroy({
where: { id: req.params.id },
})
.then(() => {
res.redirect('/products/');
})
.catch((err) => console.log(err));
};
module.exports = {
getAll,
getCreate,
create,
getUpdate,
update,
remove,
};
<file_sep>module.exports = {
  // Seed: insert three demo rows into the `products` table.
  up: (queryInterface) => queryInterface.bulkInsert('products', [
    { name: 'Intel Core i5-2500K', price: '1000' },
    { name: 'AMD Ryzen 3600', price: '2000' },
    { name: 'NVIDIA GeForce GTX 1050Ti', price: '3000' }], {}),

  // Rollback: delete every row from the table.
  down: (queryInterface) => queryInterface.bulkDelete('products', null, {}),
};
<file_sep># CRUD_full
Функционал CRUD.
Front: HTML + Handlebars
Back: Node.js + Express.js
Database: MySQL + Sequelize
__________________________________________________________________
1. Запустить MySQL.
2. Если Ваши имя пользователя и пароль к серверу отличны от значений по умолчанию, то внесите изменения в код:
2.1. config -> config.json (строки 3, 4),
2.2. config -> database.js (строка 4: вместо 'root' - имя пользователя, в пустых кавычках '' - пароль),
2.3. config -> createDB.js (строки 10, 11).
3. В терминале редактора кода или командной строке (предварительно перейдя в каталог с проектом) прописать:
3.1. npm install - установка модулей, необходимых для функционирования программы;
3.2. npm start - запуск основного кода.
4. ОПЦИОНАЛЬНО: после создания БД и таблицы products в ней (пункт 3.2) можете дополнительно выполнить:
4.1. npx sequelize-cli db:seed:all - заполнение таблицы products заготовками (хранятся в папке seeders).
Если в процессе работы понадобится полностью очистить таблицу от размещённой в ней информации, то доступна команда:
4.2. npx sequelize-cli db:seed:undo:all
5. Адрес главной страницы:
http://localhost:3000/products/<file_sep>const bodyParser = require('body-parser');
const methodOverride = require('method-override');
const products = require('../controllers/products');
const urlencodedParser = bodyParser.urlencoded({ extended: false });
module.exports = (app) => {
app.use(bodyParser.json());
app.use(methodOverride('_method'));
// ПОЛУЧЕНИЕ полного списка продуктов.
app.get('/products/', products.getAll);
// ДОБАВЛЕНИЕ в список новых продуктов.
app.get('/products/create', products.getCreate);
app.post('/products/create', urlencodedParser, products.create);
// РЕДАКТИРОВАНИЕ информации по ID.
app.get('/products/edit/:id', products.getUpdate);
app.put('/products/edit', urlencodedParser, products.update);
// УДАЛЕНИЕ информации по ID.
app.delete('/products/delete/:id', products.remove);
};
| 201bbc70788dac6155153e4c4740dc27f83bf791 | [
"JavaScript",
"Markdown"
] | 4 | JavaScript | Sholastix/CRUD_full | a8b1be9160b59cfb3e0bb6affc1d59eeb8ef3ff7 | 02a1906dfa02a0f23129dc4689c621696c60b25f |
refs/heads/master | <file_sep>package EllerBank;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import com.mysql.jdbc.PreparedStatement;
public class editAccount {
static final String JDBC_DRIVER = "com.mysql.jdbc.Driver";
static final String DB_URL = "jdbc:mysql://mora:3306/jacksmysqluser";
static final String USER = "jacksmysqluser";
static final String PASS = "<PASSWORD>";
private String output;
private double bal;
public editAccount(){
}
public String setUser(int accnt, double bal){
Connection conn = null;
Statement stmt = null;
String firstN;
String lastN;
int accntNum;
double balance;
try{
Class.forName("com.mysql.jdbc.Driver");
System.out.println("Editing database...");
conn = DriverManager.getConnection(DB_URL, USER, PASS);
System.out.println("Connection Success!");
System.out.println("Edit...");
stmt = conn.createStatement();
String get = ("SELECT * FROM Bank WHERE account=" + accnt +";");
ResultSet rs = stmt.executeQuery(get);
if(rs.next()) {
Statement updateEXP = conn.createStatement();
String query = ("update Bank set balance = '"+ bal +"' where `account` = '"+accnt+"'");
int updateEXP_done = updateEXP.executeUpdate(query);
int str1 = rs.getInt("account");
String dataName = rs.getString("first");
String out = ("Changes made");
this.output = out;
}
conn.close();
}catch(SQLException se){
se.printStackTrace();
output = null;
}catch(Exception e){
output = null;
e.printStackTrace();
}finally{
try{
if(stmt!=null)
conn.close();
}catch(SQLException se){
}
try{
if(conn!=null)
conn.close();
}catch(SQLException se){
se.printStackTrace();
}
}
System.out.println("Goodbye!");
return output;
}
}
<file_sep>EllerBank
=========
<file_sep>package EllerBank;
import java.awt.BorderLayout;
import java.awt.EventQueue;
import javax.swing.JFrame;
import javax.swing.JPanel;
import javax.swing.border.EmptyBorder;
import javax.swing.JProgressBar;
import javax.swing.JTextField;
import javax.swing.JLabel;
import javax.swing.UIManager;
import javax.swing.UnsupportedLookAndFeelException;
import javax.swing.JButton;
import java.awt.event.ActionListener;
import java.awt.event.ActionEvent;
import javax.swing.SwingConstants;
import java.awt.Font;
import javax.swing.JPasswordField;
import com.jgoodies.forms.factories.DefaultComponentFactory;
/**
 * Main window of the EllerBank demo: a welcome banner plus two buttons
 * that open the account-creation and login screens.
 *
 * Fix: removed the dead commented-out look-and-feel block from the
 * constructor and added the missing serialVersionUID for the
 * serializable JFrame subclass.
 */
public class UserInterface extends JFrame {

	private static final long serialVersionUID = 1L;

	// Root container holding all controls (absolute layout).
	private JPanel contentPane;

	/**
	 * Build the fixed-size main frame and its controls.
	 */
	public UserInterface() {
		setResizable(false);
		setTitle("Bank Interface");
		setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
		setBounds(100, 100, 450, 200);
		contentPane = new JPanel();
		contentPane.setBorder(new EmptyBorder(5, 5, 5, 5));
		setContentPane(contentPane);
		// Absolute positioning; acceptable because the frame is fixed-size.
		contentPane.setLayout(null);

		JLabel lblWelcomeToEllerbank = new JLabel("Welcome to EllerBank!");
		lblWelcomeToEllerbank.setFont(new Font("Segoe UI", Font.PLAIN, 20));
		lblWelcomeToEllerbank.setHorizontalAlignment(SwingConstants.CENTER);
		lblWelcomeToEllerbank.setBounds(79, 11, 286, 77);
		contentPane.add(lblWelcomeToEllerbank);

		JButton btnCreateAccount = new JButton("Create New Account");
		btnCreateAccount.addActionListener(new ActionListener() {
			public void actionPerformed(ActionEvent e) {
				// Presumably opens its own window as a constructor side
				// effect — TODO confirm against NewAccount.
				NewAccount newAccount = new NewAccount();
			}
		});
		btnCreateAccount.setBounds(10, 99, 153, 50);
		contentPane.add(btnCreateAccount);

		JButton btnLoginToExisting = new JButton("Login to Existing Account");
		btnLoginToExisting.setFont(new Font("Tahoma", Font.PLAIN, 10));
		btnLoginToExisting.addActionListener(new ActionListener() {
			public void actionPerformed(ActionEvent e) {
				// Presumably opens the login window — TODO confirm
				// against OldAccount.
				OldAccount oldAccount = new OldAccount();
			}
		});
		btnLoginToExisting.setBounds(281, 99, 153, 50);
		contentPane.add(btnLoginToExisting);
	}

	/** Launch the application on the Swing event-dispatch thread. */
	public static void main(String[] args) {
		EventQueue.invokeLater(new Runnable() {
			public void run() {
				try {
					UserInterface frame = new UserInterface();
					frame.setVisible(true);
				} catch (Exception e) {
					e.printStackTrace();
				}
			}
		});
	}
}
<file_sep>package EllerBank.GarbageClasses;
// NOTE(review): dead scratch code — the block comment below preserves a
// retired console prototype of the account flow; it never compiles to
// anything and is safe to delete.
/** package EllerBank;
import java.util.Scanner;
public class test {
	public static void main(String[] args) {
		System.out.println("Welcome to EllerBank");
		System.out.println("What is your first name?");
		String fname, lname;
		fname = new Scanner(System.in).nextLine();
		System.out.println("What is your last name?");
		lname = new Scanner(System.in).nextLine();
		Account test = new Account(fname, lname);
		System.out.println(test);
	}
}
**/ | 45bce21df910b3da93d292e33c28c7ca8b892466 | [
"Markdown",
"Java"
] | 4 | Java | lizardlab/EllerBank | 9432181d086c783c8b338b1ec94f9d89a6a0b755 | fef39c6d98dcbf3bd1358913307200815d66c883 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.