branch_name stringclasses 149 values | text stringlengths 23 89.3M | directory_id stringlengths 40 40 | languages listlengths 1 19 | num_files int64 1 11.8k | repo_language stringclasses 38 values | repo_name stringlengths 6 114 | revision_id stringlengths 40 40 | snapshot_id stringlengths 40 40 |
|---|---|---|---|---|---|---|---|---|
refs/heads/master | <repo_name>dayanlemos/marvel-characters<file_sep>/src/reducers/cardsReducer.js
// Redux reducer for character data.
// NOTE(review): each case returns a brand-new state object containing ONLY
// its own key, so loading the active character drops `characters` and vice
// versa — confirm this replace-not-merge behaviour is intentional.
export default (state = {}, action) => {
  switch (action.type) {
    case 'GET_CHARACTERS_ACTION':
      // Payload: the list returned by the characters fetch.
      return {
        characters: action.payload
      };
    case 'GET_ACTIVE_CHARACTER_ACTION':
      // Payload: the single character shown on the detail page.
      return {
        character: action.payload
      }
    default:
      return state
  }
}<file_sep>/src/containers/CardDetailContainer/CardDetailContainer.js
import React, { Component } from 'react';
import { connect } from 'react-redux';
import { Link } from 'react-router-dom';
import { Formik } from 'formik';
import { getActiveCharacterAction } from '../../actions/apiCallActions';
class CardDetailContainer extends Component {
componentDidMount() {
this.setActiveCard();
}
setActiveCard = () => {
const { getActiveCharacterAction } = this.props;
getActiveCharacterAction();
};
render() {
const { card = {} } = this.props;
const formValidate = (values) => {
let errors = {};
if (!values.name) {
errors.name = 'Name is a required field';
} else if (values.name.length < 3) {
errors.name = 'Name should have at least 3 characters';
}
return errors;
};
/** TODO
* this method (save()) should trigger an action with a PUT request to the server.
* All data needed for the request payload can be taken out from the 'values' param */
const save = (values, setSubmitting) => {
setTimeout(() => {
alert(JSON.stringify(values, null, 2));
setSubmitting(false);
}, 400);
};
return (
<div>
<div className="row">
<div className="col-md-6">
<h2>{card.name}</h2>
</div>
<div className="col-md-6 text-right">
<Link to="/" className="btn btn-link">Back</Link>
</div>
</div>
<hr/>
{card.id ?
<div className="row">
<div className="col-md-6 text-center">
<img src={`${card.thumbnail.path}/detail.${card.thumbnail.extension}`} />
</div>
<div className="col-md-6">
<Formik enableReinitialize={true} initialValues={card} validate={values => formValidate(values)} onSubmit={(values, { setSubmitting }) => save(values, setSubmitting)}>
{({values, errors, touched, handleChange, handleBlur, handleSubmit, isSubmitting}) => (
<form onSubmit={handleSubmit}>
<div className="form-group">
<label htmlFor="name">Name</label>
<input type="text" className="form-control" id="name" name="name" placeholder="Enter the character name" onChange={handleChange} onBlur={handleBlur} value={values.name}/>
{errors.name && touched.name && errors.name ?
<div className="alert alert-danger" role="alert">
{errors.name && touched.name && errors.name}
</div> : null}
</div>
<div className="form-group">
<label htmlFor="description">Description</label>
<textarea rows={3} className="form-control" id="description" name="description" placeholder="Enter the character description" onChange={handleChange} onBlur={handleBlur} value={values.description}/>
</div>
<button type="submit" className="btn btn-primary" disabled={isSubmitting}>Save</button>
</form>
)}
</Formik>
<hr />
<h3>Series</h3>
{card.seriesList.map((serie, key) => (
<div className="list-group">
<div className="list-group-item">
<h5>{serie.title}</h5>
<p>{serie.description}</p>
</div>
</div>
))}
</div>
</div>
: null }
</div>
)
}
}
CardDetailContainer.defaultProps = {
card: {}
};
const mapStateToProps = state => ({
card: state.cardsReducer.character
});
const mapDispatchToProps = (dispatch, props) => ({
getActiveCharacterAction: () => dispatch(getActiveCharacterAction(props.match.params.id))
});
export default connect(mapStateToProps, mapDispatchToProps)(CardDetailContainer);<file_sep>/src/components/SearchBar/SearchBar.js
import React from 'react';
const SearchBar = (props) => {
const handleSearch = function (e) {
const { onSearch } = props;
onSearch(e.target.value);
};
return (
<div>
<div className="input-group mb-3">
<input type="text" className="form-control" placeholder="Enter the character name to search" onChange={handleSearch} />
</div>
</div>
)
};
export default SearchBar;<file_sep>/src/containers/CardsContainer/CardsContainer.js
import React, { Component } from 'react';
import { connect } from 'react-redux';
import SearchBar from '../../components/SearchBar/SearchBar';
import CardList from '../../components/CardList/CardList';
import { getCharactersAction } from "../../actions/apiCallActions";
// Container for the character list page: loads all characters on mount and
// re-queries whenever the search bar fires.
class CardsContainer extends Component {
  componentDidMount() {
    // Initial load with no search term (fetches the default list).
    this.getCharacters();
  }

  // Dispatches the fetch action; `searchTerm` is undefined on first load.
  getCharacters = (searchTerm) => {
    const { getCharactersAction } = this.props;
    getCharactersAction(searchTerm);
  }

  render() {
    return (
      <div>
        <h2>Marvel Characters</h2>
        <hr/>
        <SearchBar onSearch={this.getCharacters} />
        <CardList cards={this.props.cards}/>
      </div>
    )
  }
}
CardsContainer.defaultProps = {
cards: []
}
const mapStateToProps = state => ({
cards: state.cardsReducer.characters
});
const mapDispatchToProps = (dispatch) => ({
getCharactersAction: (searchTerm) => dispatch(getCharactersAction(searchTerm))
})
export default connect(mapStateToProps, mapDispatchToProps)(CardsContainer);<file_sep>/src/Router.js
import React from 'react';
import { BrowserRouter, Route, Switch } from "react-router-dom";
import CardsContainer from './containers/CardsContainer/CardsContainer';
import CardDetailContainer from "./containers/CardDetailContainer/CardDetailContainer";
// Top-level route table. The catch-all "/*" route must stay last so the
// <Switch> only falls through to it when nothing else matches.
const AppRouter = () => (
  <BrowserRouter>
    <Switch>
      <Route exact path="/" component={CardsContainer} />
      <Route exact path="/character/:id" component={CardDetailContainer} />
      <Route path="/*" component={() => <h1>Page not found</h1>} />
    </Switch>
  </BrowserRouter>
);
export default AppRouter;<file_sep>/src/components/Card/Card.js
import React from 'react';
import { Link } from 'react-router-dom';
// Clickable character card: the whole card links to the detail route.
const Card = (props) => {
  const { card } = props;
  return (
    <Link to={`/character/${card.id}`} className="card">
      {/* Marvel image URLs are assembled from thumbnail.path + size variant
          + extension. NOTE(review): the alt text is a generic placeholder —
          card.name would be more accessible. */}
      <img className="card-img-top" src={`${card.thumbnail.path}/portrait_xlarge.${card.thumbnail.extension}`} alt="Card image cap" />
      <div className="card-body">
        <h5 className="card-title">{card.name}</h5>
      </div>
    </Link>
  )
};
export default Card;<file_sep>/README.md
# Marvel Characters
This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).
## Available Scripts
In the project directory, you can run:
### `npm install`
Will install the project dependencies
### `npm start`
Runs the app in the development mode.
We are using this cool url “localhost.marvel.com:3000” in order to have permission to make calls to the Marvel’s API. Please, add this line in your /etc/hosts file:
`127.0.0.1 localhost.marvel.com`
Open [http://localhost.marvel.com:3000](http://localhost.marvel.com:3000) to view it in the browser.
The page will reload if you make edits.<br>
You will also see any lint errors in the console.
### `npm test`
Launches the test runner in the interactive watch mode.<br>
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
| fe78e92e635fb26a86597e4160fc098d409b436c | [
"JavaScript",
"Markdown"
] | 7 | JavaScript | dayanlemos/marvel-characters | 92573fcbb3958e0f43285c4ecc2633a19a9fd730 | dd2eb9b7fb1225aeb73e01a6ce455c54b1b3096b |
refs/heads/master | <repo_name>Muhammad-Akif/javascript-exercise<file_sep>/Chapters_1-38/Section#01/task12.js
// Convert fixed amounts of USD and SAR to PKR and print the total.
// NOTE(review): the exchange rates (155 PKR/USD, 41 PKR/SAR) are hard-coded
// snapshots — confirm/update before reuse.
var dollars=10,saudiRiyals=25;
var textHeading="Currency in PKR";
// String.prototype.bold()/fontsize() are deprecated HTML-wrapper helpers;
// browsers still support them but they are non-standard for new code.
document.write(textHeading.bold().fontsize(3)+"<br><br>"+"Total Currency in PKR: "+(dollars*155+saudiRiyals*41))
<file_sep>/Chapters_1-38/Section#01/task3.js
var email = "<EMAIL>";
alert("My email address is " + email);<file_sep>/Chapters_1-38/Section#01/task6.js
var birthYear = 2000;
document.write("My Birth year is " + birthYear + "\nData type of my declared variable is number")
<file_sep>/Chapters_1-38/Section#01/task10.js
// Demonstrates declaration, pre-increment/decrement and left-to-right
// operand evaluation. The printed values depend on exactly this statement
// order — do not reorder.
var num; document.write("<br>Value after variable declaration is: " + typeof (num)); // "undefined"
num = 5; document.write("<br>initial value: " + num); // 5
document.write("<br>Value after increment is: " + ++num); // pre-increment: prints 6, num = 6
document.write("<br>Value after addition is: " + (num + ++num)); // 6 + 7 = 13; num ends at 7
var _num = (num + --num); document.write("<br>Value after decrement is: " + --_num); // _num = 7 + 6 = 13; prints 12
document.write("<br>The remainder is: " + (_num % 3)); // 12 % 3 = 0<file_sep>/Chapters_1-38/Section#03/app4.js
// Print the full city list, then the sub-list picked out with slice(2, 4)
// (slice is non-mutating: cityNames is left untouched).
const cityNames = ["Karachi", "Lahore", "Islamabad", "Quetta", "Peshawar"];
const selectedNames = cityNames.slice(2, 4);
console.log(`Cities list:\n${cityNames}\n\nSelected cities list:\n${selectedNames}`);
<file_sep>/Chapters_1-38/Section#02/task11.js
var time = Number(prompt("Enter time in 24 hours formate like 0000 !"))
if(time >= 0000 && time < 1200){
console.log("Good Morning!")
}
else if(time >= 1200 && time < 1700){
console.log("Good Afternoon!")
}
else if(time >= 1700 && time < 2100){
console.log("Good Evening!")
}
else if(time >= 2100 && time < 2359){
console.log("Good Night!")
}
else{
console.log("Sorry Try Again You've Entered Wrong Time")
}<file_sep>/Chapters_49-52/app3.js
// Student roster demo driven by document.write.
// NOTE(review): several defects worth confirming/fixing here:
//  - `std_name = std_name.splice(0,1)` REPLACES the array with the removed
//    element(s) instead of deleting one entry;
//  - every rendered button shares id="btn" (ids must be unique) and
//    `value=i` emits the literal string "i", not the row index;
//  - document.write after page load wipes the existing document.
var std_name = ['gandogdu','samburtakin','haima','Ertugrul','Halima']
var std_class = ['10','11','12','13','16']
// Shared mutable globals used by both handlers below.
var i,a,iname,iclass;
// Reads the #name/#class inputs, appends them to the roster and re-renders.
function info(){
    iname = document.querySelector('#name').value
    iclass = document.querySelector('#class').value
    std_name.push(iname)
    std_class.push(iclass)
    for(i in std_name){
        document.write(' '+i+' '+std_name[i]+' '+std_class[i]+' <a href="#"><button value=i id="btn" onclick="del()">Delete</button><br></a>')
    }
}
// Intended to delete a row, then re-render the remaining roster.
function del(){
    a = document.getElementById('btn').value
    std_name = std_name.splice(0,1)
    for(i in std_name){
        document.write(' '+i+' '+std_name[i]+' '+std_class[i]+' <button value=i id="btn" onclick="del()"><a href="#">Delete</a></button><br>')
    }
}<file_sep>/Chapters_1-38/Section#03/app5.js
// Remove duplicate values from `arr` in place, keeping the first
// occurrence of each element.
var arr = [3, 'a', 'a', 'a', 2, 3, 'a', 3, 'a', 2, 4, 9, 3]
for (var i = 0; i < arr.length; i++) {
    for (var j = i + 1; j < arr.length; j++) {
        if (arr[i] === arr[j]) {
            // Fixed: the original called `arr.splice(0, arr[j])`, which
            // deletes `arr[j]` elements from the FRONT of the array
            // instead of removing the duplicate at index j.
            arr.splice(j, 1)
            // Re-check the element that shifted into position j.
            j--
        }
    }
}
console.log(arr);<file_sep>/Chapters_1-38/Section#02/task9.js
// Classify the first typed character as a vowel or a consonant.
var question = "Enter Charater !"
var char = prompt(question)
// Compare the lower-cased first character against each vowel.
// NOTE(review): a cancelled prompt returns null and an empty entry has no
// char[0] — both would throw here; confirm whether that matters.
if (char[0].toLowerCase() === 'a' || char[0].toLowerCase() === 'e' || char[0].toLowerCase() === 'i' || char[0].toLowerCase() === 'o' || char[0].toLowerCase() === 'u') {
    console.log("It's a Vowel")
}
else{
    console.log("It's a Consonants")
}<file_sep>/Chapters_1-38/Section#03/app3.js
// Exercise: mutate a colour list with unshift/push/shift/pop/splice,
// printing the array after every step.
var colorNames = []
// Prepend the first user-supplied colour.
var colorInBeginning = prompt("what color you wants to add to the beginning: ")
colorNames.unshift(colorInBeginning)
console.log(colorNames)
// Append a colour to the end.
var colorInEnd = prompt("what color you wants to add to the End: ")
colorNames.push(colorInEnd)
console.log(colorNames)
// Prepend two fixed colours at once.
colorNames.unshift("Black","Yellow")
console.log(colorNames)
// Drop the first and last entries again.
colorNames.shift()
console.log(colorNames)
colorNames.pop()
console.log(colorNames)
// Insert a colour at a user-chosen index (deleteCount 0 = pure insert).
// NOTE(review): parseInt is called without a radix and NaN (cancelled
// prompt) is unhandled — assumed acceptable for this exercise.
var Addcolor = prompt("which color you wants to add: ")
var Addindex = parseInt(prompt("At whats index: "))
colorNames.splice(Addindex,0,Addcolor)
console.log(colorNames)
// Remove `delcolor` entries starting at `delindex`.
var delindex = parseInt(prompt("At which index you wanted to Delete: "))
var delcolor = parseInt(prompt("How many colors: "))
colorNames.splice(delindex,delcolor)
console.log(colorNames)<file_sep>/Chapters_43-48/all_in_one/app.js
//#01
// function box(){
// alert('Clicked on link')
// }
//#02
// function msg(){
// alert('Thanks for purchasing a phone from us')
// }
//#03
// var val,i;
// function delt(val){
// std_name=std_name.splice(val,1)
// std_class=std_class.splice(val,1)
// }
// var std_name = ['akif','ali','rafeh','kammi','umer','noor','zohaib','hamza','danish','sarmad']
// var std_class = ['10','9','8','9','10','10','9','8','10','10']
// document.write('Index Name Class <br>')
// for (i in std_name){
// console.log('write')
// document.write(' '+i+' '+std_name[i]+' '+std_class[i]+' <button onclick="delt(i)">Delete</button><br>')
// }
//#04
// function changeimg(){
// document.getElementById('car').src='https://static.tcimg.net/vehicles/primary/8ed64ad9c02ed7f1/2020-Dodge-Challenger-white-full_color-driver_side_front_quarter.png'
// }
//#05
// var count=0
// function inc(){
// count++
// document.getElementById('counter').innerHTML=count
// }
// function dec(){
// count--
// document.getElementById('counter').innerHTML=count
// }<file_sep>/Chapters_1-38/Section#01/task9.js
var num1=3, num2=5;
document.write("<br>subtraction of "+num1+" and "+num2+" is "+ (num1-num2));
document.write("<br>multiplication of "+num1+" and "+num2+" is "+ (num1*num2));
document.write("<br>division of "+num1+" and "+num2+" is "+ (num1/num2));
document.write("<br>modulus of "+num1+" and "+num2+" is "+ (num1%num2));<file_sep>/Chapters_1-38/Section#01/task5.js
var age = 20;
alert(age);<file_sep>/Chapters_49-52/All_in_one/app.js
//#01
// function msg(){
// var fname = document.querySelector('#fname').value
// var lname = document.querySelector('#lname').value
// var password = document.querySelector('#pass').value
// var email = document.querySelector('#email').value
// document.write('First Name = '+fname+' Last Name = '+lname+' Email = '+email+' Password = '+<PASSWORD>)
// }
//#02
// function more(){
// var prg = document.querySelector('#pra').innerHTML
// var fprg = document.createElement('p').innerHTML
// fprg = document.write('<p>Lorem, ipsum dolor sit amet consectetur adipisicing elit. Iure consectetur quos repellat hic fugiat alias laborum dolore in a rerum quod reprehenderit debitis minus adipisci itaque doloribus sunt, architecto optio nemo expedita eaque accusantium. Excepturi eligendi nulla sequi quod impedit sunt magnam eveniet enim? Illo at nemo dolorem natus consectetur.</p>')
// prg = fprg
// }
//#03
// Roster demo. NOTE(review): `flag` is false when this script loads, so the
// `if (flag)` block below never executes — `render` and `del` are therefore
// never defined, and the inline onclick="del()" handlers written by info()
// throw a ReferenceError. Also every button shares id="btn" and `value=i`
// emits the literal string "i".
var std_name = ['akif','ali','saljan','Ertugrul','Halima']
var std_class = ['10','11','12','13','16']
var flag = false
var i,a,iname,iclass;
// Appends the #name/#class input values and rewrites the whole document.
function info(){
    iname = document.querySelector('#name').value
    iclass = document.querySelector('#class').value
    std_name.push(iname)
    std_class.push(iclass)
    for(i in std_name){
        document.write(' '+i+' '+std_name[i]+' '+std_class[i]+' <button value=i id="btn" onclick="del()">Delete</button><br>')
    }
    flag = true
}
// Dead code: guarded by a flag that is still false at this point.
if(flag){
    function render(){
        for(i in std_name){
            document.write(' '+i+' '+std_name[i]+' '+std_class[i]+' <button value=i id="btn" onclick="del()">Delete</button><br>')
        }
        i=0
    }
    render()
    function del(){
        a = document.getElementById('btn').value
        console.log('hi')
    }
}<file_sep>/Chapters_1-38/Section#01/task2.js
var studentName = "Akif", studentAge = 20, studentCourse = "Application Development";
alert(studentName);
alert(studentAge);
alert(studentCourse);<file_sep>/Chapters_1-38/Section#02/task1.js
// Greet the user if the entered city (case-insensitively) is Karachi.
// NOTE(review): a cancelled prompt returns null and would throw on
// toLowerCase() — assumed out of scope for this exercise.
var question = "Enter City Name !"
var city = prompt(question)
if(city.toLowerCase() === "karachi") {
    console.log("Welcome to City of Lights")
}
<file_sep>/Chapters_1-38/Section#03/app7.js
// Interleave arr1 and arr2 into arr3, then strip duplicate values so each
// element appears only once (first occurrence wins).
var arr1 = [1, 5, 3, 2, 6], arr2 = [8, 2, 3, 4, 5], arr3 = []
for (var i = 0; i < arr1.length; i++) {
    arr3.push(arr1[i])
    arr3.push(arr2[i])
}
for (var i = 0; i < arr3.length; i++) {
    for (var j = i + 1; j < arr3.length; j++) {
        if (arr3[i] === arr3[j]) {
            // Fixed: `arr3.splice(0, arr3[j])` removed `arr3[j]` items from
            // the front of the array; remove the duplicate at index j instead.
            arr3.splice(j, 1)
            // Re-check the element that shifted into position j.
            j--
        }
    }
}
console.log(arr3);<file_sep>/Chapters_1-38/Section#02/task10.js
// Password check demo: compares the typed password against a hard-coded
// reference value. NOTE(review): real credentials must never live in
// client-side source; exercise code only. A cancelled prompt returns null
// and would throw on .length below.
var correctPassword = "abc"
var userPassword = prompt("Enter your Password !")
// Reject an empty entry before comparing.
if (userPassword.length != 0) {
    if (correctPassword === userPassword) {
        console.log("Correct! The password you entered matches the original password")
    }
    else{
        console.log("Wrong Password Try Again")
    }
}
else{
    console.log("Sorry Try Again and Insert any Value")
}
<file_sep>/Chapters_1-38/Section#01/task11.js
// Convert 25 °C to Fahrenheit and 70 °F to Celsius, printing both.
// (Variable name "Calsius" is a misspelling of Celsius, kept as-is.)
var C=25,F=70,Calsius,Fahrenheit;
// °F = °C * 9/5 + 32
Fahrenheit = (C * 9/5) + 32;
//document.write(C,Fahrenheit)
document.write(C+ "°C is "+Fahrenheit+"°F");
// °C = (°F - 32) * 5/9
Calsius = (F - 32) * 5/9;
document.write("<br>"+F+"°F is "+Calsius+"°C");<file_sep>/Chapters_1-38/Section#02/task7.js
// Two-number calculator: reads both operands and an operator symbol, then
// prints the labelled result (or an error for unknown operators).
// NOTE(review): division by zero yields Infinity and a cancelled prompt
// yields NaN — both print without warning; confirm that is acceptable.
var question1 = "Enter First Number !"
var firstNumber = Number(prompt(question1))
var question2 = "Enter Second Number !"
var secondNumber = Number(prompt(question2))
var question3 = "Enter Operation to Apply !"
var operation = prompt(question3)
var result
if (operation === '+') {
    result = (firstNumber+secondNumber)
    console.log("Sum = "+result)
}
else if (operation === '-') {
    result = (firstNumber-secondNumber)
    console.log("Subtraction = "+result)
}
else if (operation === '*') {
    result = (firstNumber*secondNumber)
    console.log("Multiplication = "+result)
}
else if (operation === '/') {
    result = (firstNumber/secondNumber)
    console.log("Division = "+result)
}
else if (operation === '%'){
    result = (firstNumber%secondNumber)
    console.log("Modulus = "+result)
}
else{
    console.log("Invalid Operation")
}
<file_sep>/Chapters_38-42/all_in_one/app.js
//#01
// var num = prompt('Enter Number')
// var pow = prompt('Enter power')
// function power(val,pow){
// return val**pow
// }
// alert('Result : '+power(num,pow))
//#02
// var year = prompt('Enter Year')
// if(year.slice(-1)==0 || year.slice(-1)==4)
// {
// alert('Leap Year')
// }
// else{
// alert('Not Leap Year')
// }
//#03
// var a = Number(prompt('Enter First Side'))
// var b = Number(prompt('Enter Second Side'))
// var c = Number(prompt('Enter third Side'))
// function Area(a,b,c,s) {
// return s*(s-a)*(s-b)*(s-c)
// }
// function calculate(a,b,c) {
// return (a+b+c)/2
// }
// var s = calculate(a,b,c)
// console.log(s)
// alert('Result '+Area(a,b,c,s))
//#04
// var a = Number(prompt('Enter First Subject no.'))
// var b = Number(prompt('Enter Second Subject no.'))
// var c = Number(prompt('Enter third Subject no.'))
// var total = 300
// var result, sum;
// function main(average=avg(a,b,c), percentage=pcnt(sum,total)) {
// return result = alert('Average : '+average+' Percentage : '+percentage)
// }
// function avg(sub1,sub2,sub3) {
// sum=sub1+sub2+sub3
// return sum/3
// }
// function pcnt(sum,total) {
// return sum/total*100
// }
// main()
//#05
// var string = prompt('Enter String')
// var char = prompt('Enter character to get index')
// var flag = true
// for(i in string){
// if(char==string[i])
// {
// alert('index of '+char+' is '+i)
// break;
// flag = false
// }
// }
// if(flag){
// alert('Not Found')
// }
//#06
// var sentence = prompt('Enter Sentence').toLowerCase()
// var flag = true
// function delete_vowels(sentence) {
// for(i in sentence) {
// console.log(sentence[i])
// if((sentence[i]=='a') || (sentence[i]=='e') || (sentence[i]=='i') || (sentence[i]=='o') || (sentence[i]=='u')){
// sentence = sentence.replace(sentence[i],'')
// flag = false
// }
// }
// return sentence;
// }
// var result = delete_vowels(sentence)
// if(flag){
// alert('No vowels => '+sentence)
// }
// else{
// alert(result)
// }
//#07
// function findOccurrences() {
// var str = "Pleases read this application and give me gratuity";
// var chars = str.toLowerCase().split("");
// var count = 0;
// // Loop over every character
// for(let i = 0; i < chars.length - 1; i++) {
// var char = chars[i];
// var next = chars[i + 1];
// // Increase count if both characters are any of the following: aeiou
// if(isCorrectCharacter(char) && isCorrectCharacter(next)) {
// count++
// }
// }
// return count;
// }
// // Check if a character is any of the following: aeiou
// function isCorrectCharacter(char) {
// switch (char) {
// case 'a':
// case 'e':
// case 'i':
// case 'o':
// case 'u':
// return true;
// default:
// return false;
// }
// }
// var found = findOccurrences();
// console.log(found);
//#08
// var distance = prompt('Enter istance between two cities (in km.)')
// var result_merters,result_feet,result_inches,result_centimeters;
// function meters(distance) {
// result_merters = distance * 1000
// return result_merters
// }
// function feet(distance) {
// result_feet = (distance * 1000)*3
// return result_feet
// }
// function inches(distance) {
// result_inches = ((distance * 1000)*3)*12
// return result_inches
// }
// function centimeters(distance) {
// result_centimeters = (((distance * 1000)*3)*12)*8
// return result_centimeters
// }
// var resultInMeters = meters(distance)
// alert(resultInMeters+' m')
// var resultInfeet = feet(distance)
// alert(resultInfeet+' ft')
// var resultIninches = inches(distance)
// alert(resultIninches+' in')
// var resultInCentimeters = centimeters(distance)
// alert(resultInCentimeters+' cent')
//#09
// var salary = Number(prompt('Enter Employee Salary'))
// var Working_hours = Number(prompt('Enter working Hours'))
// var result;
// if(Working_hours>40){
// let hours = Working_hours-40
// result = Working_hours+(hours*12)
// result;
// }
// else{
// result = salary
// }
// alert('Employee Salary is '+result)
//#10
// let amount = prompt('Enter Amount (less than 1000)')
// console.log("\n\nRequired notes of Rs. 100 : %d", amount / 100);
// console.log("\n\nRequired notes of Rs. 50 : %d", (amount % 100) / 50);
// console.log("\n\nRequired notes of Rs. 10 : %d", (((amount % 100) % 50) / 10));
// console.log("\n\nAmount still remaining Rs. : %d", (((amount % 100) % 50) % 10));<file_sep>/Chapters_1-38/Section#01/task8.js
var num1=3, num2=5;
document.write("Sum of "+num1+" and "+num2+" is "+ (num1+num2));<file_sep>/Chapters_1-38/Section#03/app2.js
var names = ["Akif", "Rafeh","Umer"], scores=[320,230,480], totalMarks=500
for(i=0;i<names.length;i++)
{
console.log("Score of "+names[i]+" is "+scores[i]+". Percentage: "+(scores[i]*100/totalMarks)+"%");
} | 7d7ed6e1a9e2a2f7096aae00ba3b3beae1e16ce3 | [
"JavaScript"
] | 23 | JavaScript | Muhammad-Akif/javascript-exercise | b69de575a9d25f0bc513c2409a91f3726b0fed28 | 8ce25e4402124433d80eea987cb6b86df1553f05 |
refs/heads/master | <repo_name>AbdoZDGaia/Shopdoz<file_sep>/Core/Entities/BaseEntity.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
namespace Core.Entities
{
/// <summary>
/// Common persistence columns shared by all domain entities
/// (primary key plus soft-delete and audit fields).
/// </summary>
public class BaseEntity
{
    /// <summary>Primary key.</summary>
    public int Id { get; set; }
    /// <summary>Soft-delete flag — presumably filtered in queries; confirm usage.</summary>
    public bool IsDeleted { get; set; }
    /// <summary>When the row was created.</summary>
    public DateTime CreationDate { get; set; }
    /// <summary>When the row was last updated.</summary>
    public DateTime UpdateDate { get; set; }
    /// <summary>Id of the user who created the row.</summary>
    public int CreatedBy { get; set; }
    /// <summary>Id of the user who last updated the row.</summary>
    public int UpdatedBy { get; set; }
}
}<file_sep>/Infrastructure/Data/StoreContextSeed.cs
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.Json;
using System.Threading.Tasks;
using Core.Entities;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
namespace Infrastructure.Data
{
/// <summary>
/// Seeds the store database with brands, types and products read from the
/// JSON files configured under the "Paths" section of configuration.
/// </summary>
public class StoreContextSeed
{
    /// <summary>
    /// Populates each lookup table only when it is empty, so the method is
    /// safe to call on every application start. Failures are logged and
    /// swallowed (the app continues unseeded).
    /// </summary>
    public static async Task SeedAsync(StoreContext context, ILoggerFactory loggerFactory, IConfiguration config)
    {
        try
        {
            if (!context.ProductBrands.Any())
            {
                await AddProductBrands(context, config);
            }

            if (!context.ProductTypes.Any())
            {
                await AddProductTypes(context, config);
            }

            if (!context.Products.Any())
            {
                await AddProducts(context, config);
            }
        }
        catch (Exception ex)
        {
            var logger = loggerFactory.CreateLogger<StoreContextSeed>();
            // Fixed: pass the exception itself so the stack trace is kept;
            // logging only ex.Message discards it.
            logger.LogError(ex, "An error occurred while seeding the store database");
        }
    }

    /// <summary>Loads product brands from the configured seed file.</summary>
    private static async Task AddProductBrands(StoreContext context, IConfiguration config)
    {
        // ReadAllTextAsync avoids blocking a thread-pool thread on disk I/O.
        var brandsData = await File.ReadAllTextAsync(config["Paths:SeedBrands"]);
        var brands = JsonSerializer.Deserialize<List<ProductBrand>>(brandsData);
        context.ProductBrands.AddRange(brands);
        await context.SaveChangesAsync();
    }

    /// <summary>Loads product types from the configured seed file.</summary>
    private static async Task AddProductTypes(StoreContext context, IConfiguration config)
    {
        var typesData = await File.ReadAllTextAsync(config["Paths:SeedTypes"]);
        var types = JsonSerializer.Deserialize<List<ProductType>>(typesData);
        context.ProductTypes.AddRange(types);
        await context.SaveChangesAsync();
    }

    /// <summary>Loads products from the configured seed file.</summary>
    private static async Task AddProducts(StoreContext context, IConfiguration config)
    {
        var productsData = await File.ReadAllTextAsync(config["Paths:SeedProducts"]);
        var products = JsonSerializer.Deserialize<List<Product>>(productsData);
        context.Products.AddRange(products);
        await context.SaveChangesAsync();
    }
}
} | 87bbfe535ea5a598d50882e02c6df4bd2ee1a616 | [
"C#"
] | 2 | C# | AbdoZDGaia/Shopdoz | a809162725d25034ae2c5f20cd053f0b21229d70 | 5fcd780cdea40d66d26519d9bce9d72fd58edec9 |
refs/heads/master | <file_sep>import React from "react";
import "./App.css";
import "bootstrap/dist/css/bootstrap.min.css";
import { BrowserRouter as Router, Switch, Route, Link } from "react-router-dom";
import NavBar from "./components/NavBar";
import Products from "./components/Products";
import { CartProvider } from "./context/Cart";
const Index = () => <h2>Home</h2>;
// Application root: wires the cart context provider around the router so
// NavBar's cart badge and the product pages share one cart state.
// NOTE(review): `Switch` and `Link` are imported at the top of this file
// but unused here — confirm and drop them.
export default function App() {
  return (
    <CartProvider>
      <Router>
        <div className="App">
          <div className="Header">
            <NavBar />
            <Route path="/" exact component={Index}></Route>
            <Route path="/Products/" component={Products}></Route>
          </div>
        </div>
      </Router>
    </CartProvider>
  );
}
<file_sep>import React, { useState } from "react";
import { Navbar, NavbarBrand, Nav, NavItem, NavLink } from "reactstrap";
import { BrowserRouter as Router, Switch, Route, Link } from "react-router-dom";
import { CartContext } from "../context/Cart";
// Top navigation bar: brand link, page links and a live cart counter fed by
// CartContext. NOTE(review): `isOpen`/`toggle` are declared but never wired
// to a reactstrap <Collapse>/<NavbarToggler>, so the menu cannot actually
// collapse on small screens — confirm whether that is intended.
const NavBar = (props) => {
  const [isOpen, setIsOpen] = useState(false);
  const toggle = () => setIsOpen(!isOpen);
  return (
    <div>
      <Navbar color="light" light expand="md">
        <NavbarBrand href="/">Discovery</NavbarBrand>
        <Nav className="mr-auto" navbar>
          <NavItem>
            <NavLink>
              <Link to="/">Home</Link>
            </NavLink>
          </NavItem>
          <NavItem>
            <NavLink>
              <Link to="/Products/">Products</Link>
            </NavLink>
          </NavItem>
          <NavItem>
            <NavLink>
              {/* Subscribe to the cart context and show the item count. */}
              <CartContext.Consumer>
                {({ Cart }) => (
                  <Link to="/Products/">Cart ({Cart.length})</Link>
                )}
              </CartContext.Consumer>
            </NavLink>
          </NavItem>
        </Nav>
      </Navbar>
    </div>
  );
};
| 175d0ccbf80a9493c9b7ca2f2ec98bf01ad9465f | [
"JavaScript"
] | 2 | JavaScript | nguyenthanhtrung1996/project | 412c8f914cbb2a26ee12abdc2d2b162de7ee22e1 | 489a139c01a78702f629d554462cc9abb3a30cbc |
refs/heads/master | <repo_name>SourLeaf/python-newton-root<file_sep>/calcsqrt.py
#! /usr/bin/env python3
# <NAME>
# Calculate the square root of a number.
def sqrt(x):
    """Return the square root of ``x`` via Newton's method.

    Args:
        x: Non-negative number to take the square root of.

    Returns:
        The square root of ``x`` (iterated until the residual is below
        1e-8), or -1 if ``x`` is negative (error-sentinel contract kept
        from the original).
    """
    if x < 0:
        print("Error: negative value")
        return -1  # fixed typo: was `retunr -1`, a SyntaxError
    print("Here we go")
    # Initial guess for the square root.
    z = x / 2.0
    # Continuously improve the guess (Newton step for f(z) = z*z - x).
    while abs(x - (z * z)) > 0.00000001:
        z = z - (((z * z) - x) / (2 * z))
    return z
myval = 63.0
print("The square root of", myval, "is", sqrt(myval))
| 75ba9f079ef819341483c9a21567da9d52ae012f | [
"Python"
] | 1 | Python | SourLeaf/python-newton-root | 74152223db92609b0da194e8127a8d5cfd1bf674 | 11721b99cf449e1b16f79838c0636d1db2a73667 |
refs/heads/master | <repo_name>fralik/ducklib<file_sep>/ducklib-jammed/setup.py
# The below import order is important. Do not change!
from setuptools import setup, find_packages
from Cython.Build import cythonize
# Load the package's version number
about = {}
with open("src/ducklib/__version__.py") as fo:
exec(fo.read(), about)
setup(
name="ducklib",
version=about["__version__"],
package_dir={"": "src"},
packages=find_packages(where="src"),
python_requires="~=3.7",
ext_modules=cythonize("src/ducklib/auth.py", compiler_directives={"language_level": "3"}, language="c++"),
setup_requires=[
"setuptools >= 18.0",
"cython == 0.29.16",
],
install_requires=[
],
extras_require={},
package_data={},
)
<file_sep>/ducklib-builtin/src/ducklib_builtin/main.py
"""This is the top-level API of our library"""
from pathlib import Path
from envparse import env
from . import auth
from . import auth_functions
for path in (env("PYDUCK_CONFIG_PATH", ""), "config.env"):
if Path(path).is_file():
env.read_envfile(path)
def say_hi():
    """Print a quack greeting, gated on the configured credentials.

    Username/password come from environment variables (names partly
    redacted in this dump) and are validated through the ``auth`` module.
    """
    username = env("PYDUCK_USER", default="")
    password = env("<PASSWORD>", default="")
    is_valid = auth.is_user_valid(username, password)
    if not is_valid:
        print("I do not quack with strangers! (you are NOT authorized to use this library)")
        return
    print("Quack-quack!")
def start_chatting():
    """Run the (cosmetic) configuration step, then greet authorized users.

    Unlike ``say_hi``, credential validation goes through
    ``auth_functions``. NOTE(review): the output strings "quaching" /
    "Quach-quach" look like typos of "quacking"/"Quack-quack"; they are
    runtime output and deliberately left untouched here.
    """
    username = env("PYDUCK_USER", default="")
    password = env("<PASSWORD>", default="")
    auth_functions.configure()
    is_valid = auth_functions.is_user_valid(username, password)
    if not is_valid:
        print("I do not quaching with strangers! (you are NOT authorized to use this library)")
        return
    print("Quach-quach!")
<file_sep>/ducklib-consumer/README.md
# Ducklib consumer code
This repo contains notebooks, which show how to alternate original ducklib.
Start working with it by:
```dos
python -m pip install --upgrade setuptools wheel pip
pip install -r requirements.txt
jupyter lab
```
Proceed to `DuckChat.ipynb`.<file_sep>/server/app.py
from flask import Flask, request
import json
app = Flask(__name__)
@app.route('/', methods=['POST', "GET"])
def is_license_valid():
    """Toy auth endpoint: POST username/password form fields, receive
    ``{"success": bool}`` back.

    GET requests (and malformed POSTs) answer ``success: false`` with
    HTTP 200 rather than an error status.
    NOTE(review): credentials are hard-coded and compared in plain text —
    demo server only, not for production use. (The literal password is
    redacted as <PASSWORD> in this dump.)
    """
    if request.method == "GET":
        return json.dumps({'success': False}), 200, {'ContentType':'application/json'}
    try:
        user = request.form["username"]
        pwd = request.form["password"]
        if (user == "user") and (pwd == "<PASSWORD>"):
            response = json.dumps({'success': True}), 200, {'ContentType':'application/json'}
        else:
            response = json.dumps({'success': False}), 200, {'ContentType':'application/json'}
    except KeyError:
        # Missing form field(s): treat as an invalid login attempt.
        response = json.dumps({'success': False}), 200, {'ContentType':'application/json'}
    return response
<file_sep>/ducklib-builtin/README.md
# Compiled (builtin) version of Ducklib library
This repo is very similar to `ducklib-compiled` and differs only in the way how python code is compiled.
It results in functions that will have `builtin` type in Python.
**Note that you will need a working C++ compiler in order to work with this repo!**
In order to create the wheel do:
```dos
python setup.py bdist_wheel
```
Wheel shall be available in `dist` folder.
## Test that library works
First, start duck-server in a separate terminal. Then:
```dos
pip install ducklib_builtin-1.0.0-cp37-cp37m-win_amd64.whl
python -c "import ducklib_builtin.main; ducklib_builtin.main.say_hi()"
```<file_sep>/ducklib-jammed/src/ducklib/auth.py
"""Authentication module"""
def is_user_valid(username: str = "", password: str = "") -> bool:
    """Accept every credential pair unconditionally.

    This is the "jammed" build of the library: the server-side check has
    been replaced by a stub that always returns True, so both arguments
    are ignored.
    """
    return True
# Bonus:
# def configure():
# try:
# import ducklib.auth_function1 as auth_original
# except:
# print('Nope, failed to import')
# return
# auth_original.configure()
<file_sep>/ducklib-raw/src/ducklib/auth.py
"""Authentication module"""
import requests
def is_user_valid(username: str = "", password: str = "") -> bool:
    """Ask the local licence server whether the credentials are valid.

    POSTs the username/password as form data to the demo Flask server on
    127.0.0.1:5000 and returns its ``success`` flag.
    NOTE(review): no timeout or connection-error handling — an unreachable
    server raises requests.ConnectionError to the caller. (The password
    value below is redacted as <PASSWORD> in this dump.)
    """
    udata = {
        "username": username,
        "password": <PASSWORD>
    }
    response = requests.post("http://127.0.0.1:5000", data=udata)
    return response.json()["success"]<file_sep>/ducklib-raw/src/ducklib/main.py
"""This is the top-level API of our library"""
from . import auth
from envparse import env
from pathlib import Path
for path in (env("PYDUCK_CONFIG_PATH", ""), "config.env"):
if Path(path).is_file():
env.read_envfile(path)
def do_something():
    """Print a welcome message if the configured credentials are valid.

    Credentials are read from environment variables (names partly
    redacted in this dump) and checked via the ``auth`` module, which
    calls the demo licence server.
    """
    username = env("PYDUCK_USER", default="")
    password = env("<PASSWORD>", default="")
    is_valid = auth.is_user_valid(username, password)
    if not is_valid:
        print("You are NOT authorized to use this library")
        return
    print("Welcome to the library")
<file_sep>/server/README.md
# Back-end server for Ducklib library
This repo implements a simple authorization API.
In order to run:
```dos
pip install -r requirements.txt
set FLASK_APP=app.py
flask run
```
<file_sep>/ducklib-compiled/src/ducklib/auth_functions.py
"""Authentication module with several functions"""
import requests
import time
def is_user_valid(username: str = "", password: str = "") -> bool:
    """Validate credentials against the demo licence server.

    POSTs the pair as form data to http://127.0.0.1:5000 and returns the
    server's ``success`` flag.
    NOTE(review): no timeout — an unreachable server raises
    requests.ConnectionError to the caller. (The password value below is
    redacted as <PASSWORD> in this dump.)
    """
    udata = {
        "username": username,
        "password": <PASSWORD>
    }
    response = requests.post("http://127.0.0.1:5000", data=udata)
    return response.json()["success"]


def configure():
    """Print a dotted "Configuring..." progress line (takes ~2.5 s).

    Purely cosmetic: no state is read or written.
    """
    print("Ducks need configuration too!")
    print("Configuring", end="", flush=True)
    # Five dots, half a second apart.
    for _ in range(5):
        print(".", end="", flush=True)
        time.sleep(0.5)
    print(" done")
<file_sep>/ducklib-compiled/src/ducklib/__init__.py
from .main import say_hi, start_chatting
from .auth import is_user_valid
<file_sep>/ducklib-raw/setup.py
from setuptools import setup, find_packages
# Load the package's version number
about = {}
with open("src/ducklib/__version__.py") as fo:
exec(fo.read(), about)
setup(
name="ducklib",
version=about["__version__"],
package_dir={"": "src"},
packages=find_packages("src"),
python_requires="~=3.7",
install_requires=[
"requests ~= 2.23.0",
"envparse ~= 0.2.0"
],
extras_require={
},
package_data={},
)
<file_sep>/ducklib-builtin/setup.py
from setuptools import setup, find_packages, Extension
# Load the package's version number
# (exec'ing the file avoids importing the package before it is built)
about = {}
with open("src/ducklib_builtin/__version__.py") as fo:
    exec(fo.read(), about)
# Cython extension modules: each .pyx source is translated to C++ and
# shipped as a compiled binary extension instead of Python source.
ext_modules = [
    Extension(
        # such a name ensures, that compiled version is placed correctly in the wheel
        name='ducklib_builtin.auth',
        sources=["src/ducklib_builtin/auth.pyx"],
        # convert pyx to c++ code:
        language='c++',
    ),
    Extension(
        # such a name ensures, that compiled version is placed correctly in the wheel
        name='ducklib_builtin.auth_functions',
        sources=["src/ducklib_builtin/auth_functions.pyx"],
        # convert pyx to c++ code:
        language='c++',
    ),
]
# Force Python-3 semantics when Cython processes the C++ extensions.
for e in ext_modules:
    if e.language == 'c++':
        e.cython_directives = {"language_level": "3"}
setup(
    name="ducklib_builtin",
    version=about["__version__"],
    package_dir={"": "src"},
    packages=find_packages(where="src"),
    python_requires="~=3.7",
    ext_modules=ext_modules,
    # Build-time only: Cython is needed to compile the .pyx sources.
    setup_requires=[
        "setuptools >= 18.0",
        "cython == 0.29.16",
    ],
    install_requires=[
        "requests ~= 2.23.0",
        "envparse ~= 0.2.0",
    ],
    extras_require={},
    package_data={},
)
<file_sep>/ducklib-compiled/README.md
# Compiled version of Ducklib library
This repo mimics how one would create a library with compiled code ready for distribution.
**Note that you will need a working C++ compiler in order to work with this repo!**
In order to create the wheel do:
```dos
py -3.7 -m venv ./venv --prompt ducklib-compiled-dev
./venv/scripts/activate
python -m pip install --upgrade setuptools wheel pip
pip install cython~=0.29.16
python setup.py bdist_wheel
```
Wheel shall be available in `dist` folder.
## Test that library works
First, start duck-server in a separate terminal. Then:
```dos
pip install ducklib-1.0.0-cp37-cp37m-win_amd64.whl
python -c "import ducklib.main; ducklib.main.do_something()"
```<file_sep>/ducklib-compiled/setup.py
import sysconfig
# The below import order is important. Do not change!
from setuptools import setup, find_packages
from setuptools.command.build_py import build_py as _build_py
from Cython.Build import cythonize
# Load the package's version number
# (exec'ing the file avoids importing the package before it is built)
about = {}
with open("src/ducklib/__version__.py") as fo:
    exec(fo.read(), about)
# noinspection PyPep8Naming
class build_py(_build_py):
    # Overrides setuptools' build_py so that the pure-Python sources of the
    # cythonized auth modules are NOT copied into the wheel; only the
    # compiled extensions remain.
    def find_package_modules(self, package, package_dir):
        # NOTE(review): ext_suffix is computed but never used below — confirm
        # whether it was meant to filter by compiled-extension suffix.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        modules = super().find_package_modules(package, package_dir)
        filtered_modules = []
        for (pkg, mod, filepath) in modules:
            # a dirty way to omit .py files from .whl
            # and alternative may be to have empty packages in setup
            if 'auth' in filepath:
                continue
            filtered_modules.append((pkg, mod, filepath, ))
        return filtered_modules
setup(
    name="ducklib",
    version=about["__version__"],
    package_dir={"": "src"},
    packages=find_packages(where="src"),
    python_requires="~=3.7",
    # Compile every src/ducklib/auth*.py module to a C++ extension.
    ext_modules=cythonize("src/ducklib/auth*.py", compiler_directives={"language_level": "3"}, language="c++"),
    setup_requires=[
        "setuptools >= 18.0",
        "cython == 0.29.16",
    ],
    install_requires=[
        "requests ~= 2.23.0",
        "envparse ~= 0.2.0",
    ],
    extras_require={},
    package_data={},
    # Plug in the filtering build_py defined above.
    cmdclass={
        'build_py': build_py
    }
)
<file_sep>/ducklib-jammed/README.md
# Jammed version of Ducklib library
This repo intends to create fake ducklib files modified to our needs.
In order to compile:
```dos
pip install cython~=0.29.16
python setup.py build_ext --inplace
```
We do not need to create the whole wheel package. We need individual files only.
## Test that library works
First, start duck-server in a separate terminal. Then:
```dos
pip install ducklib-1.0.0-cp37-cp37m-win_amd64.whl
python -c "import ducklib.main; ducklib.main.do_something()"
``` | 1cc9fa32ac867e4eb910f8ae2158ec4119e5e8c8 | [
"Markdown",
"Python"
] | 16 | Python | fralik/ducklib | 66967d324d5c1e6e64c3a8dbe5147001282570bf | 01241485eb516b211ccad22cf2315a49d1a6f231 |
refs/heads/master | <repo_name>sapaweanging123/Project-Absensi-Kegiatan<file_sep>/application/views/v_404.php
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>404</title>
<link rel="stylesheet" href="<?php echo base_url() ?>assets/plugins/fontawesome-free/css/all.min.css">
<!-- Theme style -->
<link rel="stylesheet" href="<?php echo base_url() ?>assets/dist/css/adminlte.min.css">
<!-- Google Font: Source Sans Pro -->
<link href="https://fonts.googleapis.com/css?family=Source+Sans+Pro:300,400,400i,700" rel="stylesheet">
<!-- DataTables -->
<link rel="stylesheet" href="<?php echo base_url() ?>assets/plugins/datatables-bs4/css/dataTables.bootstrap4.min.css">
<link rel="stylesheet" href="<?php echo base_url() ?>assets/plugins/datatables-responsive/css/responsive.bootstrap4.min.css">
<!-- Tempusdominus Bbootstrap 4 -->
<link rel="stylesheet" href="<?php echo base_url() ?>assets/plugins/tempusdominus-bootstrap-4/css/tempusdominus-bootstrap-4.min.css">
<!-- SweetAlert2 -->
<link rel="stylesheet" href="<?php echo base_url() ?>assets/plugins/sweetalert2-theme-bootstrap-4/bootstrap-4.min.css">
<link href="<?php echo base_url() ?>assets/css/signature/jquery.signaturepad.css" rel="stylesheet">
<link rel="stylesheet" href="<?php echo base_url() ?>assets/css/custom.css">
<link rel="stylesheet" href="https://code.ionicframework.com/ionicons/2.0.1/css/ionicons.min.css">
</head>
<body>
<section class="content-header">
<div class="container-fluid">
<div class="row mb-2">
<div class="col-sm-6">
</div>
</div>
</div><!-- /.container-fluid -->
</section>
<!-- Main content -->
<section class="content" style="margin-top: 40vh;">
<div class="error-page">
<h2 class="headline text-warning"> 404</h2>
<div class="error-content">
<h3><i class="fas fa-exclamation-triangle text-warning"></i> Oops! Page not found.</h3>
<p>
We could not find the page you were looking for.
Meanwhile, you may <a href="<?= base_url() ?>/admin/dashboard">return to dashboard</a> or try using the search form.
</p>
</div>
<!-- /.error-content -->
</div>
<!-- /.error-page -->
</section>
<!-- /.content -->
</body>
</html><file_sep>/application/views/v_absen_pages.php
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>AdminLTE 3 | Dashboard</title>
<!-- Tell the browser to be responsive to screen width -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- Font Awesome -->
<!-- Font Awesome Icons -->
<link rel="stylesheet" href="<?php echo base_url() ?>assets/plugins/fontawesome-free/css/all.min.css">
<!-- Theme style -->
<link rel="stylesheet" href="<?php echo base_url() ?>assets/dist/css/adminlte.min.css">
<!-- Google Font: Source Sans Pro -->
<link href="https://fonts.googleapis.com/css?family=Source+Sans+Pro:300,400,400i,700" rel="stylesheet">
<!-- DataTables -->
<link rel="stylesheet" href="<?php echo base_url() ?>assets/plugins/datatables-bs4/css/dataTables.bootstrap4.min.css">
<link rel="stylesheet" href="<?php echo base_url() ?>assets/plugins/datatables-responsive/css/responsive.bootstrap4.min.css">
<!-- Tempusdominus Bbootstrap 4 -->
<link rel="stylesheet" href="<?php echo base_url() ?>assets/plugins/tempusdominus-bootstrap-4/css/tempusdominus-bootstrap-4.min.css">
<!-- SweetAlert2 -->
<link rel="stylesheet" href="<?php echo base_url() ?>assets/plugins/sweetalert2-theme-bootstrap-4/bootstrap-4.min.css">
<link href="<?php echo base_url() ?>assets/css/signature/jquery.signaturepad.css" rel="stylesheet">
<link rel="stylesheet" href="<?php echo base_url() ?>assets/css/custom.css">
<link rel="stylesheet" href="https://code.ionicframework.com/ionicons/2.0.1/css/ionicons.min.css">
</head>
<?php /* Attendance form view: pages with status_page == 0 are treated as
disabled and fall back to the 404 view. */ ?>
<?php if ($pages[0]['status_page'] == 0) {
    $this->load->view('v_404');
} else { ?>
    <section class="content-header">
        <div class="container-fluid">
            <div class="row mb-2">
                <div class="col-sm-6">
                </div><!-- /.col -->
            </div><!-- /.row -->
        </div><!-- /.container-fluid -->
        <div class="content mt-5">
            <div class="row">
                <div class="col-lg-3"></div>
                <div class="col-lg-6 col-sm-12">
                    <div class="card card-primary">
                        <div class="card-header p-5 d-flex justify-content-center text-center">
                            <h5 class="card-title">
                                <div>Form Pengisian Daftar Hadir Kegiatan <strong><?php echo $pages[0]['nama_kegiatan'] ?></strong></div>
                                <div class="mt-2">Pelaksanaan Tanggal <?php echo date('d F Y', strtotime($pages[0]['tanggal'])) ?></div>
                            </h5>
                        </div>
                        <!-- /.card-header -->
                        <!-- form start -->
                        <!-- The form has no action: submission is handled by the page's
                             script, which posts the fields together with the drawn
                             signature via AJAX. -->
                        <form method="post">
                            <div class="card-body">
                                <!-- Hidden field carries the id of the activity being attended. -->
                                <input type="hidden" name="id_kegiatan" value="<?php echo $pages[0]['id'] ?>">
                                <div class="form-group">
                                    <label for="nama">Nama</label>
                                    <input type="text" class="form-control" name="nama" id="nama" placeholder="Masukan nama anda">
                                    <span id="nama_error" class="text-danger"></span>
                                </div>
                                <div class="form-group">
                                    <label for="nip">NIP</label>
                                    <input type="text" class="form-control" name="nip" id="nip" placeholder="Masukan NIP anda">
                                    <span id="nip_error" class="text-danger"></span>
                                </div>
                                <div class="form-group">
                                    <label for="jabatan">Jabatan</label>
                                    <input type="text" class="form-control" name="jabatan" id="jabatan" placeholder="Masukan jabatan anda">
                                    <span id="jabatan_error" class="text-danger"></span>
                                </div>
                                <div class="form-group">
                                    <label for="instansi">Instansi</label>
                                    <input type="text" class="form-control" name="instansi" id="instansi" placeholder="Masukan instansi anda">
                                    <span id="instansi_error" class="text-danger"></span>
                                </div>
                                <div class="form-group">
                                    <label for="unit_kerja">Unit Kerja</label>
                                    <input type="text" class="form-control" name="unit_kerja" id="unit_kerja" placeholder="Masukan unit kerja anda">
                                    <span id="unit_kerja_error" class="text-danger"></span>
                                </div>
                                <div class="form-group">
                                    <label for="alamat_unit_kerja">Alamat Unit Kerja</label>
                                    <input type="text" class="form-control" name="alamat_unit_kerja" id="alamat_unit_kerja" placeholder="Masukan alamat unit kerja anda">
                                    <span id="alamat_unit_kerja_error" class="text-danger"></span>
                                </div>
                                <div class="form-group">
                                    <!-- Signature pad: the canvas is rasterised and sent
                                         as a base64 PNG on submit. -->
                                    <div id="signArea">
                                        <label for="">Tanda Tangan</label>
                                        <div class="sig sigWrapper form-control" style="height:auto; width: auto;">
                                            <div class="typed"></div>
                                            <canvas class="sign-pad" id="sign-pad" width="270" height="100"></canvas>
                                        </div>
                                    </div>
                                    <div class="btn btn-warning text-white mt-2" id="btnClearSign">Clear</div>
                                </div>
                            </div>
                            <!-- /.card-body -->
                            <div class="card-footer">
                                <button id="btn-submit" class="btn btn-primary float-right btn-block btn-lg">Submit</button>
                            </div>
                        </form>
                    </div>
                </div>
                <div class="col-lg-3"></div>
            </div>
        </div>
    </section>
<?php } ?>
<!-- SweetAlert2 -->
<script src="<?php echo base_url() ?>assets/plugins/jquery/jquery.min.js"></script>
<script src="<?php echo base_url() ?>assets/plugins/sweetalert2/sweetalert2.min.js"></script>
<script src="<?php echo base_url() ?>assets/js/signature/numeric-1.2.6.min.js"></script>
<script src="<?php echo base_url() ?>assets/js/signature/bezier.js"></script>
<script src="<?php echo base_url() ?>assets/js/signature/jquery.signaturepad.js"></script>
<script type='text/javascript' src="<?php echo base_url() ?>assets/js/signature/html2canvas.js"></script>
<!-- <script src="<?php echo base_url() ?>assets/js/custom.js"></script> -->
<script src="<?php echo base_url() ?>assets/js/signature/json2.min.js"></script>
<script>
$(document).ready(function() {
$("#signArea").signaturePad({
drawOnly: true,
drawBezierCurves: true,
lineTop: 90,
});
});
$("#btn-submit").click(function(e) {
e.preventDefault();
html2canvas([document.getElementById("sign-pad")], {
onrendered: function(canvas) {
var id_kegiatan = $("input[name='id_kegiatan']").val();
var nama = $("input[name='nama']").val();
var nip = $("input[name='nip']").val();
var jabatan = $("input[name='jabatan']").val();
var instansi = $("input[name='instansi']").val();
var unit_kerja = $("input[name='unit_kerja']").val();
var alamat_unit_kerja = $("input[name='alamat_unit_kerja']").val();
var canvas_img_data = canvas.toDataURL("image/png");
var img_data = canvas_img_data.replace(
/^data:image\/(png|jpg);base64,/,
""
);
//ajax call to save image inside folder
$.ajax({
url: "absenpages/absensi",
data: {
id_kegiatan,
nama,
nip,
jabatan,
instansi,
unit_kerja,
alamat_unit_kerja,
img_data,
},
type: "post",
dataType: "json",
success: function(response) {
Swal.fire({
title: "Apakah form sudah di isi dan sudah benar?",
text: "",
icon: "warning",
showCancelButton: true,
confirmButtonColor: "#3085d6",
cancelButtonColor: "#d33",
confirmButtonText: "Ya",
}).then((result) => {
if (result.value) {
if (response.status === "error") {
if (response.nama_error != "") {
$("#nama_error").html(response.nama_error);
$("#nama").addClass(response.class);
} else {
$("#nama_error").html("");
}
if (response.nip_error != "") {
$("#nip_error").html(response.nip_error);
$("#nip").addClass(response.class);
} else {
$("#nip_error").html("");
}
if (response.jabatan_error != "") {
$("#jabatan_error").html(response.jabatan_error);
$("#jabatan").addClass(response.class);
} else {
$("#jabatan_error").html("");
}
if (response.instansi_error != "") {
$("#instansi_error").html(response.instansi_error);
$("#instansi").addClass(response.class);
} else {
$("#instansi_error").html("");
}
if (response.unit_kerja_error != "") {
$("#unit_kerja_error").html(response.unit_kerja_error);
$("#unit_kerja").addClass(response.class);
} else {
$("#unit_kerja_error").html("");
}
if (response.alamat_unit_kerja_error != "") {
$("#alamat_unit_kerja_error").html(
response.alamat_unit_kerja_error
);
$("#alamat_unit_kerja").addClass(response.class);
} else {
$("#alamat_unit_kerja_error").html("");
}
} else if (response.status === "success") {
window.location.href = response.redirect;
}
}
});
},
});
},
});
});
$("#btnClearSign").click(function(e) {
$("#signArea").signaturePad().clearCanvas();
});
</script><file_sep>/application/controllers/AbsenPages.php
<?php
class AbsenPages extends CI_Controller
{
    /**
     * Show the public attendance form for the page identified by $slug,
     * or the 404 view when no such page exists.
     */
    public function index($slug)
    {
        $data['pages'] = $this->m_page_absensi->getPageById($slug);
        if ($data['pages']) {
            $this->load->view('v_absen_pages', $data);
        } else {
            $this->load->view('v_404');
        }
    }

    /**
     * AJAX endpoint: validate the submitted attendee data, store the
     * signature image and insert the attendee record.
     *
     * Echoes JSON:
     *  - {status:"error", *_error:..., class:"is-invalid"} when validation fails
     *  - {status:"success", redirect:...} when the record was stored
     */
    public function absensi()
    {
        $this->form_validation->set_rules('nama', 'nama', 'required', array('required' => 'Form %s harus disi'));
        $this->form_validation->set_rules('nip', 'nip', 'required|integer|min_length[18]|max_length[18]', array(
            'required' => 'Form %s harus disi',
            'integer' => 'Nip harus berupa angka',
            'min_length' => 'Masukan NIP anda dengan benar',
            'max_length' => 'Masukan NIP anda dengan benar'
        ));
        $this->form_validation->set_rules('jabatan', 'jabatan', 'required', array('required' => 'Form %s harus disi'));
        $this->form_validation->set_rules('instansi', 'instansi', 'required', array('required' => 'Form %s harus disi'));
        $this->form_validation->set_rules('unit_kerja', 'unit kerja', 'required', array('required' => 'Form %s harus disi'));
        $this->form_validation->set_rules('alamat_unit_kerja', 'alamat unit kerja', 'required', array('required' => 'Form %s harus disi'));
        $response = array();
        if ($this->form_validation->run() == false) {
            $response['status'] = 'error';
            $response['nama_error'] = form_error('nama');
            $response['nip_error'] = form_error('nip');
            $response['jabatan_error'] = form_error('jabatan');
            $response['instansi_error'] = form_error('instansi');
            $response['unit_kerja_error'] = form_error('unit_kerja');
            $response['alamat_unit_kerja_error'] = form_error('alamat_unit_kerja');
            $response['class'] = 'is-invalid';
            echo json_encode($response);
        } else {
            $imagedata = base64_decode($_POST['img_data']);
            // BUG FIX: the previous name, md5(date("dmYhisA")), was identical for
            // two submissions arriving within the same second, so one attendee's
            // signature could overwrite another's. uniqid()+mt_rand() yields a
            // per-request unique name.
            $filename = md5(uniqid(mt_rand(), true));
            // Location where the signature image is stored.
            $file_name = './assets/images/' . $filename . '.png';
            file_put_contents($file_name, $imagedata);
            $filenameReplace = str_replace("./", "", $file_name);
            $result['nama'] = $this->input->post('nama');
            $result['nip'] = $this->input->post('nip');
            $result['jabatan'] = $this->input->post('jabatan');
            $result['instansi'] = $this->input->post('instansi');
            $result['unit_kerja'] = $this->input->post('unit_kerja');
            $result['alamat_unit_kerja'] = $this->input->post('alamat_unit_kerja');
            $result['tanda_tangan'] = $filenameReplace;
            $result['kegiatan_id'] = $this->input->post('id_kegiatan');
            // NOTE(review): the insert result is not checked; "success" is echoed
            // even if the insert were to fail — confirm whether that is acceptable.
            $addKehadiran = $this->m_absen->input_data($result, 'peserta');
            $response['status'] = 'success';
            $response['redirect'] = site_url('absenpages/success_absen');
            echo json_encode($response);
        }
    }

    /** Confirmation page shown after the attendance record is stored. */
    public function success_absen()
    {
        $this->load->view('v_success_absen_page');
    }
}
| 7936a7e5681ae29b3a150526091f45b84fbfde0b | [
"PHP"
] | 3 | PHP | sapaweanging123/Project-Absensi-Kegiatan | c5b08fa4132a6464bffc2bfa85c7525502a30099 | 8d22a2dd62ac67ec04c1894531534ce325955091 |
refs/heads/main | <file_sep>var buah = ['Pisang','Jeruk','Apel','Mangga'];
function DeleteLast() {
buah.pop();
return buah;
}
function DeleteFirst() {
buah.shift();
return buah;
}
console.log(buah);
console.log(DeleteLast());
console.log(DeleteFirst()); | 5d815f31a39411add2e3c357d81728beeae56be5 | [
"JavaScript"
] | 1 | JavaScript | ervanadiwijaya/JS-TUGAS-12 | f12ca16ade151041e5b5030d0a8ef46ed44b21eb | e9dff347dd065b5db0c16fd21f08a84af0c860b7 |
refs/heads/master | <repo_name>alan3211/GAWAD<file_sep>/build/web/js/funciones.js
/**
 * Validates the login form (#nombre and #contra): paints Bootstrap
 * success/error feedback on each field and returns true only when both
 * fields are valid.
 */
function validar() {
    var nombre = $("#nombre").val();
    var pass = $("#contra").val();
    var nombreValido = false;
    var passValida = false;
    // Letters, digits, spaces and hyphens (incl. accented characters).
    var patronUsuario = /^[a-zA-Z0-9àáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ð -]+$/u;

    if (nombre === "") {
        $("#Nombre").attr("class", "form-group has-feedback has-error");
        $("#nombre01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
        $("#nombre02").removeClass("hidden");
        $("#nombre02").text("El campo nombre no puede estar vacío.");
    } else if (!patronUsuario.test(nombre)) {
        $("#Nombre").attr("class", "form-group has-feedback has-error");
        $("#nombre01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
        $("#nombre02").removeClass("hidden");
        $("#nombre02").text("El formato del campo nombre es incorrecto.");
    } else {
        $("#Nombre").attr("class", "form-group has-feedback has-success");
        $("#nombre01").attr("class", "glyphicon glyphicon-ok form-control-feedback");
        $("#nombre02").addClass("hidden");
        nombreValido = true;
    }

    if (pass === "") {
        $("#Pass1").attr("class", "form-group has-feedback has-error");
        $("#pass01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
        $("#pass02").removeClass("hidden");
        $("#pass02").text("El campo contraseña no puede estar vacío.");
    } else {
        $("#Pass1").attr("class", "form-group has-feedback has-success");
        $("#pass01").attr("class", "glyphicon glyphicon-ok form-control-feedback");
        $("#pass02").addClass("hidden");
        passValida = true;
    }

    return nombreValido && passValida;
}
/**
 * Validates the full registration form (#RegistraLogin): name, surnames,
 * boleta, sex, age, username and both password fields. Paints Bootstrap
 * feedback on every field and returns true only when ALL fields are valid;
 * otherwise shows a generic alert and returns false.
 *
 * BUG FIX: the previous version shared one validity flag (`nombres`) across
 * nombre/appat/apmat, so a later valid field overwrote an earlier failure and
 * the form could be accepted with invalid data. Each field now has its own
 * flag. A numeric age outside 10-130 also failed silently before; it now
 * shows an explicit message.
 */
function registro() {
    // Validation patterns (identical to the original definitions).
    var PATRON_NOMBRE = /^[a-zA-ZàáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ð -]+$/u;
    var PATRON_USUARIO = /^[a-zA-Z0-9àáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ð -]+$/u;
    var PATRON_EDAD = /^[0-9]+$/;
    var PATRON_BOLETA = /^[0-9]{8,10}$/;

    // Paint success/error Bootstrap feedback on one field; returns `valido`.
    function marcar(grupo, icono, aviso, valido, mensaje) {
        if (valido) {
            $("#" + grupo).attr("class", "form-group has-feedback has-success");
            $("#" + icono).attr("class", "glyphicon glyphicon-ok form-control-feedback");
            $("#" + aviso).addClass("hidden");
        } else {
            $("#" + grupo).attr("class", "form-group has-feedback has-error");
            $("#" + icono).attr("class", "glyphicon glyphicon-remove form-control-feedback");
            $("#" + aviso).removeClass("hidden");
            if (mensaje) {
                $("#" + aviso).text(mensaje);
            }
        }
        return valido;
    }

    // Required text field validated against a pattern.
    function validarTexto(valor, patron, grupo, icono, aviso, etiqueta) {
        if (valor === "") {
            return marcar(grupo, icono, aviso, false, "El campo " + etiqueta + " no puede estar vacío.");
        }
        if (!patron.test(valor)) {
            return marcar(grupo, icono, aviso, false, "El formato del campo " + etiqueta + " es incorrecto.");
        }
        return marcar(grupo, icono, aviso, true);
    }

    var nombreOk = validarTexto($("#nombre").val(), PATRON_NOMBRE, "Nombre", "nombre01", "nombre02", "nombre");
    var appatOk = validarTexto($("#appat").val(), PATRON_NOMBRE, "Appaterno", "appat01", "appat02", "apellido paterno");
    var apmatOk = validarTexto($("#apmat").val(), PATRON_NOMBRE, "Apmaterno", "apmat01", "apmat02", "apellido materno");
    var boletaOk = validarTexto($("#idUser").val(), PATRON_BOLETA, "Boleta", "iduser01", "iduser02", "boleta");
    var usuarioOk = validarTexto($("#username").val(), PATRON_USUARIO, "Username", "username01", "username02", "nombre de usuario");

    // Sex: one radio button must be checked.
    var sexoOk = false;
    if ($('input[name="sexo"]').is(':checked')) {
        $("#Sexo").attr("class", "form-group has-feedback has-success");
        $("#sexo02").addClass("hidden");
        sexoOk = true;
    } else {
        swal("huy...error", "Selecciona un sexo", "error");
        $("#Sexo").attr("class", "form-group has-feedback has-error");
        $("#sexo02").removeClass("hidden");
    }

    // Age: numeric and within 10-130.
    var edad = $("#edad").val();
    var edadOk = false;
    if (edad === "") {
        marcar("Edad", "edad01", "edad02", false, "El campo edad no puede estar vacío.");
    } else if (!PATRON_EDAD.test(edad)) {
        marcar("Edad", "edad01", "edad02", false, "El formato del campo edad es incorrecto.");
    } else if (edad >= 10 && edad <= 130) {
        edadOk = marcar("Edad", "edad01", "edad02", true);
    } else {
        // Previously this case failed with no visible feedback.
        marcar("Edad", "edad01", "edad02", false, "La edad debe estar entre 10 y 130.");
    }

    // Passwords: both required and matching.
    var pass = $("#pass").val();
    var repass = $("#repcontra").val();
    var passOk = false;
    if (pass === "") {
        marcar("Pass1", "pass01", "pass02", false);
    }
    if (repass === "") {
        marcar("Pass2", "repass01", "repass02", false, "El campo contraseña no puede estar vacío");
    } else if (pass !== repass) {
        marcar("Pass1", "pass01", "pass02", false);
        marcar("Pass2", "repass01", "repass02", false, "Las contraseñas no coinciden");
    } else {
        // Here pass === repass and repass !== "", so both are non-empty.
        marcar("Pass1", "pass01", "pass02", true);
        marcar("Pass2", "repass01", "repass02", true);
        passOk = true;
    }

    if (nombreOk && appatOk && apmatOk && boletaOk && sexoOk && edadOk && usuarioOk && passOk) {
        return true;
    }
    swal("huy...existen errores", "Revisa que no hallan errores", "error");
    return false;
}
// Resets every field of the registration form to its initial value.
function limpiar() {
    var formulario = document.getElementById("RegistraLogin");
    formulario.reset();
}
/**
 * Shows a goodbye alert and then redirects to the index page.
 */
function cerrarsesion() {
    swal({
        title: "Cerrando Sesion",
        text: "Hasta pronto!!",
        timer: 3000,
        showConfirmButton: false
    });
    // BUG FIX: pass the function reference instead of the string form
    // setTimeout("redireccionar()", ...) — the string form is implicitly
    // eval'd and is a long-deprecated anti-pattern.
    // NOTE(review): the alert is shown for 3000 ms but the redirect fires at
    // 1000 ms — confirm whether that mismatch is intentional.
    setTimeout(redireccionar, 1000);
}
// Navigates the browser back to the login/index page.
function redireccionar() {
    window.location = "./index.jsp";
}
/**
 * Validates the admin "add user" form: name, surnames, boleta, sex, age,
 * username and a single password field. Paints Bootstrap feedback on every
 * field and returns true only when ALL fields are valid; otherwise shows a
 * generic alert and returns false.
 *
 * BUG FIX: like registro(), the previous version shared one validity flag
 * across nombre/appat/apmat, so a later valid field overwrote an earlier
 * failure. Each field now has its own flag, and a numeric age outside
 * 10-130 shows an explicit message instead of failing silently.
 */
function agregarva() {
    // Validation patterns (identical to the original definitions).
    var PATRON_NOMBRE = /^[a-zA-ZàáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ð -]+$/u;
    var PATRON_USUARIO = /^[a-zA-Z0-9àáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ð -]+$/u;
    var PATRON_EDAD = /^[0-9]+$/;
    var PATRON_BOLETA = /^[0-9]{8,10}$/;

    // Paint success/error Bootstrap feedback on one field; returns `valido`.
    function marcar(grupo, icono, aviso, valido, mensaje) {
        if (valido) {
            $("#" + grupo).attr("class", "form-group has-feedback has-success");
            $("#" + icono).attr("class", "glyphicon glyphicon-ok form-control-feedback");
            $("#" + aviso).addClass("hidden");
        } else {
            $("#" + grupo).attr("class", "form-group has-feedback has-error");
            $("#" + icono).attr("class", "glyphicon glyphicon-remove form-control-feedback");
            $("#" + aviso).removeClass("hidden");
            if (mensaje) {
                $("#" + aviso).text(mensaje);
            }
        }
        return valido;
    }

    // Required text field validated against a pattern.
    function validarTexto(valor, patron, grupo, icono, aviso, etiqueta) {
        if (valor === "") {
            return marcar(grupo, icono, aviso, false, "El campo " + etiqueta + " no puede estar vacío.");
        }
        if (!patron.test(valor)) {
            return marcar(grupo, icono, aviso, false, "El formato del campo " + etiqueta + " es incorrecto.");
        }
        return marcar(grupo, icono, aviso, true);
    }

    var nombreOk = validarTexto($("#nombre").val(), PATRON_NOMBRE, "Nombre", "nombre01", "nombre02", "nombre");
    var appatOk = validarTexto($("#appat").val(), PATRON_NOMBRE, "Appaterno", "appat01", "appat02", "apellido paterno");
    var apmatOk = validarTexto($("#apmat").val(), PATRON_NOMBRE, "Apmaterno", "apmat01", "apmat02", "apellido materno");
    var boletaOk = validarTexto($("#idUser").val(), PATRON_BOLETA, "Boleta", "iduser01", "iduser02", "boleta");
    var usuarioOk = validarTexto($("#username").val(), PATRON_USUARIO, "Username", "username01", "username02", "nombre de usuario");

    // Sex: one radio button must be checked.
    var sexoOk = false;
    if ($('input[name="sexo"]').is(':checked')) {
        $("#Sexo").attr("class", "form-group has-feedback has-success");
        $("#sexo02").addClass("hidden");
        sexoOk = true;
    } else {
        swal("huy...error", "Selecciona un sexo", "error");
        $("#Sexo").attr("class", "form-group has-feedback has-error");
        $("#sexo02").removeClass("hidden");
    }

    // Age: numeric and within 10-130.
    var edad = $("#edad").val();
    var edadOk = false;
    if (edad === "") {
        marcar("Edad", "edad01", "edad02", false, "El campo edad no puede estar vacío.");
    } else if (!PATRON_EDAD.test(edad)) {
        marcar("Edad", "edad01", "edad02", false, "El formato del campo edad es incorrecto.");
    } else if (edad >= 10 && edad <= 130) {
        edadOk = marcar("Edad", "edad01", "edad02", true);
    } else {
        // Previously this case failed with no visible feedback.
        marcar("Edad", "edad01", "edad02", false, "La edad debe estar entre 10 y 130.");
    }

    // Single password field: required only.
    var pass = $("#pass").val();
    var passOk = false;
    if (pass === "") {
        marcar("Pass1", "pass01", "pass02", false, "El formato de contraseña no puede estar vacio.");
    } else {
        passOk = marcar("Pass1", "pass01", "pass02", true);
    }

    if (nombreOk && appatOk && apmatOk && boletaOk && sexoOk && edadOk && usuarioOk && passOk) {
        return true;
    }
    swal("huy...existen errores", "Revisa que no hallan errores", "error");
    return false;
}
function actualizava(){
var nombres = false, contrasenas = false, tipo = false, ages = false, users = false,boletas = false,roles = false;
var nombre = $("#nombre").val();
var appat = $("#appat").val();
var apmat = $("#apmat").val();
var id = $("#idUser").val();
var sexo = $('input[name=sexo]:checked', '#RegistraLogin').val();
var edad = $("#edad").val();
var username = $("#username").val();
var pass = $("#pass").val();
var rol = $("[name='roles']").val();
function valname(name) {
var ts = /^[a-zA-ZàáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ð -]+$/u;
return ts.test(name);
}
function valusers(name) {
var ts = /^[a-zA-Z0-9àáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ð -]+$/u;
return ts.test(name);
}
function valedad(name) {
var ts = /^[0-9]+$/;
return ts.test(name);
}
function valboleta(boleta) {
var ts = /^[0-9]{8,10}$/;
return ts.test(boleta);
}
if (nombre === "") {
$("#Nombre").attr("class", "form-group has-feedback has-error");
$("#nombre01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#nombre02").removeClass("hidden");
$("#nombre02").text("El campo nombre no puede estar vacío.");
nombres = false;
} else if (!valname(nombre)) {
$("#Nombre").attr("class", "form-group has-feedback has-error");
$("#nombre01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#nombre02").removeClass("hidden");
$("#nombre02").text("El formato del campo nombre es incorrecto.");
nombres = false;
} else {
$("#Nombre").attr("class", "form-group has-feedback has-success");
$("#nombre01").attr("class", "glyphicon glyphicon-ok form-control-feedback");
$("#nombre02").addClass("hidden");
nombres = true;
}
//Apellido Paterno
if (appat === "") {
$("#Appaterno").attr("class", "form-group has-feedback has-error");
$("#appat01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#appat02").removeClass("hidden");
$("#appat02").text("El campo apellido paterno no puede estar vacío.");
nombres = false;
} else if (!valname(appat)) {
$("#Appaterno").attr("class", "form-group has-feedback has-error");
$("#appat01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#appat02").removeClass("hidden");
$("#appat02").text("El formato del campo apellido paterno es incorrecto.");
nombres = false;
} else {
$("#Appaterno").attr("class", "form-group has-feedback has-success");
$("#appat01").attr("class", "glyphicon glyphicon-ok form-control-feedback");
$("#appat02").addClass("hidden");
nombres = true;
}
//Apellido Materno
if (apmat === "") {
$("#Apmaterno").attr("class", "form-group has-feedback has-error");
$("#apmat01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#apmat02").removeClass("hidden");
$("#apmat02").text("El campo apellido materno no puede estar vacío.");
nombres = false;
} else if (!valname(apmat)) {
$("#Apmaterno").attr("class", "form-group has-feedback has-error");
$("#apmat01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#apmat02").removeClass("hidden");
$("#apmat02").text("El formato del campo apellido materno es incorrecto.");
nombres = false;
} else {
$("#Apmaterno").attr("class", "form-group has-feedback has-success");
$("#apmat01").attr("class", "glyphicon glyphicon-ok form-control-feedback");
$("#apmat02").addClass("hidden");
nombres = true;
}
//validando boleta
if(id === ""){
$("#Boleta").attr("class", "form-group has-feedback has-error");
$("#iduser01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#iduser02").removeClass("hidden");
$("#iduser02").text("El campo boleta no puede estar vacío.");
boletas = false;
}else if (!valboleta(id)) {
$("#Boleta").attr("class", "form-group has-feedback has-error");
$("#iduser01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#iduser02").removeClass("hidden");
$("#iduser02").text("El formato del campo boleta es incorrecto.");
boletas = false;
} else {
$("#Boleta").attr("class", "form-group has-feedback has-success");
$("#iduser01").attr("class", "glyphicon glyphicon-ok form-control-feedback");
$("#iduser02").addClass("hidden");
boletas = true;
}
//Validando radio button
if ($('input[name="sexo"]').is(':checked')) {
$("#Sexo").attr("class", "form-group has-feedback has-success");
$("#sexo02").addClass("hidden");
tipo = true;
} else {
swal("huy...error","Selecciona un sexo","error");
$("#Sexo").attr("class", "form-group has-feedback has-error");
$("#sexo02").removeClass("hidden");
tipo = false;
}
//Validando edad
if (edad === "") {
$("#Edad").attr("class", "form-group has-feedback has-error");
$("#edad01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#edad02").removeClass("hidden");
$("#edad02").text("El campo edad no puede estar vacío.");
ages = false;
} else if (!valedad(edad)) {
$("#Edad").attr("class", "form-group has-feedback has-error");
$("#edad01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#edad02").removeClass("hidden");
$("#edad02").text("El formato del campo edad es incorrecto.");
ages = false;
}else if(edad >= 10 && edad<= 130){
$("#Edad").attr("class", "form-group has-feedback has-success");
$("#edad01").attr("class", "glyphicon glyphicon-ok form-control-feedback");
$("#edad02").addClass("hidden");
ages = true;
}else {
ages = false;
}
//Validando el nombre del usuario
if (username === "") {
$("#Username").attr("class", "form-group has-feedback has-error");
$("#username01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#username02").removeClass("hidden");
$("#username02").text("El campo nombre de usuario no puede estar vacío.");
users = false;
} else if (!valusers(username)) {
$("#Username").attr("class", "form-group has-feedback has-error");
$("#username01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#username02").removeClass("hidden");
$("#username02").text("El formato del campo nombre de usuario es incorrecto.");
users = false;
} else {
$("#Username").attr("class", "form-group has-feedback has-success");
$("#username01").attr("class", "glyphicon glyphicon-ok form-control-feedback");
$("#username02").addClass("hidden");
users = true;
}
if (pass == ""){
$("#Pass1").attr("class", "form-group has-feedback has-error");
$("#pass01").attr("class", "glyphicon glyphicon-remove form-control-feedback");
$("#pass02").removeClass("hidden");
$("#pass02").text("El formato de contraseña no puede estar vacio.");
contrasenas = false;
}else{
$("#Pass1").attr("class", "form-group has-feedback has-success");
$("#pass01").attr("class", "glyphicon glyphicon-ok form-control-feedback");
$("#pass02").addClass("hidden");
contrasenas = true;
}
if(rol == -1){
$("#Rol").attr("class", "form-group has-feedback has-error");
}else{
$("#Rol").attr("class", "form-group has-feedback has-success");
roles = true;
}
if (nombres && tipo && ages && users && contrasenas && boletas && roles){
return true;
}else{
swal("huy...existen errores", "Revisa que no hallan errores","error");
return false;
}
}
<file_sep>/BaseDatos/BaseGeolitic.sql
-- phpMyAdmin SQL Dump
-- version 4.5.2
-- http://www.phpmyadmin.net
--
-- Servidor: localhost
-- Tiempo de generación: 05-06-2016 a las 21:16:56
-- Versión del servidor: 10.1.13-MariaDB
-- Versión de PHP: 7.0.6
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
--
-- Base de datos: `BaseGeolitic`
--
-- --------------------------------------------------------
--
-- Estructura de tabla para la tabla `admin`
--
CREATE TABLE `admin` (
`idAdmin` int(11) NOT NULL,
`user_idUser` varchar(30) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- --------------------------------------------------------
--
-- Estructura de tabla para la tabla `alumno`
--
CREATE TABLE `alumno` (
`Boleta` int(11) NOT NULL,
`grupos_Grupo` varchar(30) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- --------------------------------------------------------
--
-- Estructura de tabla para la tabla `funcion`
--
CREATE TABLE `funcion` (
`Numfun` int(11) NOT NULL,
`funcion` varchar(45) NOT NULL,
`grupos_Grupo` varchar(30) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- --------------------------------------------------------
--
-- Estructura de tabla para la tabla `grupos`
--
CREATE TABLE `grupos` (
`Grupo` varchar(30) NOT NULL,
`profesor_NumProf` int(11) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- --------------------------------------------------------
--
-- Estructura de tabla para la tabla `profesor`
--
CREATE TABLE `profesor` (
`NumProf` int(11) NOT NULL,
`ases` bit(1) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- --------------------------------------------------------
--
-- Estructura de tabla para la tabla `user`
--
CREATE TABLE `user` (
`idUser` varchar(30) NOT NULL DEFAULT '0',
`name` varchar(30) NOT NULL,
`ApPat` varchar(30) NOT NULL,
`ApMat` varchar(30) NOT NULL,
`Age` varchar(10) NOT NULL,
`Sex` varchar(10) NOT NULL,
`username` varchar(30) NOT NULL,
`pass` varchar(30) NOT NULL,
`Rol` int(11) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Volcado de datos para la tabla `user`
--
INSERT INTO `user` (`idUser`, `name`, `ApPat`, `ApMat`, `Age`, `Sex`, `username`, `pass`, `Rol`) VALUES
('2013601590', '<NAME>', 'Hernandez', 'Sanchez', '22', 'M', 'alan3211', 'alan', 1),
('2013601591', 'Maria', 'Manrique', 'Hernandez', '23', 'F', 'Mary11', '123', 2),
('2013741480', 'Administrator', 'administrator', 'Administrator', '22', 'M', 'administrator', 'admin', 3),
('2015634584', 'Ruben', 'Peredo', 'Valderrama', '45', 'M', 'ruben', 'rpv', 1);
-- --------------------------------------------------------
--
-- Estructura de tabla para la tabla `user_has_alumno`
--
CREATE TABLE `user_has_alumno` (
`user_idUser` varchar(30) NOT NULL,
`alumno_Boleta` int(11) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- --------------------------------------------------------
--
-- Estructura de tabla para la tabla `user_has_profesor`
--
CREATE TABLE `user_has_profesor` (
`user_idUser` varchar(30) NOT NULL,
`profesor_NumProf` int(11) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Índices para tablas volcadas
--
--
-- Indices de la tabla `admin`
--
ALTER TABLE `admin`
ADD PRIMARY KEY (`idAdmin`,`user_idUser`),
ADD KEY `fk_admin_user1_idx` (`user_idUser`);
--
-- Indices de la tabla `alumno`
--
ALTER TABLE `alumno`
ADD PRIMARY KEY (`Boleta`),
ADD KEY `fk_alumno_grupos1_idx` (`grupos_Grupo`);
--
-- Indices de la tabla `funcion`
--
ALTER TABLE `funcion`
ADD PRIMARY KEY (`Numfun`),
ADD KEY `fk_funcion_grupos1_idx` (`grupos_Grupo`);
--
-- Indices de la tabla `grupos`
--
ALTER TABLE `grupos`
ADD PRIMARY KEY (`Grupo`),
ADD KEY `fk_grupos_profesor1_idx` (`profesor_NumProf`);
--
-- Indices de la tabla `profesor`
--
ALTER TABLE `profesor`
ADD PRIMARY KEY (`NumProf`);
--
-- Indices de la tabla `user`
--
ALTER TABLE `user`
ADD PRIMARY KEY (`idUser`);
--
-- Indices de la tabla `user_has_alumno`
--
ALTER TABLE `user_has_alumno`
ADD PRIMARY KEY (`user_idUser`,`alumno_Boleta`),
ADD KEY `fk_user_has_alumno_alumno1_idx` (`alumno_Boleta`),
ADD KEY `fk_user_has_alumno_user1_idx` (`user_idUser`);
--
-- Indices de la tabla `user_has_profesor`
--
ALTER TABLE `user_has_profesor`
ADD PRIMARY KEY (`user_idUser`,`profesor_NumProf`),
ADD KEY `fk_user_has_profesor_profesor1_idx` (`profesor_NumProf`),
ADD KEY `fk_user_has_profesor_user1_idx` (`user_idUser`);
--
-- AUTO_INCREMENT de las tablas volcadas
--
--
-- AUTO_INCREMENT de la tabla `funcion`
--
ALTER TABLE `funcion`
MODIFY `Numfun` int(11) NOT NULL AUTO_INCREMENT;
--
-- Restricciones para tablas volcadas
--
--
-- Filtros para la tabla `admin`
--
ALTER TABLE `admin`
ADD CONSTRAINT `fk_admin_user1` FOREIGN KEY (`user_idUser`) REFERENCES `user` (`idUser`) ON DELETE NO ACTION ON UPDATE NO ACTION;
--
-- Filtros para la tabla `alumno`
--
ALTER TABLE `alumno`
ADD CONSTRAINT `fk_alumno_grupos1` FOREIGN KEY (`grupos_Grupo`) REFERENCES `grupos` (`Grupo`) ON DELETE NO ACTION ON UPDATE NO ACTION;
--
-- Filtros para la tabla `funcion`
--
ALTER TABLE `funcion`
ADD CONSTRAINT `fk_funcion_grupos1` FOREIGN KEY (`grupos_Grupo`) REFERENCES `grupos` (`Grupo`) ON DELETE NO ACTION ON UPDATE NO ACTION;
--
-- Filtros para la tabla `grupos`
--
ALTER TABLE `grupos`
ADD CONSTRAINT `fk_grupos_profesor1` FOREIGN KEY (`profesor_NumProf`) REFERENCES `profesor` (`NumProf`) ON DELETE NO ACTION ON UPDATE NO ACTION;
--
-- Filtros para la tabla `user_has_alumno`
--
ALTER TABLE `user_has_alumno`
ADD CONSTRAINT `fk_user_has_alumno_alumno1` FOREIGN KEY (`alumno_Boleta`) REFERENCES `alumno` (`Boleta`) ON DELETE NO ACTION ON UPDATE NO ACTION,
ADD CONSTRAINT `fk_user_has_alumno_user1` FOREIGN KEY (`user_idUser`) REFERENCES `user` (`idUser`) ON DELETE NO ACTION ON UPDATE NO ACTION;
--
-- Filtros para la tabla `user_has_profesor`
--
ALTER TABLE `user_has_profesor`
ADD CONSTRAINT `fk_user_has_profesor_profesor1` FOREIGN KEY (`profesor_NumProf`) REFERENCES `profesor` (`NumProf`) ON DELETE NO ACTION ON UPDATE NO ACTION,
ADD CONSTRAINT `fk_user_has_profesor_user1` FOREIGN KEY (`user_idUser`) REFERENCES `user` (`idUser`) ON DELETE NO ACTION ON UPDATE NO ACTION;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
<file_sep>/src/java/com/geolitic/model/AccionLogin.java
package com.geolitic.model;
import com.geolitic.POJO.LoginBean;
import java.beans.PropertyVetoException;
import java.io.IOException;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.struts2.interceptor.CookiesAware;
import org.apache.struts2.interceptor.ServletRequestAware;
import org.apache.struts2.interceptor.ServletResponseAware;
import org.apache.struts2.interceptor.SessionAware;
public class AccionLogin implements SessionAware,CookiesAware,ServletRequestAware,ServletResponseAware{
private String nombre;
private String contra;
private HttpServletRequest request = null;
private HttpServletResponse response = null;
private Map<String,Object> session;
private Map<String, String> cajagalletas;
public String getNombre() {
return nombre;
}
public void setNombre(String nombre) {
this.nombre = nombre;
}
public String getContra() {
return contra;
}
public void setContra(String contra) {
this.contra = contra;
}
public String execute() throws PropertyVetoException, SQLException, IOException{
LoginBean lb = new LoginBean();
if(lb.validateUser(nombre,contra)){
session.put("nombre",nombre);
session.put("pass",contra);
Cookie mi_nombre = new Cookie("nombre",nombre);
Cookie mi_pass = new Cookie("pass",contra);
mi_nombre.setMaxAge(60*24*30*365);
mi_pass.setMaxAge(60*24*30*365);
response.addCookie(mi_nombre);
response.addCookie(mi_pass);
if(cajagalletas == null) cajagalletas = new HashMap<>();
else{
cajagalletas.put("nombre", nombre);
cajagalletas.put("pass", contra);
}
int tipo = lb.tipousuario(nombre,contra);
switch (tipo) {
case 1:
return "Administrador"; //Si fue Administrador
case 2:
return "Profesor"; //Si fue Profesor
default:
return "Alumno"; //Si fue Alumno
}
}else{
return "Fracaso";
}
}
@Override
public void setSession(Map<String, Object> map) {
session = map;
}
@Override
public void setServletRequest(HttpServletRequest hsr) {
request = hsr;
}
@Override
public void setServletResponse(HttpServletResponse hsr) {
response = hsr;
}
@Override
public void setCookiesMap(Map<String, String> map) {
cajagalletas = map;
}
}
<file_sep>/nbproject/private/private.properties
deploy.ant.properties.file=/home/alan32/.netbeans/8.1/tomcat80.properties
j2ee.platform.is.jsr109=true
j2ee.server.domain=/home/alan32/Documentos/GlassFish_Server/glassfish/domains/domain1
j2ee.server.home=/home/alan32/Documentos/apache-tomcat-8.5.0
j2ee.server.instance=tomcat80:home=/home/alan32/Documentos/apache-tomcat-8.5.0
j2ee.server.middleware=/home/alan32/Documentos/GlassFish_Server
javac.debug=true
javadoc.preview=true
selected.browser=Chrome
user.properties.file=/home/alan32/.netbeans/8.1/build.properties
<file_sep>/src/java/com/geolitic/model/AgregaAction.java
package com.geolitic.model;
import com.geolitic.hibernate.HibernateUtil;
import com.geolitic.hibernate.User;
import com.opensymphony.xwork2.ActionSupport;
import java.util.ArrayList;
import org.hibernate.Session;
import org.hibernate.Transaction;
public class AgregaAction extends ActionSupport {
private String idUser;
private String nombre;
private String appat;
private String apmat;
private String sexo;
private String edad;
private String username;
private String pass;
private Session hibernateSession;
private ArrayList<User> usuarios;
public ArrayList<User> getUsuarios() {
return usuarios;
}
public void setUsuarios(ArrayList<User> usuarios) {
this.usuarios = usuarios;
}
public Session getHibernateSession() {
return hibernateSession;
}
public void setHibernateSession(Session hibernateSession) {
this.hibernateSession = hibernateSession;
}
public String getIdUser() {
return idUser;
}
public void setIdUser(String idUser) {
this.idUser = idUser;
}
public String getNombre() {
return nombre;
}
public void setNombre(String nombre) {
this.nombre = nombre;
}
public String getAppat() {
return appat;
}
public void setAppat(String appat) {
this.appat = appat;
}
public String getApmat() {
return apmat;
}
public void setApmat(String apmat) {
this.apmat = apmat;
}
public String getSexo() {
return sexo;
}
public void setSexo(String sexo) {
this.sexo = sexo;
}
public String getEdad() {
return edad;
}
public void setEdad(String edad) {
this.edad = edad;
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPass() {
return pass;
}
public void setPass(String pass) {
this.pass = pass;
}
@Override
public String execute() throws Exception {
hibernateSession=HibernateUtil.getSessionFactory().openSession();
if(hibernateSession != null){
//Insert
Transaction t0=hibernateSession.beginTransaction();
User user0 = new User();
user0.setIdUser(idUser);
user0.setName(nombre);
user0.setApPat(appat);
user0.setApMat(apmat);
user0.setSex(sexo);
user0.setAge(edad);
user0.setUsername(username);
user0.setPass(<PASSWORD>);
user0.setRol(3);
hibernateSession.save(user0);
t0.commit();
return "Agregado";
}else{
return "Fallo";
}
}
}
<file_sep>/src/java/com/geolitic/POJO/DataSource.java
package com.geolitic.POJO;
import java.beans.PropertyVetoException;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import org.apache.commons.dbcp.BasicDataSource;
public class DataSource {
private static DataSource ds;
private BasicDataSource bds;
//<NAME>
private DataSource() throws IOException, PropertyVetoException, SQLException {
bds = new BasicDataSource();
bds.setDriverClassName("org.gjt.mm.mysql.Driver");
bds.setUsername("root");
bds.setPassword("<PASSWORD>");
bds.setUrl("jdbc:mysql://localhost:3306/BaseGeolitic");
//Configuraciones
bds.setMinIdle(5);
bds.setMaxIdle(20);
bds.setMaxOpenPreparedStatements(180);
}
public static DataSource getInstance() throws IOException, PropertyVetoException, SQLException {
if (ds == null) {
ds = new DataSource();
return ds;
} else {
return ds;
}
}
public Connection getConnection() throws SQLException {
return bds.getConnection();
}
}
<file_sep>/src/java/com/geolitic/model/ExtraccionAlumnosAction.java
package com.geolitic.model;
import com.geolitic.hibernate.HibernateUtil;
import com.geolitic.hibernate.User;
import com.opensymphony.xwork2.ActionSupport;
import java.util.ArrayList;
import org.hibernate.Session;
public class ExtraccionAlumnosAction extends ActionSupport {
private ArrayList<User> usuarios;
Session hibernateSession;
public ArrayList<User> getUsuarios() {
return usuarios;
}
public void setUsuarios(ArrayList<User> usuarios) {
this.usuarios = usuarios;
}
@SuppressWarnings("unchecked")
public String listado() throws Exception{
hibernateSession = HibernateUtil.getSessionFactory().openSession();
hibernateSession.beginTransaction();
usuarios = (ArrayList<User>) hibernateSession.createQuery("from User").list();
return "tablaprofes";
}
}
| cf3bcf4b8437fd34511f69105c836a7bd9d2ed49 | [
"JavaScript",
"SQL",
"Java",
"INI"
] | 7 | JavaScript | alan3211/GAWAD | a69a7c162618a34a6f629780fa15a53d1ab418ef | 1a887db380499cd95cf48e81cbe6c55937a7053a |
refs/heads/master | <repo_name>cnxtech/bfx-stuff-ui<file_sep>/examples/chart_marks_twitter.js
const async = require('async')
const Twit = require('twit')
const Sentiment = require('sentiment')
const uiLib = require('./../lib-ui.js')
const THRES_RETWEETS = 25
const THRES_FOLLOWERS = 5000
let USERS = ['paoloardoino', 'bitfinex', 'iamnomad', 'flibbr', 'alistairmilne', 'ZeusZissou', 'adam3us', 'VitalikButerin', 'EOS_io', 'Tether_to', 'ethfinex', 'eosfinexproject', 'aantonop', 'coindesk', 'cointelegraph', 'loomdart']
let MID = 0
const sentiment = new Sentiment()
function getTweetInfo (tweet, is_stream = false) {
if (!is_stream && tweet.retweet_count < THRES_RETWEETS) {
return
}
if (tweet.user.followers_count < THRES_FOLLOWERS) {
return
}
const s = sentiment.analyze(tweet.text)
let color_bg = '#0000FF'
if (s.score > 0) {
color_bg = `#00FF00`
} else if (s.score < 0) {
color_bg = `#FF0000`
}
return {
id: 'mark_' + MID++,
ts: (new Date(tweet.created_at)).getTime(),
// symbol: 'tBTCUSD',
content: `<img src="${tweet.user.profile_image_url_https}" /><br />${tweet.text}`,
color_bg: color_bg,
color_text: '#FFFFFF',
label: 'W',
size_min: 15
}
}
function run (wss, conf, keyword) {
if (keyword) {
USERS = keyword.split(',')
}
const T = new Twit({
consumer_key: conf.twitterKey,
consumer_secret: conf.twitterSecret,
access_token: conf.twitterAccessToken,
access_token_secret: conf.twitterAccessSecret
})
uiLib.clearMarks(wss)
async.eachSeries(USERS, (inf, next) => {
T.get(
'statuses/user_timeline',
{ screen_name: inf, count: 30 },
(err, data, response) => {
if (!data) {
return next()
}
data.forEach(tweet => {
const data = getTweetInfo(tweet)
if (!data) {
return true
}
uiLib.addMark(
wss, data
)
return true
})
next()
}
)
}, (err) => {
if (err) {
console.error('INIT', err)
}
})
const stream = T.stream('statuses/filter', { track: USERS.map(x => `@${x}`) })
stream.on('error', (err) => {
console.error(err)
})
stream.on('tweet', tweet => {
const data = getTweetInfo(tweet, true)
if (!data) {
return
}
uiLib.addMark(
wss, data
)
uiLib.sendNotification(
wss,
tweet.user.profile_image_url_https,
`https://twitter.com/${tweet.user.screen_name}/${tweet.id}`,
tweet.text,
{
tone: 'pingUp'
}
)
})
}
module.exports = {
run: run
}
<file_sep>/bi.js
const WebSocket = require('ws')
const crypto = require('crypto')
const fs = require('fs')
let conf
try {
conf = JSON.parse(fs.readFileSync(`${__dirname}/config.json`, 'utf8'))
} catch (e) {
console.error('config.json: file not found')
process.exit(-1)
}
if (!process.argv[2]) {
console.error('example not specified')
process.exit(-1)
}
let script
try {
script = require(`${__dirname}/examples/${process.argv[2]}.js`)
} catch (e) {
console.error('script: not found')
process.exit(-1)
}
if (!script.run) {
console.error('script: run function not found')
process.exit(-1)
}
const w1_apiKey = conf.apiKey
const w1_apiSecret = conf.apiSecret
const wss = new WebSocket('wss://api.bitfinex.com/ws/2')
const authNonce = (new Date()).getTime() * 1000
const payload = 'AUTH' + authNonce
const signature = crypto.createHmac('sha384', w1_apiSecret).update(payload).digest('hex')
wss.on('message', function (data) {
data = JSON.parse(data)
if (data.event) {
if (data.event === 'auth' && data.status !== 'OK') {
console.error('AUTHENTICATION FAILED')
process.exit(-1)
}
}
})
wss.on('open', function () {
wss.send(JSON.stringify({
event: 'auth',
apiKey: w1_apiKey,
authSig: signature,
authPayload: payload,
authNonce: +authNonce
}))
setInterval(() => {
wss.send(JSON.stringify({ event: 'ping' }))
}, 15000)
setTimeout(() => {
script.run(wss, conf, process.argv[3])
}, 2000)
})
wss.on('close', function () {
console.error('SOCKET CLOSED')
process.exit(-1)
})
<file_sep>/examples/chart_shapes_basic.js
const request = require('request')
const uiLib = require('./../lib-ui.js')
function genPrice() {
return Math.round((Math.random() * 500)) + 3500
}
function run (wss) {
let MID = 0
uiLib.clearShapes(wss)
const cnt = 100
let lastPrice = null
for (let i = 0; i < cnt; i++) {
if (!lastPrice) {
lastPrice = genPrice()
}
const newPrice = genPrice()
uiLib.addShape(
wss, {
points: [
{
ts: Date.now() - (1000 * 3600 * (cnt - i + 1)),
price: lastPrice,
},
{
ts: Date.now() - (1000 * 3600 * (cnt - i)),
price: newPrice
}
],
options: {
shape: 'trend_line',
overrides: {
linecolor: '#00FF00',
},
}
}
)
lastPrice = newPrice
}
}
module.exports = {
run: run
}
<file_sep>/lib-ui.js
function addShape(wss, opts) {
opts.type = 'shape_create'
wss.send(JSON.stringify(
[0, 'n', 12345, {
type: 'ucm-ui-chart',
info: opts
}])
)
}
function clearShapes(wss) {
wss.send(JSON.stringify([0, 'n', 12345, { type: 'ucm-ui-chart', info: {
type: 'shape_clear',
} }])
)
}
function addMark(wss, opts) {
opts.type = 'marker_create'
wss.send(JSON.stringify(
[0, 'n', 12345, {
type: 'ucm-ui-chart',
info: opts
}])
)
}
function clearMarks(wss) {
wss.send(JSON.stringify([0, 'n', 12345, { type: 'ucm-ui-chart', info: {
type: 'marker_clear',
} }])
)
}
function sendNotification (wss, image, link, message, sound) {
wss.send(JSON.stringify([0, 'n', 12345, { type: 'ucm-notify-ui', info: {
type: 'all',
level: 'success',
image: image,
link: link,
message: message,
sound: sound
} }]))
}
module.exports = {
sendNotification: sendNotification,
clearMarks: clearMarks,
addMark: addMark,
addShape: addShape,
clearShapes: clearShapes
}
<file_sep>/examples/chart_marks_basic.js
const uiLib = require('./../lib-ui.js')
function run (wss) {
let MID = 0
uiLib.clearMarks(wss)
const now = Date.now()
for (let i = 0; i < 5; i++) {
const mts = Math.round((now - (1000 * 10 * i * 60)) / 1000) * 1000
uiLib.addMark(
wss, {
id: 'mark_' + MID++,
ts: mts,
//symbol: 'tBTCUSD',
content: `This is a simple mark.<br>It's shown only for <b>BTCUSD</b> pair.<br>If you zoom in it enough I'll see "W" label. It contains some simple HTML and a <a href="https://example.com">link</a>`,
color_bg: `#${(i * 3) % 9}F${(i * 3) % 9}000`,
color_text: '#FFFFFF',
label: 'W',
size_min: 15
}
)
}
}
module.exports = {
run: run
}
<file_sep>/README.md
# bfx-stuff-ui
## CONFIGURE
1. Setup the configuration file.
```
cp config.json.example config.json
```
2. Add your Bitfinex `apiKey` and `apiSecret` (you can generate those from https://www.bitfinex.com/api).
3. If you want to run Twitter examples, configure your Twitter authentication keys `twitterKey`, `twitterSecret`, `twitterAccessToken`, `twitterAccessSecret`.
* If you want to run Telegram examples, configure your Telegram bot key `tgKey`.
## RUN EXAMPLES
#### BASIC EXAMPLES
* Shows a simple notification in your Bitfinex UI.
```
node bi.js notify_basic
```

* Adds 5 chart markers in your Bitfinex UI chart
Note: make sure to use the 5m (or 15m) timeframe and zoom towards current time
```
node bi.js chart_marks_basic
```

* Add custom shapes to Bitfinex UI chart
```
node bi.js chart_shapes_basic
```
#### TWITTER SENTIMENT ANALISYS AND CHART MARKERS
Subscribes to a list of Twitter influencers, performs basic sentiment analysis and adds the relative markers to the Bitfinex UI chart.
```
node bi.js chart_marks_twitter
```

#### TELEGRAM BOT AND CHART MARKERS
Registers a Telegram Bot command: `notify`
Once running you'll be able to interact with the bot sending `/notify fooooooo` message and you'll see `fooooooo` appearing as notification in Bitfinex UI.
```
node bi.js notify_telegram
```
<file_sep>/examples/notify_basic.js
const uiLib = require('./../lib-ui.js')
function run (wss) {
uiLib.sendNotification(
wss,
'https://www.bitfinex.com/assets/bfx-stacked.png',
'http://www.bitfinex.com',
'This is a test notification',
{
tone: 'pingUp'
}
)
}
module.exports = {
run: run
}
| 1c1e8cc77934d5b92ce2c18d0237f721db5c3da6 | [
"JavaScript",
"Markdown"
] | 7 | JavaScript | cnxtech/bfx-stuff-ui | 39b743ee7aac32daed63297897c605c2b4cc855f | ad35fb1994e0988dcaa31e847872e05d79607578 |
refs/heads/master | <repo_name>ingenious3/Udacity-LoadApp<file_sep>/app/src/main/java/com/udacity/MainActivity.kt
package com.udacity
import android.app.DownloadManager
import android.app.NotificationManager
import android.app.PendingIntent
import android.content.BroadcastReceiver
import android.content.Context
import android.content.Intent
import android.content.IntentFilter
import android.net.Uri
import android.os.Bundle
import android.app.NotificationChannel
import android.os.Build
import android.widget.Toast
import androidx.core.app.NotificationManagerCompat
import androidx.appcompat.app.AppCompatActivity
import androidx.core.app.NotificationCompat
import kotlinx.android.synthetic.main.activity_main.*
import kotlinx.android.synthetic.main.content_main.*
class MainActivity : AppCompatActivity() {
    // ID returned by DownloadManager.enqueue(); matched against completion broadcasts.
    private var downloadID: Long = 0
    // System download service handle (assigned in download()).
    private lateinit var downloadManager: DownloadManager
    // Pre-built query, filtered to the active download by ID in download().
    private val query = DownloadManager.Query()
    // System notification service (assigned in onCreate()).
    private lateinit var notificationManager: NotificationManager
    // NOTE(review): never assigned in the visible code — presumably set inside
    // createNotification(); confirm before relying on it.
    private lateinit var pendingIntent: PendingIntent
    // Display name of the repository chosen for download (null until selected).
    private var fileName: String? = null
    companion object {
        // Notification channel ID for all download notifications.
        private const val CHANNEL_ID = "channelId"
        // Display names and GitHub zip-archive URLs for the three downloadable repos.
        private const val UDACITY_PROJECT_NAME = "Udacity Project - Load App"
        private const val LOAD_APP_URL =
            "https://github.com/udacity/nd940-c3-advanced-android-programming-project-starter/archive/master.zip"
        private const val GLIDE_NAME = "Glide"
        private const val GLIDE_URL = "https://github.com/bumptech/glide/archive/master.zip"
        private const val RETROFIT_NAME = "Retrofit"
        private const val RETROFIT_URL = "https://github.com/square/retrofit/archive/master.zip"
        // Intent extras attached to the notification's Intent targeting DetailActivity.
        const val EXTRA_DOWNLOADED_FILE_NAME = "extra_downloaded_file_name"
        const val EXTRA_DOWNLOADED_FILE_STATUS = "extra_downloaded_file_status"
        const val EXTRA_NOTIFICATION_ID = "extra_notification_id"
        // Single notification slot: re-posting with this ID replaces the previous one.
        const val NOTIFICATION_ID = 1
        // PendingIntent request code.
        private const val REQUEST_CODE = 100
        // Default radius — presumably used by the custom loading-button view; confirm.
        const val DEFAULT_CIRCLE_RADIUS = 32f
    }
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
setSupportActionBar(toolbar)
registerReceiver(receiver, IntentFilter(DownloadManager.ACTION_DOWNLOAD_COMPLETE))
notificationManager = getSystemService(Context.NOTIFICATION_SERVICE) as NotificationManager
custom_button.setOnClickListener {
download()
}
}
private val receiver = object : BroadcastReceiver() {
override fun onReceive(context: Context?, intent: Intent?) {
val id = intent?.getLongExtra(DownloadManager.EXTRA_DOWNLOAD_ID, -1)
if(id == downloadID) {
custom_button.setDownloadButtonState(ButtonState.Completed)
createNotification()
}
}
}
private fun download() {
val url = getURL()
if (url != null) {
custom_button.setDownloadButtonState(ButtonState.Clicked)
val request =
DownloadManager.Request(Uri.parse(url))
.setTitle(getString(R.string.app_name))
.setDescription(getString(R.string.app_description))
.setRequiresCharging(false)
.setAllowedOverMetered(true)
.setAllowedOverRoaming(true)
downloadManager = getSystemService(DOWNLOAD_SERVICE) as DownloadManager
downloadID = downloadManager.enqueue(request)// enqueue puts the download request in the queue.
notificationManager.cancel(NOTIFICATION_ID)
custom_button.setDownloadButtonState(ButtonState.Loading)
query.setFilterById(downloadID)
}
}
private fun getURL(): String? {
var url:String? = null
fileName = null
when(download_radio_group.checkedRadioButtonId){
glideRadioButton.id->{
url = GLIDE_URL
fileName = GLIDE_NAME
}
loadAppRadioButton.id-> {
url = LOAD_APP_URL
fileName = UDACITY_PROJECT_NAME
}
retrofitRadioButton.id-> {
url = RETROFIT_URL
fileName = RETROFIT_NAME
}
else -> {
Toast.makeText(this, getString(R.string.select_file_prompt), Toast.LENGTH_SHORT).show()
}
}
return url
}
private fun createNotification(){
val openDetailsIntent = Intent(this, DetailActivity::class.java).apply {
flags = Intent.FLAG_ACTIVITY_CLEAR_TOP or Intent.FLAG_ACTIVITY_SINGLE_TOP
putExtra(EXTRA_DOWNLOADED_FILE_NAME, fileName)
putExtra(EXTRA_DOWNLOADED_FILE_STATUS, getString(R.string.file_downloaded))
putExtra(EXTRA_NOTIFICATION_ID, NOTIFICATION_ID)
}
pendingIntent = PendingIntent.getActivity(
this, REQUEST_CODE, openDetailsIntent,
PendingIntent.FLAG_UPDATE_CURRENT)
val builder = NotificationCompat.Builder(this, CHANNEL_ID)
.setSmallIcon(R.drawable.ic_cloud_download)
.setContentTitle(resources.getString(R.string.notification_title))
.setContentText(getString(R.string.file_downloaded))
.setPriority(NotificationCompat.PRIORITY_DEFAULT)
.addAction(R.drawable.ic_cloud_download, getString(R.string.notification_button), pendingIntent)
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
val name = getString(R.string.channel_name)
val descriptionText = getString(R.string.notification_description)
val importance = NotificationManager.IMPORTANCE_DEFAULT
val channel = NotificationChannel(CHANNEL_ID, name, importance).apply {
description = descriptionText
}
// Register notification channel
notificationManager.createNotificationChannel(channel)
}
with(NotificationManagerCompat.from(this)) {
notify(NOTIFICATION_ID, builder.build())
}
}
override fun onDestroy() {
super.onDestroy()
unregisterReceiver(receiver)
}
}
<file_sep>/app/src/main/java/com/udacity/DetailActivity.kt
package com.udacity
import android.os.Bundle
import androidx.appcompat.app.AppCompatActivity
import android.app.NotificationManager
import android.content.Context
import androidx.constraintlayout.motion.widget.MotionLayout
import kotlinx.android.synthetic.main.activity_detail.*
import kotlinx.android.synthetic.main.content_detail.*
/**
 * Shows the downloaded file's name and status (delivered via intent extras),
 * cancels the originating notification, and animates in/out with MotionLayout.
 */
class DetailActivity : AppCompatActivity() {

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_detail)
        setSupportActionBar(toolbar)
        supportActionBar?.setDisplayHomeAsUpEnabled(true)
        val intent = intent
        val fileName = intent.getStringExtra(MainActivity.EXTRA_DOWNLOADED_FILE_NAME)
        val downloadStatus = intent.getStringExtra(MainActivity.EXTRA_DOWNLOADED_FILE_STATUS)
        val notificationId = intent.getIntExtra(MainActivity.EXTRA_NOTIFICATION_ID, MainActivity.NOTIFICATION_ID)
        // Dismiss the notification that opened this screen.
        val notificationManager: NotificationManager = getSystemService(Context.NOTIFICATION_SERVICE) as NotificationManager
        notificationManager.cancel(notificationId)
        if(fileName != null){
            textview_filename.text = fileName
        }
        if(downloadStatus != null) {
            textview_status.text = downloadStatus
        }
        detailsLayout.transitionToStart()
        end_button.setOnClickListener {
            // Rewind the layout, then finish once the layout reports reaching the end state.
            // NOTE(review): a fresh listener is attached on every click — presumably
            // harmless, but consider registering it once in onCreate instead.
            detailsLayout.transitionToStart()
            detailsLayout.setTransitionListener(object: MotionLayout.TransitionListener {
                override fun onTransitionTrigger(
                    p0: MotionLayout?,
                    p1: Int,
                    p2: Boolean,
                    p3: Float
                ) {
                }
                override fun onTransitionStarted(p0: MotionLayout?, p1: Int, p2: Int) {}
                override fun onTransitionChange(p0: MotionLayout?, p1: Int, p2: Int, p3: Float) {}
                override fun onTransitionCompleted(p0: MotionLayout?, p1: Int) {
                    // Only close the screen when the end state (not the rewound start) is reached.
                    if (p1 == detailsLayout.endState)
                        finish()
                }
            })
        }
        // Play the entrance transition.
        detailsLayout.transitionToEnd()
    }
}
<file_sep>/app/src/main/java/com/udacity/LoadingButton.kt
package com.udacity
import android.animation.*
import android.content.Context
import android.util.AttributeSet
import android.view.View
import android.graphics.*
import androidx.core.content.ContextCompat
import com.udacity.MainActivity.Companion.DEFAULT_CIRCLE_RADIUS
import kotlinx.android.synthetic.main.content_main.view.*
import kotlin.properties.Delegates
/**
 * Custom download button: paints a sliding progress bar plus an arc "clock"
 * while a download is running, and swaps its caption per [ButtonState].
 */
class LoadingButton @JvmOverloads constructor(
    context: Context, attrs: AttributeSet? = null, defStyleAttr: Int = 0
) : View(context, attrs, defStyleAttr) {

    private var widthSize = 0
    private var heightSize = 0
    private val animator = ObjectAnimator()
    // Current caption drawn in onDraw().
    private var text: String = ""
    // Width (px) of the progress overlay; animated 0..widthSize.
    private var progress = 0
    // Sweep angle (degrees) of the progress arc; animated 0..360.
    private var radius = 0
    private var loading = false
    private val paint = Paint()          // button background
    private val loadingPaint = Paint()   // progress overlay
    private val paintText = Paint()
    private val paintCircle = Paint()    // arc indicator
    private var rectF: RectF
    private var buttonStringResId: Int

    init {
        val a = context.obtainStyledAttributes(
            attrs,
            R.styleable.LoadingButton,
            defStyleAttr,
            R.style.AppTheme
        )
        text = a.getString(R.styleable.LoadingButton_text) ?: ""
        paintText.color = a.getColor(R.styleable.LoadingButton_textColor, Color.WHITE)
        paintText.textSize = a.getDimension(R.styleable.LoadingButton_textSize, R.dimen.default_text_size.toFloat())
        paintText.textAlign = Paint.Align.CENTER
        loadingPaint.color = context.getColor(R.color.colorPrimaryDark)
        paint.color = a.getColor(R.styleable.LoadingButton_color, ContextCompat.getColor(context, R.color.colorPrimary ))
        paintCircle.color = Color.YELLOW
        buttonStringResId = R.string.download
        // NOTE(review): measuredWidth/measuredHeight are still 0 inside init,
        // so the arc rect is effectively positioned from the text width alone — verify.
        val text = context.getString(buttonStringResId)
        val centerX = (measuredWidth / 2f + loadingPaint.measureText(text))
        val centerY = measuredHeight / 2f+loadingPaint.measureText(text)
        rectF = RectF(
            centerX - 40f,
            centerY - 40f,
            centerX - 40f + DEFAULT_CIRCLE_RADIUS*2,
            centerY - 40f + DEFAULT_CIRCLE_RADIUS*2
        )
        a.recycle()
    }

    // Prepares an infinite animation driving both the bar width and the arc sweep.
    private fun initAnimation() {
        animator.setValues(
            PropertyValuesHolder.ofInt("BACKGROUND_COLOR", 0, widthSize),
            PropertyValuesHolder.ofInt("RADIUS", 0, 360)
        )
        animator.duration = 3000
        animator.repeatCount = ValueAnimator.INFINITE
        animator.addUpdateListener { animator ->
            if (animator != null) {
                buttonState = ButtonState.Loading
                progress = animator.getAnimatedValue("BACKGROUND_COLOR") as Int
                radius = animator.getAnimatedValue("RADIUS") as Int
            }
            invalidate()
        }
    }

    // State machine driving caption, clickability and the animation lifecycle.
    private var buttonState: ButtonState by Delegates.observable<ButtonState>(ButtonState.Completed) { p, old, new ->
        when (new) {
            ButtonState.Clicked -> {
                initAnimation()
                buttonStringResId = R.string.button_loading
                setDownloadButtonText()
                animator.start()
            }
            ButtonState.Loading -> {
                loading = true
                buttonStringResId = R.string.button_loading
                setDownloadButtonText()
                custom_button.isClickable = false
                invalidate()
            }
            ButtonState.Completed -> {
                animator.end()
                loading = false
                buttonStringResId = R.string.download
                setDownloadButtonText()
                custom_button.isClickable = true
                invalidate()
            }
        }
    }

    /** External entry point used by MainActivity to drive the state machine. */
    fun setDownloadButtonState(buttonState: ButtonState) {
        this.buttonState = buttonState
    }

    /** Refreshes the caption from the current string resource. */
    fun setDownloadButtonText() {
        text = resources.getString(buttonStringResId)
    }

    override fun onDraw(canvas: Canvas?) {
        super.onDraw(canvas)
        // Base button background.
        canvas!!.drawRect(
            0f,
            0f,
            widthSize.toFloat(),
            heightSize.toFloat(),
            paint
        )
        when {
            loading -> {
                // Progress overlay plus the arc indicator while a download runs.
                canvas.drawRect(0f, 0f, progress.toFloat(), heightSize.toFloat(), loadingPaint)
                canvas.drawArc(rectF, 360f, radius.toFloat(), true, paintCircle)
            }
        }
        canvas.drawText(text, widthSize / 2f, heightSize / 2f, paintText)
    }

    override fun onMeasure(widthMeasureSpec: Int, heightMeasureSpec: Int) {
        val minw: Int = paddingLeft + paddingRight + suggestedMinimumWidth
        val w: Int = resolveSizeAndState(minw, widthMeasureSpec, 1)
        val h: Int = resolveSizeAndState(
            MeasureSpec.getSize(w),
            heightMeasureSpec,
            0
        )
        widthSize = w
        heightSize = h
        setMeasuredDimension(w, h)
    }
}
"Kotlin"
] | 3 | Kotlin | ingenious3/Udacity-LoadApp | 8b7ed6c79340385611b9de629433b9284efb271c | c4b2cb245153d59b9ddedfd102e2105f27cd876b |
refs/heads/main | <repo_name>xuxin930/distributeLock<file_sep>/src/main/resources/scripts/redisReetrantUnLock.lua
-- Reentrant unlock script (runs atomically inside Redis).
-- KEYS[1]: lock key.  ARGV[1]: caller's lock id.
-- The stored value has the form "<lockId>:<holdCount>"; only the owner may
-- decrement the count, and the key is deleted once the count reaches zero.
local lockKey = KEYS[1]
local lockId = ARGV[1]
local lockVal=redis.call("get",lockKey);
if (lockVal==false) then -- the lock does not exist: nothing to unlock
    return
end
-- split the value on ":" (string split: start)
local sepStr=lockVal -- string to split
local sep=":" -- separator
local pos, arrRst = 0, {} -- arrRst receives the split parts
for st, sp in function() return string.find( sepStr, sep, pos, true ) end do
    table.insert(arrRst, string.sub(sepStr, pos, st-1 ))
    pos = sp + 1
end
table.insert(arrRst, string.sub( sepStr, pos))
-- (string split: end)
local oldLockId=arrRst[1]
local lockCount=tonumber(arrRst[2])
if (lockId==oldLockId) then -- only the owning business id may unlock
    lockCount=lockCount-1;
    if (lockCount<=0) then -- last hold released: delete the key; otherwise decrement the counter
        redis.call("del",lockKey)
    else
        -- rewrite the value with the decremented count, re-applying the remaining TTL
        local expireTime=redis.call("ttl",lockKey);
        redis.call("set",lockKey,lockId..":"..lockCount)
        redis.call("expire",lockKey,expireTime);
    end
end
<file_sep>/src/main/resources/scripts/redisUnLock.lua
-- Simple (non-reentrant) unlock: delete KEYS[1] only when its value equals
-- ARGV[1], i.e. the caller actually owns the lock. The whole script executes
-- atomically inside Redis, so the get/del pair cannot race with other clients.
local lockKey = KEYS[1]
local lockId = ARGV[1]
local retLockId=redis.call("get",lockKey);
if (retLockId==lockId) then
    redis.call("del",lockKey)
end
<file_sep>/src/main/resources/scripts/redisReetrantLock.lua
-- Reentrant lock acquisition (runs atomically inside Redis).
-- KEYS[1]: lock key.  ARGV[1]: lock id.  ARGV[2]: max hold count (<=0 means unlimited).
-- ARGV[3]: TTL in seconds.  Returns 1 when acquired/re-entered, 0 on failure.
-- The stored value has the form "<lockId>:<holdCount>".
local lockKey = KEYS[1]
local lockId = ARGV[1]
local maxLockCount = tonumber(ARGV[2])
local lockTime = tonumber(ARGV[3])
local lockVal = redis.call("get",lockKey);
if (lockVal==false) then -- lock does not exist yet: take it with hold count 1
    redis.call("set",lockKey,lockId..":1")
    redis.call("expire",lockKey,lockTime)
    return 1
end
-- split the value on ":" (string split: start)
local sepStr=lockVal -- string to split
local sep=":" -- separator
local pos, arrRst = 0, {} -- arrRst receives the split parts
for st, sp in function() return string.find( sepStr, sep, pos, true ) end do
    table.insert(arrRst, string.sub(sepStr, pos, st-1 ))
    pos = sp + 1
end
table.insert(arrRst, string.sub( sepStr, pos))
-- (string split: end)
-- The lock exists: if it belongs to the same business id, bump the hold count
-- (re-entry); a different owner means acquisition fails.
local oldLockId=arrRst[1]
local lockCount=tonumber(arrRst[2])
if (lockId==oldLockId) then
    if (maxLockCount>0 and lockCount>=maxLockCount) then
        return 0
    end
    lockCount=lockCount+1
    redis.call("set",lockKey,lockId..":"..lockCount)
    redis.call("expire",lockKey,lockTime)
    return 1
end
-- different lockId: acquisition fails
return 0
<file_sep>/README.md
# distributeLock
Distributed locks implemented with Redis Lua scripts (simple and reentrant lock/unlock variants).
<file_sep>/src/main/resources/scripts/redisLock.lua
-- Simple (non-reentrant) lock acquisition.
-- KEYS[1]: lock key.  ARGV[1]: unique lock id.  ARGV[2]: TTL in seconds.
-- Returns 1 when the lock was acquired, 0 when it is already held.
local lockKey = KEYS[1]
local lockId = ARGV[1]
local lockTime = tonumber(ARGV[2])
-- SET ... NX EX writes the value and the TTL in a single command, so the key
-- can never be left without an expiry (the old SETNX+EXPIRE pair was two steps).
-- redis.call returns false when NX prevents the write, a truthy reply otherwise.
local ok = redis.call("set", lockKey, lockId, "NX", "EX", lockTime)
if ok then
    return 1
else
    return 0
end
| c10c10db6e6e75e33c041cdbe414c07bc0e3b14b | [
"Markdown",
"Lua"
] | 5 | Lua | xuxin930/distributeLock | e873fe80ceab7d8c1eb2fe0187e4c1e1999ed94f | c3b8b7795b711f97452262bbe75913bec76c4884 |
refs/heads/main | <file_sep>import React from 'react';
import Carousel from 'react-multi-carousel';
import './Works.css';
import "react-multi-carousel/lib/styles.css";
// Portfolio section: auto-playing multi-card carousel of work samples.
const Works = () => {
    // Cards shown per viewport width (react-multi-carousel breakpoint config).
    const responsive = {
        superLargeDesktop: {
            breakpoint: { max: 4000, min: 3000 },
            items: 5,
        },
        desktop: {
            breakpoint: { max: 3000, min: 1024 },
            items: 3
        },
        tablet: {
            breakpoint: { max: 1024, min: 464 },
            items: 2
        },
        mobile: {
            breakpoint: { max: 464, min: 0 },
            items: 1
        }
    };
    return (
        <section className="works-bg p-5" id="portfolio">
            <h2 className="text-center text-white font-weight-bold mb-4">Here are some of <span className="text-success">our works</span></h2>
            <Carousel
                responsive={responsive}
                autoPlay
                autoPlaySpeed={2000}
                infinite
                arrows
                showDots
            >
                <div>
                    <img className="sliderImg" src="https://i.ibb.co/HzpTQ6N/carousel-1.png" alt="" />
                </div>
                <div>
                    <img className="sliderImg" src="https://i.ibb.co/3W7HP74/carousel-2.png" alt="" />
                </div>
                <div>
                    <img className="sliderImg" src="https://i.ibb.co/kgSHyd9/carousel-3.png" alt="" />
                </div>
                <div>
                    <img className="sliderImg" src="https://i.ibb.co/sgWbQDb/carousel-4.png" alt="" />
                </div>
                <div>
                    <img className="sliderImg" src="https://i.ibb.co/0c7snMb/carousel-5.png" alt="" />
                </div>
            </Carousel>
        </section >
    );
};
export default Works;<file_sep>import React from 'react';
import './Main.css';
// Hero banner: headline, tagline and call-to-action next to the hero image.
const Main = () => {
    return (
        <main className="row mt-2 p-5 d-flex align-items-center ml-3 mb-5">
            <div className="col-md-5 col-sm col">
                <h1 className="display-4 font-weight-bold">Let’s Grow Your <br /> Brand To The <br /> Next Level</h1>
                <p className="mt-3">Lorem ipsum dolor sit amet, consectetur adipiscing elit. Purus commodo ipsum duis laoreet maecenas. Feugiat </p>
                <button className="btn btn-dark px-4 mt-2">Hire us</button>
            </div>
            <div className="col-md-7 col-sm-7 col-7">
                <img className="main-img img-fluid" src="https://i.ibb.co/g3pKGwX/Frame.png" alt="" />
            </div>
        </main>
    );
};
export default Main;<file_sep>import React from 'react';
import Navbar from '../Navbar/Navbar';
import Main from '../Main/Main';
import './Header.css';
// Page header: navigation bar plus the hero banner.
const Header = () => {
    return (
        <header className="bg-color">
            <Navbar />
            <Main />
        </header>
    );
};
export default Header;<file_sep>import React from 'react';
import { useForm } from 'react-hook-form';
import './Contact.css';
// Contact/footer section with a react-hook-form-validated contact form.
const Contact = () => {
    const { register, handleSubmit, errors } = useForm();
    // NOTE(review): submitted data is currently discarded — presumably this
    // should POST to the backend; confirm before release.
    const onSubmit = data => {
        // console.log(data);
    };
    return (
        <div id="contact" className="footer-bg p-5">
            <footer className="row d-flex align-items-center ml-5">
                <div className="col-md-5">
                    <h3 className="font-weight-bold">Let us handle your project, <br /> professionally.</h3>
                    <p>With well written codes, we build amazing apps for all platforms, mobile and web apps in general.</p>
                </div>
                <div className="col-md-7 mb-5">
                    <form onSubmit={handleSubmit(onSubmit)}>
                        <input className="form-control w-75" type="email" id="email" placeholder="Your email address*" name="email" ref={register({ required: true })} />
                        {errors.email && <span className="text-danger">This field is required</span>}
                        <br />
                        <input className="form-control w-75" type="name" id="name" placeholder="Your name / Company’s name*" name="name" ref={register({ required: true })} />
                        {errors.name && <span className="text-danger">This field is required</span>}
                        <br />
                        <textarea rows="5" className="form-control w-75" type="text" id="message" placeholder="Your message..." name="message" ref={register({ required: true })} />
                        {errors.message && <span className="text-danger">This field is required</span>}
                        <br />
                        <input className="btn btn-dark px-5" type="submit" value="Send" />
                    </form>
                </div>
            </footer>
            <p className="text-center text-secondary mt-5">Copyright Creative Agency, {(new Date().getFullYear())}</p>
        </div>
    );
};
export default Contact;<file_sep>import React, { useEffect } from 'react';
import { useState } from 'react';
import './Companies.css';
// Logo strip of partner companies, loaded from the backend.
const Companies = () => {
    const [companies, setCompanies] = useState([]);
    // Empty dependency array: fetch exactly once on mount.
    useEffect(() => {
        const URL = 'https://agency-creative.herokuapp.com/companies';
        fetch(URL)
            .then(res => res.json())
            .then(data => setCompanies(data))
    }, [])
    return (
        <section className="text-center partner-section mt-5 mb-5">
            {
                companies.map(company => <img className="company-logo img-fluid" src={company.img} key={company._id} alt="" />)
            }
        </section>
    );
};
export default Companies;<file_sep># creative-agency-client
# assignment11-clint
<file_sep>import React, { useContext } from 'react';
import { FcGoogle } from 'react-icons/fc';
import * as firebase from "firebase/app";
import "firebase/auth";
import firebaseConfig from './firebaseConfig';
import { useHistory, useLocation } from 'react-router-dom';
import { UserContext } from '../../App';
// Google sign-in screen. On success it stores the user in UserContext,
// caches a Firebase ID token in sessionStorage, and redirects back to the
// route the visitor originally requested (or "/").
const LogIn = () => {
    // Initialize Firebase only once per app lifetime.
    if (!firebase.apps.length) {
        firebase.initializeApp(firebaseConfig);
    }
    const googleProvider = new firebase.auth.GoogleAuthProvider();
    const [loggedInUser, setLoggedInUser] = useContext(UserContext);
    let history = useHistory();
    let location = useLocation();
    // Route the PrivateRoute redirected from, if any.
    let { from } = location.state || { from: { pathname: "/" } };
    const handleGoogleSignIn = () => {
        firebase.auth().signInWithPopup(googleProvider)
            .then((result) => {
                const { email, displayName, photoURL } = result.user;
                const signedInUser = { email, name: displayName, img: photoURL };
                setLoggedInUser(signedInUser);
                LogInInfo();
                history.replace(from);
            })
            .catch((error) => {
                // NOTE(review): sign-in failures are silently swallowed here —
                // consider surfacing an error message to the user.
                // console.log(error.code);
            });
    }
    // Persist a fresh ID token for authenticated backend calls.
    const LogInInfo = () => {
        firebase.auth().currentUser.getIdToken(true)
            .then((idToken) => {
                sessionStorage.setItem('token', idToken);
            })
            .catch((error) => {
                // Handle error
            });
    }
    return (
        <div className="text-center mt-5 border d-block p-5 rounded w-50 mx-auto border col-sm-12 col-12">
            <h4 className="mb-4">Login With</h4>
            <button className="btn rounded-pill border-secondary" onClick={handleGoogleSignIn}><FcGoogle /> Continue with Google</button>
        </div>
    );
};
export default LogIn;<file_sep>import React from 'react';
import { NavLink } from 'react-router-dom';
import { AiOutlinePlus, AiOutlineUserAdd } from 'react-icons/ai';
import { RiShoppingBasketLine } from 'react-icons/ri';
// Admin navigation: links to the admin-only dashboard pages.
const AdminSidebar = () => {
    return (
        <div>
            <div>
                <NavLink activeClassName="bg-light" className="dropdown-item" to="/all-service"><RiShoppingBasketLine /> Service List</NavLink>
                <NavLink activeClassName="bg-light" className="dropdown-item" to="/add-service"><AiOutlinePlus /> Add Service</NavLink>
                <NavLink activeClassName="bg-light" className="dropdown-item" to="/add-admin"><AiOutlineUserAdd /> Make Admin</NavLink>
            </div>
        </div>
    );
};
export default AdminSidebar;<file_sep>import React from 'react';
import { useForm } from 'react-hook-form';
import { useHistory } from 'react-router-dom';
import AdminSidebar from '../AdminSidebar/AdminSidebar';
// Admin form for creating a new service; POSTs the form data as JSON to the
// backend and navigates back to the home page on success.
const AddService = () => {
    const { register, handleSubmit, errors } = useForm();
    let history = useHistory();
    // NOTE(review): the selected icon file is not part of the JSON payload —
    // verify whether the backend expects it.
    const onSubmit = data => {
        const URL = 'https://agency-creative.herokuapp.com/services';
        fetch(URL, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify(data)
        })
            .then(res => res.json())
            .then(data => {
                alert('Service added successfully. Check home page now.');
                history.push('/');
            })
    };
    return (
        <div className="row p-5">
            <section className="col-md-3">
                <AdminSidebar />
            </section>
            <section className="col-md-9">
                <form onSubmit={handleSubmit(onSubmit)}>
                    <label className="font-weight-bold" htmlFor="name">Service Title</label>
                    <input className="form-control w-50" type="name" id="name" placeholder="Enter Title" name="name" ref={register({ required: true })} />
                    {errors.name && <span className="text-danger">This field is required</span>}
                    <br />
                    <label className="font-weight-bold" htmlFor="description">Description</label>
                    <textarea rows="4" className="form-control w-50" type="text" id="description" placeholder="Enter Description..." name="description" ref={register({ required: true })} />
                    {errors.description && <span className="text-danger">This field is required</span>}
                    <br />
                    <label className="font-weight-bold" htmlFor="icon">Icon (Optional)</label>
                    <input className="form-control-file" type="file" id="icon" placeholder="Select Icon" name="icon" ref={register({ required: false })} />
                    {errors.icon && <span className="text-danger">This field is required</span>}
                    <br />
                    <input className="btn btn-success px-5" type="submit" />
                </form>
            </section>
        </div>
    );
};
export default AddService;<file_sep>import React, { createContext, useState } from 'react';
import {
BrowserRouter as Router,
Switch,
Route,
} from "react-router-dom";
import AddService from './components/Admin/AddService/AddService';
import AllService from './components/Admin/AllServiceList/AllService';
import MakeAdmin from './components/Admin/MakeAdmin/MakeAdmin';
import Contact from './components/Contact/Contact';
import CustomerReview from './components/Customer/CustomerReview/CustomerReview';
import Order from './components/Customer/Order/Order';
import ServiceList from './components/Customer/ServiceList/ServiceList';
import Home from './components/Home/Home/Home';
import Navbar from './components/Home/Navbar/Navbar';
import LogIn from './components/LogIn/LogIn';
import PrivateRoute from './components/PrivateRoute/PrivateRoute';
import AllServiceList from './components/Admin/AllServiceList/AllService';
export const UserContext = createContext();
// Root component: provides the signed-in user via UserContext and wires the
// public, auth-protected (PrivateRoute) and catch-all routes.
function App() {
    // Shared auth state; consumed by LogIn, PrivateRoute and the dashboards.
    const [loggedInUser, setLoggedInUser] = useState({
        email: '',
        name: '',
        img: ''
    });
    return (
        <UserContext.Provider value={[loggedInUser, setLoggedInUser]}>
            <Router>
                <Switch>
                    <Route exact path="/">
                        <Home />
                    </Route>
                    <PrivateRoute path="/order/:name">
                        <Navbar />
                        <Order />
                    </PrivateRoute>
                    <PrivateRoute path="/order">
                        <Navbar />
                        <Order />
                    </PrivateRoute>
                    <PrivateRoute path="/dashboard">
                        <Navbar />
                        <ServiceList />
                    </PrivateRoute>
                    <PrivateRoute path="/admin-panel">
                        <Navbar />
                        <AllServiceList />
                    </PrivateRoute>
                    <PrivateRoute path="/review">
                        <Navbar />
                        <CustomerReview />
                    </PrivateRoute>
                    <PrivateRoute path="/service">
                        <Navbar />
                        <ServiceList />
                    </PrivateRoute>
                    <PrivateRoute path="/all-service">
                        <Navbar />
                        <AllService />
                    </PrivateRoute>
                    <PrivateRoute path="/add-service">
                        <Navbar />
                        <AddService />
                    </PrivateRoute>
                    <PrivateRoute path="/add-admin">
                        <Navbar />
                        <MakeAdmin />
                    </PrivateRoute>
                    <Route path="/login">
                        <Navbar />
                        <LogIn />
                    </Route>
                    <Route path="/contact">
                        <Navbar />
                        <Contact />
                    </Route>
                    {/* Catch-all 404 page. */}
                    <Route path="*">
                        <Navbar />
                        <img className="mx-auto d-block img-fluid mt-3" src="https://image.freepik.com/free-vector/404-liquid-error_114341-59.jpg" alt="" />
                    </Route>
                </Switch>
            </Router>
        </UserContext.Provider>
    );
}
export default App;
<file_sep>import React from 'react';
import { NavLink } from 'react-router-dom';
import { FiShoppingCart } from 'react-icons/fi';
import { HiOutlineChatAlt } from 'react-icons/hi';
import { GrList } from 'react-icons/gr';
// Customer dashboard navigation links.
const Sidebar = () => {
    return (
        <div>
            <NavLink activeClassName="bg-light" className="dropdown-item" to="/order"><FiShoppingCart /> Order</NavLink>
            <NavLink activeClassName="bg-light" className="dropdown-item" to="/service"><GrList /> Service list</NavLink>
            <NavLink activeClassName="bg-light" className="dropdown-item" to="/review"><HiOutlineChatAlt /> Review</NavLink>
        </div>
    );
};
export default Sidebar;<file_sep>import React, { useContext, useEffect, useState } from 'react';
import { Link } from 'react-router-dom';
import { UserContext } from '../../../App';
import Sidebar from '../Sidebar/Sidebar';
import SingleService from '../SingleService/SingleService';
const ServiceList = () => {
const [loggedInUser, setLoggedInUser] = useContext(UserContext);
const [serviceList, setServiceList] = useState([]);
useEffect(() => {
const URL = 'https://agency-creative.herokuapp.com/target-orders?email=' + loggedInUser.email;
fetch(URL)
.then(res => res.json())
.then(data => setServiceList(data))
}, [serviceList]);
return (
<div className="row p-5">
<section className="col-md-3">
<Sidebar />
</section>
<section className="col-md-9">
{
serviceList.length === 0 && <p className="mt-5 display-4 text-center">No order found. Please <Link className="text-decoration-none" to="/">place a new order</Link> with your email address.</p>
}
{
serviceList.map(service => <SingleService service={service} key={service._id} />)
}
</section>
</div>
);
};
export default ServiceList;<file_sep>import React, { useEffect, useState } from 'react';
import Review from '../Review/Review';
const Feedback = () => {
const [feedback, setFeedback] = useState([]);
useEffect(() => {
const URL = 'https://agency-creative.herokuapp.com/feedback';
fetch(URL)
.then(res => res.json())
.then(data => setFeedback(data))
}, [feedback])
return (
<section className="mt-5 mb-5">
<h2 className="text-center font-weight-bold">Clients <span className="text-success">Feedback</span></h2>
<section className="d-flex justify-content-center">
{
feedback.map(review => <Review review={review} key={review._id} />)
}
</section>
</section>
);
};
export default Feedback;<file_sep>import React, { useEffect, useState } from 'react';
import AdminSidebar from '../AdminSidebar/AdminSidebar';
import Service from '../Service/Service';
const AllService = () => {
const [totalOrders, setTotalOrders] = useState([]);
useEffect(() => {
const URL = 'https://agency-creative.herokuapp.com/orders'
fetch(URL)
.then(res => res.json())
.then(data => setTotalOrders(data))
}, [totalOrders])
return (
<div className="row p-5">
<section className="col-md-3">
<AdminSidebar />
</section>
<section className="col-md-9">
<div className="row bg-light font-weight-bold">
<div className="col-md-3">
<p>Name</p>
</div>
<div className="col-md-3">
<p>Email</p>
</div>
<div className="col-md-3">
<p>Service</p>
</div>
<div className="col-md-3">
<p>Project Details</p>
</div>
</div>
{
totalOrders.map(order => <Service key={order._id} order={order} />)
}
</section>
</div>
);
};
export default AllService;<file_sep>import React from 'react';
const Service = ({ order: { name, email, service, details } }) => {
return (
<>
<div className="row">
<div className="col-md-2">
{name}
</div>
<div className="col-md-4">
{email}
</div>
<div className="col-md-3">
{service}
</div>
<div className="col-md-3">
{details}
</div>
</div>
<hr />
</>
);
};
export default Service;<file_sep>import React from 'react';
import './Review.css';
// Single client-feedback card: avatar, name, position and the review text.
const Review = ({ review: { img, name, position, description } }) => {
    return (
        <section className="row col-md-3 col-sm-6 col card m-3">
            <div className="card-body">
                <div className="d-flex">
                    <div>
                        <img className="reviewer-logo rounded-circle" src={img} alt="" />
                    </div>
                    <div className="ml-3">
                        <h5 className="card-title">{name}</h5>
                        <h6>{position}</h6>
                    </div>
                </div>
                <div>
                    <p className="card-text text-secondary mt-3">{description}</p>
                </div>
            </div>
        </section>
    );
};
export default Review; | 1ccb96eaeaec50636a5a84ed4139e7137030eef8 | [
"JavaScript",
"Markdown"
] | 16 | JavaScript | mdrakibhossainbd/assignment11-clint | 97544c1fa5316a1b5858bc3872a76176f99da008 | 7c2f0ffaa554d0da5e6d7fb37395cfc017859fe4 |
refs/heads/master | <repo_name>chrisseto/yopy<file_sep>/yopy.py
# -*- coding: utf-8 -*-
"""yopy: Zero characters communication for humans.
"""
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
from urllib.parse import quote as urlquote
else:
from urllib import quote as urlquote
import requests
# Endpoint URLs for the Yo service.
# FIX: YO_ALL_URL previously ended with a stray right double-quote character
# (U+201D, '...yoall/”'), which produced a broken request URL.
YO_ALL_URL = 'http://api.justyo.co/yoall/'
YO_SEND_URL = 'http://yofor.me'
class YoError(Exception):
    """Client-side misuse of the :class:`Yo` wrapper (e.g. missing token or sender)."""
    pass
class ErrorResponse(Exception):
    """Raised when the Yo service answers with an HTTP error status.

    :param message: Human-readable error from the API (falls back to a
        generic message when the API gave none).
    :param status_code: HTTP status code of the failed response, if any.
    """

    def __init__(self, message=None, status_code=None):
        # Store the resolved text explicitly: BaseException.message was removed
        # in Python 3, but main() reads err.message.
        self.message = message or 'An error occurred with the Yo API.'
        self.status_code = status_code
        Exception.__init__(self, self.message)
class Yo(object):
    """Thin HTTP client for the Yo service.

    :param user: Default sender username used by :meth:`yo` when no explicit
        ``from_`` is given.
    :param token: API token; only required for :meth:`yo_all`.
    """

    def __init__(self, user=None, token=None):
        self.user = user
        self.token = token
        # One shared session so keep-alive connections are reused across calls.
        self._session = requests.Session()

    def yo_all(self):
        """Send a yo to all subscribers. Requires API key.

        :raises YoError: If no API token was configured.
        :raises ErrorResponse: If the service answers with HTTP >= 400.
        :returns: The ``requests`` response object.
        """
        if not self.token:
            raise YoError('yo_all requires an api token.')
        resp = self._session.post(YO_ALL_URL, data={'api_token': self.token})
        if resp.status_code >= 400:
            raise ErrorResponse(resp.json().get('message'), resp.status_code)
        return resp

    def yo(self, to, from_=None):
        """Send a yo. Does not require an API key.

        :param str to: Username of recipient.
        :param str from_: Username of sender; falls back to ``self.user``.
        :raises YoError: If neither ``from_`` nor ``self.user`` is set.
        :raises ErrorResponse: If the service answers with HTTP >= 400.
        :returns: The ``requests`` response object.
        """
        fr = from_ or self.user
        if not fr:
            raise YoError('Sender not specified.')
        # Sender is URL-quoted (it may contain spaces etc.); both names are trimmed.
        url = '{base}/{to}/{fr}'.format(
            base=YO_SEND_URL, to=to.strip(), fr=urlquote(fr.strip())
        )
        resp = self._session.post(url)
        if resp.status_code >= 400:
            raise ErrorResponse(resp.json().get('message'), resp.status_code)
        return resp
def main():
    """CLI entry point: ``yopy <to> <from>`` sends a single Yo.

    Exits 0 on success, 1 on usage error or API failure.
    """
    if len(sys.argv) != 3:
        print('Usage: yopy <to> <from>', file=sys.stderr)
        sys.exit(1)
    to, from_ = sys.argv[1], sys.argv[2]
    try:
        print('...')
        Yo().yo(to, from_)
    except ErrorResponse as err:
        # FIX: format(err) instead of err.message — BaseException.message no
        # longer exists on Python 3, so the old attribute access raised
        # AttributeError instead of printing the API error.
        print('ERROR: {}'.format(err), file=sys.stderr)
        sys.exit(1)
    print("Yo'd.")
    sys.exit(0)
if __name__ == '__main__':
main()
| fca50baf81bb6a1e5958000c2f0d94a08d060a99 | [
"Python"
] | 1 | Python | chrisseto/yopy | 99cd36e80f2b93ec9d149f7bc9d48f8e3d7329ae | 9504d6dfb6d6e3cebb1c2b994ee852729c1cf54e |
refs/heads/master | <file_sep>//Created and Developed by <NAME> (C) 2018 - Filmstorm
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
//How to use
//Create an empty game object, call it "CameraBase" and then add this component - "Camera Follow" to it.
//Create another empty game object and parent/place it on the hips/pelvis bone of your character/player
//Then parent the MainCamera to this object and apply the "CameraCollision" to the camera.
[AddComponentMenu("Filmstorm/Camera Follow")]
public class CameraFollow : MonoBehaviour
{
[Header("Drag the Object you want to follow here")]
[Space(5)]
[Tooltip("The best way to use this is to create an empty gameobject and parent it to your players hip/pelvis bone.")]
public GameObject CameraFollowObj;
[Header("Adjust these values to how you want the camera to rotate")]
[Space(5)]
public float CameraMoveSpeed = 120.0f;
public float clampAngle = 80.0f;
public float inputSensitivity = 150.0f;
Vector3 FollowPOS;
GameObject CameraObj;
GameObject PlayerObj;
float camDistanceXToPlayer;
float camDistanceYToPlayer;
float camDistanceZToPlayer;
float mouseX;
float mouseY;
float finalInputX;
float finalInputZ;
float smoothX;
float smoothY;
float rotY = 0.0f;
float rotX = 0.0f;
float inputX;
float inputZ;
GameObject player;
float side = 0;
void Start()
{
player = GameObject.FindGameObjectWithTag("Player");
Vector3 rot = transform.localRotation.eulerAngles;
rotY = rot.y;
rotX = rot.x;
Cursor.lockState = CursorLockMode.Locked;
Cursor.visible = false;
#if UNITY_ANDROID
CameraMoveSpeed = 20;
clampAngle = 20;
inputSensitivity = 0;
#endif
}
void Update()
{
#if UNITY_ANDROID
mobileFollow();
#endif
#if UNITY_STANDALONE_WIN
windowsFollow();
#endif
}
void mobileFollow()
{
Touch touch;
if (Input.touchCount > 0)
{
touch = Input.GetTouch(0);
if (touch.position.x >= Screen.width * 5 / 6)
{
side = 1;
}
else if (touch.position.x <= Screen.width / 6)
{
side = -1;
}
// acceleretion to rotation
if (inputSensitivity < 500)
{
inputSensitivity += 10;
}
}
else if (Input.touchCount == 0)
{
if (inputSensitivity > 0)
{
inputSensitivity -= 50;
}
else
{
inputSensitivity = 0;
side = 0;
}
}
rotY += side * inputSensitivity * Time.deltaTime;
rotX += 0f * inputSensitivity * Time.deltaTime;
rotX = Mathf.Clamp(rotX, -clampAngle, clampAngle);
Quaternion localRotation = Quaternion.Euler(rotX, rotY, 0.0f);
transform.rotation = localRotation;
}
void windowsFollow()
{
// We setup the rotation of the sticks here
inputX = Input.GetAxis("RightStickHorizontal");
inputZ = Input.GetAxis("RightStickVertical");
mouseX = Input.GetAxis("Mouse X");
mouseY = Input.GetAxis("Mouse Y");
finalInputX = inputX + mouseX;
finalInputZ = inputZ + mouseY;
rotY += finalInputX * inputSensitivity * Time.deltaTime;
rotX += finalInputZ * inputSensitivity * Time.deltaTime;
rotX = Mathf.Clamp(rotX, -clampAngle, clampAngle);
Quaternion localRotation = Quaternion.Euler(rotX, rotY, 0.0f);
transform.rotation = localRotation;
}
void LateUpdate()
{
CameraUpdater();
}
void CameraUpdater()
{
// set the target object to follow
Transform target = CameraFollowObj.transform;
//move towards the game object that is the target
float step = CameraMoveSpeed * Time.deltaTime;
transform.position = Vector3.MoveTowards(transform.position, target.position, step);
}
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class RedPil : Pill {
public int damage = 10;
protected override void OnPicked(Collider other) {
base.OnPicked(other);
HealtManager healtManager = other.GetComponent<HealtManager>();
if(!healtManager) { return; }
healtManager.Damage(damage);
Destroy(gameObject, 2);
}
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class Pill : MonoBehaviour {
[SerializeField] AudioSource audioSource;
[SerializeField] AudioClip pickSound;
public bool goUp;
public float speed = 0.25f;
public float rotationSpeed = 100f;
void Start() {
StartCoroutine(SwitchDirection());
}
void Update() {
if (goUp) {
transform.position = transform.position + new Vector3(0, speed * Time.deltaTime, 0);
}
else {
transform.position = transform.position + new Vector3(0, -speed * Time.deltaTime, 0);
}
transform.Rotate(Vector3.up, rotationSpeed * Time.deltaTime, Space.World);
}
IEnumerator SwitchDirection() {
while (gameObject.activeSelf) {
yield return new WaitForSeconds(0.5f);
goUp = !goUp;
}
}
void OnTriggerEnter(Collider other) {
if(other.CompareTag("Player")) {
OnPicked(other);
}
}
protected virtual void OnPicked(Collider other) {
// to pick sound, but we have already set from inspector the audio
// audioSource.clip = pickSound;
audioSource.Play();
// to hide the pill when player pass over him
HidePill();
GetComponent<Collider>().enabled = false;
Debug.Log("Hai preso " + gameObject.name);
}
void HidePill() {
GetComponent<Renderer>().enabled = false;
}
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.UI;
public class ButtonsInGame : MonoBehaviour
{
public GameObject[] players;
public Text text;
public GameObject target;
public void ChangePlayer()
{
CameraFollow targetToFollow = target.GetComponent<CameraFollow>();
for (int i = 0; i < players.Length; i++)
{
if (players[i].activeInHierarchy)
{
players[i].SetActive(false);
if (i < (players.Length - 1))
{
foreach (Transform child in players[i + 1].transform)
{
if (child.tag == "target")
{
targetToFollow.CameraFollowObj = child.gameObject;
}
}
players[i + 1].SetActive(true);
return;
}
else
{
foreach (Transform child in players[0].transform)
{
if (child.tag == "target")
{
targetToFollow.CameraFollowObj = child.gameObject;
}
}
players[0].SetActive(true);
return;
}
}
}
}
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.UI;
using UnityStandardAssets.Characters.FirstPerson;
public class HealtManager : MonoBehaviour {
public int maxHealth;
public int health;
public Text lbHealth;
public Slider sliderHealth;
public GameObject pnlDeath;
public Button btnRespawn;
public GameObject menuPause;
Vector3 firstTransform;
Quaternion orentation;
// create an attribute with other attribute information
float currentHealthPercentuage {
get {
return (float)(health) / (float)(maxHealth);
}
}
void Start() {
pnlDeath.SetActive(false);
health = maxHealth - 20;
lbHealth.text = "Salute: " + health;
sliderHealth.value = currentHealthPercentuage;
UpdateColorSlidebar();
btnRespawn.onClick.AddListener(Respawn);
// if we want we could remove the listener before in the follow way
// btnRespawn.onClick.AddListener(Respawn);
MenuPause info = menuPause.GetComponent<MenuPause>();
info.IsAlive = true;
firstTransform = this.transform.position;
orentation = this.transform.rotation;
}
public void Heal(int amount) {
Damage(-amount);
}
public void Damage(int damageTaken) {
health -= damageTaken;
if (health < 1) {
Die();
}
if (health > maxHealth) {
health = maxHealth;
}
lbHealth.text = "Salute: " + health;
sliderHealth.value = currentHealthPercentuage;
// change fill color bar
UpdateColorSlidebar();
}
public void UpdateColorSlidebar() {
if (currentHealthPercentuage >= 0.75f) {
sliderHealth.fillRect.GetComponent<Image>().color = Color.green;
}
else if (currentHealthPercentuage >= 0.25) {
sliderHealth.fillRect.GetComponent<Image>().color = Color.yellow;
}
else {
sliderHealth.fillRect.GetComponent<Image>().color = Color.red;
}
}
public void Die() {
// to block movement after dead, you need add follow code as new name space up to this code
// using UnityStandardAssets.Characters.FirstPerson;
MenuPause info = menuPause.GetComponent<MenuPause>();
info.IsAlive = false;
Cursor.lockState = CursorLockMode.None;
Cursor.visible = true;
pnlDeath.SetActive(true);
Time.timeScale = 0f;
}
public void Respawn() {
MenuPause info = menuPause.GetComponent<MenuPause>();
info.IsAlive = true;
Cursor.lockState = CursorLockMode.Locked;
Cursor.visible = false;
pnlDeath.SetActive(false);
Time.timeScale = 1f;
health = maxHealth;
lbHealth.text = "Salute: " + health;
sliderHealth.value = currentHealthPercentuage;
UpdateColorSlidebar();
this.transform.position = firstTransform;
this.transform.rotation = orentation;
}
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.AI;
public class Enemy : MonoBehaviour {
[SerializeField] NavMeshAgent agent;
Transform targetToFollow;
void OnValidate() {
if(!agent) {
agent = GetComponent<NavMeshAgent>();
}
}
void Start () {
targetToFollow = GameObject.FindWithTag("Player").transform;
}
void Update () {
agent.SetDestination(targetToFollow.position);
}
void OnTriggerEnter(Collider other) {
if(other.CompareTag("Player")) {
other.gameObject.GetComponent<HealtManager>().Damage(999);
}
}
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
namespace UnityChan
{
[RequireComponent(typeof(Animator))]
[RequireComponent(typeof(CapsuleCollider))]
[RequireComponent(typeof(Rigidbody))]
public class movement : MonoBehaviour
{
private CapsuleCollider col;
private Rigidbody rb;
private Animator anim;
private AnimatorStateInfo currentBaseState;
private Vector3 velocity;
private float orgColHight;
private Vector3 orgVectColCenter;
static int idleState = Animator.StringToHash("Base Layer.Idle");
static int walk = Animator.StringToHash("Base Layer.Walk");
// Use this for initialization
void Start()
{
anim = GetComponent<Animator>();
col = GetComponent<CapsuleCollider>();
rb = GetComponent<Rigidbody>();
orgColHight = col.height;
orgVectColCenter = col.center;
}
private void Update()
{
}
void FixedUpdate()
{
if (Input.GetKeyDown(KeyCode.Keypad0))
{
int n = (int)Random.Range(0, 2);
anim.Play("DM" + n, -1, 0.0f);
}
float h = Input.GetAxis("Horizontal");
float v = Input.GetAxis("Vertical");
bool run = Input.GetKey(KeyCode.LeftShift) || Input.GetKey(KeyCode.RightShift);
anim.SetFloat("Speed", v);
anim.SetFloat("H", h);
anim.SetFloat("V", v);
anim.SetBool("Run", run);
velocity = new Vector3(0, 0, v);
velocity = transform.TransformDirection(velocity);
float moveX = h * 20f * Time.deltaTime;
float moveZ = 0;
if (run)
{
moveZ = v * 350f * Time.deltaTime;
}
else
{
moveZ = v * 100f * Time.deltaTime;
}
Vector3 vtemp = new Vector3(moveX, 0f, moveZ);
vtemp = transform.TransformVector(vtemp);
rb.velocity = vtemp;
transform.Rotate(0, h * 2, 0);
resetCollider();
}
void resetCollider()
{
col.height = orgColHight;
col.center = orgVectColCenter;
Debug.Log("ok");
}
}
}<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class MovementInput : MonoBehaviour
{
public float InputX;
public float InputZ;
public bool SHIFTClicked;
public Vector3 desiredMoveDirection;
public bool blockRotationPlayer;
public float desireRotationSpeed;
public Animator anim;
public float Speed;
public float allowPlayerRotation;
public Camera cam;
public CharacterController controller;
public bool isGrounded;
private float verticalVel;
private Vector3 moveVector;
float acc;
private bool tapOnTheScreen = false;
private RaycastHit hit;
LayerMask layerMask = ~(1 << 10);
// Use this for initialization
void Start()
{
anim = this.GetComponent<Animator>();
cam = Camera.main;
controller = this.GetComponent<CharacterController>();
desireRotationSpeed = 0.1f;
allowPlayerRotation = 0.3f;
}
// Update is called once per frame
void Update()
{
// MOBILE
// Input.touchCount count how much fingers hit the screen (max 5)
if (Input.touchCount == 1)
{
Touch touch = Input.GetTouch(0);
if (touch.tapCount == 2)
{
SHIFTClicked = true;
return;
}
if (touch.phase == TouchPhase.Ended &&
touch.position.x < Screen.width * 5/6 && touch.position.x > Screen.width/6)
{
var ray = Camera.main.ScreenPointToRay(touch.position);
if (Physics.Raycast(ray, out hit, 200, layerMask))
{
if (Vector3.Distance(transform.position, hit.point) > 1f)
{
tapOnTheScreen = true;
Speed = 1;
} else
{
tapOnTheScreen = false;
}
}
}
}
float distOnTheTap = Vector3.Distance(transform.position, hit.point);
if (tapOnTheScreen)
{
if (acc < 1 && distOnTheTap>=2) // increase acceleration more and more
{
acc += 0.005f;
}
if (distOnTheTap <= 0) // if you are exactly on the point tap or over
{
Speed = 0;
acc = 0;
SHIFTClicked = false;
tapOnTheScreen = false;
return;
}
else if (distOnTheTap < 2 && SHIFTClicked) // you are running and near the tap
{
Speed -= 0.1f;
if (Speed > 0.5f)
{
transform.position = Vector3.MoveTowards(transform.position, hit.point, Speed * Time.deltaTime);
}
else
{
Speed = 0;
acc = 0;
SHIFTClicked = false;
tapOnTheScreen = false;
distOnTheTap = 0;
return;
}
}
else if (distOnTheTap < 1) // you are walking and near the tap
{
Speed -= 0.1f;
if (Speed > 0.5f)
{
transform.position = Vector3.MoveTowards(transform.position, hit.point, Speed * Time.deltaTime);
}
else
{
Speed = 0;
acc = 0;
SHIFTClicked = false;
tapOnTheScreen = false;
distOnTheTap = 0;
return;
}
}
Vector3 heading = hit.point - transform.position;
float distance = heading.magnitude;
Vector3 desiredMoveDirection = heading / distance; // This is now the normalized direction.
// Play animation
if (SHIFTClicked)
{
anim.SetFloat("InputMagnitude", Speed + 1, 0.5f, Time.deltaTime);
transform.Translate(new Vector3(0, 0, Time.deltaTime * acc * 4)); // sprint
}
else
{
anim.SetFloat("InputMagnitude", Speed, 0.0f, Time.deltaTime);
transform.Translate(new Vector3(0, 0, Time.deltaTime * acc * 2)); // sprint
}
// Rotation movement
if (blockRotationPlayer == false)
{
transform.rotation = Quaternion.Slerp(transform.rotation, Quaternion.LookRotation(desiredMoveDirection), desireRotationSpeed);
}
characterGrounded();
return;
}
// END MOBILE>
InputMagnitude();
characterGrounded();
}
void PlayerMoveAndRotation()
{
InputX = Input.GetAxis("Horizontal");
InputZ = Input.GetAxis("Vertical");
var camera = Camera.main;
var forward = cam.transform.forward;
var right = cam.transform.right;
forward.y = 0f;
right.y = 0f;
forward.Normalize();
right.Normalize();
desiredMoveDirection = forward * InputZ + right * InputX;
if (blockRotationPlayer == false)
{
transform.rotation = Quaternion.Slerp(transform.rotation, Quaternion.LookRotation(desiredMoveDirection), desireRotationSpeed);
}
float inp = anim.GetFloat("InputMagnitude");
float acceleration = inp - 1; // [0, 1] it enables to accelerate, so character doesn't move faster immediately
if (inp > 0 && SHIFTClicked)
{
// move forward at speed "Time.deltaTime * acceleration * 10"
transform.Translate(new Vector3(0, 0, Time.deltaTime * acceleration * 5));
}
else if (inp > 0)
{
// // move forward at speed "Time.deltaTime * acceleration * 2"
transform.Translate(new Vector3(0, 0, Time.deltaTime * acceleration * 2));
}
}
void InputMagnitude()
{
// Calculate Input Vectors
InputX = Input.GetAxis("Horizontal");
InputZ = Input.GetAxis("Vertical");
// GetKey if hold
// GetKeyDown if clicked
SHIFTClicked = Input.GetKey(KeyCode.LeftShift) || Input.GetKey(KeyCode.RightShift);
// Set animator parameters
anim.SetFloat("InputZ", InputZ, 0.0f, Time.deltaTime * 2f);
anim.SetFloat("InputX", InputX, 0.0f, Time.deltaTime * 2f);
anim.SetBool("SHIFT", SHIFTClicked);
// Calculate the Input Magnitude, is the sum of the positive values of InputX and InputZ
Speed = new Vector2(InputX, InputZ).sqrMagnitude;
// if InputX + InputZ is equal to 2 we convert speed to 1 because we don't running if user does diagonal movements
if (Speed > 1)
{
Speed = 1;
}
// Physically move player
if (Speed > allowPlayerRotation)
{
if (SHIFTClicked)
{
anim.SetFloat("InputMagnitude", Speed + 1, 0.5f, Time.deltaTime);
}
else
{
anim.SetFloat("InputMagnitude", Speed, 0.0f, Time.deltaTime);
}
PlayerMoveAndRotation();
}
else if (Speed < allowPlayerRotation)
{
anim.SetFloat("InputMagnitude", Speed, 0.0f, Time.deltaTime);
}
}
void characterGrounded()
{
// If you don't need the character grounded then get rid this part
isGrounded = controller.isGrounded;
if (isGrounded)
{
verticalVel -= 0;
}
else
{
verticalVel -= 0.001f;
}
moveVector = new Vector3(0, verticalVel, 0);
controller.Move(moveVector);
}
}
| d8f0d83e64f275866caa5f51a782c111a6ca7a57 | [
"C#"
] | 8 | C# | okamiRvS/Project12 | 4a9346f59e8c0326868886dc64b2940a3a913bd6 | ede9aaae9f3e0b08520b37f9687619b4805edb6b |
refs/heads/master | <file_sep># Main Menu is Defined in a method
def menu
puts "--- Cheat Sheet ---"
@menu_hash.each { |user_choice, text| puts "#{user_choice}: #{text}" }
user_input
end
# Command Line Menu is Defined in a method
def command_line_menu
puts "--- Command Line Menu ---"
@command_line_menu_hash.each { |key, value| puts "#{key}: #{value}" }
user_input_command_line_menu
end
# User Input from the Command Line is obtained here, and then sent on via (passby) to command_line_input_logic
def user_input_command_line_menu
command_line_input = gets.strip.to_i
command_line_input_logic(command_line_input)
end
# Command Line Input Logic is run here and gets the information from the Command Line Menu via (passby)
# The user will make their selection and then get taken to that manuel page, or be get a prompt saying they're dumb and didn't choose an acceptable input.
def command_line_input_logic(passby)
case passby
when 1
puts `man mv`
when 2
puts `man cp`
when 3
puts `man mkdir`
when 4
puts `man ls`
when 5
puts `man rm`
when 6
menu
else
puts "Please make a selection between 1 - 6!"
end
command_line_menu
end
# This is the Search Method defined
def search
user_search = gets.strip.downcase
search_input(user_search)
end
# This is the logic for my search function :)
def search_input(passthrough)
puts `man #{passthrough}`
menu
end
# User Input is where I get the intial feedback from the Users on what they want to do from the Main Menu
def user_input
user_selection = gets.strip.to_i
input_logic(user_selection)
end
# Input logic is taking the input the user gave us from the Main Menu, and will transfer them to their selection; (either: command_line_menu; search; exit)
def input_logic(value_from_user_input_method)
case value_from_user_input_method
when 1
command_line_menu
when 2
search
when 3
puts "GOODBYE"
exit(0)
else
puts "Please make a selection between 1-3. Typing anything else will only show you this message again."
menu
end
end
def run_program
menu
end
@menu_hash = {
"1" => "Command Line",
"2" => "Search",
"3" => "Exit"
}
@command_line_menu_hash = {
"1" => "mv",
"2" => "cp",
"3" => "mkdir",
"4" => "ls",
"5" => "rm",
"6" => "menu"
}
run_program<file_sep># Main Menu is Defined in a method
def menu
puts "--- Cheat Sheet ---"
puts "1: Command Line"
puts "2: Search"
puts "3: Exit"
user_input
end
# Command Line Menu is Defined in a method
def command_line_menu
puts "--- Command Line Menu ---"
puts "1: mv"
puts "2: cp"
puts "3: mkdir"
puts "4: ls"
puts "5: rm"
puts "6: Main Menu"
user_input_command_line_menu
end
# User Input from the Command Line is obtained here, and then sent on via (passby) to command_line_input_logic
def user_input_command_line_menu
command_line_input = gets.strip.to_i
command_line_input_logic(command_line_input)
end
# Command Line Input Logic is run here and gets the information from the Command Line Menu via (passby)
# The user will make their selection and then get taken to that manuel page, or be get a prompt saying they're dumb and didn't choose an acceptable input.
def command_line_input_logic(passby)
case passby
when 1
puts `man mv`
when 2
puts `man cp`
when 3
puts `man mkdir`
when 4
puts `man ls`
when 5
puts `man rm`
when 6
menu
else
puts "Please make a selection between 1 - 6!"
end
command_line_menu
end
# This is the Search Method defined
def search
user_search = gets.strip.downcase
search_input(user_search)
end
# This is the logic for my search function :)
def search_input(passthrough)
case passthrough
when "mv"
puts `man mv`
when "cp"
puts `man cp`
when "mkdir"
puts `man mkdir`
when "ls"
puts `man ls`
when "rm"
puts `man rm`
else
puts "Not a valid search term. Common search terms are: 'mv', 'cp', 'mkdir', 'ls', 'rm'."
search
end
menu
end
# User Input is where I get the intial feedback from the Users on what they want to do from the Main Menu
def user_input
user_selection = gets.strip.to_i
input_logic(user_selection)
end
# Input logic is taking the input the user gave us from the Main Menu, and will transfer them to their selection; (either: command_line_menu; search; exit)
def input_logic(value_from_user_input_method)
case value_from_user_input_method
when 1
command_line_menu
when 2
search
when 3
puts "GOODBYE"
exit(0)
else
puts "Please make a selection between 1-3. Typing anything else will only show you this message again."
menu
end
end
def run_program
menu
end
run_program
| 5bd2676da706331bef9cda61e8b2b25ca91f349a | [
"Ruby"
] | 2 | Ruby | justinewalt/cheat-sheet | 7b5152f8e5b5bb7c29c694af441096353aafc0c2 | 7d2b78a0e1da99f9782b039935b3b90747ed0a35 |
refs/heads/master | <file_sep># Geany filedefs with nice light theme, filetype extensions and snippets for Ruby On Rails 3.2
Contains:
* Snippets for Ruby, Ruby on Rails, Python, HTML, PHP, Java, JavaScript, CSS, Latex and more.
* Keybindings.
* Light theme.
## Installation
Merge `geany` directory with `~/.config/geany`.
<file_sep># For complete documentation of this file, please see Geany's main documentation
[styling]
# foreground;background;bold;italic
default=0x222222;0xffffff;false;false
commentline=0x6c785d;0xffffff;false;true
number=0xeb1c1c;0xffffff;false;false
string=0x007000;0xffffff;false;false
character=0x007000;0xffffff;false;false
word=0x002a8c;0xffffff;false;false
global=0x966030;0xffffff;false;false
symbol=0x5700a8;0xffffff;false;false
classname=0x0077ff;0xffffff;false;false
defname=0xc90000;0xffffff;false;false
operator=0x824917;0xffffff;false;false
identifier=0x000000;0xffffff;false;false
modulename=0xa600e3;0xffffff;false;false
backticks=0x000000;0xffffff;false;false
instancevar=0x008f9c;0xffffff;false;false
classvar=0x008f9c;0xffffff;false;false
datasection=0x600000;0xfff0d8;false;false
heredelim=0x000000;0xffffff;false;false
worddemoted=0x00336e;0xffffff;false;false
stdin=0x000000;0xffffff;false;false
stdout=0x000000;0xffffff;false;false
stderr=0x000000;0xffffff;false;false
datasection=0x600000;0xfff0d8;false;false
regex=0x4f7d00;0xffffff;false;false
here_q=0x4f7d00;0xffffff;false;false
here_qq=0x4f7d00;0xffffff;false;false
here_qx=0x4f7d00;0xffffff;false;true
string_q=0x4f7d00;0xffffff;false;false
string_qq=0x4f7d00;0xffffff;false;false
string_qx=0x4f7d00;0xffffff;false;false
string_qr=0x4f7d00;0xffffff;false;false
string_qw=0x4f7d00;0xffffff;false;false
upper_bound=0x000000;0xffffff;false;false
error=0xbd31a1;0xffffff;false;false
pod=0x035650;0xffffff;false;false
[keywords]
# all items must be in one line
primary=__FILE__ load define_method attr_accessor attr_writer attr_reader and def end in or self unless __LINE__ begin defined? ensure module redo super until BEGIN break do false next rescue then when END case else for nil include require retry true while alias class elsif if not return undef yield
[settings]
# default extension used when saving files
#extension=rb
# the following characters are these which a "word" can contains, see documentation
#wordchars=_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
# if only single comment char is supported like # in this file, leave comment_close blank
comment_open=#
comment_close=
# set to false if a comment character/string should start at column 0 of a line, true uses any
# indentation of the line, e.g. setting to true causes the following on pressing CTRL+d
#command_example();
# setting to false would generate this
# command_example();
# This setting works only for single line comments
comment_use_indent=true
# context action command (please see Geany's main documentation for details)
context_action_cmd=
[build_settings]
# %f will be replaced by the complete filename
# %e will be replaced by the filename without extension
# (use only one of it at one time)
compiler=ruby -c "%f"
run_cmd=ruby "%f"
<file_sep># For complete documentation of this file, please see Geany's main documentation
[styling]
# foreground;background;bold;italic
default=0x4d4d4c;0xffffff;false;false
comment=0x63734f;0xffffff;false;false
commentline=0x63734f;0xffffff;false;false
commentdoc=0x63734f;0xffffff;false;false
number=0xbf8e63;0xffffff;true;false
word=0x0e6d99;0xffffff;true;false
word2=0x0e6d99;0xffffff;true;false
string=0xbd2424;0xffffff;false;false
character=0xbd2424;0xffffff;false;false
uuid=0x404080;0xffffff;false;false
preprocessor=0x007F7F;0xffffff;false;false
operator=0x000000;0xffffff;false;false
identifier=0x444444;0xffffff;false;false
stringeol=0x000000;0xe0c0e0;false;false
verbatim=0x101030;0xffffff;false;false
regex=0x0062c4;0xffffff;false;false
commentlinedoc=0x63734f;0xffffff;true;false
commentdockeyword=0x63734f;0xffffff;true;true
commentdockeyworderror=0x63734f;0xffffff;false;false
globalclass=0x111199;0xffffff;true;false
[keywords]
# all items must be in one line
primary=abs abstract acos anchor asin atan atan2 big bold boolean break byte case catch ceil char charAt charCodeAt class concat const continue cos Date debugger default delete do double else enum escape eval exp export extends false final finally fixed float floor fontcolor fontsize for fromCharCode function goto if implements import in indexOf Infinity instanceof int interface isFinite isNaN italics join lastIndexOf length link log long Math max MAX_VALUE min MIN_VALUE NaN native NEGATIVE_INFINITY new null Number package parseFloat parseInt pop POSITIVE_INFINITY pow private protected public push random return reverse round shift short sin slice small sort splice split sqrt static strike string String sub substr substring sup super switch synchronized tan this throw throws toLowerCase toString toUpperCase transient true try typeof undefined unescape unshift valueOf var void volatile while with
[settings]
# default extension used when saving files
#extension=js
# the following characters are these which a "word" can contains, see documentation
#wordchars=_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
# if only single comment char is supported like # in this file, leave comment_close blank
comment_open=//
comment_close=
# set to false if a comment character/string should start at column 0 of a line, true uses any
# indentation of the line, e.g. setting to true causes the following on pressing CTRL+d
#command_example();
# setting to false would generate this
# command_example();
# This setting works only for single line comments
comment_use_indent=true
# context action command (please see Geany's main documentation for details)
context_action_cmd=
[build_settings]
# %f will be replaced by the complete filename
# %e will be replaced by the filename without extension
# (use only one of it at one time)
compiler=
run=
| 370a4fe3e4e0b025d5393faf845ba54e244d774a | [
"Markdown",
"JavaScript",
"Ruby"
] | 3 | Markdown | fern4lvarez/geany_config | abcc8aa179ebbd2eeb8a8656945b90c54b5b148c | 8a54da108e3ce394a365f6d2ca8e92117ae9e3c6 |
refs/heads/master | <repo_name>mei23/Msky<file_sep>/Msky/Entities/User.cs
using System;
using Newtonsoft.Json;
namespace Msky.Entities
{
[JsonObject]
public class User : BaseObject
{
/// <summary>
/// ID
/// </summary>
public string Id { get { return StringOrNull("id"); } }
public string Description { get { return StringOrNull("description"); } }
public double FollowersCount { get { return Value<double>("followersCount"); } }
public double FollowingCount { get { return Value<double>("followingCount"); } }
public double NotesCount { get { return Value<double>("notesCount"); } }
/// <summary>
/// Display Name
/// </summary>
public string Name { get { return StringOrNull("name"); } }
/// <summary>
/// Username without host
/// </summary>
public string Username { get { return StringOrNull("username"); } }
public string Host { get { return StringOrNull("host"); } }
public DateTimeOffset CreatedAt { get { return Value<DateTimeOffset>("createdAt", DateTimeOffset.MinValue); } }
public DateTimeOffset LastUsedAt { get { return Value<DateTimeOffset>("lastUsedAt", DateTimeOffset.MinValue); } }
public string AvatarUrl { get { return StringOrNull("avatarUrl"); } }
public string BannerUrl { get { return StringOrNull("bannerUrl"); } }
public override string ToString()
{
return string.Format("User[{0}]: {1}({2})",
Id,
string.IsNullOrEmpty(Host) ? "@" + Username : "@" + Username + "@" + Host,
Name ?? ""
);
}
}
}
<file_sep>/Msky/Api/AuthSessionApi.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Msky.Entities;
using Newtonsoft.Json;
namespace Msky.Api
{
public class AuthSessionApi : ApiBase
{
internal AuthSessionApi(Credential credential) : base(credential) { }
/// <summary>
/// Generate session
/// </summary>
/// <param name="appSecret">app's secret key</param>
/// <returns>session information</returns>
public async Task<AuthSession> GenerateAsync(string appSecret)
{
var q = new Dictionary<string, object>() {
{ "appSecret", appSecret },
};
return await RequestObjectAsync<AuthSession>("/api/auth/session/generate", q);
}
public async Task<BaseObject> ShowAsync(string token)
{
var q = new Dictionary<string, object>() {
{ "token", token },
};
return await RequestObjectAsync<BaseObject>("/api/auth/session/show", q);
}
/// <summary>
/// Get user's access token
/// </summary>
/// <param name="appSecret">app's secret key</param>
/// <param name="token">session token</param>
/// <returns></returns>
public async Task<UserKey> UserkeyAsync(string appSecret, string token)
{
var q = new Dictionary<string, object>() {
{ "appSecret", appSecret },
{ "token", token },
};
return await RequestObjectAsync<UserKey>("/api/auth/session/userkey", q);
}
}
}
<file_sep>/Msky/Api/NotesReactions.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Msky.Entities;
using Newtonsoft.Json;
namespace Msky.Api
{
public class NotesReactions : ApiBase
{
internal NotesReactions(Credential credential) : base(credential) { }
/// <summary>
/// List reactions of note
/// </summary>
/// <param name="noteId">target note ID</param>
/// <param name="limit"></param>
/// <param name="offset"></param>
/// <param name="sort"></param>
/// <returns></returns>
public async Task<BaseObject[]> ListAsync(string noteId, int limit = 10, int offset = 0, SortType sort = SortType.Desc)
{
var q = new Dictionary<string, object>() {
{ "noteId", noteId },
{ "limit", limit },
{ "offset", offset },
{ "sort", sort.ToString().ToLower() },
};
return await RequestArrayAsync<BaseObject>("/api/notes/reactions", q);
}
/// <summary>
/// Create reaction
/// </summary>
/// <param name="noteId">target note ID</param>
/// <param name="reaction">reaction type</param>
public async Task CreateAsync(string noteId, ReactionType reaction)
{
var q = new Dictionary<string, object>() {
{ "noteId", noteId },
{ "reaction", reaction.ToString().ToLower() },
};
await RequestAsync("/api/notes/reactions/create", q);
}
/// <summary>
/// Delete reaction
/// </summary>
/// <param name="noteId">target note ID</param>
public async Task DeleteAsync(string noteId)
{
var q = new Dictionary<string, object>() {
{ "noteId", noteId },
};
await RequestAsync("/api/notes/reactions/delete", q);
}
public enum SortType {Desc, Asc }
/// <summary>
/// リアクション
/// </summary>
public enum ReactionType
{
/// <summary>いいね</summary>
Like,
/// <summary>しゅき</summary>
Love,
/// <summary>笑</summary>
Laugh,
/// <summary>ふぅ~む</summary>
Hmm,
/// <summary>わお</summary>
Surprise,
/// <summary>おめでとう</summary>
Congrats,
/// <summary>おこ</summary>
Angry,
/// <summary>こまこまのこまり</summary>
Confused,
/// <summary>Pudding</summary>
Pudding
}
}
}
<file_sep>/Msky/Entities/Poll.cs
using System;
using System.Collections.Generic;
using Newtonsoft.Json;
namespace Msky.Entities
{
public class Poll : BaseObject
{
[JsonProperty("choices")]
public IEnumerable<Choice> Choices { get; set; }
public class Choice : BaseObject
{
public double Id { get { return Value<double>("id", -1); } }
public string Text { get { return StringOrNull("text"); } }
public double Votes { get { return Value<double>("votes", -1); } }
}
}
}
<file_sep>/README.md
# Msky
Misskey library for .NET Standard 2.0
Still under development.
## Usage
### Authentication / Authorization
Method 1: No authentication — use the API anonymously.
``` Csharp
var misskey = new Misskey("https://misskey.example.com");
```
Method 2: Authenticate with a predefined API key or a previously authorized API key.
``` Csharp
var misskey = new Misskey("https://misskey.example.com", "API Key");
```
Method 3: Authenticate and authorize via the browser authorization flow
``` Csharp
var misskey = new Misskey("https://misskey.example.com");
AuthSession session = await misskey.Auth.Session.GenerateAsync("App's secret key");
Console.WriteLine("Open URL: {0}", session.Url);
try { System.Diagnostics.Process.Start(session.Url); } // Fail on .NET Core
catch { Console.WriteLine("Failed to open url. Please manually open it."); }
Console.WriteLine("Please authorize in your browser, then push Enter.");
Console.ReadLine();
UserKey userkey = await misskey.Auth.Session.UserkeyAsync(appSecret, session.Token);
string apiKey = misskey.UpdateApiKey(userkey.AccessToken, appSecret);
// This apiKey can later be reused with method 2.
```
### API example
Get server metadata
``` Csharp
Meta meta = await misskey.MetaAsync();
Console.WriteLine(string.Format("Version: {0}", meta.Version));
Console.WriteLine(string.Format("ClientVersion: {0}", meta.ClientVersion));
```
Posting
``` Csharp
await misskey.Notes.CreateAsync("Note text");
```
<file_sep>/Msky/Credential.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Net.Http;
using System.Net;
namespace Msky
{
public class Credential
{
public string BaseUrl { get; set; }
public string ApiKey { get; set; }
public HttpClient Client { get; set; }
public Credential(string baseUrl, string apiKey = null)
{
BaseUrl = baseUrl;
ApiKey = apiKey;
Client = new HttpClient(new HttpClientHandler
{
AutomaticDecompression = DecompressionMethods.GZip | DecompressionMethods.Deflate,
UseCookies = false
});
}
}
}
<file_sep>/Msky/Misskey.cs
using System;
using System.Net.Http;
using System.Threading.Tasks;
using Newtonsoft.Json;
using System.Collections.Generic;
using System.Text;
using System.Linq;
using Msky.Entities;
using Newtonsoft.Json.Linq;
using System.Net;
using Msky.Api;
namespace Msky
{
public class Misskey : ApiBase
{
public Misskey(string baseUrl, string apiKey = null)
: this(new Credential(baseUrl, apiKey))
{
}
public Misskey(Credential credential) : base(credential)
{
Credential = credential;
}
#region meta
/// <summary>
/// Get instance inframation
/// </summary>
public async Task<Meta> MetaAsync()
{
return await RequestObjectAsync<Meta>("/api/meta");
}
#endregion
#region stats
/// <summary>
/// Get instance statistics
/// </summary>
public async Task<Stats> StatsAsync()
{
return await RequestObjectAsync<Stats>("/api/stats");
}
#endregion
#region username/available
/// <summary>
/// Get username available
/// </summary>
/// <param name="username">username</param>
public async Task<UsernameAvailable> UsernameAvailableAsync(string username)
{
var q = new Dictionary<string, object>() {
{ "username", username },
};
return await RequestObjectAsync<UsernameAvailable>("/api/username/available", q);
}
#endregion
#region app
#endregion
public AuthApi Auth => new AuthApi(Credential);
#region aggregation
#endregion
#region sw
#endregion
#region i
/// <summary>
/// Show myself (require auth)
/// </summary>
/// <param name="username"></param>
/// <returns></returns>
public async Task<User> IAsync(string username)
{
return await RequestObjectAsync<User>("/api/i");
}
public I I => new I(Credential);
#endregion
#region othello
#endregion
/// <summary>
/// Mutes
/// </summary>
public Mutes Mute => new Mutes(Credential);
#region notifications
#endregion
#region drive
#endregion
/// <summary>
/// ユーザー
/// </summary>
public Users Users => new Users(Credential);
/// <summary>
/// Notes (post, renote, poll)
/// </summary>
public Notes Notes => new Notes(Credential);
#region following
#endregion
#region messaging
#endregion
#region channels
#endregion
}
}<file_sep>/Msky/Api/AuthApi.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Msky.Entities;
using Newtonsoft.Json;
namespace Msky.Api
{
    /// <summary>
    /// Facade for the auth/* endpoints.
    /// </summary>
    public class AuthApi : ApiBase
    {
        internal AuthApi(Credential credential) : base(credential) { }
        /// <summary>auth/session/* endpoints.</summary>
        public AuthSessionApi Session => new AuthSessionApi(Credential);
    }
}
<file_sep>/Msky/Api/NotesFavorites.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Msky.Entities;
using Newtonsoft.Json;
namespace Msky.Api
{
public class NotesFavorites : ApiBase
{
internal NotesFavorites(Credential credential) : base(credential) { }
public async Task CreateAsync(string noteId)
{
var q = new Dictionary<string, object>() {
{ "noteId", noteId },
};
await RequestAsync("/api/notes/favorites/create", q);
}
public async Task DeleteAsync(string noteId)
{
var q = new Dictionary<string, object>() {
{ "noteId", noteId },
};
await RequestAsync("/api/notes/favorites/delete", q);
}
}
}
<file_sep>/Msky/Entities/BaseObject.cs
using System;
using System.Collections.Generic;
using System.Text;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
using System.Linq;
namespace Msky.Entities
{
[JsonObject]
public class BaseObject : IEntity
{
public BaseObject()
{
_data = new Dictionary<string, JToken>();
}
[JsonExtensionData]
private IDictionary<string, JToken> _data;
public Dictionary<string, JToken> GetDictionary()
{
return new Dictionary<string, JToken>(_data);
}
[JsonIgnore]
public JToken this[string key]
{
get { return GetJToken(key); }
}
public JToken GetJToken(string key)
{
return _data.ContainsKey(key) ? _data[key] : null;
}
public T Value<T>(string key)
{
JToken t = GetJToken(key);
if (t == null) throw new Exception();
return t.Value<T>();
}
public T Value<T>(string key, T defaultValue)
{
JToken t = GetJToken(key);
if (t == null) return defaultValue;
return t.Value<T>();
}
public string StringOrNull(string key)
{
return Value<string>(key, null);
}
public string __Gen1()
{
var sb = new StringBuilder();
foreach(var k in _data.Keys)
{
if (_data[k].Type == JTokenType.Object || _data[k].Type == JTokenType.Array)
{
sb.Append("//");
}
sb.Append("public string ").Append(k).Append(" { get { return StringOrNull(\"").Append(k).Append("\"); } }").AppendLine();
}
return sb.ToString();
}
}
}
<file_sep>/Msky/Api/ApiResponse.cs
using System;
using System.Collections.Generic;
using System.Text;
using Newtonsoft.Json;
namespace Msky.Api
{
public class ApiResponse
{
/// <summary>
/// Raw JSON data
/// </summary>
public string RawJson { get; internal set; }
public dynamic RawData
{
get
{
return JsonConvert.DeserializeObject<dynamic>(RawJson);
}
}
/*
public JArray GetItems()
{
JArray items = JArray.Parse(Json);
return items;
}
*/
}
}
<file_sep>/Msky/Api/ApiBase.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net.Http;
using System.Security.Cryptography;
using System.Text;
using System.Threading.Tasks;
using Newtonsoft.Json;
using Msky.Entities;
namespace Msky.Api
{
public abstract class ApiBase
{
internal Credential Credential { get; set; }
internal ApiBase(Credential credential)
{
Credential = credential;
}
public string UpdateApiKey(string userAccessToken, string appSecret)
{
using (var hash = SHA256.Create())
{
var apiKey = String.Concat(hash
.ComputeHash(Encoding.UTF8.GetBytes(userAccessToken + appSecret))
.Select(item => item.ToString("x2")));
UpdateApiKey(apiKey);
return apiKey;
}
}
public string UpdateApiKey(string apiKey)
{
Credential.ApiKey = apiKey;
return apiKey;
}
/// <summary>
/// Request and get specify type respose
/// </summary>
/// <typeparam name="T">specify type</typeparam>
/// <param name="endpoint">endpoint</param>
/// <returns>specify type</returns>
public async Task<T> RequestObjectAsync<T>(string endpoint)
where T : BaseObject
{
return await RequestObjectAsync<T>(endpoint, new Dictionary<string, object>());
}
/// <summary>
/// Request and get specify type respose
/// </summary>
/// <typeparam name="T">specify type</typeparam>
/// <param name="endpoint">endpoint</param>
/// <param name="queries">queries</param>
/// <returns>specify type</returns>
public async Task<T> RequestObjectAsync<T>(string endpoint, Dictionary<string, object> queries)
where T : BaseObject
{
var res = await RequestAsync(endpoint, queries);
var obj = JsonConvert.DeserializeObject<T>(res.RawJson);
return obj;
}
public async Task<T[]> RequestArrayLSUAsync<T>(string endpoint, Dictionary<string, object> q,
int limit, string sinceId, string untilId) where T : BaseObject
{
q.Add("limit", limit);
// can specify since or until ID
if (!string.IsNullOrEmpty(sinceId))
q.Add("sinceId", sinceId);
else if (!string.IsNullOrEmpty(untilId))
q.Add("untilId", untilId);
return await RequestArrayAsync<T>(endpoint, q);
}
/// <summary>
/// Request and get array of specify type respose
/// </summary>
/// <typeparam name="T">specify type</typeparam>
/// <param name="endpoint">endpoint</param>
/// <param name="queries">queries</param>
/// <returns>array of specify type</returns>
public async Task<T[]> RequestArrayAsync<T>(string endpoint, Dictionary<string, object> queries)
where T : BaseObject
{
var res = await RequestAsync(endpoint, queries);
return JsonConvert.DeserializeObject<T[]>(res.RawJson);
}
/// <summary>
/// Request and get response
/// </summary>
/// <param name="endpoint">endpoint</param>
/// <returns>respose</returns>
public async Task<ApiResponse> RequestAsync(string endpoint)
{
return await RequestAsync(endpoint, new Dictionary<string, object>());
}
/// <summary>
/// Request and get response
/// </summary>
/// <param name="endpoint">endpoint</param>
/// <param name="queries">queries</param>
/// <returns>respose</returns>
public async Task<ApiResponse> RequestAsync(string endpoint, Dictionary<string, object> queries)
{
queries.Add("i", Credential.ApiKey);
// filter null value
var filtered = queries.Keys.Where(x => queries[x] != null).ToDictionary(x => x, x => queries[x]);
var content = new StringContent(JsonConvert.SerializeObject(filtered).ToString(), Encoding.UTF8, "application/json");
if (endpoint[0] != '/') endpoint = '/' + endpoint;
// send request and get response
HttpResponseMessage response;
try
{
response = await Credential.Client.PostAsync(Credential.BaseUrl + endpoint, content);
}
catch (Exception ex)
{
throw new ApiException("API request error", ex);
}
string json;
json = await response.Content.ReadAsStringAsync();
// check status code
if (!response.IsSuccessStatusCode)
{
throw new ApiException(
string.Format("API response error: {0} {1}", response.StatusCode, json ?? "null"), null);
}
return new ApiResponse { RawJson = json };
}
}
}
<file_sep>/Msky/Api/ApiException.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Net.Http;
namespace Msky.Api
{
public class ApiException : Exception
{
public ApiException(string message, Exception innerException)
: base(message, innerException)
{
}
}
}
<file_sep>/Msky/Entities/Stats.cs
using System;
using Newtonsoft.Json;
namespace Msky.Entities
{
/// <summary>
/// Instance statistics
/// </summary>
public class Stats : BaseObject
{
/// <summary>
/// Notes count (local+remote)
/// </summary>
public double NotesCount { get { return Value<double>("notesCount", -1); } }
/// <summary>
/// Users count (local+remote)
/// </summary>
public double UsersCount { get { return Value<double>("usersCount", -1); } }
/// <summary>
/// Notes count (local only)
/// </summary>
public double OriginalNotesCount { get { return Value<double>("originalNotesCount", -1); } }
/// <summary>
/// Users count (local only)
/// </summary>
public double OriginalUsersCount { get { return Value<double>("originalUsersCount", -1); } }
}
}
<file_sep>/Msky/Entities/UsernameAvailable.cs
using System;
using Newtonsoft.Json;
namespace Msky.Entities
{
public class UsernameAvailable : BaseObject
{
public bool Available { get { return Value<bool>("available"); } }
}
}
<file_sep>/Msky/Entities/Maintener.cs
using System;
using Newtonsoft.Json;
namespace Msky.Entities
{
/// <summary>
/// Maintener
/// </summary>
public class Maintener : BaseObject
{
/// <summary>
/// Name
/// </summary>
public string Name { get { return StringOrNull("name"); } }
/// <summary>
/// URL
/// </summary>
public string Url { get { return StringOrNull("url"); } }
public override string ToString()
{
return string.Format("Maintener: {0}({1})", Name ?? "", Url ?? "");
}
}
}
<file_sep>/Msky/Entities/Mute.cs
using System;
using System.Collections.Generic;
using Newtonsoft.Json;
namespace Msky.Entities
{
public class Mute : BaseObject
{
[JsonProperty("users")]
public IEnumerable<User> Users { get; set; }
public string Next { get { return StringOrNull("next"); } }
}
}
<file_sep>/Msky/Entities/Note.cs
using System;
using System.Collections.Generic;
using System.Text;
using Newtonsoft.Json;
namespace Msky.Entities
{
[JsonObject]
public class Note : BaseObject
{
/// <summary>
/// Note ID
/// </summary>
public string Id { get { return StringOrNull("id"); } }
/*
/// <summary>
/// ID of prev note
/// </summary>
public string Prev { get { return StringOrNull("prev"); } }
/// <summary>
/// ID of next note
/// </summary>
public string Next { get { return StringOrNull("next"); } }
*/
/// <summary>
/// Created datetime
/// </summary>
public DateTimeOffset CreatedAt { get { return Value<DateTimeOffset>("createdAt", DateTimeOffset.MinValue); } }
/// <summary>
/// Reply target ID
/// </summary>
public string ReplyId { get { return StringOrNull("replyId"); } }
/// <summary>
/// Notes of reply target
/// </summary>
[JsonProperty("reply")]
public Note Reply { get; set; }
/// <summary>
/// Renote target ID
/// </summary>
public string RenoteId { get { return StringOrNull("renoteId"); } }
/// <summary>
/// Renote target
/// </summary>
[JsonProperty("renote")]
public Note Renote { get; set; }
/// <summary>
/// Text
/// </summary>
public string Text { get { return StringOrNull("text"); } }
/*
/// <summary>
/// Text in HTML
/// </summary>
public string TextHtml { get { return StringOrNull("textHtml"); } }
*/
/// <summary>
/// Poll
/// </summary>
[JsonProperty("poll")]
public Poll Poll { get; set; }
/// <summary>
/// Content warning message
/// </summary>
public string Cw { get { return StringOrNull("cw"); } }
/// <summary>
/// Media IDs
/// </summary>
[JsonProperty("mediaIds")]
public IEnumerable<string> MediaIds { get; set; }
/// <summary>
/// Media objects
/// </summary>
[JsonProperty("media")]
public IEnumerable<Media> Media { get; set; }
/// <summary>
/// Tags
/// </summary>
[JsonProperty("tags")]
public IEnumerable<string> Tags { get; set; }
/// <summary>
/// User ID
/// </summary>
public string UserId { get { return StringOrNull("userId"); } }
/// <summary>
/// User
/// </summary>
[JsonProperty("user")]
public User User { get; set; }
/// <summary>
/// Is noted by mobile
/// </summary>
public bool ViaMobile { get { return Value<bool>("viaMobile", false); } }
//public string geo { get { return StringOrNull("geo"); } }
//public string appId { get { return StringOrNull("appId"); } }
/// <summary>
/// Visibility
/// </summary>
public string Visibility { get { return StringOrNull("visibility"); } }
/*
/// <summary>
/// Visible user IDs
/// </summary>
[JsonProperty("visibleUserIds")]
public IEnumerable<string> VisibleUserIds { get; set; }
*/
/// <summary>
/// My Reaction
/// </summary>
public string MyReaction { get { return StringOrNull("myReaction"); } }
public override string ToString()
{
return string.Format("Note[{0}]: {1}", Id ?? "", CreatedAt);
}
}
}
<file_sep>/Msky/Entities/UserKey.cs
using System;
using Newtonsoft.Json;
namespace Msky.Entities
{
/// <summary>
/// auth/session/userkey response
/// </summary>
public class UserKey : BaseObject
{
/// <summary>
/// user's access token
/// </summary>
public string AccessToken { get { return StringOrNull("accessToken"); } }
/// <summary>
/// User
/// </summary>
[JsonProperty("user")]
public User User { get; set; }
}
}
<file_sep>/Msky/Api/I.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Msky.Entities;
using Newtonsoft.Json;
namespace Msky.Api
{
public class I : ApiBase
{
internal I(Credential credential) : base(credential) { }
/// <summary>
/// Get notifications
/// </summary>
/// <param name="following"></param>
/// <param name="markAsRead"></param>
/// <param name="limit"></param>
/// <param name="sinceId"></param>
/// <param name="untilId"></param>
/// <returns></returns>
public async Task<Notification[]> NotificationsAsync(bool following = false, bool markAsRead = true,
int limit = 10, string sinceId = null, string untilId = null)
{
var q = new Dictionary<string, object>() {
{ "following", following },
{ "markAsRead", markAsRead },
};
return await RequestArrayLSUAsync<Notification>("/api/i/notifications",
q, limit, sinceId, untilId);
}
/// <summary>
/// Get favorites
/// </summary>
/// <param name="limit"></param>
/// <param name="sinceId"></param>
/// <param name="untilId"></param>
/// <returns></returns>
public async Task<Favorites[]> FavoritesAsync(int limit = 10, string sinceId = null, string untilId = null)
{
return await RequestArrayLSUAsync<Favorites>("/api/i/favorites",
new Dictionary<string, object>(), limit, sinceId, untilId);
}
}
}
<file_sep>/Msky/Entities/Cpu.cs
using System;
using Newtonsoft.Json;
namespace Msky.Entities
{
/// <summary>
/// CPU
/// </summary>
public class Cpu : BaseObject
{
/// <summary>
/// Processor model
/// </summary>
public string Model { get { return StringOrNull("model"); } }
/// <summary>
/// Number of cores
/// </summary>
public double Cores { get { return Value<double>("cores", -1); } }
public override string ToString()
{
return string.Format("CPU: {0} x{1}", Model ?? "", Cores);
}
}
}
<file_sep>/Msky/Entities/Notification.cs
using System;
using Newtonsoft.Json;
namespace Msky.Entities
{
public class Notification : BaseObject
{
/// <summary>
/// Created datetime
/// </summary>
public DateTimeOffset CreatedAt { get { return Value<DateTimeOffset>("createdAt", DateTimeOffset.MinValue); } }
public string type { get { return StringOrNull("type"); } }
public bool isRead { get { return Value<bool>("isRead", false); } }
public string noteId { get { return StringOrNull("noteId"); } }
/// <summary>
/// ID
/// </summary>
public string Id { get { return StringOrNull("id"); } }
public string userId { get { return StringOrNull("userId"); } }
[JsonProperty("user")]
public User User { get; set; }
[JsonProperty("note")]
public Note Note { get; set; }
public override string ToString()
{
return string.Format("Notification: {0} {1}", CreatedAt, type ?? "");
}
}
}
<file_sep>/Msky/Api/Users.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Msky.Entities;
using Newtonsoft.Json;
namespace Msky.Api
{
    /// <summary>
    /// users/* endpoints: listing, lookup, search, notes, and follow graphs.
    /// </summary>
    public class Users : ApiBase
    {
        internal Users(Credential credential) : base(credential) { }
        #region users
        /// <summary>
        /// List all users
        /// </summary>
        /// <param name="limit">max item count(1-100)</param>
        /// <param name="offset">start offset(0)</param>
        /// <param name="order">order</param>
        /// <returns>array of users</returns>
        public async Task<User[]> ListAsync(int limit = 10, int offset = 0, UsersSortOrder order = UsersSortOrder.Default)
        {
            var q = new Dictionary<string, object>() {
                { "limit", limit },
                { "offset", offset },
                // Default maps to null; RequestAsync filters null values out
                // of the payload, so no "order" key is sent in that case.
                { "order",
                order == UsersSortOrder.FollowerAsc ? "+follower" :
                order == UsersSortOrder.FollowerDesc ? "-follower" :
                null }
            };
            var res = await RequestAsync("/api/users", q);
            return JsonConvert.DeserializeObject<User[]>(res.RawJson);
        }
        /// <summary>Sort order for <see cref="ListAsync"/>.</summary>
        public enum UsersSortOrder
        {
            /// <summary>
            /// Default(ID order)
            /// </summary>
            Default,
            /// <summary>Sent as "+follower".</summary>
            FollowerAsc,
            /// <summary>Sent as "-follower".</summary>
            FollowerDesc
        }
        #endregion
        #region users/show
        /// <summary>Show a user by ID.</summary>
        /// <param name="userId">target user ID</param>
        public async Task<User> UsersShowByIdAsync(string userId)
        {
            return await RequestObjectAsync<User>("/api/users/show", new Dictionary<string, object>() {
                { "userId", userId },
            });
        }
        /// <summary>Show a user by username and optional host.</summary>
        /// <param name="username">username</param>
        /// <param name="host">host (optional)</param>
        public async Task<User> UsersShowByNameAsync(string username, string host = null)
        {
            return await RequestObjectAsync<User>("/api/users/show", new Dictionary<string, object>() {
                { "username", username },
                { "host", host },
            });
        }
        /// <summary>Show multiple users by their IDs.</summary>
        /// <param name="userIds">target user IDs</param>
        public async Task<User[]> UsersShowByIdsAsync(string[] userIds)
        {
            var res = await RequestAsync("/api/users/show", new Dictionary<string, object>() {
                { "userIds", userIds },
            });
            return JsonConvert.DeserializeObject<User[]>(res.RawJson);
        }
        #endregion
        #region users/search
        /// <summary>
        /// Search user by name, username
        /// </summary>
        /// <param name="query">search query</param>
        /// <param name="offset">0-</param>
        /// <param name="max">1-30</param>
        /// <returns>array of matching users</returns>
        public async Task<User[]> UsersSearch(string query, int offset = 0, int max = 10)
        {
            var q = new Dictionary<string, object>() {
                { "query", query },
                { "offset", offset },
                { "max", max },
            };
            return await RequestArrayAsync<User>("/api/users/search", q);
        }
        #endregion
        #region users/search_by_username
        /// <summary>
        /// Search user by username
        /// </summary>
        /// <param name="query">search query</param>
        /// <param name="offset">0-</param>
        /// <param name="max">1-30</param>
        /// <returns>array of matching users</returns>
        public async Task<User[]> UsersSearchByUsername(string query, int offset = 0, int max = 10)
        {
            var q = new Dictionary<string, object>() {
                { "query", query },
                { "offset", offset },
                { "max", max },
            };
            return await RequestArrayAsync<User>("/api/users/search_by_username", q);
        }
        #endregion
        #region users/notes
        /// <summary>
        /// Query options for users/notes.  Use the static factories to pick
        /// the paging anchor (by ID or by date); only one anchor is sent.
        /// </summary>
        public class UsersNotesQuerySpec
        {
            /// <summary>Anchor: fetch since this ID.</summary>
            public string SinceId { get; protected set; }
            /// <summary>Anchor: fetch until this ID.</summary>
            public string UntilId { get; protected set; }
            /// <summary>Anchor: fetch since this Unix time (ms); 0 = unset.</summary>
            public long SinceDate { get; protected set; }
            /// <summary>Anchor: fetch until this Unix time (ms); 0 = unset.</summary>
            public long UntilDate { get; protected set; }
            // "includeReplies" flag.
            public bool IncludeReplies = true;
            // "withMedia" flag.
            public bool WithMedia = false;
            // Max item count.
            public int Limit = 10;
            /// <summary>Spec with no paging anchor.</summary>
            public static UsersNotesQuerySpec Default()
            {
                return new UsersNotesQuerySpec { };
            }
            /// <summary>Spec anchored at sinceDate = <paramref name="dateTime"/>.</summary>
            public static UsersNotesQuerySpec After(DateTimeOffset dateTime)
            {
                return new UsersNotesQuerySpec { SinceDate = dateTime.ToUnixTimeMilliseconds() };
            }
            /// <summary>Spec anchored at untilDate = <paramref name="dateTime"/>.</summary>
            public static UsersNotesQuerySpec Before(DateTimeOffset dateTime)
            {
                return new UsersNotesQuerySpec { UntilDate = dateTime.ToUnixTimeMilliseconds() };
            }
            /// <summary>Spec anchored at sinceId = <paramref name="id"/>.</summary>
            public static UsersNotesQuerySpec After(string id)
            {
                return new UsersNotesQuerySpec { SinceId = id };
            }
            /// <summary>Spec anchored at untilId = <paramref name="id"/>.</summary>
            public static UsersNotesQuerySpec Before(string id)
            {
                return new UsersNotesQuerySpec { UntilId = id };
            }
        }
        /// <summary>
        /// Fetch users notes by UserId
        /// </summary>
        /// <param name="userId">userId</param>
        /// <param name="querySpec">querySpec</param>
        /// <returns>users notes</returns>
        public async Task<Note[]> UsersNotesByIdAsync(string userId, UsersNotesQuerySpec querySpec)
        {
            return await UsersNotesAsync(querySpec, userId, null, null);
        }
        /// <summary>
        /// Fetch users notes by username and host
        /// </summary>
        /// <param name="username">username</param>
        /// <param name="host">host</param>
        /// <param name="querySpec">querySpec</param>
        /// <returns>users notes</returns>
        public async Task<Note[]> UsersNotesByNameAsync(string username, string host, UsersNotesQuerySpec querySpec)
        {
            return await UsersNotesAsync(querySpec, null, username, host);
        }
        /// <summary>
        /// Shared implementation: null identification parameters are filtered
        /// out of the payload by RequestAsync.
        /// </summary>
        protected async Task<Note[]> UsersNotesAsync(UsersNotesQuerySpec qs, string userId = null, string username = null, string host = null)
        {
            var q = new Dictionary<string, object>() {
                { "userId", userId },
                { "username", username },
                { "host", host },
                { "includeReplies", qs.IncludeReplies },
                { "withMedia", qs.WithMedia },
                { "limit", qs.Limit },
            };
            // Only one paging anchor is sent; priority: sinceId, untilId,
            // sinceDate, untilDate.
            if (!string.IsNullOrEmpty(qs.SinceId))
            {
                q.Add("sinceId", qs.SinceId);
            }
            else if (!string.IsNullOrEmpty(qs.UntilId))
            {
                q.Add("untilId", qs.UntilId);
            }
            else if (qs.SinceDate > 0)
            {
                q.Add("sinceDate", qs.SinceDate);
            }
            else if (qs.UntilDate > 0)
            {
                q.Add("untilDate", qs.UntilDate);
            }
            return await RequestArrayAsync<Note>("/api/users/notes", q);
        }
        #endregion
        #region users/following
        /// <summary>
        /// Get UsersFollowing
        /// </summary>
        /// <param name="userId">target user ID</param>
        /// <param name="iknow">"iknow" filter flag</param>
        /// <param name="limit">1-100</param>
        /// <param name="cursor">paging cursor</param>
        /// <returns>users plus a "next" cursor</returns>
        public async Task<UsersWithNext> UsersFollowingAsync(string userId, bool iknow, int limit = 10, string cursor = null)
        {
            var q = new Dictionary<string, object>() {
                { "userId", userId },
                { "iknow", iknow },
                { "limit", limit },
                { "cursor", cursor },
            };
            return await RequestUsersWithNextAsync("/api/users/following", q);
        }
        #endregion
        #region users/followers
        /// <summary>
        /// Get UsersFollowers
        /// </summary>
        /// <param name="userId">target user ID</param>
        /// <param name="iknow">"iknow" filter flag</param>
        /// <param name="limit">1-100</param>
        /// <param name="cursor">paging cursor</param>
        /// <returns>users plus a "next" cursor</returns>
        public async Task<UsersWithNext> UsersFollowersAsync(string userId, bool iknow, int limit = 10, string cursor = null)
        {
            var q = new Dictionary<string, object>() {
                { "userId", userId },
                { "iknow", iknow },
                { "limit", limit },
                { "cursor", cursor },
            };
            return await RequestUsersWithNextAsync("/api/users/followers", q);
        }
        #endregion
        #region users/recommendation
        #endregion
        #region users/get_frequently_replied_users
        /// <summary>Get users frequently replied to by the given user.</summary>
        /// <param name="userId">target user ID</param>
        /// <param name="limit">max item count</param>
        public async Task<FRUsers[]> UsersGFRUsersAsync(string userId, int limit = 10)
        {
            var q = new Dictionary<string, object>() {
                { "userId", userId },
                { "limit", limit },
            };
            return await RequestArrayAsync<FRUsers>("/api/users/get_frequently_replied_users", q);
        }
        /// <summary>get_frequently_replied_users response item.</summary>
        public class FRUsers : BaseObject
        {
            /// <summary>The related user.</summary>
            [JsonProperty("user")]
            public User User;
            /// <summary>Weight reported by the server for this user.</summary>
            [JsonProperty("weight")]
            public double Weight;
            public override string ToString()
            {
                return string.Format("FRUsers: {0} {1:0.000}", User, Weight);
            }
        }
        #endregion
        /// <summary>Request an endpoint whose response is a users+next object.</summary>
        public async Task<UsersWithNext> RequestUsersWithNextAsync(string endpoint, Dictionary<string, object> queries)
        {
            var res = await RequestAsync(endpoint, queries);
            return JsonConvert.DeserializeObject<UsersWithNext>(res.RawJson);
        }
        /// <summary>A page of users plus a paging cursor.</summary>
        public class UsersWithNext
        {
            /// <summary>Users in this page.</summary>
            [JsonProperty("users")]
            public User[] Users;
            /// <summary>"next" cursor, or null.</summary>
            [JsonProperty("next")]
            public string Next;
        }
    }
}
<file_sep>/Msky/Entities/Media.cs
using System;
using System.Collections.Generic;
using Newtonsoft.Json;
namespace Msky.Entities
{
public class Media : BaseObject
{
public string ID { get { return StringOrNull("id"); } }
public DateTimeOffset CreatedAt { get { return Value<DateTimeOffset>("createdAt", DateTimeOffset.MinValue); } }
public string Name { get { return StringOrNull("name"); } }
public string Type { get { return StringOrNull("type"); } }
public double Datasize { get { return Value<double>("datasize", -1); } }
public string MD5 { get { return StringOrNull("md5"); } }
public string UserId { get { return StringOrNull("userId"); } }
public string FolderId { get { return StringOrNull("folderId"); } }
public string Comment { get { return StringOrNull("comment"); } }
public string URL { get { return StringOrNull("url"); } }
[JsonProperty("properties")]
public MediaProperties Properties { get; set; }
public override string ToString()
{
return string.Format("Media: {0}", URL ?? "");
}
}
}
<file_sep>/Msky/Api/NotesPolls.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Msky.Entities;
using Newtonsoft.Json;
namespace Msky.Api
{
public class NotesPolls : ApiBase
{
internal NotesPolls(Credential credential) : base(credential) { }
public async Task VoteAsync(string noteId, string choice)
{
var q = new Dictionary<string, object>() {
{ "noteId", noteId },
{ "choice", choice },
};
await RequestAsync("/api/notes/polls/vote", q);
}
public async Task<Note[]> RecommendationAsync(int limit = 10, int offset = 0)
{
var q = new Dictionary<string, object>() {
{ "limit", limit },
{ "offset", offset },
};
return await RequestArrayAsync<Note>("/api/notes/polls/recommendation", q);
}
}
}
<file_sep>/Msky/Entities/MediaProperties.cs
using System;
using System.Collections.Generic;
using Newtonsoft.Json;
namespace Msky.Entities
{
public class MediaProperties : BaseObject
{
public double Width { get { return Value<double>("width", -1); } }
public double Height { get { return Value<double>("height", -1); } }
public override string ToString()
{
return string.Format("Properties: {0}x{1}", Width, Height);
}
}
}
<file_sep>/Msky/Entities/AuthSession.cs
using System;
using Newtonsoft.Json;
namespace Msky.Entities
{
/// <summary>
/// auth/session/generate response
/// </summary>
public class AuthSession : BaseObject
{
/// <summary>
/// session token
/// </summary>
public string Token { get { return StringOrNull("token"); } }
/// <summary>
/// auth URL for client
/// </summary>
public string Url { get { return StringOrNull("url"); } }
}
}
<file_sep>/Msky/Api/Mutes.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Msky.Entities;
using Newtonsoft.Json;
namespace Msky.Api
{
public class Mutes : ApiBase
{
internal Mutes(Credential credential) : base(credential) { }
/// <summary>
/// Get mutes (require auth)
/// </summary>
/// <param name="iknow"></param>
/// <param name="limit"></param>
/// <param name="cursor"></param>
/// <returns></returns>
public async Task<Mute[]> ListAsync(bool iknow = false, int limit = 30, string cursor = null)
{
var q = new Dictionary<string, object>() {
{ "iknow", iknow },
{ "limit", limit },
{ "cursor", cursor },
};
return await RequestArrayAsync<Mute>("/api/mute/list", q);
}
/// <summary>
/// Mute a user (require auth)
/// </summary>
/// <param name="userId"></param>
/// <returns></returns>
public async Task CreateAsync(string userId)
{
var q = new Dictionary<string, object>() {
{ "userId", userId },
};
await RequestObjectAsync<BaseObject>("/api/mute/create", q);
}
/// <summary>
/// Unmute a user (require auth)
/// </summary>
/// <param name="userId">user to unmute</param>
/// <returns></returns>
public async Task DeleteAsync(string userId)
{
var q = new Dictionary<string, object>() {
{ "userId", userId },
};
await RequestObjectAsync<BaseObject>("/api/mute/delete", q);
}
}
}
<file_sep>/Msky/Entities/Meta.cs
using System;
using Newtonsoft.Json;
namespace Msky.Entities
{
/// <summary>
/// Instance inframation
/// </summary>
public class Meta : BaseObject
{
/// <summary>
/// Maintainer
/// </summary>
[JsonProperty("maintainer")]
public Maintener Maintainer { get; set; }
/// <summary>
/// Misskey version
/// </summary>
public string Version { get { return StringOrNull("version"); } }
/// <summary>
/// Misskey client version
/// </summary>
public string ClientVersion { get { return StringOrNull("clientVersion"); } }
/// <summary>
/// Is process listen on https ?
/// </summary>
public bool Secure { get { return Value<bool>("secure", false); } }
/// <summary>
/// Host machine name
/// </summary>
public string Machine { get { return StringOrNull("machine"); } }
/// <summary>
/// Host OS name
/// </summary>
public string Os { get { return StringOrNull("os"); } }
/// <summary>
/// Host Node version
/// </summary>
public string Node { get { return StringOrNull("node"); } }
/// <summary>
/// Host CPU information
/// </summary>
[JsonProperty("cpu")]
public Cpu Cpu { get; set; }
public override string ToString()
{
return string.Format("Meta: v{0} / clinet v{1}", Version ?? "Unknown", ClientVersion ?? "Unknown");
}
}
}
<file_sep>/Msky/Api/Notes.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Msky.Entities;
using Newtonsoft.Json;
namespace Msky.Api
{
public class Notes : ApiBase
{
        /// <summary>Create the notes API bound to the given credential.</summary>
        internal Notes(Credential credential) : base(credential) { }
#region notes
public class NotesQuerySpec
{
/// <summary>
/// Fetch since this ID (Can't use with UntilId)
/// </summary>
public string SinceId { get; protected set; }
/// <summary>
/// Fetch until this ID (Can't use with SinceId)
/// </summary>
public string UntilId { get; protected set; }
/// <summary>
/// Limited to reply
/// </summary>
public bool Reply = false;
/// <summary>
/// Limited to renote
/// </summary>
public bool Renote = false;
/// <summary>
/// Limited to media
/// </summary>
public bool Media = false;
/// <summary>
/// Limited to poll
/// </summary>
public bool Poll = false;
/// <summary>
/// Limit 1-100, default:10
/// </summary>
public int Limit = 10;
public static NotesQuerySpec Default()
{
return new NotesQuerySpec { };
}
public static NotesQuerySpec After(string id)
{
return new NotesQuerySpec { SinceId = id };
}
public static NotesQuerySpec Before(string id)
{
return new NotesQuerySpec { UntilId = id };
}
}
/// <summary>
/// List all notes
/// </summary>
/// <param name="querySpec">query spec</param>
/// <returns>array of note</returns>
public async Task<Note[]> ListAsync(NotesQuerySpec querySpec)
{
var q = new Dictionary<string, object>() {
{ "reply", querySpec.Reply },
{ "renote", querySpec.Renote },
{ "media", querySpec.Media },
{ "poll", querySpec.Poll },
{ "limit", querySpec.Limit },
};
// can specify since or until ID
if (!string.IsNullOrEmpty(querySpec.SinceId))
q.Add("sinceId", querySpec.SinceId);
else if (!string.IsNullOrEmpty(querySpec.UntilId))
q.Add("untilId", querySpec.UntilId);
return await RequestArrayAsync<Note>("/api/notes", q);
}
#endregion
/// <summary>
/// Show note
/// </summary>
/// <param name="noteId">target note ID</param>
/// <returns>Note</returns>
public async Task<Note> ShowAsync(string noteId)
{
return await RequestObjectAsync<Note>("/api/notes/show", new Dictionary<string, object>() {
{ "noteId", noteId },
});
}
/// <summary>
/// Get context of note
/// </summary>
/// <param name="noteId">target note ID</param>
/// <param name="limit">limit (1-100)</param>
/// <param name="offset">offset</param>
/// <returns>Context of note</returns>
public async Task<BaseObject[]> ContextAsync(string noteId, int limit = 10, int offset = 0)
{
return await RequestArrayAsync<BaseObject>("/api/notes/context", new Dictionary<string, object>() {
{ "noteId", noteId },
{ "limit", limit },
{ "offset", offset },
});
}
/// <summary>
/// Get replies of note
/// </summary>
/// <param name="noteId">target note ID</param>
/// <param name="limit">limit (1-100)</param>
/// <param name="offset">offset</param>
/// <returns>array of replies</returns>
public async Task<Note[]> RepliesAsync(string noteId, int limit = 10, int offset = 0)
{
return await RequestArrayAsync<Note>("/api/notes/replies", new Dictionary<string, object>() {
{ "noteId", noteId },
{ "limit", limit },
{ "offset", offset },
});
}
#region create
/// <summary>
/// Create new renote
/// </summary>
/// <param name="renoteId">renote target</param>
/// <returns>Created renote</returns>
public async Task<CreateResponse> CreateRenoteAsync(string renoteId)
{
return await CreateAsync(new CreateSpec
{
RenoteId = renoteId,
});
}
/// <summary>
/// Create new note (require auth)
/// </summary>
/// <param name="text">Text</param>
/// <param name="mediaIDs">Media IDs (Optional)</param>
/// <param name="cw">Warning message (Optional)</param>
/// <param name="visibility">Visibility</param>
/// <param name="replyId">ReplyId (Optional)</param>
/// <param name="pollChpoices">Poll choices (1~49chars x 2~10)</param>
/// <returns>Created note</returns>
public async Task<CreateResponse> CreateAsync(string text, string[] mediaIDs = null, string cw = null,
VisibilityType visibility = VisibilityType.Public, string replyId = null, string[] pollChpoices = null)
{
return await CreateAsync(new CreateSpec
{
Text = text,
MediaIds = mediaIDs,
Cw = cw,
Visibility = visibility.ToString().ToLower(),
ReplyId = replyId,
});
}
/// <summary>
/// Create new note, renote or poll
/// </summary>
/// <param name="createSpec"></param>
/// <returns></returns>
public async Task<CreateResponse> CreateAsync(CreateSpec createSpec)
{
var q = new Dictionary<string, object>() {
{ "visibility", createSpec.Visibility },
{ "visibleUserIds", createSpec.VisibleUserIds },
{ "text", createSpec.Text },
{ "cw", createSpec.Cw },
{ "viaMobile", createSpec.ViaMobile },
{ "tags", createSpec.Tags },
{ "geo", createSpec.Geo },
{ "mediaIds", createSpec.MediaIds },
{ "renoteId", createSpec.RenoteId },
{ "replyId", createSpec.ReplyId },
{ "channelId", createSpec.ChannelId },
};
if (createSpec.PollChoices != null)
q.Add("poll", new { choices = createSpec.PollChoices });
var createdNote = await RequestObjectAsync<Note>("/api/notes/create", q);
return new CreateResponse { CreatedNote = createdNote };
}
public class CreateSpec
{
/// <summary>
/// "public", "home", "followers", "specified", "private"
/// </summary>
public string Visibility = "public";
/// <summary>
/// (null or ge 1)
/// </summary>
public string[] VisibleUserIds = null;
public string Text = null;
public string Cw = null;
public bool ViaMobile = false;
public string[] Tags = null;
public object Geo = null;
public string[] MediaIds = null;
public string RenoteId = null;
public string ReplyId = null;
public string ChannelId = null;
/// <summary>
/// Poll choices (1~49chars x 2~10)
/// </summary>
public string[] PollChoices = null;
}
public class CreateResponse
{
public Note CreatedNote;
}
#endregion
#region renotes
public async Task<Note[]> RenotesAsync(string noteId, int limit = 10, string sinceId = null, string untilId = null)
{
var q = new Dictionary<string, object>() {
{ "noteId", noteId },
};
return await RequestArrayLSUAsync<Note>("/api/notes/renotes",
q, limit, sinceId, untilId);
}
#endregion
#region search
#endregion
#region timeline
/// <summary>
/// Get (home)timeline require auth
/// </summary>
/// <param name="qs"></param>
/// <returns>timeline</returns>
public async Task<Note[]> TimelineAsync(TimelineQuerySpec qs)
{
return await AnyTimelineAsync("/api/notes/timeline", qs);
}
public async Task<Note[]> LocalTimelineAsync(TimelineQuerySpec qs)
{
return await AnyTimelineAsync("/api/notes/local-timeline", qs);
}
public async Task<Note[]> GlobalTimelineAsync(TimelineQuerySpec qs)
{
return await AnyTimelineAsync("/api/notes/global-timeline", qs);
}
public async Task<Note[]> ListTimelineAsync(TimelineQuerySpec qs)
{
return await AnyTimelineAsync("/api/notes/user-list-timeline", qs);
}
protected async Task<Note[]> AnyTimelineAsync(string endpoint, TimelineQuerySpec qs)
{
var q = new Dictionary<string, object>() {
{ "includeMyRenotes", qs.includeMyRenotes },
{ "includeRenotedMyNotes", qs.includeRenotedMyNotes },
{ "limit", qs.Limit },
};
if (!string.IsNullOrEmpty(qs.SinceId))
q.Add("sinceId", qs.SinceId);
else if (!string.IsNullOrEmpty(qs.UntilId))
q.Add("untilId", qs.UntilId);
else if (qs.SinceDate > 0)
q.Add("sinceDate", qs.SinceDate);
else if (qs.UntilDate > 0)
q.Add("untilDate", qs.UntilDate);
return await RequestArrayAsync<Note>(endpoint, q);
}
public class TimelineQuerySpec
{
public string SinceId { get; protected set; }
public string UntilId { get; protected set; }
public long SinceDate { get; protected set; }
public long UntilDate { get; protected set; }
public bool includeMyRenotes = true;
public bool includeRenotedMyNotes = true;
/// <summary>
/// Limit 1-100, default:10
/// </summary>
public int Limit = 10;
public static TimelineQuerySpec Default()
{
return new TimelineQuerySpec { };
}
public static TimelineQuerySpec After(DateTimeOffset dateTime)
{
return new TimelineQuerySpec { SinceDate = dateTime.ToUnixTimeMilliseconds() };
}
public static TimelineQuerySpec Before(DateTimeOffset dateTime)
{
return new TimelineQuerySpec { UntilDate = dateTime.ToUnixTimeMilliseconds() };
}
public static TimelineQuerySpec After(string id)
{
return new TimelineQuerySpec { SinceId = id };
}
public static TimelineQuerySpec Before(string id)
{
return new TimelineQuerySpec { UntilId = id };
}
}
#endregion
/// <summary>
/// auth
/// </summary>
/// <param name="following"></param>
/// <param name="limit"></param>
/// <param name="sinceId"></param>
/// <param name="untilId"></param>
/// <returns></returns>
public async Task<BaseObject[]> MentionsAsync(bool following = false, int limit = 10, string sinceId = null, string untilId = null)
{
var q = new Dictionary<string, object>() {
{ "following", following },
};
return await RequestArrayLSUAsync<BaseObject>("/api/notes/mentions",
q, limit, sinceId, untilId);
}
#region trend
public async Task<Note[]> TrendAsync(int limit = 10, int offset = 0,
bool reply = false, bool renote = false, bool media = false, bool poll = false)
{
var q = new Dictionary<string, object>() {
{ "limit", limit },
{ "offset", offset },
{ "reply", reply },
{ "renote", renote },
{ "media", media },
{ "poll", poll },
};
return await RequestArrayAsync<Note>("/api/notes/trend", q);
}
#endregion
/// <summary>
/// リアクション(いいね, しゅき など)
/// </summary>
public NotesReactions Reactions => new NotesReactions(Credential);
/// <summary>
/// お気に入り
/// </summary>
public NotesFavorites Favorites => new NotesFavorites(Credential);
/// <summary>
/// 投票
/// </summary>
public NotesPolls Polls => new NotesPolls(Credential);
/// <summary>
/// 公開範囲
/// </summary>
public enum VisibilityType
{
/// <summary>公開</summary>
Public,
/// <summary>ホーム(home only)</summary>
Home,
/// <summary>フォロワー(followers only)</summary>
Followers,
/// <summary>ダイレクト(specified users only)</summary>
Specified,
/// <summary>非公開</summary>
Private
}
}
}
| 3fedc46de57aac7ad81f1d4286c6c0532720f453 | [
"Markdown",
"C#"
] | 30 | C# | mei23/Msky | 42300c1dc7b500e754683a9fefc265af39a7cdf0 | 454debeffd88e59ea563539e57299ec2cff6b250 |
refs/heads/master | <file_sep>import Foundation
import UIKit
final class AppCoordinator: NSObject,
PhotoListViewControllerDelegate,
AddPhotoViewControllerDelegate,
UIImagePickerControllerDelegate,
UINavigationControllerDelegate
{
private weak var navigationController: UINavigationController?
private weak var photoListViewController: PhotoListViewController?
private var feedDownloader = FeedDownloader()
init(navigationController: UINavigationController) {
self.navigationController = navigationController
self.photoListViewController = navigationController.topViewController as? PhotoListViewController
}
func start() {
let photoListViewModel = PhotoListViewModel(feedDownloader: feedDownloader)
self.photoListViewController?.viewModel = photoListViewModel
self.photoListViewController?.delegate = self
}
// MARK: PhotoListViewControllerDelegate
func photoListViewControllerDidSelect(_ photo: Photo) {
let storyboard = self.photoListViewController?.storyboard
if let photoViewController = storyboard?.instantiateViewController(withIdentifier: "PhotoViewController") as? PhotoViewController {
photoViewController.photo = photo
self.navigationController?.pushViewController(photoViewController, animated: true)
}
}
func photoListViewControllerDidSelectAddPhoto() {
let sourceType = UIImagePickerController.SourceType.savedPhotosAlbum
guard UIImagePickerController.isSourceTypeAvailable(sourceType) else { return }
let imagePickerController = UIImagePickerController()
imagePickerController.sourceType = sourceType
imagePickerController.delegate = self
self.navigationController?.present(imagePickerController, animated: true)
}
// MARK: AddPhotoViewControllerDelegate
func addPhotoViewController(_ addPhotoViewController: AddPhotoViewController, didFinishWithError: Bool) {
if didFinishWithError {
let alertController = UIAlertController(title: "Invalid photo", message: "DogCommunity accepts only photos of dogs", preferredStyle: .alert)
alertController.addAction(UIAlertAction(title: "OK", style: .default, handler: { _ in
self.navigationController?.dismiss(animated: true)
}))
addPhotoViewController.present(alertController, animated: true)
} else {
self.photoListViewController?.reloadData()
self.navigationController?.dismiss(animated: true)
}
}
// MARK: UIImagePickerControllerDelegate
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
guard let image = info[.originalImage] as? UIImage else { return }
self.navigationController?.dismiss(animated: true) {
let storyboard = self.photoListViewController?.storyboard
if let addPhotoNavigationViewController = storyboard?.instantiateViewController(withIdentifier: "AddPhotoNavigationViewController") as? UINavigationController,
let addPhotoViewController = addPhotoNavigationViewController.viewControllers.first as? AddPhotoViewController {
addPhotoViewController.viewModel = AddPhotoViewModel(image: image, feedDownloader: self.feedDownloader)
addPhotoViewController.delegate = self
self.navigationController?.present(addPhotoNavigationViewController, animated: true)
}
}
}
func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
self.navigationController?.dismiss(animated: true)
}
}
<file_sep>import UIKit
extension UIRefreshControl {
func makeSureIsRefreshing() {
if !isRefreshing {
beginRefreshing()
}
}
func makeSureIsNotRefreshing() {
if isRefreshing {
endRefreshing()
}
}
}
<file_sep># DogCommunity
This is a sample app for my CoreML talk. It is an app for dog owners to share photos of their dogs. This app doesn't use any real backend. It's main functionality (apart from showing and adding photos) is verifying if the added photo is a dog. It will reject cat photos.
### Prerequisites
You need the app and some photos of dogs and cats on your device or simulator.
### CoreML
Model was created using [CreateML]() based on [the Kaggle dataset](https://www.kaggle.com/c/dogs-vs-cats/data).
This is the code responsible for the classification:
```swift
let model = try VNCoreMLModel(for: CatOrDog().model)
let request = VNCoreMLRequest(model: model) { request, error in
self.processRequest(request, completion: completion)
}
let handler = VNImageRequestHandler(cgImage: image.cgImage!)
try handler.perform([request])
```
## License
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details
## Acknowledgments
* App icon is made by Freepik from [www.flaticon.com](https://www.flaticon.com)
* Dog photos are part of [Kaggle competition Dogs vs. Cats](https://www.kaggle.com/c/dogs-vs-cats)
<file_sep>import Quick
import Nimble
@testable import DogCommunity
class UIRefreshControlUtilitiesSpec: QuickSpec {
class MockUIRefreshControl: UIRefreshControl {
private(set) var beginRefreshingCalled = false
private(set) var endRefreshingCalled = false
var isRefreshingOverride: Bool = false
override var isRefreshing: Bool {
get {
return isRefreshingOverride
}
}
override func beginRefreshing() {
beginRefreshingCalled = true
}
override func endRefreshing() {
endRefreshingCalled = true
}
}
override func spec() {
var sut: MockUIRefreshControl!
beforeEach {
sut = MockUIRefreshControl()
}
describe("makeSureIsRefreshing") {
context("refreshing is in progress") {
beforeEach {
sut.isRefreshingOverride = true
sut.makeSureIsRefreshing()
}
it("doesn't call beginRefreshing") {
expect(sut.beginRefreshingCalled).to(beFalsy())
}
it("doesn't call endRefreshing") {
expect(sut.endRefreshingCalled).to(beFalsy())
}
}
context("refreshing is not in progress") {
beforeEach {
sut.makeSureIsRefreshing()
}
it("calls beginRefreshing") {
expect(sut.beginRefreshingCalled).to(beTruthy())
}
it("doesn't call endRefreshing") {
expect(sut.endRefreshingCalled).to(beFalsy())
}
}
}
describe("makeSureIsNotRefreshing") {
context("refreshing is in progress") {
beforeEach {
sut.isRefreshingOverride = true
sut.makeSureIsNotRefreshing()
}
it("doesn't call beginRefreshing") {
expect(sut.beginRefreshingCalled).to(beFalsy())
}
it("calls endRefreshing") {
expect(sut.endRefreshingCalled).to(beTruthy())
}
}
context("refreshing is not in progress") {
beforeEach {
sut.makeSureIsNotRefreshing()
}
it("doesn't call beginRefreshing") {
expect(sut.beginRefreshingCalled).to(beFalsy())
}
it("doesn't call endRefreshing") {
expect(sut.endRefreshingCalled).to(beFalsy())
}
}
}
}
}
<file_sep>import Foundation
protocol PhotoListViewModelDelegate: class {
func viewModelDidChangeStateTo(_ viewState: PhotoListViewModel.ViewState)
}
class PhotoListViewModel {
enum ViewState {
case `init`, loading, empty, ready
}
private(set) var photos = [Photo]()
private var viewState = ViewState.`init` {
didSet {
self.delegate?.viewModelDidChangeStateTo(viewState)
}
}
weak var delegate: PhotoListViewModelDelegate? {
didSet {
self.delegate?.viewModelDidChangeStateTo(viewState)
}
}
private var feedDownloader: FeedDownloader
init(feedDownloader: FeedDownloader = FeedDownloader()) {
self.feedDownloader = feedDownloader
}
var numberOfItems: Int {
return photos.count
}
func load() {
viewState = .loading
DispatchQueue.global(qos: .userInitiated).async {
self.photos = self.feedDownloader.downloadFeed()
self.viewState = self.photos.isEmpty ? .empty : .ready
}
}
}
<file_sep>import Quick
import Nimble
@testable import DogCommunity
class AppCoordinatorSpec: QuickSpec {
class MockUINavigationController: UINavigationController {
var passedViewController: UIViewController?
var passedAnimatedParam: Bool?
override func pushViewController(_ viewController: UIViewController, animated: Bool) {
passedViewController = viewController
passedAnimatedParam = animated
}
}
class MockUIStoryboard: UIStoryboard {
var viewController: UIViewController!
private(set) var passedIdentifier: String?
override func instantiateViewController(withIdentifier identifier: String) -> UIViewController {
passedIdentifier = identifier
return viewController
}
}
class MockPhotoListViewController: PhotoListViewController {
var storyboardOverride: UIStoryboard?
override var storyboard: UIStoryboard? {
return storyboardOverride
}
}
override func spec() {
var mockNavigationController: MockUINavigationController!
var mockPhotoListViewController: MockPhotoListViewController!
var sut: AppCoordinator!
beforeEach {
mockNavigationController = MockUINavigationController()
mockPhotoListViewController = MockPhotoListViewController()
mockNavigationController.viewControllers = [mockPhotoListViewController]
sut = AppCoordinator(navigationController: mockNavigationController)
}
describe("starting the app") {
beforeEach {
sut.start()
}
it("sets the photo list view controller view model") {
expect(mockPhotoListViewController.viewModel).to(beAKindOf(PhotoListViewModel.self))
}
it("sets itself as an delegate of the photo list view controller") {
expect(mockPhotoListViewController.delegate).to(beIdenticalTo(sut))
}
}
describe("PhotoListViewControllerDelegate") {
it("implements this protocol") {
expect(sut).to(beAKindOf(PhotoListViewControllerDelegate.self))
}
describe("photoListViewControllerDidSelect(_:)") {
var photo: Photo!
var mockStoryboard: MockUIStoryboard!
var photoViewController: PhotoViewController!
beforeEach {
photo = Photo(URL: URL(string: "https://google.com")!, title: nil, author: nil, aspectRatio: nil)
mockStoryboard = MockUIStoryboard()
photoViewController = PhotoViewController()
mockPhotoListViewController.storyboardOverride = mockStoryboard
mockStoryboard.viewController = photoViewController
sut.photoListViewControllerDidSelect(photo)
}
it("pushes correct view controller") {
expect(mockNavigationController.passedViewController).to(beIdenticalTo(photoViewController))
}
it("makes the push with animation") {
expect(mockNavigationController.passedAnimatedParam).to(beTruthy())
}
it("instantiates view controller with correct identifier") {
expect(mockStoryboard.passedIdentifier).to(equal("PhotoViewController"))
}
it("sets the photo on the view controller") {
expect(photoViewController.photo).to(equal(photo))
}
}
}
}
}
<file_sep>import Foundation
fileprivate func url(of photoName: String) -> URL {
return Bundle.main.url(forResource: photoName, withExtension: "jpg", subdirectory: "photos")!
}
class FeedDownloader {
var photos: [Photo] = []
init() {
}
func downloadFeed() -> [Photo] {
self.photos = [
Photo(URL: url(of: "dog.0"),
title: "Bella",
author: "Oliver",
aspectRatio: 1.33),
Photo(URL: url(of: "dog.1"),
title: "Bailey",
author: "George",
aspectRatio: 0.65),
Photo(URL: url(of: "dog.2"),
title: "Lucy",
author: "Chloe",
aspectRatio: 0.93),
Photo(URL: url(of: "dog.3"),
title: "Max",
author: "Jack",
aspectRatio: 1.33),
Photo(URL: url(of: "dog.4"),
title: "Molly",
author: "Jacob",
aspectRatio: 1.04),
Photo(URL: url(of: "dog.5"),
title: "Buddy",
author: "Emily",
aspectRatio: 1.32),
Photo(URL: url(of: "dog.6"),
title: "Daisy",
author: "Charlie",
aspectRatio: 1.02),
Photo(URL: url(of: "dog.7"),
title: "Rocky",
author: "Megan",
aspectRatio: 1.13),
Photo(URL: url(of: "dog.8"),
title: "Maggie",
author: "Jessica",
aspectRatio: 0.93),
Photo(URL: url(of: "dog.9"),
title: "Jake",
author: "Harry",
aspectRatio: 0.73),
]
return self.photos
}
}
<file_sep>import Foundation
struct Photo: Equatable {
let URL: URL
let title: String?
let author: String?
let aspectRatio: Float?
}
<file_sep>import UIKit
import AlamofireImage
class PhotoViewController: UIViewController {
@IBOutlet var imageView: UIImageView!
var photo: Photo!
override func viewDidLoad() {
super.viewDidLoad()
title = photo.title
imageView.af.setImage(withURL: photo.URL)
}
}
<file_sep>import Quick
import Nimble
@testable import DogCommunity
class PhotoListViewModelSpec: QuickSpec {
class MockPhotoListViewModelDelegate: PhotoListViewModelDelegate {
private(set) var viewStates: [PhotoListViewModel.ViewState] = []
func viewModelDidChangeStateTo(_ viewState: PhotoListViewModel.ViewState) {
self.viewStates.append(viewState)
}
}
class MockFeedDownloader: FeedDownloader {
override func downloadFeed() -> [Photo] {
return photos
}
}
override func spec() {
var mockFeedDownloader: MockFeedDownloader!
var mockPhotoListViewModelDelegate: MockPhotoListViewModelDelegate!
var sut: PhotoListViewModel!
beforeEach {
mockFeedDownloader = MockFeedDownloader()
sut = PhotoListViewModel(feedDownloader: mockFeedDownloader)
mockPhotoListViewModelDelegate = MockPhotoListViewModelDelegate()
sut.delegate = mockPhotoListViewModelDelegate
}
describe("photo loading") {
context("not started yet") {
it("has state init") {
expect(mockPhotoListViewModelDelegate.viewStates).to(equal([.`init`]))
}
it("has no items") {
expect(sut.numberOfItems).to(equal(0))
}
it("has empty photo list") {
expect(sut.photos).to(beEmpty())
}
}
context("no photos loaded") {
beforeEach {
sut.load()
}
it("has moved through all states and returned empty results") {
expect(mockPhotoListViewModelDelegate.viewStates).toEventually(equal([.`init`, .loading, .empty]))
expect(sut.numberOfItems).to(equal(0))
expect(sut.photos).to(beEmpty())
}
}
context("some photos loaded") {
beforeEach {
mockFeedDownloader.photos = [Photo(URL: URL(string: "https://google.com")!, title: nil, author: nil, aspectRatio: nil)]
sut.load()
}
it("has state init") {
expect(mockPhotoListViewModelDelegate.viewStates).toEventually(equal([.`init`, .loading, .ready]))
}
it("has no items") {
expect(sut.numberOfItems).toEventually(equal(1))
}
it("has empty photo list") {
expect(sut.photos).toEventually(equal(mockFeedDownloader.photos))
}
}
}
}
}
<file_sep>import UIKit
protocol AddPhotoViewControllerDelegate: class {
func addPhotoViewController(_ addPhotoViewController: AddPhotoViewController, didFinishWithError: Bool)
}
class AddPhotoViewController: UIViewController {
@IBOutlet private var imageView: UIImageView!
@IBOutlet private var nameTextField: UITextField!
@IBOutlet private var ownerTextField: UITextField!
var viewModel: AddPhotoViewModel!
weak var delegate: AddPhotoViewControllerDelegate?
override func viewDidLoad() {
super.viewDidLoad()
imageView.image = viewModel.image
let nameLabel = UILabel(frame: CGRect(x: 0, y: 0, width: 25, height: 20))
nameLabel.text = "🐶"
nameTextField.leftView = nameLabel
nameTextField.leftViewMode = .always
let ownerLabel = UILabel(frame: CGRect(x: 0, y: 0, width: 25, height: 20))
ownerLabel.text = "🧑"
ownerTextField.leftView = ownerLabel
ownerTextField.leftViewMode = .always
}
// MARK: IBActions
@IBAction func cancelBarButtonItemAction() {
self.delegate?.addPhotoViewController(self, didFinishWithError: false)
}
@IBAction func doneBarButtonItemAction() {
viewModel.addImage(title: nameTextField.text, owner: ownerTextField.text) { success in
self.delegate?.addPhotoViewController(self, didFinishWithError: !success)
}
}
}
<file_sep>import UIKit
import Vision
class AddPhotoViewModel {
private let feedDownloader: FeedDownloader
let image: UIImage
init(image: UIImage, feedDownloader: FeedDownloader = FeedDownloader()) {
self.image = image
self.feedDownloader = feedDownloader
}
func addImage(title: String?, owner: String?, completion: @escaping (Bool) -> Void) {
verifyPhoto { valid in
if valid, let photo = self.createPhoto(title: title, author: owner) {
self.feedDownloader.photos.insert(photo, at: 0)
completion(true)
} else {
completion(false)
}
}
}
private func createPhoto(title: String?, author: String?) -> Photo? {
do {
guard let data = image.pngData() else { return nil }
let documentDirURL = try FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: true)
let fileName = UUID().uuidString
let fileURL = documentDirURL.appendingPathComponent(fileName).appendingPathExtension("png")
try data.write(to: fileURL)
let photo = Photo(URL: fileURL, title: title, author: author, aspectRatio: Float(image.size.width / image.size.height))
return photo
} catch {
print(error)
}
return nil
}
private func verifyPhoto(_ completion: @escaping (Bool) -> Void) {
DispatchQueue.global().async {
do {
let model = try VNCoreMLModel(for: CatOrDog().model)
let request = VNCoreMLRequest(model: model)
let handler = VNImageRequestHandler(cgImage: self.image.cgImage!)
try handler.perform([request])
if let classification = request.results?.first as? VNClassificationObservation,
classification.identifier == "Dog" {
DispatchQueue.main.async { completion(true) }
} else {
throw NSError(domain: "dogcommunity.photo.cat", code: 0)
}
} catch {
DispatchQueue.main.async {
completion(false)
}
}
}
}
}
<file_sep>import UIKit
import AlamofireImage
import FontAwesome_swift
class PhotoListCollectionViewCell: UICollectionViewCell {
private static var placeholderImage = UIImage.fontAwesomeIcon(name: .image, style: .solid, textColor: .lightGray, size: CGSize(width: 100, height: 100))
@IBOutlet private var imageView: UIImageView!
@IBOutlet private var placeholderImageView: UIImageView!
@IBOutlet private var titleLabel: UILabel!
@IBOutlet private var authorLabel: UILabel!
override func awakeFromNib() {
super.awakeFromNib()
placeholderImageView.image = PhotoListCollectionViewCell.placeholderImage
}
override func prepareForReuse() {
placeholderImageView.alpha = 1
imageView.image = nil
titleLabel.text = nil
authorLabel.text = nil
}
func configure(with photo: Photo) {
titleLabel.text = photo.title.map { "🐶 " + $0 }
authorLabel.text = photo.author.map { "🧑 " + $0 }
let imageTransition = UIImageView.ImageTransition.custom(duration: 0.5, animationOptions: .transitionCrossDissolve, animations: { imageView, image in
imageView.image = image
self.placeholderImageView.alpha = 0
}, completion: nil)
self.imageView.af.setImage(withURL: photo.URL, imageTransition: imageTransition) { _ in
self.placeholderImageView.alpha = 0
}
}
}
<file_sep>import UIKit
import CHTCollectionViewWaterfallLayout
private let cellNibName = "PhotoListCollectionViewCell"
protocol PhotoListViewControllerDelegate: class {
func photoListViewControllerDidSelect(_ photo: Photo)
func photoListViewControllerDidSelectAddPhoto()
}
class PhotoListViewController: UIViewController, UICollectionViewDataSource, CHTCollectionViewDelegateWaterfallLayout, PhotoListViewModelDelegate {
@IBOutlet private var collectionView: UICollectionView!
@IBOutlet private var emptyView: UIView!
@IBOutlet private var refreshControl: UIRefreshControl!
var viewModel: PhotoListViewModel!
weak var delegate: PhotoListViewControllerDelegate?
override func viewDidLoad() {
super.viewDidLoad()
let layout = CHTCollectionViewWaterfallLayout()
layout.columnCount = columnCount
layout.minimumColumnSpacing = 10
layout.minimumInteritemSpacing = 10
layout.sectionInset = UIEdgeInsets(top: 10, left: 10, bottom: 10, right: 10)
collectionView.collectionViewLayout = layout
collectionView.register(UINib(nibName: cellNibName, bundle: nil), forCellWithReuseIdentifier: cellNibName)
collectionView.refreshControl = refreshControl
viewModel.delegate = self
reloadData()
}
override func traitCollectionDidChange(_ previousTraitCollection: UITraitCollection?) {
if let layout = self.collectionView.collectionViewLayout as? CHTCollectionViewWaterfallLayout {
layout.columnCount = columnCount
}
}
private var columnCount: Int {
if self.traitCollection.horizontalSizeClass == .regular {
return 3
} else if self.traitCollection.verticalSizeClass == .compact {
return 4
}
return 2
}
// MARK: Public
func reloadData() {
viewModel.load()
}
// MARK: IBActions
@IBAction private func refreshControlValueChanged() {
reloadData()
}
@IBAction private func addBarButtonItemAction() {
self.delegate?.photoListViewControllerDidSelectAddPhoto()
}
// MARK: UICollectionViewDataSource
func numberOfSections(in collectionView: UICollectionView) -> Int {
return 1
}
func collectionView(_ collectionView: UICollectionView, numberOfItemsInSection section: Int) -> Int {
return viewModel.numberOfItems
}
func collectionView(_ collectionView: UICollectionView, cellForItemAt indexPath: IndexPath) -> UICollectionViewCell {
let cell = collectionView.dequeueReusableCell(withReuseIdentifier: cellNibName, for: indexPath) as! PhotoListCollectionViewCell
let photo = viewModel.photos[indexPath.row]
cell.configure(with: photo)
return cell
}
// MARK: UICollectionViewDelegate
func collectionView(_ collectionView: UICollectionView, didSelectItemAt indexPath: IndexPath) {
let photo = viewModel.photos[indexPath.row]
self.delegate?.photoListViewControllerDidSelect(photo)
}
// MARK: CHTCollectionViewDelegateWaterfallLayout
func collectionView(_ collectionView: UICollectionView!, layout collectionViewLayout: UICollectionViewLayout!, sizeForItemAt indexPath: IndexPath!) -> CGSize {
let photo = viewModel.photos[indexPath.row]
guard let aspectRatio = photo.aspectRatio else {
return .zero
}
return CGSize(width: CGFloat(aspectRatio), height: 1)
}
// MARK: PhotoListViewModelDelegate
func viewModelDidChangeStateTo(_ viewState: PhotoListViewModel.ViewState) {
DispatchQueue.main.async {
switch viewState {
case .`init`:
self.emptyView.isHidden = true
self.refreshControl.makeSureIsNotRefreshing()
case .loading:
self.emptyView.isHidden = true
self.refreshControl.makeSureIsRefreshing()
case .empty:
self.emptyView.isHidden = false
self.refreshControl.makeSureIsNotRefreshing()
case .ready:
self.emptyView.isHidden = true
self.collectionView.reloadData()
self.refreshControl.makeSureIsNotRefreshing()
}
}
}
}
<file_sep>source 'https://github.com/CocoaPods/Specs.git'
platform :ios, '13.0'
inhibit_all_warnings!
target 'DogCommunity' do
use_frameworks!
pod 'AlamofireImage', '4.0.3'
pod 'FontAwesome.swift', '1.8.3'
pod 'CHTCollectionViewWaterfallLayout', '0.9.8'
pod 'IQKeyboardManagerSwift', '6.5.5'
target 'DogCommunityTests' do
inherit! :search_paths
pod 'Quick', '2.2.0'
pod 'Nimble', '8.0.5'
end
end
| 01c63a02b924c8f88aa448719c35121fbe0d8e3b | [
"Swift",
"Ruby",
"Markdown"
] | 15 | Swift | piotrtobolski/DogCommunity | b36552e066f896ac676b48dc2650f53c2ecc3fd9 | 7202b2018036422f12d130fa3e9c086f081ff24f |
refs/heads/master | <file_sep>import setuptools
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(
name='tf2-initializer',
version='0.5',
scripts=['tf2-initializer'],
author='<NAME>',
author_email='<EMAIL>',
description='A tf2 project initializer',
long_description=long_description,
long_description_content_type='text/markdown',
install_requires=[
'importlib_resources'
],
url='https://github.com/zankner/tf2-initializer',
packages=setuptools.find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'Operating System :: Unix'
]
)
<file_sep>#!/usr/bin/env python
import os
from importlib_resources import path
import shutil
import actuation
import preprocess
import models
dir_name = input('What is the name of your project (return to use current dir): ')
if not os.path.exists(os.path.join(os.getcwd(), dir_name)):
os.mkdir(os.path.join(os.getcwd(), dir_name))
if not os.path.exists(os.path.join(os.getcwd(), dir_name, 'actuation')):
os.mkdir(os.path.join(os.getcwd(), dir_name, 'actuation'))
if not os.path.exists(os.path.join(os.getcwd(), dir_name, 'models')):
os.mkdir(os.path.join(os.getcwd(), dir_name, 'models'))
if not os.path.exists(os.path.join(os.getcwd(), dir_name, 'preprocess')):
os.mkdir(os.path.join(os.getcwd(), dir_name, 'preprocess'))
if not os.path.exists(os.path.join(os.getcwd(), dir_name, 'checkpoints')):
os.mkdir(os.path.join(os.getcwd(), dir_name, 'checkpoints'))
if not os.path.exists(os.path.join(os.getcwd(), dir_name, 'data')):
os.mkdir(os.path.join(os.getcwd(), dir_name, 'data'))
if not os.path.exists(os.path.join(os.getcwd(), dir_name, 'data', 'train')):
os.mkdir(os.path.join(os.getcwd(), dir_name, 'data', 'train'))
if not os.path.exists(os.path.join(os.getcwd(), dir_name, 'data', 'val')):
os.mkdir(os.path.join(os.getcwd(), dir_name, 'data', 'val'))
with path(actuation, 'train.py') as train:
shutil.copy(train,
os.path.join(os.getcwd(), dir_name, 'actuation', 'train.py'))
with path(models, 'network.py') as network:
shutil.copy(network,
os.path.join(os.getcwd(), dir_name, 'models', 'network.py'))
with path(preprocess, 'process.py') as process:
shutil.copy(process,
os.path.join(os.getcwd(), dir_name, 'preprocess', 'process.py'))
<file_sep># tf2-initializer
Project sturcture to initialize tf2 projects
## Install
To install the current version run the following:
`pip install tf2-initializer`
This package currently only supports python versions >= 3.7.
## Usage
Running tf2-initializer will generate a project structure for building a custom subclassed model in tensorflow 2.0.
This project includes support for pre-processing data, defining a model, and training a model.
To run the initializer enter the following into your terminal:
`tf2-initializer`
Running the above will prompt you to enter the name of the directory where you would like the tf2-initializer to be spawned.
- If you would like to use the current directory as the root of your project, simply hit return
| 005e0cc75539841ea86b3a78a980510ec1a87a48 | [
"Markdown",
"Python"
] | 3 | Python | zankner/tf2-initializer | 00dc6e0c982546780762934420f4fd4d7d429b06 | aaaf896124be278b3795f3aa0697afb93b3aa2e6 |
refs/heads/master | <file_sep><?php
namespace Spy\TimelineBundle\Entity;
use Spy\Timeline\Model\Timeline as BaseTimeline;
/**
* Timeline entity for Doctrine ORM.
*
* @uses BaseTimeline
* @author <NAME> <<EMAIL>>
*/
class Timeline extends BaseTimeline
{
}
<file_sep>SpyTimelineBundle 2.0
=====================
Integrate [timeline](https://github.com/stephpy/timeline) on Symfony2.
[Milestone for release](https://github.com/stephpy/TimelineBundle/issues?milestone=1&page=1&state=open)
==============================
Supports 2.* Symfony Framework.
[](http://travis-ci.org/stephpy/TimelineBundle)
Build timeline/wall for an entity easily. [Demo Application](https://github.com/stephpy/timeline-app)
There is too a notification system, you can easily know how many unread notifications you have, mark as read one/all, etc ...
[Read the Documentation](https://github.com/stephpy/TimelineBundle/blob/master/Resources/doc/index.markdown)
Launch tests:
```
composer install --dev
bin/atoum -d Tests/Units
```
<file_sep><?php
namespace Spy\TimelineBundle\Document;
use Spy\Timeline\Model\ActionComponent as BaseActionComponent;
/**
 * ActionComponent Document for Doctrine ODM.
 *
 * @uses BaseActionComponent
 * @author <NAME> <<EMAIL>>
 */
class ActionComponent extends BaseActionComponent
{
    // Intentionally empty: all behaviour lives in the base model. This
    // concrete subclass exists only so the bundle's MongoDB ODM mapping
    // can target a class defined inside the bundle.
}
<file_sep><?php
namespace Spy\TimelineBundle\Document;
use Spy\Timeline\Model\Timeline as BaseTimeline;
/**
 * Timeline Document for Doctrine ODM.
 *
 * @uses BaseTimeline
 * @author <NAME> <<EMAIL>>
 */
class Timeline extends BaseTimeline
{
    // Intentionally empty: all behaviour lives in the base model. This
    // concrete subclass exists only so the bundle's MongoDB ODM mapping
    // can target a class defined inside the bundle.
}
<file_sep><?php
namespace Spy\TimelineBundle\DependencyInjection\Compiler;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\Compiler\CompilerPassInterface;
/**
 * AddRegistryCompilerPass
 *
 * Compiler pass that injects every available Doctrine registry service
 * (ORM "doctrine" and/or ODM "doctrine_mongodb") into whichever
 * spy_timeline action manager services are defined in the container.
 *
 * @uses CompilerPassInterface
 * @author <NAME> <<EMAIL>>
 */
class AddRegistryCompilerPass implements CompilerPassInterface
{
    /**
     * {@inheritdoc}
     */
    public function process(ContainerBuilder $container)
    {
        foreach (array('orm', 'odm') as $driver) {
            // Previously the inner loop reused the variable $id, shadowing
            // this one; distinct names keep the two service ids apart.
            $managerId = sprintf('spy_timeline.action_manager.%s', $driver);

            if (!$container->hasDefinition($managerId)) {
                continue;
            }

            $actionManager = $container->getDefinition($managerId);

            // Register each Doctrine registry that exists in this project.
            foreach (array('doctrine', 'doctrine_mongodb') as $registryId) {
                if ($container->hasDefinition($registryId)) {
                    $registry = $container->getDefinition($registryId);
                    $actionManager->addMethodCall('addRegistry', array($registry));
                }
            }
        }
    }
}
<file_sep><?php
namespace Spy\TimelineBundle\Entity;
use Spy\Timeline\Model\ActionComponent as BaseActionComponent;
/**
 * ActionComponent entity for Doctrine ORM.
 *
 * @uses BaseActionComponent
 * @author <NAME> <<EMAIL>>
 */
class ActionComponent extends BaseActionComponent
{
    // Intentionally empty: all behaviour lives in the base model. This
    // concrete subclass exists only so the bundle's ORM mapping can target
    // a class defined inside the bundle.
}
<file_sep><?php
namespace Spy\TimelineBundle\Entity;
use Spy\Timeline\Model\Component as BaseComponent;
/**
 * Component entity for Doctrine ORM.
 *
 * @uses BaseComponent
 * @author <NAME> <<EMAIL>>
 */
class Component extends BaseComponent
{
    // Intentionally empty: all behaviour lives in the base model. This
    // concrete subclass exists only so the bundle's ORM mapping can target
    // a class defined inside the bundle.
}
<file_sep><?php
namespace Spy\TimelineBundle\Driver\Doctrine;
use Doctrine\Common\Persistence\ObjectManager;
use Doctrine\Common\Persistence\ManagerRegistry;
use Spy\Timeline\Model\ActionInterface;
use Spy\Timeline\ResultBuilder\ResultBuilderInterface;
use Spy\Timeline\Driver\AbstractActionManager as BaseActionManager;
/**
 * AbstractActionManager
 *
 * Base class shared by the Doctrine ORM and ODM action managers. It owns
 * the common persistence logic: saving actions, creating and flushing
 * components, and resolving an arbitrary model into a (class, identifier)
 * pair by consulting the Doctrine metadata of the registered registries.
 *
 * @author <NAME> <<EMAIL>>
 */
abstract class AbstractActionManager extends BaseActionManager
{
    /**
     * Doctrine object manager used for all persist/flush operations.
     *
     * @var ObjectManager
     */
    protected $objectManager;

    /**
     * @var ResultBuilderInterface
     */
    protected $resultBuilder;

    /**
     * Fully-qualified class name used when instantiating actions.
     *
     * @var string
     */
    protected $actionClass;

    /**
     * Fully-qualified class name used when instantiating components.
     *
     * @var string
     */
    protected $componentClass;

    /**
     * Fully-qualified class name used when instantiating action components.
     *
     * @var string
     */
    protected $actionComponentClass;

    /**
     * Doctrine registries consulted by getClassMetadata(); populated via
     * addRegistry() (see the bundle's AddRegistryCompilerPass).
     *
     * @var array
     */
    protected $registries;

    /**
     * @param ObjectManager          $objectManager        objectManager
     * @param ResultBuilderInterface $resultBuilder        resultBuilder
     * @param string                 $actionClass          actionClass
     * @param string                 $componentClass       componentClass
     * @param string                 $actionComponentClass actionComponentClass
     */
    public function __construct(ObjectManager $objectManager, ResultBuilderInterface $resultBuilder, $actionClass, $componentClass, $actionComponentClass)
    {
        $this->objectManager = $objectManager;
        $this->resultBuilder = $resultBuilder;
        $this->actionClass = $actionClass;
        $this->componentClass = $componentClass;
        $this->actionComponentClass = $actionComponentClass;
        $this->registries = array();
    }

    /**
     * Persists and flushes the action, then hands it to the delivery
     * mechanism implemented in the base class.
     *
     * {@inheritdoc}
     */
    public function updateAction(ActionInterface $action)
    {
        $this->objectManager->persist($action);
        $this->objectManager->flush();

        $this->deployActionDependOnDelivery($action);
    }

    /**
     * {@inheritdoc}
     */
    public function createComponent($model, $identifier = null, $flush = true)
    {
        list ($model, $identifier, $data) = $this->resolveModelAndIdentifier($model, $identifier);

        // After resolution the model must be a class name and the
        // identifier a non-empty scalar or array.
        if (empty($model) || null === $identifier || '' === $identifier) {
            if (is_array($identifier)) {
                $identifier = implode(', ', $identifier);
            }

            throw new \Exception(sprintf('To create a component, you have to give a model (%s) and an identifier (%s)', $model, $identifier));
        }

        $component = new $this->componentClass();
        $component->setModel($model);
        $component->setData($data);
        $component->setIdentifier($identifier);

        $this->objectManager->persist($component);

        if ($flush) {
            $this->flushComponents();
        }

        return $component;
    }

    /**
     * {@inheritdoc}
     */
    public function flushComponents()
    {
        $this->objectManager->flush();
    }

    /**
     * {@inheritdoc}
     */
    public function addRegistry(ManagerRegistry $registry)
    {
        $this->registries[] = $registry;
    }

    /**
     * resolveModelAndIdentifier
     *
     * Normalises the (model, identifier) pair: when $model is an object
     * managed by Doctrine, its class name and identifier field value(s)
     * are extracted from the class metadata; otherwise the object must
     * expose a getId() method.
     *
     * @param mixed $model      model
     * @param mixed $identifier identifier
     *
     * @return array(string, array|string)
     */
    protected function resolveModelAndIdentifier($model, $identifier)
    {
        if (!is_object($model) && (null === $identifier || '' === $identifier)) {
            throw new \LogicException('Model has to be an object or a scalar + an identifier in 2nd argument');
        }

        $data = null;

        if (is_object($model)) {
            $data = $model;
            $modelClass = get_class($model);
            $metadata = $this->getClassMetadata($modelClass);

            // if object is linked to doctrine
            if (null !== $metadata) {
                $fields = $metadata->getIdentifier();

                if (!is_array($fields)) {
                    $fields = array($fields);
                }

                // Composite primary keys keep the identifier as an array.
                $many = count($fields) > 1;

                $identifier = array();
                foreach ($fields as $field) {
                    $getMethod = sprintf('get%s', ucfirst($field));
                    $value = (string) $model->{$getMethod}();

                    //Do not use it: https://github.com/stephpy/TimelineBundle/issues/59
                    //$value = (string) $metadata->reflFields[$field]->getValue($model);

                    if (empty($value)) {
                        throw new \Exception(sprintf('Field "%s" of model "%s" return an empty result, model has to be persisted.', $field, $modelClass));
                    }

                    $identifier[$field] = $value;
                }

                if (!$many) {
                    $identifier = current($identifier);
                }

                $model = $metadata->name;
            } else {
                // Non-Doctrine object: fall back to a conventional getId().
                if (!method_exists($model, 'getId')) {
                    throw new \LogicException('Model must have a getId method.');
                }

                $identifier = $model->getId();
                $model = $modelClass;
            }
        }

        if (is_scalar($identifier)) {
            $identifier = (string) $identifier;
        } elseif (!is_array($identifier)) {
            throw new \InvalidArgumentException('Identifier has to be a scalar or an array');
        }

        return array($model, $identifier, $data);
    }

    /**
     * Returns the Doctrine class metadata for $class from the first
     * registry that manages it, or null when none does.
     */
    protected function getClassMetadata($class)
    {
        foreach ($this->registries as $registry) {
            if ($manager = $registry->getManagerForClass($class)) {
                return $manager->getClassMetadata($class);
            }
        }

        return null;
    }
}
| 085dbc34bfbb3d0f9c5ad567b840a119130ee1a0 | [
"Markdown",
"PHP"
] | 8 | PHP | infogenie/TimelineBundle | ed8359f9ca457f6f961a3f779385de6023521c0e | 685cac8872f76830104f129d1fe69453b533a4cf |
refs/heads/main | <file_sep>let c=true;
// Toggle the profile dropdown ("DP"); the module-level flag `c` tracks
// whether the dropdown is currently hidden (true = hidden).
const profile = () => {
    const dropdown = document.getElementById('DP');
    dropdown.style.display = c ? 'block' : 'none';
    c = !c;
}
// Render the logged-in user's details onto the dashboard.
// (The stray debug console.log that preceded this function was removed.)
// Values originate from the server via sessionStorage; they are written
// with textContent rather than innerHTML so a crafted name/email cannot
// inject markup into the page.
const renderData = () => {
    const user = JSON.parse(sessionStorage.getItem('userData'));
    document.getElementById('name').textContent = `Name : ${user.name}`;
    document.getElementById('email').textContent = `Email : ${user.email}`;
    document.getElementById('accountNumber').textContent = `Account Number: ${user.accountNumber}`;
    document.getElementById('balance').textContent = `Balance : ${user.balance}`;
    document.getElementById('welcome').textContent = `Welcome ${user.name}`;
}
// Copy the server-rendered user payload (embedded by the dashboard
// template in two hidden DOM nodes) into sessionStorage, remove the
// nodes, then render the dashboard.
// Fixes: a local variable named `userData` shadowed this function's own
// name, and two leftover debug console.log calls were removed.
const userData = () => {
    const userEl = document.getElementById('user');
    const allUsersEl = document.getElementById('allUsers');
    const serializedUser = userEl.innerHTML;
    const serializedAllUsers = allUsersEl.innerHTML;
    sessionStorage.setItem('userData', serializedUser);
    sessionStorage.setItem('allUsersArray', serializedAllUsers);
    userEl.remove();
    allUsersEl.remove();
    renderData();
}
// Log out: clear all client session state (user payload, customer list,
// pending pay-to account number) stored in sessionStorage.
const logout = () => {
    sessionStorage.clear();
}
// let c=1;
// document.getElementById('profile').addEventListener('click',()=> {
// if(c){
// document.getElementById('DP').style.display='none';
// c=0;
// }else {
// c=1;
// document.getElementById('DP').style.display='none';
// }
// })<file_sep>module.exports={
MongoURI : 'mongodb+srv://budhranikunal9:Kunal@2001@bank123.0lmct.mongodb.net/Sparks?retryWrites=true&w=majority'
}<file_sep>const allCustomers = () => {
const allCustomers = JSON.parse(sessionStorage.getItem('allUsersArray'))
const customerBody = document.getElementById('customerBody')
let html = ""
console.log(allCustomers)
allCustomers.forEach((customer)=>{
html+=`<tr>
<td>
${customer.name}
</td>
<td>
${customer.email}
</td>
<td>
${customer.accountNumber}
</td>
<td>
<button onclick="payRedirect(${customer.accountNumber})">Pay</button>
</td>
</tr>`
})
customerBody.innerHTML = html;
}
const payRedirect = ( accountNumber ) => {
sessionStorage.setItem('accountNumber', accountNumber);
window.location.href= "/pay"
// windows.location.href = "/pay"
}<file_sep>const express = require('express');
const mongoose = require('mongoose');
const flash = require('connect-flash');
const session = require('express-session');
const passport =require('passport');
const app = express();
//DB Config
const db = require('./config/keys').MongoURI
//Connect Mongo
mongoose.connect(db, {useNewUrlParser : true , useUnifiedTopology : true , useFindAndModify: false }).then(()=>console.log('MongoDB Connected')).catch(err=>console.log(err));
//Passport Config
require('./config/passport')(passport);
//ejs
app.set('view engine','ejs');
//To include script in js
app.use(express.static(__dirname));
//MIDDLE WARES
// BodyParser
app.use(express.urlencoded({extended: false}));
app.use(express.json({extended : false}))
//Express session
app.use(session({
secret:'secret',
resave:true,
saveUninitialized:true
}))
//Passport Middleware
app.use(passport.initialize());
app.use(passport.session());
//connect flash
app.use(flash());
//GlobalVars
app.use((req,res,next)=>{
res.locals.success_msg = req.flash('success_msg');
res.locals.error_msg = req.flash('error_msg');
res.locals.error = req.flash('error');
next();
})
//Routes
app.use('/',require('./routes/index'));
app.use('/users',require('./routes/users'));
const Port = process.env.PORT || 3000;
app.listen(Port , console.log('Hey there you are on port 3000'));<file_sep>let user = JSON.parse(sessionStorage.getItem('userData'));
// `user` is parsed from sessionStorage just above. Show the sender's
// current balance and copy the raw payload into the form's hidden "user"
// field so the POST /pay handler receives it with the submission.
document.getElementById('balance').innerHTML = 'Balance in your account is ' + user.balance
document.getElementById('user').value = sessionStorage.getItem('userData');
// Pre-fill the payment form's account-number input from the value that
// the customer list's "Pay" button stashed in sessionStorage.
const getAccountNumber = () => {
    const stored = sessionStorage.getItem('accountNumber');
    document.getElementById('accountNumber').value = parseInt(stored);
}
// const fetchData = () => {
// let accountNumber = document.getElementById('accountNumber').value;
// let amount = document.getElementById('amount').value;
// let user = JSON.parse(sessionStorage.getItem('userData'))
// fetch('http://localhost:3000/pay', {
// method: "POST",
// headers: {
// 'Content-Type' : 'application/json'
// },
// body: {
// accountNumber : accountNumber,
// amount : amount,
// user : user
// }
// })
// }<file_sep>const express = require('express');
const router = express.Router();
const {ensureAuthenticated} = require('../config/auth')
const fetch = require('node-fetch');
//User Model
const User = require('../models/User');
// GET / — public landing page
router.get('/',(req,res)=>{
    res.render('welcome');
});
// GET /dashboard — renders the dashboard with the authenticated user and
// the full customer list, both JSON-serialised for the client-side script
// (see dashboard.js, which moves them into sessionStorage).
router.get('/dashboard', (req, res) => {
    User.find({})
        .then((allUsers) => {
            res.render('dashboard', {
                user: JSON.stringify(req.user),
                allUsers: JSON.stringify(allUsers),
            });
        })
        // Previously there was no catch: a DB failure raised an unhandled
        // promise rejection and left the request hanging forever.
        .catch((err) => {
            console.log(err);
            res.status(500).send('Unable to load dashboard');
        });
});
// GET /pay — renders the payment form
router.get('/pay',(req,res)=> {
    res.render('pay');
})
// POST /pay — transfer `amount` from the logged-in user's account to the
// account identified by `accountNumber`, recording a Credit/Debit entry
// in each party's history.
//
// NOTE(review): the debit and the credit are two independent updates with
// no transaction, so a crash between them can lose or duplicate money;
// the sender's balance also comes from the client-supplied `user` payload
// instead of being re-read from the database. Both should be reworked
// with a server-side lookup (req.user) and an atomic update.
//
// Fixes applied here: removed `updateHistory=user.history.push()` (an
// implicit global that pushed nothing), removed debug console.log calls,
// added a radix to parseInt, and added a catch on the initial query.
router.post('/pay', (req, res) => {
    const { accountNumber, amount, user } = req.body;
    const errors = [];
    // Reject unauthenticated requests before touching the payload.
    if (!user) {
        errors.push({ msg: 'Login To Pay' });
        return res.render('login', { errors });
    }
    const sender = JSON.parse(user);
    const balance = sender.balance;
    const selfAccountNumber = sender.accountNumber;
    // Basic input validation.
    if (!accountNumber || !amount) {
        errors.push({ msg: 'Fill All Fields' });
    }
    if (amount > balance) {
        errors.push({ msg: 'Insufficient Balance' });
    }
    if (amount < 0) {
        errors.push({ msg: 'Amount Cannot Be Negative' });
    }
    if (errors.length > 0) {
        return res.render('pay', { errors });
    }
    User.findOne({ accountNumber: accountNumber })
        .then((recipient) => {
            if (!recipient) {
                errors.push({ msg: 'Account Number does not exist.' });
                return res.render('pay', { errors });
            }
            // parseInt: both values may arrive as strings from the form.
            const updateBalance = parseInt(recipient.balance, 10) + parseInt(amount, 10);
            const selfUpdateBalance = parseInt(balance, 10) - parseInt(amount, 10);
            const date = new Date().toLocaleString("en-IN", { timeZone: "Asia/Kolkata" });
            const recipt = {
                dateAndTime: date,
                status: 'Credit',
                amount: amount
            };
            const selfRecipt = {
                dateAndTime: date,
                status: 'Debit',
                amount: amount
            };
            // Credit the recipient and append to their history.
            User.findOneAndUpdate({ accountNumber: accountNumber }, { balance: updateBalance })
                .then((updated) => {
                    const recipientHistory = updated.history;
                    recipientHistory.push(recipt);
                    return User.updateOne({ accountNumber: accountNumber }, { history: recipientHistory });
                })
                .catch(err => console.log(err));
            // Debit the sender, append to their history, then redirect.
            User.findOneAndUpdate({ accountNumber: selfAccountNumber }, { balance: selfUpdateBalance })
                .then((updated) => {
                    const senderHistory = updated.history;
                    senderHistory.push(selfRecipt);
                    User.updateOne({ accountNumber: selfAccountNumber }, { history: senderHistory })
                        .catch(err => console.log(err));
                    req.flash('success_msg', 'Payment Successfull');
                    res.redirect('/dashboard');
                })
                .catch(err => console.log(err));
        })
        .catch(err => console.log(err));
})
// GET /allCustomers — customer list page (the table itself is filled in
// client-side from the sessionStorage copy of the user list)
router.get('/allCustomers', (req,res) => {
    res.render('allCustomers')
})
// GET /history — transaction history page
router.get('/history',(req,res) => {
    res.render('history')
})
module.exports = router; | a581a463164328f157330daa822a5c55f2db712e | [
"JavaScript"
] | 6 | JavaScript | Kunal-Budhrani/Sparks-Bank | f0de7cf9aac0656740d55a68b76a4abbbe3ea7fb | 049206ae677f797350b78d5fcb1409ee4fc11565 |
refs/heads/master | <repo_name>pnisarg/selfCheckOutSystem<file_sep>/Database/Database Browser - Windows Only/readme.txt
All credit goes to http://sqlitebrowser.sourceforge.net/index.html
Simply run the exe and open the database.<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/App/BulkProduct.java
/*
* Creator: <NAME>
*
* Created on May 10, 2006
* Updated on January 17, 2008, September 12, 2012, September 30, 2012
*
* The BulkProduct class is for products with a BIC code. It implements the ProductInfo interface.
*/
package ca.utoronto.csc301.SelfCheckOut.App;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.SaleDiscountException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.SaleDiscountNegativeException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.SaleDiscountTooLargeException;
/**
 * The BulkProduct class represents a grocery product which is sold by weight,
 * such as produce, deli, meat, etc. The details of the product are stored at
 * the time of construction, and can be retrieved using accessor functions.
 *
 * All attributes except the flat-rate discount are fixed at construction
 * time and are therefore declared final.
 */
public class BulkProduct implements ProductInfo {

    /**
     * The flat rate discount for the product. This is the only mutable
     * attribute (see setFlatRateDiscount).
     */
    private double flatRateDiscount = 0;

    /**
     * The BIC representing the product's 5-digit bulk item code.
     */
    private final BIC myBIC;

    /**
     * The price per unit weight of the product.
     */
    private final double myUnitPrice;

    /**
     * The String description of the product.
     */
    private final String myDescription;

    /**
     * The tax rate applied to the product's price; 0 means untaxed.
     */
    private final double taxRate;

    /**
     * Per-unit discount when the item is on sale; 0 means not on sale.
     */
    private final double discount;

    /**
     * @param description
     *            The text description of the product.
     * @param BICcode
     *            A BIC representing the product's 5-digit bulk item code.
     * @param price
     *            The unit price of the product.
     * @throws SaleDiscountException
     *             Declared for uniformity with the other constructors; the
     *             zero discount used here never triggers it.
     */
    public BulkProduct(String description, BIC BICcode, double price)
            throws SaleDiscountException {
        this(description, BICcode, price, 0, 0);
    }

    /**
     * @param description
     *            The text description of the product.
     * @param BICcode
     *            A BIC representing the product's 5-digit bulk item code.
     * @param price
     *            The unit price of the product.
     * @param taxRate
     *            The amount of tax incurred.
     * @throws SaleDiscountException
     *             Declared for uniformity with the full constructor; the
     *             zero discount used here never triggers it.
     */
    public BulkProduct(String description, BIC BICcode, double price,
            double taxRate) throws SaleDiscountException {
        this(description, BICcode, price, taxRate, 0);
    }

    /**
     * @param description
     *            The text description of the product.
     * @param BICcode
     *            A BIC representing the product's 5-digit bulk item code.
     * @param price
     *            The unit price of the product.
     * @param taxRate
     *            The amount of tax incurred.
     * @param discount
     *            The discount on the unit price.
     * @throws SaleDiscountException
     *             If the discount is negative or larger than the unit price.
     */
    public BulkProduct(String description, BIC BICcode, double price,
            double taxRate, double discount) throws SaleDiscountException {
        this.taxRate = taxRate;
        myDescription = description;
        myBIC = BICcode;
        myUnitPrice = price;
        this.discount = discount;
        validateDiscount();
    }

    /**
     * Validate the discount. Invalid discounts include negative discounts or
     * discounts that are too large.
     *
     * @throws SaleDiscountException
     *             The type of discount error.
     */
    private void validateDiscount() throws SaleDiscountException {
        if (this.discount > myUnitPrice) {
            throw new SaleDiscountTooLargeException();
        }
        if (this.discount < 0) {
            throw new SaleDiscountNegativeException();
        }
    }

    /**
     * Accessor function returning the product's BIC.
     */
    public BIC getBIC() {
        return myBIC;
    }

    /**
     * {@inheritDoc} For a bulk product this is its BIC.
     */
    public Code getCode() {
        return getBIC();
    }

    /**
     * {@inheritDoc} For a bulk product this is the price per unit weight.
     */
    public double getPrice() {
        return myUnitPrice;
    }

    /**
     * {@inheritDoc}
     */
    public String getDescription() {
        return myDescription;
    }

    /**
     * {@inheritDoc} True whenever the tax rate is positive.
     */
    public boolean isTaxed() {
        return taxRate > 0;
    }

    /**
     * {@inheritDoc}
     */
    public double getTaxRate() {
        return taxRate;
    }

    /**
     * {@inheritDoc} True whenever a positive discount was supplied.
     */
    public boolean isOnSale() {
        return discount > 0;
    }

    /**
     * {@inheritDoc}
     */
    public double getDiscount() {
        return discount;
    }

    /**
     * {@inheritDoc}
     */
    public double getFlatRateDiscount() {
        return this.flatRateDiscount;
    }

    /**
     * {@inheritDoc}
     */
    public void setFlatRateDiscount(double flatRateDiscount) {
        this.flatRateDiscount = flatRateDiscount;
    }
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/Exceptions/InvalidWeightException.java
package ca.utoronto.csc301.SelfCheckOut.Exceptions;
/**
 * An exception that is thrown when an impossible weight is recorded by the
 * sensor. For example a weight can not be negative or 0 for an item.
 *
 * @author Nisarg
 */
public class InvalidWeightException extends Exception {

    private static final long serialVersionUID = 1L;

    /** Creates the exception with no detail message. */
    public InvalidWeightException() {
        super();
    }

    /** @param message human-readable description of the invalid weight */
    public InvalidWeightException(String message) {
        super(message);
    }

    /**
     * @param message human-readable description of the invalid weight
     * @param cause   the underlying error that triggered this exception
     */
    public InvalidWeightException(String message, Throwable cause) {
        super(message, cause);
    }

    /** @param cause the underlying error that triggered this exception */
    public InvalidWeightException(Throwable cause) {
        super(cause);
    }
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/IntegrationTests/InstantiatingSelfCheckOut.java
package ca.utoronto.csc301.SelfCheckOut.IntegrationTests;
import ca.utoronto.csc301.SelfCheckOut.App.*;
import ca.utoronto.csc301.SelfCheckOut.Devices.*;
import static org.junit.Assert.*;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
 * Integration tests for constructing a SelfCheckOut and retrieving its
 * collaborating devices.
 */
public class InstantiatingSelfCheckOut {

    // Fixtures recreated before every test by setUp().
    SelfCheckOut firstSCO;
    BaggingArea firstBA;
    Database firstPDB;
    PaymentCollector firstPC;

    @Before
    public void setUp() throws Exception {
        // create a SelfCheckOut wired to the test database
        firstBA = new BaggingArea();
        firstPC = new PaymentCollector();
        firstPDB = new Database("Database/TestSelfCheckOut.db");
        firstSCO = new SelfCheckOut(firstBA, firstPC, firstPDB);
    }

    @After
    public void tearDown() throws Exception {
        firstSCO = null;
        firstBA = null;
        firstPDB = null;
        firstPC = null;
    }

    /** The no-argument constructor must create all collaborator devices. */
    @Test
    public void instantiateSCOWithNoArgs() throws Exception {
        SelfCheckOut noArgSelfCheckOut = new SelfCheckOut();
        assertNotNull(noArgSelfCheckOut.getBaggingArea());
        assertNotNull(noArgSelfCheckOut.getProductDB());
        assertNotNull(noArgSelfCheckOut.getPaymentCollector());
    }

    /*
     * Don't need this, because it's been called in the setUp
     *
     * public void testSelfCheckOutBaggingAreaPaymentCollectorProductDB() {
     * fail("Not yet implemented"); }
     */

    /*
     * This is not implemented, because the notifyBaggingAreaEvent only changes
     * a private variable.
     *
     * public void testNotifyBaggingAreaEvent() { fail("Not yet implemented"); }
     */

    @Test
    public void gettingBaggingArea() {
        assertSame(firstBA, firstSCO.getBaggingArea());
    }

    @Test
    public void gettingGetProductDB() {
        assertSame(firstPDB, firstSCO.getProductDB());
    }

    @Test
    public void gettingGetPaymentCollector() {
        assertSame(firstPC, firstSCO.getPaymentCollector());
    }

    /**
     * Test default ID of a selfcheckout.
     */
    @Test
    public void getSCOidDefault() {
        // Exact int comparison; the previous 3-arg delta form was ambiguous.
        assertEquals(0, firstSCO.getSCOid());
    }

    /**
     * Test ID assigned to a selfcheckout.
     *
     * The previous version swallowed exceptions in an empty catch block, so
     * the test passed vacuously if construction failed; any exception now
     * propagates and fails the test.
     */
    @Test
    public void getSCOid() throws Exception {
        SelfCheckOut secondSCO = new SelfCheckOut(firstBA, firstPC,
                firstPDB, 1);
        assertEquals(1, secondSCO.getSCOid());
    }
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/Devices/BaggingArea.java
/*
* Creator: <NAME>
*
* Created on May 10, 2006
* Updated on January 17, 2008, September 2012
*
* The BaggingArea class can simulate the action of adding an item to the bagging area, and detect
* a change in weight.
*/
package ca.utoronto.csc301.SelfCheckOut.Devices;
import java.util.Vector;
/**
 * The BaggingArea class represents a wrapper for a hardware driver of the
 * sensors in a bagging area. Because we're not really using such a device, our
 * class provides methods to change the weight reported by the bagging area, and
 * to zero that weight if we're resetting the system.<br>
 * <br>
 * The BaggingArea uses an <i>Observer</i> (or <i>Listener</i>) design pattern.
 * Objects which implement the <code>BaggingAreaListener</code> interface may
 * use the <code>attach()</code> method to register with the BaggingArea. When a
 * weight change event occurs, the BaggingArea reports the event to its
 * observers using their notify methods. In this example, we do not support
 * detaching from the BaggingArea.<br>
 * <br>
 * In the business logic of our system (implemented in SelfCheckOut) we use
 * known product weights to ascertain if the customer has placed an item in the
 * bagging area after scanning it. Until we see a (correct) weight change, we
 * would keep telling them to bag the item, and we would refuse to accept scans
 * of additional items.
 */
public class BaggingArea {

    /**
     * totalWeight records the current weight being sensed in the BaggingArea.
     */
    private double totalWeight;

    /**
     * observers is a Vector of BaggingAreaListeners which will be notified of
     * weight change events.
     */
    private Vector<BaggingAreaListener> observers;

    /**
     * This simple constructor initializes a zero weight and an empty set of
     * observers.
     */
    public BaggingArea() {
        observers = new Vector<BaggingAreaListener>();
        totalWeight = 0;
    }

    /**
     * Returns the current weight being registered by the bagging area 'sensor'.
     */
    public double getTotalWeight() {
        return totalWeight;
    }

    /**
     * changeWeight() is the method we use to simulate the bagging area scales
     * detecting a change in weight. This is a gross simplification, but will
     * suffice for our system. Note that a negative weight corresponds to the
     * customer removing an item from the bagging area.
     * (The old javadoc claimed a small time delay was included; the code
     * contains none, so that sentence was removed.)
     *
     * @param weight
     *            The amount of weight change in this event.
     */
    public void changeWeight(double weight) {
        totalWeight = totalWeight + weight;
        BaggingAreaEvent baEvent = new BaggingAreaEvent(totalWeight, weight);
        // notify the observers that the weight has changed
        notifyObserver(baEvent);
    }

    /**
     * The zeroWeight() method zeroes the scales of the BaggingArea, simulating
     * the customer leaving with their purchases.
     */
    public void zeroWeight() {
        changeWeight(-totalWeight);
    }

    /**
     * notifyObserver() passes the information of the weight change event along
     * to the observer objects as a BaggingAreaEvent, which includes the current
     * weight in the the bagging area as well as the most recent change. The
     * information is passed by calling the notifyBaggingAreaEvent() method on
     * the Listener.
     *
     * @param event
     *            An event containing the current weight in the area and the
     *            most recent change.
     */
    private void notifyObserver(BaggingAreaEvent event) {
        // Enhanced for loop replaces the manual index loop.
        for (BaggingAreaListener observer : observers) {
            observer.notifyBaggingAreaEvent(this, event);
        }
    }

    /**
     * The attach() method registers a BaggingAreaListener to receive all future
     * BaggingAreaEvents. Null listeners are silently ignored.
     *
     * @param bal
     *            The Listener (often the calling object) to be attached.
     */
    public void attach(BaggingAreaListener bal) {
        if (bal != null) {
            observers.add(bal);
        }
    }
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/Exceptions/SaleDiscountTooLargeException.java
package ca.utoronto.csc301.SelfCheckOut.Exceptions;
/**
 * An exception that indicates an invalid sale has occurred. This happens if the
 * discount is larger than the price of the item.
 *
 * @author <NAME>
 */
public class SaleDiscountTooLargeException extends SaleDiscountException {

    private static final long serialVersionUID = 1L;

    /** Creates the exception with no detail message. */
    public SaleDiscountTooLargeException() {
        super();
    }

    /** @param message human-readable description of the oversized discount */
    public SaleDiscountTooLargeException(String message) {
        super(message);
    }

    /**
     * @param message human-readable description of the oversized discount
     * @param cause   the underlying error that triggered this exception
     */
    public SaleDiscountTooLargeException(String message, Throwable cause) {
        super(message, cause);
    }

    /** @param cause the underlying error that triggered this exception */
    public SaleDiscountTooLargeException(Throwable cause) {
        super(cause);
    }
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/App/SelfCheckOutTest.java
/* Creator: <NAME>
*
* Created on September 26, 2012
* Updated on September 26, 2012
*
* This class contains JUnit test cases for SelfCheckOut.java.
*
*/
package ca.utoronto.csc301.SelfCheckOut.App;
import static org.junit.Assert.*;
import static org.junit.Assert.fail;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import ca.utoronto.csc301.SelfCheckOut.Devices.BaggingArea;
import ca.utoronto.csc301.SelfCheckOut.Devices.PaymentCollector;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.AddWhileBaggingException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.IncorrectStateException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidBICException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidProductException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidUPCException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidWeightException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.SaleDiscountException;
public class SelfCheckOutTest {

    // Shared fixtures: one SelfCheckOut wired to the test database, plus
    // codes for items known to exist ("second*") and not exist ("nonDb*").
    static SelfCheckOut sco;
    static BaggingArea bArea;
    static PaymentCollector pCollector;
    static Database pdb;
    static UPC secondUPC, nonDbUPC;
    static BIC secondBIC, nonDbBIC;
    static double secondUPCWeight;
    static double secondBICWeight;
    static double secondBulkTaxRate = 1;
    static double secondPackagedTaxRate = 1;
    static double EPSILON = 1e-13;

    @BeforeClass
    public static void classSetUp() throws Exception {
        bArea = new BaggingArea();
        pCollector = new PaymentCollector();
        pdb = new Database("Database/TestSelfCheckOut.db");
        sco = new SelfCheckOut(bArea, pCollector, pdb);
        // create some packaged items
        try {
            secondUPC = new UPC("786936224306");
            nonDbUPC = new UPC("012398235414");
        } catch (InvalidUPCException e) {
            fail("Invalid UPC");
        }
        // create some bulk items
        try {
            secondBIC = new BIC("22222");
            nonDbBIC = new BIC("12346");
        } catch (InvalidBICException e) {
            fail("Invalid BIC");
        }
        secondBulkTaxRate += pdb.lookUpItem(secondBIC).getTaxRate();
        // NOTE(review): the next line re-initialises a field that is already
        // 1; it is redundant (but harmless) before the += below.
        secondPackagedTaxRate = 1;
        secondPackagedTaxRate += pdb.lookUpItem(secondUPC).getTaxRate();
        secondBICWeight = 2.61;
        secondUPCWeight = 1.35;
    }

    @AfterClass
    public static void classTearDown() throws Exception {
        bArea = null;
        pCollector = null;
        pdb = null;
        sco = null;
    }

    @Before
    public void setUp() throws Exception {
    }

    @After
    public void tearDown() throws Exception {
        // Start every test from an empty checkout.
        sco.resetAll();
    }

    // ===== Test for exceptions =====
    // ===============================
    /*
     * Add a BIC item that doesn't exist in the database.
     */
    @Test(expected = InvalidProductException.class)
    public void testAddNonExistingBICItem() throws Exception {
        sco.addItem(nonDbBIC, 10);
    }

    /*
     * Add a UPC item that doesn't exist in the database.
     */
    @Test(expected = InvalidProductException.class)
    public void testAddNonExistingUPCItem() throws Exception {
        sco.addItem(nonDbUPC);
    }

    /*
     * Add a BIC item with a negative weight.
     */
    @Test(expected = InvalidWeightException.class)
    public void testAddBICItemNegativeWeight() throws Exception {
        int negWeight = -10;
        sco.addItem(secondBIC, negWeight);
    }

    /*
     * Add an item without bagging the previous one.
     */
    @Test(expected = AddWhileBaggingException.class)
    public void testAddWithoutBaggingFirst() throws Exception {
        sco.addItem(secondUPC);
        sco.addItem(secondBIC, secondBICWeight);
    }

    // ===== End of tests for exceptions =====
    // =======================================
    // ===== Tests on empty SelfCheckOut =====
    // =======================================
    /*
     * Test getTotalCost method on an empty selfCheckOut.
     * NOTE(review): assertEquals expects (expected, actual, delta); these
     * calls pass the actual value first — harmless for equality but the
     * failure message would be misleading.
     */
    @Test
    public void testGetTotalCostEmptySco() throws Exception {
        assertEquals(sco.getTotalCost(), 0, EPSILON);
    }

    /*
     * Test getTotalTax method on an empty selfCheckOut
     */
    @Test
    public void testGetTotalTaxEmptySco() throws Exception {
        assertEquals(sco.getTotalTax(), 0, EPSILON);
    }

    /*
     * Test getSubTotal method on an empty selfCheckOut
     */
    @Test
    public void testGetSubTotalEmptySco() throws Exception {
        assertEquals(sco.getSubTotal(), 0, EPSILON);
    }

    /*
     * Test getTotalDiscount method on an empty selfCheckOut
     */
    @Test
    public void testGetTotalDiscountEmptySco() throws Exception {
        assertEquals(sco.getTotalDiscount(), 0, EPSILON);
    }

    // ===== End of tests on empty SelfCheckOut =====
    // =======================================
    /*
     * Test getSCOid method
     */
    @Test
    public void testGetSCOid() throws Exception {
        assertEquals(sco.getSCOid(), 0, EPSILON);
    }

    /*
     * Add a BIC item that exists in the database.
     * FIXME: body is empty, so this test always passes without testing
     * anything — implement or remove it.
     */
    @Test
    public void testAddValidBICItem() throws Exception {
    }

    /*
     * Add a UPC item that exists in the database.
     * FIXME: body is empty, so this test always passes without testing
     * anything — implement or remove it.
     */
    @Test
    public void testAddValidUPCItem() throws Exception {
    }
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/App/GroceryItemTest.java
/* Creator: <NAME>
*
* Created on September 23, 2012
* Updated on September 23, 2012
*
* This class contains JUnit test cases for GroceryItem.java.
*
*/
package ca.utoronto.csc301.SelfCheckOut.App;
import static org.junit.Assert.*;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidBICException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidWeightException;
/**
 * Unit tests for GroceryItem: construction, accessors, and rejection of
 * impossible (negative) weights.
 */
public class GroceryItemTest {

    double EPSILON = 1e-15; // a really small number
    double unitPrice, firstPrice, firstWeight;
    double differentPrice, differentWeight;
    String firstDescription, firstCode, differentDescription, differentCode;
    BIC firstBIC;
    UPC differentUPC;
    ProductInfo firstInfo, differentInfo;
    GroceryItem firstGroceryItem, differentGroceryItem;

    @Before
    public void setUp() throws Exception {
        /* Instantiate a BulkProduct. */
        firstCode = "11111";
        firstBIC = new BIC(firstCode);
        firstDescription = "Banana";
        unitPrice = 0.69;
        firstInfo = new BulkProduct(firstDescription, firstBIC, unitPrice);
        /* Instantiate the related GroceryItem. */
        firstWeight = 10;
        firstPrice = unitPrice * firstWeight;
        firstGroceryItem = new GroceryItem(firstInfo, firstPrice, firstWeight);
        /* Instantiate a PackagedProduct. */
        differentCode = "786936224306";
        differentUPC = new UPC(differentCode);
        differentDescription = "Kellogg Cereal";
        differentPrice = 3.52;
        differentWeight = 1.35;
        differentInfo = new PackagedProduct(differentDescription, differentUPC,
                differentPrice, differentWeight);
        /* Instantiate the related GroceryItem. */
        differentGroceryItem = new GroceryItem(differentInfo, differentPrice,
                differentWeight);
    }

    @After
    public void tearDown() throws Exception {
        /* Tear down the BulkProduct. */
        firstCode = null;
        firstBIC = null;
        firstDescription = null;
        unitPrice = 0.0;
        firstInfo = null;
        /* Tear down the related GroceryItem. */
        firstWeight = 0.0;
        firstPrice = 0.0;
        firstGroceryItem = null;
        /* Tear down the PackagedProduct. */
        differentCode = null;
        differentUPC = null;
        differentDescription = null;
        differentPrice = 0.0;
        differentWeight = 0.0;
        differentInfo = null;
        /* Tear down the related GroceryItem. */
        differentGroceryItem = null;
    }

    /**
     * Test the constructor with a negative weight; the constructor is
     * expected to throw, so its result is deliberately not assigned (the
     * previous version left an unused local variable here).
     *
     * @throws InvalidWeightException
     * @throws InvalidBICException
     */
    @Test(expected = InvalidWeightException.class)
    public void constructWithNegativeWeight() throws InvalidWeightException,
            InvalidBICException {
        double negativeWeight = -10; // negative weight!
        new GroceryItem(firstInfo, firstPrice, negativeWeight);
    }

    @Test
    public void testGetPrice() {
        // first a test with a correct price
        assertEquals(firstPrice, firstGroceryItem.getPrice(), EPSILON);
        // now test with a different price
        assertFalse(differentPrice == firstGroceryItem.getPrice());
    }

    @Test
    public void testGetWeight() {
        // first a test with a correct weight
        assertEquals(firstWeight, firstGroceryItem.getWeight(), EPSILON);
        // now test with a different weight
        assertFalse(differentWeight == firstGroceryItem.getWeight());
    }

    @Test
    public void testGetInfo() {
        // first a test with the correct ProductInfo
        assertEquals(firstInfo, firstGroceryItem.getInfo());
        // now test with a different ProductInfo
        assertNotSame(firstInfo, differentGroceryItem.getInfo());
    }
}
<file_sep>/Database/dropDB.sql
-- This script drops all tables in the database.
-- "if exists" makes the script safe to run against a fresh (empty) database.
drop table if exists TaxLog;
drop table if exists ProductCategory;
drop table if exists BulkProduct;
drop table if exists PackagedProduct;
drop table if exists ImpulseProducts;
drop table if exists Sale;
-- Fix: setupDB.sql also creates UserAccounts, but this script never dropped
-- it, so a schema change to UserAccounts would silently keep the stale table.
drop table if exists UserAccounts;
<file_sep>/Database/setupDB.sql
/**
 * TaxLog keeps a running total for the tax received from purchases for a particular date.
 * This makes it convenient for the store owner when it comes time to pay the government those taxes.
 */
create table if not exists TaxLog(
date Date,
totalTax double default 0.00,
primary key(date)
);
/**
 * Sale keeps records of which items are receiving a discount.
 * It determines which items to apply the sale to, when this sale starts, when this sale ends
 * and the type of sale.
 */
create table if not exists Sale(
saleID integer primary key, --A unique id for the sale.
code VARCHAR(32), -- This code refers to the code of the item the sale is to be applied to.
startDate Date, -- This is when the sale starts.
endDate Date, -- This is when the sale ends.
percentReduction double default 0.00, -- This determines the percent discount.
flatReduction double default 0.00 -- This determines the flat rate discount.
);
/**
 * ProductCategory contains all the different category's a product could be in
 * and the tax rate for that category.
 */
create table if not exists ProductCategory(
category varchar(64),
taxRate double default 0.00,
primary key(category)
);
/**
 * BulkProduct contains all the bulk product information along with their category.
 * Category is a foreign key referencing ProductCategory; this makes it easy to natural join with
 * ProductCategory to obtain the current tax rate for any product.
 */
create table if not exists BulkProduct(
BIC char(5),
descrip varchar(64),
unitPrice double,
category varchar(64),
wholesalePrice double,
primary key(BIC),
foreign key(category) REFERENCES ProductCategory(category)
);
/**
 * PackagedProduct contains all the packaged product information along with their category.
 * Category is a foreign key referencing ProductCategory; this makes it easy to natural join with
 * ProductCategory to obtain the current tax rate for any product.
 */
create table if not exists PackagedProduct(
UPC char(12),
descrip varchar(64),
price double,
weight double,
category varchar(64),
wholesalePrice double,
-- Fix: a comma was missing between the primary-key and foreign-key table
-- constraints, which is a syntax error in SQL's CREATE TABLE grammar
-- (table constraints must be comma-separated).
primary key(UPC),
foreign key(category) REFERENCES ProductCategory(category)
);
/**
 * ImpulseProducts contain any impulse buy items and the amount that have been purchased since the started date.
 * note: totalBought for bulk items represent total weight bought
 */
create table if not exists ImpulseProducts(
SelfCheckoutID int,
descrip varchar(64),
code varchar(12),
totalBought double,
profitMargin double,
started Date,
primary key(SelfCheckoutID, code)
);
/**
 * UserAccounts contain all username, password, full name, and privileges of employees in the store
 * that can access the administrative view.
 * privilege is a int, where 0 = search only access, 1 = all access.
 */
create table if not exists UserAccounts(
username varchar(64),
password varchar(512),
name varchar(64),
privilege int,
primary key(username)
);
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/Exceptions/UnrecognizedPayTypeException.java
package ca.utoronto.csc301.SelfCheckOut.Exceptions;
/**
* Thrown when given type of payment is unrecognised
*/
public class UnrecognizedPayTypeException extends Exception {
private static final long serialVersionUID = 1L;
public UnrecognizedPayTypeException() {
}
public UnrecognizedPayTypeException(String message) {
super(message);
}
public UnrecognizedPayTypeException(Throwable cause) {
super(cause);
}
public UnrecognizedPayTypeException(String message, Throwable cause) {
super(message, cause);
}
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/Exceptions/IncorrectStateException.java
/*
*
* Creator: <NAME>
* Course: Inf111, Winter 2008
*
* Created on May 10, 2006
* Updated on January 17, 2008
*
* Copyright, 2006, 2008 University of California.
*/
package ca.utoronto.csc301.SelfCheckOut.Exceptions;
/**
* The IncorrectStateException is not normally used directly, but acts as a
* parent class for those exceptions which represent violations of the business
* rules of the states a SelfCheckOut object can be in.
*
*/
public class IncorrectStateException extends Exception {
private static final long serialVersionUID = 1L;
public IncorrectStateException() {
super();
}
public IncorrectStateException(String message) {
super(message);
}
public IncorrectStateException(String message, Throwable cause) {
super(message, cause);
}
public IncorrectStateException(Throwable cause) {
super(cause);
}
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/App/DatabaseTest.java
package ca.utoronto.csc301.SelfCheckOut.App;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Vector;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.DatabaseConnectionException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.JDCBDriverException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.SaleDiscountNegativeException;
// NOTE(review): all tests in this class run against the on-disk fixture
// "Database/TestSelfCheckOut.db" opened once in @BeforeClass, and assume it
// contains specific rows (categories 'electronics', 'fruit', 'magazine',
// 'prepared food', 'veggie'; a non-empty ProductCategory table). Keep the
// fixture in sync with these expectations.
public class DatabaseTest {
// Shared connection to the test database; opened once for the whole class.
static Database pdb;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
pdb = new Database("Database/TestSelfCheckOut.db");
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
pdb = null;
}
/**
 * Test isSafeToRemoveFromProductCategory for removing an ID that is not
 * being referred to by any product in the DB.
 */
@Test
public void safeToRemove() {
assertTrue(pdb.isSafeToRemoveFromProductCategory("electronics"));
}
/**
 * Test isSafeToRemoveFromProductCategory for removing an ID that is being
 * referred to by a product in Bulk products.
 */
@Test
public void notSafeToRemoveBulk() {
assertFalse(pdb.isSafeToRemoveFromProductCategory("fruit"));
}
/**
 * Test isSafeToRemoveFromProductCategory for removing an ID that is being
 * referred to by a product in Packaged products.
 */
@Test
public void notSafeToRemovePackaged() {
assertFalse(pdb.isSafeToRemoveFromProductCategory("magazine"));
}
/**
 * Test isSafeToRemoveFromProductCategory for removing an ID that is being
 * referred to by products in both Bulk and Packaged products
 */
@Test
public void notSafeToRemoveBoth() {
assertFalse(pdb.isSafeToRemoveFromProductCategory("prepared food"));
}
/**
 * Test lookUpItemByCategory with a category that exists in the database.
 */
@Test
public void lookUpExistingCategory() throws Exception {
assertTrue(pdb.lookUpCategory("veggie"));
}
/**
 * Test lookUpItemByCategory with a category that doesn't exist in the
 * database.
 */
@Test
public void lookUpNonExistingCategory() throws Exception {
assertFalse(pdb.lookUpCategory("doesNotExist"));
}
/**
 * Test listTable with a table that does not exist.
 */
@Test
public void listMissingTable() {
// query the database using a table that doesn't exist
assertEquals(pdb.listTable("fakeTable"), null);
}
/**
 * Test listTable with a table that does exist.
 */
@Test
public void listTable() {
// query the database
ResultSet rs = pdb.listTable("ProductCategory");
// check if a result was returned
assertTrue(rs != null);
try {
// check if there is a row in the result
assertTrue(rs.next());
} catch (SQLException e) {
fail("Could not find table");
}finally{
// Always release the ResultSet, even if the assertion above fails.
try {
if (rs != null)rs.close();
} catch (SQLException ignore) {
}
}
}
/**
 * Test update database with an ill formed query.
 */
@Test(expected=SQLException.class)
public void updateDatabaseIllFormedQuery() throws SQLException {
// check if updating the database with an ill formed query will
// result in a SQLException
pdb.updateDatabase("select *from234");
}
/**
 * Test update database with a well formed query.
 */
@Test
public void updateDatabaseWellFormedQuery() throws SQLException {
// check if updating the database with an well formed query will succeed
// NOTE(review): this permanently mutates the shared fixture (sets the
// 'veggie' taxRate to 0.00); any test depending on that value must
// tolerate the change.
pdb.updateDatabase("update ProductCategory set taxRate = 0.00" +
" where category='veggie'");
}
/**
 * Test Generating a report to a file in the src directory.
 */
@Test
public void GenerateReportToFile() {
// check the success of generating the report
// NOTE(review): writes "report.txt" into the working directory as a
// side effect; the file is not cleaned up afterwards.
assertTrue(pdb.generateReportToFile("report.txt"));
}
/**
 * Test getting a report.
 */
@Test
public void GenerateReport() {
// check if the report has at least one character
assertTrue(pdb.generateReport().length() > 0);
}
/**
 * Test getting all products in the database.
 */
@Test
public void getAllProducts() {
Vector<String> products = pdb.getAllProducts();
//check if any products were obtained
assertTrue(products.size() > 0);
}
/**
 * Test getting all tax categories in the database.
 */
@Test
public void getAllCategories() {
Vector<String> categories = pdb.getAllTaxCategories();
//check if any products were obtained
assertTrue(categories.size() > 0);
}
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/Gui/SearchPopUpGUI.java
package ca.utoronto.csc301.SelfCheckOut.Gui;
import java.awt.Component;
import java.awt.Dimension;
import java.awt.FlowLayout;
import java.awt.Font;
import java.awt.GridBagConstraints;
import java.awt.GridBagLayout;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.ItemEvent;
import java.awt.event.ItemListener;
import java.util.Locale;
import java.util.Vector;
import javax.swing.AbstractButton;
import javax.swing.BorderFactory;
import javax.swing.BoxLayout;
import javax.swing.JButton;
import javax.swing.JCheckBox;
import javax.swing.JComboBox;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.JTextArea;
import javax.swing.JTextField;
import javax.swing.KeyStroke;
import javax.swing.event.DocumentEvent;
import javax.swing.event.DocumentListener;
import javax.swing.plaf.basic.BasicComboBoxUI;
import javax.swing.text.DefaultCaret;
import javax.swing.text.Document;
/**
 * Pop-up search window ("Search Box") used by the administrator view.
 *
 * Offers two interchangeable views, toggled with a check box: a simple view
 * (an editable combo-box search bar showing live search suggestions as the
 * user types) and an advanced view (a free-text field plus a drop-down
 * selecting which column to search). Results are appended to a read-only,
 * auto-scrolling text area at the bottom of the window.
 */
public class SearchPopUpGUI extends JFrame implements ActionListener, ItemListener {

    private static final long serialVersionUID = 1L;

    /**
     * Toggle between simple search view and advanced search view
     */
    protected JCheckBox toggleAdvancedButton;

    /**
     * JPanels for underlying panel, view settings, simple search,
     * advanced search
     */
    protected JPanel basePanel, viewSettingPanel, simpleSearchPanel,
            advancedSearchPanel;

    /**
     * TODO change this for final release
     * The default text that will be displayed when search box is opened.
     */
    protected String outputTextBoxDefaultMsg =
            "Search result goes here or maybe select matching cells" +
            " in AdminView spreadsheet";

    /**
     * Label for the general search box in both simple and advanced search view
     */
    protected String generalSearchLabel = "Search For";

    /**
     * Search button that's used in both simple and advanced search view
     */
    protected JButton searchButton;

    /**
     * The custom JComboBox search bar in simple search view
     */
    protected JComboBox simpleSearchBar;

    /**
     * A list of Strings to be displayed in search suggestion
     */
    protected Vector<String> searchSuggest = new Vector<String>();

    /**
     * Search text field in advanced search view
     */
    protected JTextField advSearchTextField;

    /**
     * Selection menu in advanced search view
     */
    protected JComboBox advSearchList;

    /**
     * The types of search that can be performed in advanced view; listed in
     * the selection menu "advSearchList"
     */
    // TODO change to real table names in SCO.db and display user friendly
    // names from AdminTableModel.mapColumnNames()
    protected String[] searchType =
            { "Product Description", "BIC", "UPC",
            "Sale Percent Discount (%)", "etc..." };

    /**
     * Scrollable text box for displaying search results
     */
    protected JScrollPane textBoxScrollPane;

    /**
     * Text box used for displaying search result
     */
    protected JTextArea outputTextBox;

    /**
     * Create and display the pop-up search window
     */
    public SearchPopUpGUI() {
        super("Search Box");
        // Don't use DISPOSE_ON_CLOSE so that previous view setting is retained
        setDefaultCloseOperation(JFrame.HIDE_ON_CLOSE);
        int sizeX = 700;
        int sizeY = 500;
        setPreferredSize(new Dimension(sizeX, sizeY));
        // Set up panels
        basePanel = new JPanel();
        this.add(basePanel);
        basePanel.setLayout(new BoxLayout(basePanel, BoxLayout.Y_AXIS));
        basePanel.setVisible(true);
        viewSettingPanel = createviewSettingPanel();
        viewSettingPanel.setVisible(true);
        // setMaximumSize for formatting relative size of each panel
        viewSettingPanel.setMaximumSize(new Dimension(sizeX, 1));
        basePanel.add(viewSettingPanel);
        simpleSearchPanel = createSimpleSearchPanel();
        simpleSearchPanel.setVisible(true);
        // setMaximumSize for formatting relative size of each panel
        simpleSearchPanel.setMaximumSize(new Dimension(sizeX, 1));
        basePanel.add(simpleSearchPanel);
        advancedSearchPanel = createAdvancedSearchPanel();
        advancedSearchPanel.setVisible(false);
        // setMaximumSize for formatting relative size of each panel
        advancedSearchPanel.setMaximumSize(new Dimension(sizeX, 1));
        basePanel.add(advancedSearchPanel);
        textBoxScrollPane = createOutputTextBoxScrollPane();
        textBoxScrollPane.setVisible(true);
        // doesn't need setMaximumSize; take up rest of space
        basePanel.add(textBoxScrollPane);
        pack();
        setVisible(true);
    }

    /**
     * Redisplay the search box if the user previously closed it, resetting
     * the result area to its default message.
     */
    public void openSearchBox() {
        // Fix: previously this assigned a brand-new JTextArea to
        // outputTextBox without adding it to the scroll pane, so the visible
        // text area kept its old contents while later writes went to an
        // invisible component. Reset the existing, displayed text area
        // instead.
        outputTextBox.setText(outputTextBoxDefaultMsg);
        this.setVisible(true);
    }

    /**
     * Create the JPanel for objects associated with search settings.
     * Currently there is only 1 setting.
     * @return the JPanel object
     */
    private JPanel createviewSettingPanel() {
        JPanel panel = new JPanel(new FlowLayout(FlowLayout.RIGHT));
        // Set up check box
        toggleAdvancedButton = new JCheckBox("Advanced View", false);
        toggleAdvancedButton.addItemListener(this);
        panel.add(toggleAdvancedButton);
        return panel;
    }

    /**
     * Create the JPanel for objects associated with simple search view
     * @return the JPanel object
     */
    private JPanel createSimpleSearchPanel() {
        JPanel panel = new JPanel();
        panel.setBorder(BorderFactory.createTitledBorder("Simple Search"));
        panel.setLayout(new GridBagLayout());
        // Custom JComboBox search bar; allows for search suggestion
        simpleSearchBar = new JComboBox(searchSuggest);
        simpleSearchBar.setEditable(true);
        simpleSearchBar.setActionCommand("simpleSearch");
        simpleSearchBar.addActionListener(this);
        // code by <NAME> from http://stackoverflow.com/questions/
        // 822432/how-to-make-jcombobox-look-like-a-jtextfield
        simpleSearchBar.setUI(new BasicComboBoxUI() {
            @Override
            protected JButton createArrowButton() {
                return new JButton() {
                    @Override
                    public int getWidth() {
                        return 0;
                    }
                };
            }
        });
        // code by <NAME> from http://stackoverflow.com/questions/
        // 8949466/detecting-jcombobox-editing
        final Component editor = simpleSearchBar
                .getEditor().getEditorComponent();
        if (editor instanceof JTextField) {
            Document doc = ((JTextField) editor).getDocument();
            doc.addDocumentListener(new DocumentListener() {
                @Override
                public void changedUpdate(DocumentEvent arg0) {
                    // User changed font style of text; irrelevant in this case
                }
                @Override
                public void insertUpdate(DocumentEvent arg0) {
                    // User typed a new character in search bar; handle this
                    // in updateSearchSuggest()
                    updateSearchSuggest(((JTextField) editor).getText());
                }
                @Override
                public void removeUpdate(DocumentEvent arg0) {
                    // User deleted text from search bar
                    updateSearchSuggest(((JTextField) editor).getText());
                }
            });
        } else {
            throw new RuntimeException("Java Swing implementation changed");
        }
        // Search bar label
        JLabel simpleSearchFieldLabel = new JLabel(generalSearchLabel + ": ");
        simpleSearchFieldLabel.setLabelFor(simpleSearchBar);
        // Search button
        searchButton = new JButton("Search");
        searchButton.setVerticalTextPosition(AbstractButton.CENTER);
        searchButton.setHorizontalTextPosition(AbstractButton.CENTER);
        searchButton.setActionCommand("simpleSearch");
        searchButton.addActionListener(this);
        // Search panel layout
        GridBagConstraints gc = new GridBagConstraints();
        // Row 1
        gc.fill = GridBagConstraints.HORIZONTAL;
        gc.gridx = 0;
        gc.gridy = 0;
        gc.weightx = 0.0;
        panel.add(simpleSearchFieldLabel, gc);
        gc.gridx = 1;
        gc.weightx = 0.5;
        panel.add(simpleSearchBar, gc);
        gc.gridx = 2;
        gc.weightx = 0.25;
        panel.add(searchButton, gc);
        return panel;
    }

    /**
     * Create the JPanel for objects associated with advanced search view
     * @return the JPanel object
     */
    private JPanel createAdvancedSearchPanel() {
        JPanel panel = new JPanel();
        panel.setBorder(BorderFactory.createTitledBorder("Advanced Search"));
        panel.setLayout(new GridBagLayout());
        // Text field
        advSearchTextField = new JTextField(20);
        advSearchTextField.setActionCommand("advSearch");
        advSearchTextField.addActionListener(this);
        // Search text field label
        JLabel advsSearchFieldLabel = new JLabel(generalSearchLabel + ": ");
        advsSearchFieldLabel.setLabelFor(advSearchTextField);
        // Selection box
        advSearchList = new JComboBox(searchType);
        // Search button
        // NOTE(review): this reassigns the shared searchButton field, so
        // after construction it refers to the advanced-view button only;
        // both buttons still work because each has its own listener.
        searchButton = new JButton("Search");
        searchButton.setVerticalTextPosition(AbstractButton.CENTER);
        searchButton.setHorizontalTextPosition(AbstractButton.CENTER);
        searchButton.setActionCommand("advSearch");
        searchButton.addActionListener(this);
        // Search panel layout
        GridBagConstraints gc = new GridBagConstraints();
        // Row 1
        gc.fill = GridBagConstraints.HORIZONTAL;
        gc.gridx = 0;
        gc.gridy = 0;
        gc.weightx = 0.0;
        panel.add(advsSearchFieldLabel, gc);
        gc.gridx = 1;
        gc.weightx = 0.5;
        panel.add(advSearchTextField, gc);
        gc.gridx = 2;
        gc.weightx = 0.25;
        panel.add(advSearchList, gc);
        gc.gridx = 3;
        panel.add(searchButton, gc);
        return panel;
    }

    /**
     * Create the JScrollPane for the search result output text box
     * @return the JScrollPane object
     */
    private JScrollPane createOutputTextBoxScrollPane() {
        outputTextBox = new JTextArea(outputTextBoxDefaultMsg);
        outputTextBox.setFont(new Font("Serif", Font.PLAIN, 15));
        outputTextBox.setLineWrap(true);
        outputTextBox.setWrapStyleWord(true);
        outputTextBox.setEditable(false);
        // Add scroll to the text area
        JScrollPane textBoxScrollPane = new JScrollPane(outputTextBox);
        textBoxScrollPane.setVerticalScrollBarPolicy(
                JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED);
        textBoxScrollPane.setBorder(
                BorderFactory.createTitledBorder("Search Result"));
        return textBoxScrollPane;
    }

    /**
     * Updates the list of search suggestions in custom JComboBox search bar
     * based on the text that are currently in the search bar.
     */
    private void updateSearchSuggest(String input) {
        input = input.toLowerCase();
        System.out.println("\"" + input + "\"");
        // prevents UP/DOWN arrow key from "selecting" the item in the menu and
        // cause it to "move" around the menu instead
        // code by <NAME> from
        // http://tips4java.wordpress.com/2009/05/17/combo-box-no-action
        simpleSearchBar.putClientProperty("JComboBox.isTableCellEditor", true);
        // TODO potential GUI bug where user mouse-overs something and then erase text field
        // To duplicate bug:
        // type "bb" -> mouse-over one search suggestion -> hold Backspace until empty -> type "a"
        // source of problem: interaction between selection through keyboard and JComboBox
        // also problems with interaction between mouse-over and JComboBox
        // this is only a simple example, we will use SQL queries
        // instead of IF-ELSE statements in final release
        if (input.isEmpty()) {
            searchSuggest.clear();
            simpleSearchBar.hidePopup();
        } else if (input.startsWith("a")) {
            searchSuggest.clear();
            searchSuggest.add("apple");
            searchSuggest.add("avocado");
            searchSuggest.add("awesome");
            simpleSearchBar.hidePopup();
            simpleSearchBar.showPopup();
        } else if (input.startsWith("b")) {
            searchSuggest.clear();
            searchSuggest.add("banana");
            searchSuggest.add("Broccoli");
            simpleSearchBar.hidePopup();
            simpleSearchBar.showPopup();
        } else if (input.startsWith("what")) {
            searchSuggest.clear();
            searchSuggest.add("What is the answer to life, the universe, and everything");
            simpleSearchBar.hidePopup();
            simpleSearchBar.showPopup();
        } else {
            // unrecognised input, show empty search suggestion
            searchSuggest.clear();
            searchSuggest.add("Unrecognized input...");
            simpleSearchBar.hidePopup();
            simpleSearchBar.showPopup();
        }
    }

    /**
     *
     * @param input - the text that user searched for
     * @param type - column name (or empty String if not applicable)
     * @return String containing data of what the user searched for
     */
    private String queryDatabase(String input, String type) {
        // TODO change to real SQL queries, do something about type
        return "\nYou searched for: \"" + input + "\" under the category \"" +
                type + "\"";
    }

    /**
     * Output text box will automatically scroll to bottom; creating a
     * method for this will save a few lines of code
     * @param msg - the message to be displayed in the text box
     */
    private void writeToTextBox(String msg) {
        outputTextBox.append("\n" + msg);
        outputTextBox.setCaretPosition(outputTextBox.getDocument().getLength());
    }

    @Override
    public void actionPerformed(ActionEvent ae) {
        String action = ae.getActionCommand();
        // Fix: action commands were previously compared with ==, which only
        // worked because both sides happened to be interned literals; use
        // equals() for value comparison, as the String API requires.
        if ("advSearch".equals(action)) {
            String output = queryDatabase(advSearchTextField.getText(),
                    (String) advSearchList.getSelectedItem());
            writeToTextBox(output);
            advSearchTextField.setText("");
        } else if ("simpleSearch".equals(action)) {
            // user pressed the Enter key, selected one of the search
            // suggestions, or pressed the Search button
            String input = (String) simpleSearchBar.getSelectedItem();
            if (input == null || input.isEmpty()) {
                return;
            }
            // TODO maybe write guessColumnName(input) for "String type" arg
            // of queryDatabase such as "type = BIC if input.size() == 5"
            writeToTextBox(queryDatabase(input, "UNDETERMINED"));
            // Fix: == here compared object identity and would miss an equal
            // String built at runtime; equals() compares contents.
            if ("What is the answer to life, the universe, and everything".equals(input)) {
                writeToTextBox("\n42");
            }
            Component editor = simpleSearchBar.getEditor().getEditorComponent();
            ((JTextField) editor).setText("");
            simpleSearchBar.setSelectedIndex(-1);
            searchSuggest.clear();
            simpleSearchBar.hidePopup();
        }
    }

    @Override
    public void itemStateChanged(ItemEvent ie) {
        Object obj = ie.getItemSelectable();
        // Reference comparison is correct here: we are checking which
        // component fired the event, not value equality.
        if (obj == toggleAdvancedButton) {
            // Change to simple/advanced search view depending on whether
            // or not toggleAdvancedB button is selected.
            advSearchTextField.setText("");
            Component editor = simpleSearchBar.getEditor().getEditorComponent();
            ((JTextField) editor).setText("");
            searchSuggest.clear();
            if (toggleAdvancedButton.isSelected()) {
                simpleSearchPanel.setVisible(false);
                advancedSearchPanel.setVisible(true);
            } else {
                simpleSearchPanel.setVisible(true);
                advancedSearchPanel.setVisible(false);
            }
        }
    }

    /**
     * Show the pop-up search box.
     * TODO remove main() after integration with SelfCheckOutAdministratorView
     */
    public static void main(String[] args) {
        // note that the previous simple/advanced view is remembered even if
        // the search pop-up window is closed
        new SearchPopUpGUI();
    }
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/App/CheckOutCartTest.java
/*
* Creator: <NAME>
*
* Created on Monday 24, September 2012
*
* This class contains JUnit test cases for CheckoutCart.java.
*
*/
package ca.utoronto.csc301.SelfCheckOut.App;
import java.util.Enumeration;
import static org.junit.Assert.*;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
// Tests CheckOutCart totals using one untaxed bulk item (tax-rate argument
// 0.0) and one taxed packaged item (tax-rate argument 0.15); expected values
// below are derived from those fixtures.
public class CheckOutCartTest {
double EPSILON = 1e-15;
// Prices/weights for the "first" (bulk, untaxed) fixture and the
// "different" (packaged, taxed) fixture.
double unitPrice, firstPrice, firstWeight;
double differentPrice, differentWeight;
String firstDescription, firstCode, differentDescription, differentCode;
BIC firstBIC;
UPC differentUPC;
ProductInfo firstInfo, differentInfo;
GroceryItem firstGroceryItem, differentGroceryItem;
// Builds the two grocery-item fixtures before each test.
@Before
public void setUp() throws Exception {
/* Instantiate a BulkProduct. */
firstCode = "11111";
firstBIC = new BIC(firstCode);
firstDescription = "Banana";
unitPrice = 0.69;
// Final 0.0 argument: this product carries no tax.
firstInfo = new BulkProduct(firstDescription, firstBIC, unitPrice, 0.0);
/* Instantiate the related GroceryItem. */
firstWeight = 10;
// Bulk price is derived from weight: 0.69 * 10 = 6.9.
firstPrice = unitPrice * firstWeight;
firstGroceryItem = new GroceryItem(firstInfo, firstPrice, firstWeight);
/* Instantiate a PackagedProduct. */
differentCode = "786936224306";
differentUPC = new UPC(differentCode);
differentDescription = "Kellogg Cereal";
differentPrice = 3.52;
differentWeight = 1.35;
// Final 0.15 argument: this product is taxed at 15%.
differentInfo = new PackagedProduct(differentDescription, differentUPC,
differentPrice, differentWeight, 0.15);
/* Instantiate the related GroceryItem. */
differentGroceryItem = new GroceryItem(differentInfo, differentPrice,
differentWeight);
}
// Nulls/zeroes every fixture field after each test so no state leaks
// between test methods.
@After
public void tearDown() throws Exception {
/* Tear down the BulkProduct. */
firstCode = null;
firstBIC = null;
firstDescription = null;
unitPrice = 0.0;
firstInfo = null;
/* Tear down the related GroceryItem. */
firstWeight = 0.0;
firstPrice = 0.0;
firstGroceryItem = null;
/* Tear down the PackagedProduct. */
differentCode = null;
differentUPC = null;
differentDescription = null;
differentPrice = 0.0;
differentWeight = 0.0;
differentInfo = null;
/* Tear down the related GroceryItem. */
differentGroceryItem = null;
}
// An empty cart must report zero total weight and zero total cost.
@Test
public void CheckOutWithNoItem() {
CheckOutCart instance = new CheckOutCart();
double ExtotalWeight = 0.0;
double ExtotalPrice = 0.0;
// test with correct TotalWeight
assertEquals(ExtotalWeight, instance.getTotalWeight(), EPSILON);
// test with correct TotalPrice
assertEquals(ExtotalPrice, instance.getTotalCost(), EPSILON);
}
// A cart with one untaxed item: totals equal that item's weight and price,
// tax is zero, and the item is enumerable from the cart.
@Test
public void CheckOutOneItem() {
CheckOutCart instance = new CheckOutCart();
instance.addItemToCart(firstGroceryItem);
double ExtotalWeight = 10;
double ExtotalPrice = 6.9;
double ETotalTax = 0.0;
double ESubTotal = 6.9;
String product;
Enumeration<GroceryItem> listItemsInCart;
GroceryItem groceryItem;
// test with correct TotalWeight
assertEquals(ExtotalWeight, instance.getTotalWeight(), EPSILON);
// test with different TotalWeight
assertFalse(differentWeight == instance.getTotalWeight());
// test with correct TotalPrice
assertEquals(ExtotalPrice, instance.getTotalCost(), EPSILON);
// test with different TotalPrice
assertFalse(differentPrice == instance.getTotalCost());
// test TotalTax
assertEquals(ETotalTax, instance.getTotalTax(), EPSILON);
// test SubTotal
assertEquals(ESubTotal, instance.getSubTotal(), EPSILON);
// check items added to cart
listItemsInCart = instance.listItems();
groceryItem = listItemsInCart.nextElement();
product = groceryItem.getInfo().getDescription();
assertEquals(firstDescription, product);
}
// A cart with one untaxed and one taxed item; expected values:
// weight 10 + 1.35 = 11.35, tax 3.52 * 0.15 = 0.528,
// subtotal 6.9 + 3.52 = 10.42, total 10.42 + 0.528 = 10.948.
// Items are expected to enumerate in insertion order.
@Test
public void CheckOutMoreThanOneItem() {
CheckOutCart instance = new CheckOutCart();
instance.addItemToCart(firstGroceryItem); // non-taxed item
instance.addItemToCart(differentGroceryItem); // taxed item
double ExtotalWeight = 11.35;
double ExtotalPrice = 10.948; // considering tax at 15%
double ETotalTax = 0.528;
double garbage = 12.25;
double ESubTotal = 10.420;
String product;
Enumeration<GroceryItem> listItemsInCart;
GroceryItem groceryItem;
// test with correct TotalWeight
assertEquals(ExtotalWeight, instance.getTotalWeight(), EPSILON);
// test with different TotalWeight
assertFalse(garbage == instance.getTotalWeight());
// test with correct TotalPrice
assertEquals(ExtotalPrice, instance.getTotalCost(), EPSILON);
// test with Different TotalPrice
assertFalse(garbage == instance.getTotalCost());
// checks SubTotal(TotalPrice - TotalTax)
assertEquals(ESubTotal, instance.getSubTotal(), EPSILON);
// test TotalTax
assertEquals(ETotalTax, instance.getTotalTax(), EPSILON);
// check items added to cart
listItemsInCart = instance.listItems();
groceryItem = listItemsInCart.nextElement();
// first product
product = groceryItem.getInfo().getDescription();
assertEquals(firstDescription, product);
groceryItem = listItemsInCart.nextElement();
// second product
product = groceryItem.getInfo().getDescription();
assertEquals(differentDescription, product);
}
}
<file_sep>/README.txt
Link to Wiki page (refer to this page for all of the artifacts)
https://stanley.cdf.toronto.edu/drproject/csc301-2012-09/Victoria/wiki/FinalRelease
<file_sep>/Database/runall.sql
/**
*
* This file combines the scripts drop, setup, insert so you only have to run this file instead of all 3.
*
* Used for easier testing.
*/
-- This script drops all tables in the database.
drop table if exists TaxLog;
drop table if exists ProductCategory;
drop table if exists BulkProduct;
drop table if exists PackagedProduct;
drop table if exists ImpulseProducts;
drop table if exists Sale;
/**
* TaxLog keeps a running total for the tax recieved from purchases for a particular date.
* This makes it convenient for the store owner when it comes time to pay the government those taxes.
*/
create table if not exists TaxLog(
date Date,
totalTax double default 0.00,
primary key(date)
);
/**
* Sale is information regarding which items are on sale, and by how much.
* It also contains information on when the sale is taking place
*/
create table if not exists Sale(
saleID integer primary key,
code VARCHAR(32),
startDate Date,
endDate Date,
percentReduction double default 0.00,
flatReduction double default 0.00
);
/**
* ProductCategory contains all the different category's a product could be in
* and the tax rate for that category.
*/
create table if not exists ProductCategory(
category varchar(64),
taxRate double default 0.00,
primary key(category)
);
/**
* BulkProduct contains all the bulk product information along with their category.
* Category is a foreign key referencing ProductCategory; this makes it easy to natural join with
* ProductCategory to obtain the current tax rate for any product.
*/
create table if not exists BulkProduct(
BIC char(5),
descrip varchar(64),
unitPrice double,
category varchar(64),
wholesalePrice double,
primary key(BIC),
foreign key(category) REFERENCES ProductCategory(category)
);
/**
* PackagedProduct contains all the packaged product information along with their category.
* Category is a foreign key referencing ProductCategory; this makes it easy to natural join with
* ProductCategory to obtain the current tax rate for any product.
*/
create table if not exists PackagedProduct(
UPC char(12),
descrip varchar(64),
price double,
weight double,
category varchar(64),
wholesalePrice double,
primary key(UPC)
foreign key(category) REFERENCES ProductCategory(category)
);
/**
* ImpulseProducts contain any impulse buy items and the amount that have been purchased since the started date.
* note: totalBought for bulk items represent total weight bought
*/
create table if not exists ImpulseProducts(
	SelfCheckoutID int,     -- which self-checkout stand the purchases happened at
	descrip varchar(64),    -- human-readable description of the product
	code varchar(12),       -- product code (BIC or UPC)
	totalBought double,     -- units bought; for bulk items this is total weight bought
	profitMargin double,    -- accumulated profit attributed to this impulse product
	started Date,           -- date tracking started for this product at this stand
	primary key(SelfCheckoutID, code) -- one row per product per stand
);
--This script adds the default items into the database.
--Many of these items are used for testing purposes and are fake data.
--NOTE: sale start dates previously read '1000-00-01', which has an invalid
--month of 00; they are normalized to '1000-01-01' to be valid dates and to
--match the first Sale row. The out-of-range reductions (1.5, -200.00) are
--kept as-is: they appear to be fixtures for discount-validation tests
--(see SaleDiscountTooLargeException / SaleDiscountNegativeException).
insert into ProductCategory values ('veggie', 0.00);
insert into ProductCategory values ('fruit', 0.10);
insert into ProductCategory values ('candy', 0.08);
insert into ProductCategory values ('utensil', 0.05);
insert into ProductCategory values ('chocolate', 0.15);
insert into ProductCategory values ('magazine', 0.13);
insert into ProductCategory values ('prepared food', 0.15);
insert into BulkProduct values ('11111', 'Banana', 0.69, 'veggie', 0.49);
insert into BulkProduct values ('22222', 'Orange', 0.99, 'fruit', 0.69);
insert into BulkProduct values ('33333', 'Spinach', 0.99, 'veggie', 0.55);
insert into BulkProduct values ('44444', 'Fuji Apple', 2.79, 'fruit', 1.99);
insert into BulkProduct values ('55555', 'Kiwi', 1.29, 'fruit', 0.85);
insert into BulkProduct values ('66666', 'Jellybean', 0.05, 'candy', 0.02);
insert into BulkProduct values ('77777', 'Plastic fork', 0.25, 'utensil', 0.10);
insert into BulkProduct values('88888', 't-bone steak',9.99,'prepared food', 6.78); --sale
insert into BulkProduct values('99999', 'liver', 10, 'prepared food', 8.69); --sale
insert into BulkProduct values('12345', 'chicken', 4.50, 'prepared food', 4.00); --sale
insert into BulkProduct values('12121', 'fries', 10, 'prepared food', 8.69); --sale
insert into BulkProduct values('21212', 'stale fries', 2.00, 'prepared food', 1.00); --sale
insert into BulkProduct values('06660', 'fries', 10, 'prepared food', 8.69); --sale
insert into BulkProduct values('06661', 'stale fries', 2.00, 'prepared food', 1.00); --sale
insert into PackagedProduct values ('786936224306', 'Kellogg Cereal', 3.52, 1.35, 'prepared food', 3.0);
insert into PackagedProduct values ('717951000842', 'Coca Cola (12 pack)', 3.20, 4, 'prepared food', 3.0);
insert into PackagedProduct values ('024543213710', 'Ice Cream', 4.00, 2.2, 'prepared food', 3.23);
insert into PackagedProduct values ('085392132225', 'Oreo Cookies', 3.50, 0.8, 'prepared food', 2.0);
insert into PackagedProduct values ('737666003167', 'Sees Chocolate', 4.50, 1, 'chocolate', 4.0);
insert into PackagedProduct values ('780166035718', 'Wired', 3.33, 0.6, 'magazine', 1.25); --sale
insert into PackagedProduct values ('796030114977', 'Brownies', 6.00, 2, 'prepared food', 4.50); --sale
insert into PackagedProduct values ('712345678904', 'Bread', 10, 4, 'prepared food', 5.00); --sale
insert into PackagedProduct values ('086637677174', 'mystery box', 99.99, 2, 'prepared food', 10); --sale
insert into PackagedProduct values ('012345678905', 'ketchup', 2, 4, 'prepared food', 1.00); --sale
insert into ImpulseProducts values (1, 'Sees Chocolate', '737666003167', 1, 0.5, '2012-10-14');
insert into ImpulseProducts values (1, 'Wired', '780166035718', 2, 4.16, '2012-10-20');
insert into ImpulseProducts values (1, 'Jellybean', '66666', 5, 0.15, '2012-10-10');
insert into ImpulseProducts values (1, 'Plastic fork', '77777', 14, 2.1, '2012-10-20');
insert into ImpulseProducts values (2, 'Sees Chocolate', '737666003167', 27, 0.5, '2012-10-14');
insert into ImpulseProducts values (2, 'Wired', '780166035718', 2, 4.16, '2012-10-11');
insert into ImpulseProducts values (2, 'Jellybean', '66666', 3, 0.09, '2012-10-12');
insert into ImpulseProducts values (2, 'Plastic fork', '77777', 57, 8.55, '2012-10-13');
insert into ImpulseProducts values (3, 'Sees Chocolate', '737666003167', 4, 2.0, '2012-09-14');
insert into ImpulseProducts values (3, 'Wired', '780166035718', 53, 110.24, '2012-05-14');
insert into ImpulseProducts values (3, 'Jellybean', '66666', 53, 1.59, '2012-08-14');
insert into ImpulseProducts values (3, 'Plastic fork', '77777', 33, 4.95, '2012-09-28');
insert into Sale values(null,'88888','1000-01-01','9999-01-01',0.20,0);
insert into Sale values(null,'780166035718','1000-01-01','9999-01-01',0.20,0);
insert into Sale values(null,'796030114977','1000-01-01','9999-01-01',0.0, 2.00);
insert into Sale values(null,'99999','1000-01-01','9999-01-01',0.0, 2.00);
insert into Sale values(null,'12345','1000-01-01','9999-01-01',0.2, 2.00);
insert into Sale values(null,'712345678904','1000-01-01','9999-01-01',0.2, 2.00);
insert into Sale values(null,'12121','1000-01-01','9999-01-01',1.0, 0);
insert into Sale values(null,'21212','1000-01-01','9999-01-01', 0, 2.00);
insert into Sale values(null,'086637677174','1000-01-01','9999-01-01', 1.0 , 0);
insert into Sale values(null,'012345678905','1000-01-01','9999-01-01',0, 2.00);
insert into Sale values(null,'06660','1000-01-01','9999-01-01', 1.5 , 0);
insert into Sale values(null,'06661','1000-01-01','9999-01-01',0, -200.00);
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/App/PackagedProduct.java
/*
* Creator: <NAME>
*
* Created on May 10, 2006
* Updated on January 17, 2008, September 12, 2012
*
* The PackagedProduct class is for products with a UPC code. It implements the ProductInfo interface.
*/
package ca.utoronto.csc301.SelfCheckOut.App;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.SaleDiscountException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.SaleDiscountNegativeException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.SaleDiscountTooLargeException;
/**
 * A PackagedProduct represents a single UPC-code-bearing product in the
 * store. Packaged products are sold as discrete single units, and never by
 * weight. Note the difference between 'items' and 'products': a 'product' is
 * a type of good sold at the store, whereas an 'item' is a particular box of
 * that product.
 *
 */
public class PackagedProduct implements ProductInfo {

	/** A text description of the product. */
	private String description;

	/** The unique 12-digit UPC identifying this product. */
	private UPC upc;

	/** The shelf price for one box of the product. */
	private double boxPrice;

	/** The estimated weight of one box of the product. */
	private double boxWeight;

	/** The tax rate applied to this product; 0 means the product is untaxed. */
	private double productTaxRate;

	/** The sale discount taken off the product's price; 0 means no sale. */
	private double saleDiscount;

	/** The flat rate discount for the product. */
	private double flatRateDiscount = 0;

	/**
	 * Builds an untaxed, non-sale product. All details can later be retrieved
	 * using the accessor methods.
	 *
	 * @param descrip
	 *            A text description of the product.
	 * @param UPCcode
	 *            A unique 12-digit UPC code for the product.
	 * @param productCost
	 *            The cost of the product.
	 * @param productWeight
	 *            The estimated weight of the product.
	 * @throws SaleDiscountException
	 *             if the (zero) discount fails validation against the price.
	 */
	public PackagedProduct(String descrip, UPC UPCcode, double productCost,
			double productWeight) throws SaleDiscountException {
		this(descrip, UPCcode, productCost, productWeight, 0, 0);
	}

	/**
	 * Builds a taxed, non-sale product. All details can later be retrieved
	 * using the accessor methods.
	 *
	 * @param descrip
	 *            A text description of the product.
	 * @param UPCcode
	 *            A unique 12-digit UPC code for the product.
	 * @param productCost
	 *            The cost of the product.
	 * @param productWeight
	 *            The estimated weight of the product.
	 * @param taxRate
	 *            The amount of tax incurred on this product.
	 * @throws SaleDiscountException
	 *             if the (zero) discount fails validation against the price.
	 */
	public PackagedProduct(String descrip, UPC UPCcode, double productCost,
			double productWeight, double taxRate) throws SaleDiscountException {
		this(descrip, UPCcode, productCost, productWeight, taxRate, 0);
	}

	/**
	 * Chief constructor: stores all relevant details of the product, which
	 * can be retrieved using the accessor methods.
	 *
	 * @param descrip
	 *            A text description of the product.
	 * @param UPCcode
	 *            A unique 12-digit UPC code for the product.
	 * @param productCost
	 *            The cost of the product.
	 * @param productWeight
	 *            The estimated weight of the product.
	 * @param taxRate
	 *            The amount of tax incurred on this product.
	 * @param discount
	 *            The total discount/sale on the product.
	 * @throws SaleDiscountException
	 *             if the discount is negative or larger than the price.
	 */
	public PackagedProduct(String descrip, UPC UPCcode, double productCost,
			double productWeight, double taxRate, double discount)
			throws SaleDiscountException {
		description = descrip;
		upc = UPCcode;
		boxPrice = productCost;
		boxWeight = productWeight;
		productTaxRate = taxRate;
		saleDiscount = discount;
		requireValidDiscount();
	}

	/**
	 * Rejects invalid discounts: a discount may be neither negative nor
	 * larger than the product's price.
	 *
	 * @throws SaleDiscountException
	 *             The type of discount error.
	 */
	private void requireValidDiscount() throws SaleDiscountException {
		if (saleDiscount > boxPrice) {
			throw new SaleDiscountTooLargeException();
		}
		if (saleDiscount < 0) {
			throw new SaleDiscountNegativeException();
		}
	}

	/**
	 * An accessor method which returns the unique UPC of the product.
	 */
	public UPC getUPC() {
		return upc;
	}

	/**
	 * An accessor method which returns the unique Code (UPC) of the product.
	 */
	public Code getCode() {
		return getUPC();
	}

	/**
	 * An accessor method which returns the price of the product.
	 */
	public double getPrice() {
		return boxPrice;
	}

	/**
	 * An accessor method which returns the weight of the product.
	 */
	public double getWeight() {
		return boxWeight;
	}

	/**
	 * An accessor method which returns the text description of the product.
	 */
	public String getDescription() {
		return myDescription();
	}

	/** Internal accessor kept private so the field can be renamed freely. */
	private String myDescription() {
		return description;
	}

	/**
	 * Reports whether any tax applies to this product.
	 *
	 * @see ca.utoronto.csc301.SelfCheckOut.App.ProductInfo#isTaxed()
	 */
	public boolean isTaxed() {
		return productTaxRate > 0;
	}

	/**
	 * Returns the tax rate applied to this product.
	 *
	 * @see ca.utoronto.csc301.SelfCheckOut.App.ProductInfo#getTaxRate()
	 */
	public double getTaxRate() {
		return productTaxRate;
	}

	/**
	 * Reports whether a sale discount applies to this product.
	 *
	 * @see ca.utoronto.csc301.SelfCheckOut.App.ProductInfo#isOnSale()
	 */
	public boolean isOnSale() {
		return saleDiscount > 0;
	}

	/**
	 * Returns the discount taken off the product's total price.
	 *
	 * @see ca.utoronto.csc301.SelfCheckOut.App.ProductInfo#getDiscount()
	 */
	public double getDiscount() {
		return saleDiscount;
	}

	/**
	 * Returns the flat rate discount for the product.
	 *
	 * @see ca.utoronto.csc301.SelfCheckOut.App.ProductInfo#getFlatRateDiscount()
	 */
	public double getFlatRateDiscount() {
		return this.flatRateDiscount;
	}

	/**
	 * Sets the flat rate discount for the product.
	 *
	 * @see ca.utoronto.csc301.SelfCheckOut.App.ProductInfo#setFlatRateDiscount()
	 */
	public void setFlatRateDiscount(double flatRateDiscount) {
		this.flatRateDiscount = flatRateDiscount;
	}
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/App/SelfCheckOut.java
/*
* Creator: <NAME>
*
* Created on May 10, 2006
* Updated on January 17, 2008, September 12, 2012
*
* The SelfCheckOut class contains functions that can be called by the real user interface
* of the self checkout systems.
*/
package ca.utoronto.csc301.SelfCheckOut.App;
import java.sql.SQLException;
import java.util.*;
import ca.utoronto.csc301.SelfCheckOut.Devices.BaggingArea;
import ca.utoronto.csc301.SelfCheckOut.Devices.BaggingAreaEvent;
import ca.utoronto.csc301.SelfCheckOut.Devices.BaggingAreaListener;
import ca.utoronto.csc301.SelfCheckOut.Devices.PaymentCollector;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.AddWhileBaggingException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.AddWhilePayingException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.IncorrectStateException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidProductException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidWeightException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.SaleDiscountException;
import ca.utoronto.csc301.SelfCheckOut.Gui.Actions;
/**
 * The SelfCheckOut class contains the business logic of the sales point, and
 * keeps track of the state of the current customer's checkout. The class
 * contains methods to handle adding items to the customer's cart, accepting
 * payment, and receiving events from the BaggingArea.
 *
 */
public class SelfCheckOut implements BaggingAreaListener {

	/**
	 * This enumeration represents the four states of the SelfCheckOut system:<br>
	 * <code>READY</code> means the system is awaiting a new customer<br>
	 * <code>ADDING</code> means the system is prepared for another item to be
	 * added<br>
	 * <code>BAGGING</code> means the system is awaiting notification that the
	 * item has been placed in the bagging area<br>
	 * <code>DONEADDING</code> means the system is waiting for the customer to
	 * pay for their items.<br>
	 * Attempts to add items while <code>BAGGING</code> or
	 * <code>DONEADDING</code> result in errors.
	 *
	 * @author <NAME>
	 *
	 */
	public enum checkOutState {
		READY, ADDING, BAGGING, DONEADDING
	}; // different states of the system

	/**
	 * The cart containing items the customer has scanned.
	 */
	private CheckOutCart checkOutCart;

	/**
	 * The associated BaggingArea, which will notify SelfCheckOut when it
	 * detects a weight change.
	 */
	public BaggingArea baggingArea;

	/**
	 * An object representing the credit card or cash accepting device.
	 */
	private PaymentCollector paymentCollector;

	/**
	 * The database of products in the store.
	 */
	private Database DB;

	/**
	 * The current state of the system.
	 */
	private checkOutState transactionState;

	/**
	 * Unique ID used to differentiate self checkout stands (defaults to 0).
	 */
	private int selfCheckOutID;

	/**
	 * The receipt of the check out cart.
	 */
	private String receipt;

	/**
	 * The argument-less constructor makes the necessary utility classes and
	 * passes them to the argumented constructor.
	 *
	 * @throws Exception
	 */
	public SelfCheckOut() throws Exception {
		this(new BaggingArea(), new PaymentCollector(), new Database());
	}

	/**
	 * This is the chief constructor. It records the provided BaggingArea,
	 * PaymentCollector and ProductDB, and attaches itself to the BaggingArea
	 * so that it receives notifications of BaggingAreaEvents. The self
	 * checkout ID defaults to 0.
	 *
	 * @param bagging
	 * @param payment
	 * @param db
	 * @throws Exception
	 */
	public SelfCheckOut(BaggingArea bagging, PaymentCollector payment,
			Database db) throws Exception {
		// Delegate to the ID-taking constructor so the setup logic lives in
		// exactly one place; 0 is the default stand ID (previously the two
		// constructors duplicated the same field assignments).
		this(bagging, payment, db, 0);
	}

	/**
	 * Secondary constructor. When ID is given, it is assigned to the self
	 * checkout. ID represents which self checkout a customer is currently
	 * using.
	 *
	 * @param bagging
	 * @param payment
	 * @param db
	 * @param ID
	 * @throws Exception
	 */
	public SelfCheckOut(BaggingArea bagging, PaymentCollector payment,
			Database db, int ID) throws Exception {
		checkOutCart = new CheckOutCart();
		baggingArea = bagging;
		baggingArea.attach(this);
		paymentCollector = payment;
		DB = db;
		transactionState = checkOutState.READY;
		selfCheckOutID = ID;
	}

	/**
	 * This version of addItem() accepts a UPC code and adds the corresponding
	 * PackagedProduct to the customer's cart. It looks the code up in the DB.
	 *
	 * @param upcCode
	 *            The UPC of the scanned item.
	 * @return The GroceryItem which is also added to the CheckOutCart.
	 * @throws IncorrectStateException
	 *             Thrown when addItem() is called during Bagging or once
	 *             payment is initiated.
	 * @throws InvalidProductException
	 *             Thrown when a product corresponding to the UPC is not found.
	 * @throws InvalidWeightException
	 */
	public GroceryItem addItem(UPC upcCode) throws IncorrectStateException,
			InvalidProductException, InvalidWeightException,
			SaleDiscountException {
		/*
		 * if transactionState is BAGGING or DONEADDING, don't allow the
		 * customer to add!
		 */
		if (transactionState == checkOutState.BAGGING) {
			// user should place the previous item in the bagging area first.
			throw new AddWhileBaggingException();
		} else if (transactionState == checkOutState.DONEADDING) {
			// user has chosen to pay, and cannot add more items
			throw new AddWhilePayingException();
		} else {
			// returns a ProductInfo object
			ProductInfo info = DB.lookUpItem(upcCode);
			if (info == null) {
				throw new InvalidProductException();
			} else {
				// create a new GroceryItem object
				GroceryItem newItem = new GroceryItem(info,
						((PackagedProduct) info).getPrice(),
						((PackagedProduct) info).getWeight());
				// add the new GroceryItem object to vector
				checkOutCart.addItemToCart(newItem);
				transactionState = checkOutState.BAGGING;
				return newItem;
			}
		}
	}

	/**
	 * This version of addItem() accepts a BIC and weight, and adds the
	 * corresponding BulkProduct to the customer's cart. It looks the code up
	 * in the DB, creates a GroceryItem, and adds it to the cart.
	 *
	 * @param bicCode
	 *            The BIC of the scanned item.
	 * @param weight
	 *            The amount of the BulkProduct being purchased.
	 * @return The GroceryItem which is also added to the CheckOutCart.
	 * @throws IncorrectStateException
	 *             Thrown when addItem() is called during Bagging or once
	 *             payment is initiated.
	 * @throws InvalidProductException
	 *             Thrown when a product corresponding to the BIC is not found.
	 * @throws InvalidWeightException
	 */
	public GroceryItem addItem(BIC bicCode, double weight)
			throws IncorrectStateException, InvalidProductException,
			InvalidWeightException, SaleDiscountException {
		/*
		 * if transactionState is BAGGING or DONEADDING, don't allow the
		 * customer to add!
		 */
		if (transactionState == checkOutState.BAGGING) {
			// user should place the previous item in the bagging area first.
			throw new AddWhileBaggingException();
		} else if (transactionState == checkOutState.DONEADDING) {
			// user has chosen to pay, and cannot add more items
			throw new AddWhilePayingException();
		} else {
			// returns a ProductInfo object
			ProductInfo info = DB.lookUpItem(bicCode);
			if (info == null) {
				throw new InvalidProductException();
			} else {
				// create a new GroceryItem object; bulk items are priced by
				// weight, so the line price is unit price times weight
				GroceryItem newItem = new GroceryItem(info,
						((BulkProduct) info).getPrice() * weight, weight);
				// add the new GroceryItem object to the cart
				checkOutCart.addItemToCart(newItem);
				transactionState = checkOutState.BAGGING;
				return newItem;
			}
		}
	}

	/**
	 * This method retrieves an enumeration of all the items currently in the
	 * cart and returns it.
	 */
	public Enumeration<GroceryItem> listItemsInCart() {
		return checkOutCart.listItems();
	}

	/**
	 * This method returns the current cost total of all items in the cart.
	 */
	public double getTotalCost() {
		return checkOutCart.getTotalCost();
	}

	/**
	 * This method returns the total amount of tax for all items in the cart.
	 */
	public double getTotalTax() {
		return checkOutCart.getTotalTax();
	}

	/**
	 * Return the total amount of money saved from items on sale in the
	 * current check out cart.
	 *
	 * @return The total amount of money saved from items on sale in the
	 *         current check out cart.
	 */
	public double getTotalDiscount() {
		return checkOutCart.getTotalDiscount();
	}

	/**
	 * Return the self checkout ID.
	 *
	 * @return the self checkout ID.
	 */
	public int getSCOid() {
		return selfCheckOutID;
	}

	/**
	 * Return the total cost of all items in the cart without tax.
	 *
	 * @return The total cost of all items in the cart without tax.
	 */
	public double getSubTotal() {
		return checkOutCart.getSubTotal();
	}

	/**
	 * When the bagging area detects a change in total weight, this function
	 * is called to change the state of the system (transactionState).
	 * Normally we would do a weight check here to ascertain if the predicted
	 * and actual bagging area weights match. Since that functionality is not
	 * implemented in this example, we simply change state to ADDING.
	 *
	 * @param be
	 *            The attached BaggingArea which is sending the event.
	 * @param event
	 *            The BaggingAreaEvent, which includes the total weight and
	 *            most recent change in the bagging area.
	 */
	public void notifyBaggingAreaEvent(BaggingArea be, BaggingAreaEvent event) {
		transactionState = checkOutState.ADDING;
	}

	/**
	 * This function handles the payment aspect of the self checkout machine.
	 * It will not reset the transactionState of the machine even after the
	 * customer finished paying so that the customer will have time to look at
	 * the receipt.
	 *
	 * @param logTax
	 *            Log the tax or not.
	 * @param cardID
	 *            Card identifier passed through to the PaymentCollector.
	 * @param payAmount
	 *            Amount tendered, passed through to the PaymentCollector.
	 * @param payType
	 *            Payment type code understood by the PaymentCollector.
	 * @return the cart for this transaction. If the customer has paid in
	 *         full, the machine is reset for the next customer and the
	 *         completed cart is returned; otherwise the same, still-active
	 *         cart is returned and the state stays DONEADDING.
	 */
	public CheckOutCart payForGroceries(boolean logTax, String cardID,
			double payAmount, int payType) throws Exception {
		// Hold a reference to the current cart. Note this is NOT a copy: it
		// is the same cart object, kept so it can still be returned after
		// checkOutCart is replaced below.
		CheckOutCart oldCheckOutCart = checkOutCart;
		if (transactionState == checkOutState.BAGGING) {
			// user should place the previous item in the bagging area first.
			throw new AddWhileBaggingException();
		}
		transactionState = checkOutState.DONEADDING;
		paymentCollector.setTotalCost(getTotalCost());
		paymentCollector.collect(cardID, payAmount, payType);
		// check if customer has fully paid for grocery items
		if (paymentCollector.isFinishedPaying()) {
			// here is where we would record the transaction into our store's
			// inventory and financial records, if we were simulating that
			// part of the system.
			if (logTax) {
				DB.logTax(checkOutCart.getTotalTax());
			}
			// check all items being bought to see if they are impulse buy
			// products
			try {
				DB.updateImpulseProducts(oldCheckOutCart, getSCOid());
			} catch (SQLException e) {
				// Impulse-buy statistics are best-effort: a database failure
				// here must not abort a payment that has already been
				// collected, but it should not vanish silently either
				// (previously this catch block was empty).
				System.err.println("SelfCheckOut: failed to update impulse"
						+ " products: " + e.getMessage());
			}
			setReceipt();
			checkOutCart = new CheckOutCart();
			transactionState = checkOutState.READY;
		}
		return oldCheckOutCart;
	}

	/**
	 * Accessor method for the cart's receipt.
	 *
	 * @return The Receipt.
	 */
	public String getReceipt() {
		return receipt;
	}

	/**
	 * Set the receipt for the check out cart. Called once payment completes,
	 * while the just-paid cart is still the current cart.
	 */
	private void setReceipt() {
		String receipt = "========== Receipt ==========";
		receipt += "\nShopping cart "
				+ (new Actions()).printShoppingCart(listItemsInCart());
		receipt += "\n\nSub-Total $" + getSubTotal();
		receipt += "\nTax $" + getTotalTax();
		receipt += "\nDiscount $" + getTotalDiscount();
		receipt += "\nTotal Cost $" + getTotalCost();
		receipt += "\nTotal Paid $" + this.getPaymentCollector().getTotalPaid();
		receipt += "\nChange $" + getPaymentCollector().getChange();
		this.receipt = receipt;
	}

	/**
	 * Handling payment is not part of this assignment. This function just
	 * returns the cart indicating the payment was received. It also clears
	 * the shopping cart and resets the system state. If we implemented this
	 * part of the system, we would throw an exception to indicate a failed
	 * transaction rather than returning null.
	 *
	 * @return the cart corresponding to the just-completed transaction.
	 * @throws Exception
	 */
	public CheckOutCart payForGroceries() throws Exception {
		// Default do not log tax (so payForGroceries can be used in testing)
		return payForGroceries(false, "", 0.0, 1);
	}

	/**
	 * An accessor method which returns the BaggingArea associated with this
	 * SelfCheckout. Useful if the application wants to also receive bagging
	 * events, for example.
	 */
	public BaggingArea getBaggingArea() {
		return baggingArea;
	}

	/**
	 * An accessor method which returns the ProductDB associated with this
	 * SelfCheckOut. Useful if the application wants to add items to the
	 * database or to look up items.
	 */
	public Database getProductDB() {
		return DB;
	}

	/**
	 * An accessor method which returns the PaymentCollector associated with
	 * this SelfCheckOut, e.g. so callers can inspect the amount paid or the
	 * change due.
	 */
	public PaymentCollector getPaymentCollector() {
		return paymentCollector;
	}

	/**
	 * A utility method for resetting the SelfCheckOut to the initial state
	 * with an empty cart. Useful when testing.
	 */
	public void resetAll() {
		// replace the old cart with a new one
		checkOutCart = new CheckOutCart();
		// reset our state to waiting for a customer.
		transactionState = checkOutState.READY;
	}
}
<file_sep>/Database/insertDB.sql
--This script adds the default items into the database.
--Many of these items are used for testing purposes and are fake data.
--NOTE: sale start dates previously read '1000-00-01', which has an invalid
--month of 00; they are normalized to '1000-01-01' to be valid dates and to
--match the first Sale row. The out-of-range reductions (1.5, -200.00) are
--kept as-is: they appear to be fixtures for discount-validation tests
--(see SaleDiscountTooLargeException / SaleDiscountNegativeException).
insert into ProductCategory values ('veggie', 0.00);
insert into ProductCategory values ('fruit', 0.10);
insert into ProductCategory values ('candy', 0.08);
insert into ProductCategory values ('utensil', 0.05);
insert into ProductCategory values ('chocolate', 0.15);
insert into ProductCategory values ('magazine', 0.13);
insert into ProductCategory values ('prepared food', 0.15);
insert into BulkProduct values ('11111', 'Banana', 0.69, 'veggie', 0.49);
insert into BulkProduct values ('22222', 'Orange', 0.99, 'fruit', 0.69);
insert into BulkProduct values ('33333', 'Spinach', 0.99, 'veggie', 0.55);
insert into BulkProduct values ('44444', 'Fuji Apple', 2.79, 'fruit', 1.99);
insert into BulkProduct values ('55555', 'Kiwi', 1.29, 'fruit', 0.85);
insert into BulkProduct values ('66666', 'Jellybean', 0.05, 'candy', 0.02);
insert into BulkProduct values ('77777', 'Plastic fork', 0.25, 'utensil', 0.10);
insert into BulkProduct values('88888', 't-bone steak',9.99,'prepared food', 6.78); --sale
insert into BulkProduct values('99999', 'liver', 10, 'prepared food', 8.69); --sale
insert into BulkProduct values('12345', 'chicken', 4.50, 'prepared food', 4.00); --sale
insert into BulkProduct values('12121', 'fries', 10, 'prepared food', 8.69); --sale
insert into BulkProduct values('21212', 'stale fries', 2.00, 'prepared food', 1.00); --sale
insert into BulkProduct values('06660', 'fries', 10, 'prepared food', 8.69); --sale
insert into BulkProduct values('06661', 'stale fries', 2.00, 'prepared food', 1.00); --sale
insert into PackagedProduct values ('786936224306', 'Kellogg Cereal', 3.52, 1.35, 'prepared food', 3.0);
insert into PackagedProduct values ('717951000842', 'Coca Cola (12 pack)', 3.20, 4, 'prepared food', 3.0);
insert into PackagedProduct values ('024543213710', 'Ice Cream', 4.00, 2.2, 'prepared food', 3.23);
insert into PackagedProduct values ('085392132225', 'Oreo Cookies', 3.50, 0.8, 'prepared food', 2.0);
insert into PackagedProduct values ('737666003167', 'Sees Chocolate', 4.50, 1, 'chocolate', 4.0);
insert into PackagedProduct values ('780166035718', 'Wired', 3.33, 0.6, 'magazine', 1.25); --sale
insert into PackagedProduct values ('796030114977', 'Brownies', 6.00, 2, 'prepared food', 4.50); --sale
insert into PackagedProduct values ('712345678904', 'Bread', 10, 4, 'prepared food', 5.00); --sale
insert into PackagedProduct values ('086637677174', 'mystery box', 99.99, 2, 'prepared food', 10); --sale
insert into PackagedProduct values ('012345678905', 'ketchup', 2, 4, 'prepared food', 1.00); --sale
insert into ImpulseProducts values (1, 'Sees Chocolate', '737666003167', 1, 0.5, '2012-10-14');
insert into ImpulseProducts values (1, 'Wired', '780166035718', 2, 4.16, '2012-10-20');
insert into ImpulseProducts values (1, 'Jellybean', '66666', 5, 0.15, '2012-10-10');
insert into ImpulseProducts values (1, 'Plastic fork', '77777', 14, 2.1, '2012-10-20');
insert into ImpulseProducts values (2, 'Sees Chocolate', '737666003167', 27, 0.5, '2012-10-14');
insert into ImpulseProducts values (2, 'Wired', '780166035718', 2, 4.16, '2012-10-11');
insert into ImpulseProducts values (2, 'Jellybean', '66666', 3, 0.09, '2012-10-12');
insert into ImpulseProducts values (2, 'Plastic fork', '77777', 57, 8.55, '2012-10-13');
insert into ImpulseProducts values (3, 'Sees Chocolate', '737666003167', 4, 2.0, '2012-09-14');
insert into ImpulseProducts values (3, 'Wired', '780166035718', 53, 110.24, '2012-05-14');
insert into ImpulseProducts values (3, 'Jellybean', '66666', 53, 1.59, '2012-08-14');
insert into ImpulseProducts values (3, 'Plastic fork', '77777', 33, 4.95, '2012-09-28');
insert into Sale values(null,'88888','1000-01-01','9999-01-01',0.20,0);
insert into Sale values(null,'780166035718','1000-01-01','9999-01-01',0.20,0);
insert into Sale values(null,'796030114977','1000-01-01','9999-01-01',0.0, 2.00);
insert into Sale values(null,'99999','1000-01-01','9999-01-01',0.0, 2.00);
insert into Sale values(null,'12345','1000-01-01','9999-01-01',0.2, 2.00);
insert into Sale values(null,'712345678904','1000-01-01','9999-01-01',0.2, 2.00);
insert into Sale values(null,'12121','1000-01-01','9999-01-01',1.0, 0);
insert into Sale values(null,'21212','1000-01-01','9999-01-01', 0, 2.00);
insert into Sale values(null,'086637677174','1000-01-01','9999-01-01', 1.0 , 0);
insert into Sale values(null,'012345678905','1000-01-01','9999-01-01',0, 2.00);
insert into Sale values(null,'06660','1000-01-01','9999-01-01', 1.5 , 0);
insert into Sale values(null,'06661','1000-01-01','9999-01-01',0, -200.00);
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/App/CheckOutCart.java
/*
* Creator: <NAME>
*
* Created on May 10, 2006
* Updated on January 17, 2008, September 12, 2012
*
* The CheckOutCart class maintains the items added as well as the total weight (tracked by the system, NOT
* the bagging area) and cost of the items.
*/
package ca.utoronto.csc301.SelfCheckOut.App;
import java.util.Vector;
import java.util.*;
/**
 * The CheckOutCart class stores a Vector of GroceryItems which the customer
 * has scanned. It represents the products which the customer has scanned and
 * bagged so far in the transaction.
 *
 */
public class CheckOutCart {

	/** Every GroceryItem the customer has scanned so far. */
	private Vector<GroceryItem> scannedItems;

	/** Running cost of the cart contents, tax included. */
	private double runningCost;

	/** Running tax accumulated over all taxable items. */
	private double runningTax;

	/**
	 * Predicted weight of the cart contents, tracked by the system (NOT the
	 * bagging-area scale); can be compared against the scale's reading.
	 */
	private double runningWeight;

	/** Total money saved so far through products on sale. */
	private double runningDiscount;

	/**
	 * Creates a new CheckOutCart with an empty item list, 0 cost and 0
	 * weight.
	 */
	public CheckOutCart() {
		scannedItems = new Vector<GroceryItem>();
		runningCost = 0;
		runningWeight = 0;
		runningTax = 0;
		runningDiscount = 0;
	}

	/**
	 * Accessor method which returns the cost of the items in the cart.
	 */
	public double getTotalCost() {
		return runningCost;
	}

	/**
	 * Accessor method which returns the predicted weight of the items in the
	 * cart.
	 */
	public double getTotalWeight() {
		return runningWeight;
	}

	/**
	 * Add a single item to the cart, and fold its weight, cost, tax and any
	 * sale saving into the running totals.
	 *
	 * @param newItem
	 */
	public void addItemToCart(GroceryItem newItem) {
		ProductInfo info = newItem.getInfo();
		scannedItems.add(newItem);
		double effectivePrice = newItem.getPrice();
		if (info.isOnSale()) {
			// Packaged products carry a flat per-unit discount; everything
			// else (bulk) is discounted per unit of weight.
			double reduction = (info instanceof PackagedProduct)
					? info.getDiscount()
					: info.getDiscount() * newItem.getWeight();
			effectivePrice -= reduction;
			runningDiscount += newItem.getPrice() - effectivePrice;
		}
		runningCost += effectivePrice;
		if (info.isTaxed()) {
			// Tax is charged on the discounted price.
			double taxForItem = effectivePrice * info.getTaxRate();
			runningCost += taxForItem;
			runningTax += taxForItem;
		}
		runningWeight += newItem.getWeight();
	}

	/**
	 * This method returns an enumeration of the GroceryItems in the cart. We
	 * don't return the Vector since we don't want external code to alter our
	 * cart.
	 */
	public Enumeration<GroceryItem> listItems() {
		return scannedItems.elements();
	}

	/**
	 * Return the total tax incurred on all items in the cart.
	 *
	 * @return The total tax incurred on all items in the cart.
	 */
	public double getTotalTax() {
		return runningTax;
	}

	/**
	 * Return the sub-total: the total cost of all items in the cart without
	 * tax.
	 *
	 * @return The total cost of all items in the cart without tax.
	 */
	public double getSubTotal() {
		return runningCost - runningTax;
	}

	/**
	 * Return the total amount of money saved from items on sale in the check
	 * out cart.
	 *
	 * @return The total amount of money saved from items on sale.
	 */
	public double getTotalDiscount() {
		return runningDiscount;
	}
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/App/BIC.java
/*
* Creator: <NAME>
*
* Created on May 10, 2006
* Updated on January 17, 2008, September 12, 2012
*
* The BIC class maintains the BIC code. It verifies the BIC code upon creation of an object.
*/
package ca.utoronto.csc301.SelfCheckOut.App;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidBICException;
/**
* The BIC class represents a Bulk Item Code. This is an identifying code
* (implementing the Code interface) which would be affixed to a Bulk grocery
* item, such as produce or meat. These items are sold by weight, and generally
* do not carry a UPC. The BIC is a wrapper for a 5-digit String which holds the
* actual numeric code.
*
*/
public class BIC implements Code {

    /** Required length, in characters, of a bulk item code. */
    private static final int CODE_LENGTH = 5;

    /**
     * Contains the 5-digit numeric code which this object represents.
     */
    private String myBulkItemCode;

    /**
     * This constructor checks that the code String is a valid code, and then
     * stores it. In the case that the code string is invalid, an
     * InvalidBICException is thrown.
     *
     * @param bulkItemCode
     *            A String containing a 5-digit bulk item code.
     * @throws InvalidBICException
     *             if the code is null, is not exactly 5 characters long, or
     *             contains a character outside '0'..'9'.
     */
    public BIC(String bulkItemCode) throws InvalidBICException {
        if (bulkItemCode == null) {
            /*
             * If we don't catch a null here, the following checkLength() call
             * would throw a NullPointerException. Run-time exceptions are
             * generally quite serious bugs. By throwing a custom checked
             * exception instead, the caller can handle the problem.
             */
            throw new InvalidBICException("BIC must not be null");
        } else if (!checkLength(bulkItemCode)) {
            // Illegal string length; the message is largely for debugging.
            throw new InvalidBICException("BIC length must be 5");
        } else if (hasNonDigits(bulkItemCode)) {
            throw new InvalidBICException("BIC must not contain non-digits");
        } else {
            myBulkItemCode = bulkItemCode;
        }
    }

    /*
     * (non-Javadoc)
     *
     * @see edu.uci.ics121.SelfCheckOut.App.Code#getCode()
     */
    public String getCode() {
        return myBulkItemCode;
    }

    /**
     * Two codes are equal when their digit strings have the same content.
     * <p>
     * BUG FIX: the original implementation compared the strings with
     * {@code ==}, which tests object identity rather than content, so two
     * BICs holding equal codes in distinct String instances were reported
     * as unequal.
     *
     * @param comparedCode the code to compare against; may be null.
     * @return true iff the other code is non-null and has the same digits.
     */
    public boolean equals(Code comparedCode) {
        if (comparedCode == null) {
            return false;
        }
        return myBulkItemCode.equals(comparedCode.getCode());
    }

    /*
     * (non-Javadoc)
     *
     * @see java.lang.Object#hashCode()
     */
    public int hashCode() {
        /*
         * Return the corresponding integer value of the last character of the
         * bulk item code (0-9). Kept unchanged for compatibility; note this
         * is a very weak hash, as the original author acknowledged.
         */
        return (myBulkItemCode.charAt(myBulkItemCode.length() - 1)) - 48;
    }

    /**
     * Checks that the length of the provided string is exactly 5.
     *
     * @param code the candidate code (must not be null).
     * @return <code>true</code> if the string length is 5; <code>false</code>
     *         otherwise.
     */
    private boolean checkLength(String code) {
        return code.length() == CODE_LENGTH;
    }

    /**
     * Checks if the provided string contains non-digit characters. Only the
     * ASCII digits '0'..'9' are accepted (deliberately stricter than
     * Character.isDigit, which also accepts non-ASCII digits).
     *
     * @param code the candidate code (must not be null).
     * @return <code>true</code> if the string has non-digit characters;
     *         <code>false</code> otherwise.
     */
    public boolean hasNonDigits(String code) {
        for (int i = 0; i < code.length(); i++) {
            final char c = code.charAt(i);
            if (c < '0' || c > '9') {
                return true;
            }
        }
        return false;
    }
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/IntegrationTests/PurchaseLargeNumberOfItems.java
package ca.utoronto.csc301.SelfCheckOut.IntegrationTests;
import static org.junit.Assert.*;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import ca.utoronto.csc301.SelfCheckOut.App.BIC;
import ca.utoronto.csc301.SelfCheckOut.App.GroceryItem;
import ca.utoronto.csc301.SelfCheckOut.App.Database;
import ca.utoronto.csc301.SelfCheckOut.App.SelfCheckOut;
import ca.utoronto.csc301.SelfCheckOut.App.UPC;
import ca.utoronto.csc301.SelfCheckOut.Devices.BaggingArea;
import ca.utoronto.csc301.SelfCheckOut.Devices.BaggingAreaEvent;
import ca.utoronto.csc301.SelfCheckOut.Devices.PaymentCollector;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.IncorrectStateException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidBICException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidProductException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidUPCException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidWeightException;
import ca.utoronto.csc301.SelfCheckOut.Exceptions.SaleDiscountException;
/**
 * Integration test that rings up a large number of items (five bulk items
 * plus 1300 packaged items added in loops) through a full SelfCheckOut
 * stack, asserting the running total after each stage and the final
 * sub-total / tax / total.
 * <p>
 * NOTE(review): the expected dollar amounts (1.8009, 111.2661, ...) are
 * baked to the contents of Database/TestSelfCheckOut.db — confirm the
 * fixture before changing any of them.
 */
public class PurchaseLargeNumberOfItems {
    // Shared fixture: one checkout stack reused by the whole class.
    static SelfCheckOut firstSCO;
    static BaggingArea firstBA;
    static Database firstPDB;
    static PaymentCollector firstPC;
    static double EPSILON = 1e-15;
    // Bulk (by-weight) and packaged (by-UPC) items under test.
    static BIC firstBIC, secondBIC, thirdBIC, fourthBIC, fifthBIC;
    static UPC firstUPC, secondUPC, thirdUPC, fourthUPC;
    static double firstUPCWeight, secondUPCWeight, thirdUPCWeight,
            fourthUPCWeight;
    static double firstWeight, secondWeight, thirdWeight, fourthWeight,
            fifthWeight;
    static GroceryItem gi;
    // Tax multipliers: initialized to 1 and then incremented by each item's
    // tax rate from the database, i.e. they hold (1 + taxRate).
    static double firstBulkTaxRate = 1, secondBulkTaxRate = 1,
            thirdBulkTaxRate = 1, fourthBulkTaxRate = 1, fifthBulkTaxRate = 1;
    static double firstPackagedTaxRate = 1, secondPackagedTaxRate = 1,
            thirdPackagedTaxRate = 1, fourthPackagedTaxRate = 1;

    /**
     * One-time setup: builds the checkout stack, the item codes, and reads
     * each item's tax rate and weight from the test database.
     */
    @BeforeClass
    public static void classSetUp() throws Exception {
        // create a SelfCheckOut
        firstBA = new BaggingArea();
        firstPC = new PaymentCollector();
        firstPDB = new Database("Database/TestSelfCheckOut.db");
        firstSCO = new SelfCheckOut(firstBA, firstPC, firstPDB);
        // create a packaged item
        try {
            firstUPC = new UPC("786936224306");
            secondUPC = new UPC("717951000842");
            thirdUPC = new UPC("024543213710");
            fourthUPC = new UPC("085392132225");
        } catch (InvalidUPCException e) {
            fail("Invalid UPC");
        }
        // create a bulk item
        try {
            firstBIC = new BIC("11111");
            secondBIC = new BIC("22222");
            thirdBIC = new BIC("33333");
            fourthBIC = new BIC("44444");
            fifthBIC = new BIC("55555");
        } catch (InvalidBICException e) {
            fail("Invalid BIC");
        }
        // Reset then load the (1 + taxRate) multipliers for the bulk items.
        firstBulkTaxRate = 1;
        secondBulkTaxRate = 1;
        thirdBulkTaxRate = 1;
        fourthBulkTaxRate = 1;
        fifthBulkTaxRate = 1;
        firstBulkTaxRate += firstPDB.lookUpItem(firstBIC).getTaxRate();
        secondBulkTaxRate += firstPDB.lookUpItem(secondBIC).getTaxRate();
        thirdBulkTaxRate += firstPDB.lookUpItem(thirdBIC).getTaxRate();
        fourthBulkTaxRate += firstPDB.lookUpItem(fourthBIC).getTaxRate();
        fifthBulkTaxRate += firstPDB.lookUpItem(fifthBIC).getTaxRate();
        // Same for the packaged items.
        firstPackagedTaxRate = 1;
        secondPackagedTaxRate = 1;
        thirdPackagedTaxRate = 1;
        fourthPackagedTaxRate = 1;
        firstPackagedTaxRate += firstPDB.lookUpItem(firstUPC).getTaxRate();
        secondPackagedTaxRate += firstPDB.lookUpItem(secondUPC).getTaxRate();
        thirdPackagedTaxRate += firstPDB.lookUpItem(thirdUPC).getTaxRate();
        fourthPackagedTaxRate += firstPDB.lookUpItem(fourthUPC).getTaxRate();
        // Bulk item weights entered at the scale, and the fixed per-unit
        // weights of the packaged items.
        firstWeight = 2.61;
        secondWeight = 112.39;
        thirdWeight = 225.21;
        fourthWeight = 400.80;
        fifthWeight = 45.50;
        firstUPCWeight = 1.35;
        secondUPCWeight = 4;
        thirdUPCWeight = 2.2;
        fourthUPCWeight = 0.8;
    }

    /** Release the shared fixture so it can be garbage collected. */
    @AfterClass
    public static void classTearDown() {
        firstSCO = null;
        firstBA = null;
        firstPDB = null;
        firstPC = null;
    }

    @Before
    public void setUp() throws Exception {
        // gi = firstSCO.addItem(firstBIC, firstWeight);
    }

    /** Empty the cart between tests. */
    @After
    public void tearDown() {
        firstSCO.resetAll();
    }

    /**
     * Drives the whole scenario: each bulk item is added and bagged, then
     * each packaged item is added in a loop (100/200/500/500 repetitions,
     * since the test database is small), with the expected running total
     * carried forward in {@code temp} and the bagged weight in
     * {@code currentWeight}.
     */
    @Test
    public void PurchasingLargeNumberOfproducts() throws InvalidUPCException {
        double temp, currentWeight;
        BaggingAreaEvent event1;
        BaggingAreaEvent event2;
        BaggingAreaEvent event3;
        BaggingAreaEvent event4;
        BaggingAreaEvent event5;
        BaggingAreaEvent event6;
        BaggingAreaEvent event7;
        BaggingAreaEvent event8;
        try {
            firstSCO.addItem(firstBIC, firstWeight);
            event1 = new BaggingAreaEvent(0, firstWeight);
            firstSCO.notifyBaggingAreaEvent(firstBA, event1);
            firstSCO.addItem(secondBIC, secondWeight);
            assertEquals(firstSCO.getTotalCost(),
                    1.8009 + (111.2661 * secondBulkTaxRate), 0.1);
            currentWeight = firstWeight + secondWeight;
            event2 = new BaggingAreaEvent(currentWeight, Math.abs(currentWeight
                    - secondWeight));
            firstSCO.notifyBaggingAreaEvent(firstBA, event2);
            firstSCO.addItem(thirdBIC, thirdWeight);
            assertEquals(firstSCO.getTotalCost(), (222.9579 * thirdBulkTaxRate)
                    + 1.8009 + (111.2661 * secondBulkTaxRate), 0.1);
            temp = (222.9579 * thirdBulkTaxRate) + 1.8009
                    + (111.2661 * secondBulkTaxRate);
            currentWeight += thirdWeight;
            event3 = new BaggingAreaEvent(currentWeight, Math.abs(currentWeight
                    - thirdWeight));
            firstSCO.notifyBaggingAreaEvent(firstBA, event3);
            firstSCO.addItem(fourthBIC, fourthWeight);
            assertEquals(firstSCO.getTotalCost(),
                    (1118.232 * fourthBulkTaxRate) + temp, 0.1);
            temp = (1118.232 * fourthBulkTaxRate) + temp;
            currentWeight += fourthWeight;
            event4 = new BaggingAreaEvent(currentWeight, Math.abs(currentWeight
                    - fourthWeight));
            firstSCO.notifyBaggingAreaEvent(firstBA, event4);
            firstSCO.addItem(fifthBIC, fifthWeight);
            assertEquals(firstSCO.getTotalCost(), (58.695 * fifthBulkTaxRate)
                    + temp, 0.1);
            temp = (58.695 * fifthBulkTaxRate) + temp;
            currentWeight += fifthWeight;
            // since we do not have very large database we will add same product
            // number of times
            event5 = new BaggingAreaEvent(currentWeight, Math.abs(currentWeight
                    - fifthWeight));
            for (int i = 0; i < 100; i++) {
                firstSCO.notifyBaggingAreaEvent(firstBA, event5);
                firstSCO.addItem(firstUPC);
            }
            assertEquals(firstSCO.getTotalCost(), temp
                    + (352 * firstPackagedTaxRate), 0.1);
            temp = temp + (352 * firstPackagedTaxRate);
            currentWeight += (firstUPCWeight * 100);
            event6 = new BaggingAreaEvent(currentWeight, Math.abs(currentWeight
                    - (firstUPCWeight * 100)));
            for (int i = 0; i < 200; i++) {
                firstSCO.notifyBaggingAreaEvent(firstBA, event6);
                firstSCO.addItem(secondUPC);
            }
            assertEquals(firstSCO.getTotalCost(), temp
                    + (640 * secondPackagedTaxRate), 0.1);
            temp = temp + (640 * secondPackagedTaxRate);
            currentWeight += (secondUPCWeight * 200);
            event7 = new BaggingAreaEvent(currentWeight, Math.abs(currentWeight
                    - (secondUPCWeight * 200)));
            for (int i = 0; i < 500; i++) {
                firstSCO.notifyBaggingAreaEvent(firstBA, event7);
                firstSCO.addItem(thirdUPC);
            }
            assertEquals(firstSCO.getTotalCost(), temp
                    + (2000 * thirdPackagedTaxRate), 0.1);
            temp = temp + (2000 * thirdPackagedTaxRate);
            currentWeight += (thirdUPCWeight * 500);
            event8 = new BaggingAreaEvent(currentWeight, Math.abs(currentWeight
                    - (thirdUPCWeight * 500)));
            for (int i = 0; i < 500; i++) {
                firstSCO.notifyBaggingAreaEvent(firstBA, event8);
                firstSCO.addItem(fourthUPC);
            }
            assertEquals(firstSCO.getTotalCost(), temp
                    + (1750 * fourthPackagedTaxRate), 0.1);
            temp = temp + (1750 * fourthPackagedTaxRate);
            currentWeight += (fourthUPCWeight * 500);
            // check sub-total (price without tax)
            assertEquals(firstSCO.getSubTotal(), 6254.9518, 0.01);
            // check total tax
            assertEquals(firstSCO.getTotalTax(), 840.1193, 0.001);
            // check total price against the independently accumulated sum
            assertEquals(firstSCO.getTotalCost(), temp, 0.01);
        } catch (IncorrectStateException e) {
            fail("Incorrect State");
        } catch (InvalidProductException e) {
            fail("Invalid Product");
        } catch (InvalidWeightException e) {
            fail("Invalid Weight");
        } catch (SaleDiscountException e) {
            fail("Invalid Sale");
        }
    }
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/Gui/StarterGUI.java
/**
* The GUI of the main menu.
*
* @author <NAME>
*
*/
package ca.utoronto.csc301.SelfCheckOut.Gui;
import java.awt.BorderLayout;
import java.awt.Font;
import java.awt.GridBagConstraints;
import java.awt.GridBagLayout;
import java.awt.Insets;
import java.awt.Toolkit;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import javax.swing.AbstractButton;
import javax.swing.JButton;
import javax.swing.JFrame;
import javax.swing.JPanel;
public class StarterGUI extends JFrame implements ActionListener {

    /**
     * Class serial version
     */
    private static final long serialVersionUID = 1L;

    /**
     * Button for customer action
     */
    protected JButton customerButton;

    /**
     * Button for employee action
     */
    protected JButton employeeButton;

    /**
     * Button for administrator action
     */
    protected JButton adminButton;

    /**
     * Path to the home icon
     */
    protected static final String HomeIconPath = "pic/home.png";

    /**
     * Path to the shopping cart icon (kept for subclasses/other screens;
     * not referenced in this class).
     */
    protected static final String ScoIconPath = "pic/shopping_cart.png";

    /**
     * Text that will be displayed on top of the window.
     */
    protected static final String windowTitle = "Main menu";

    /**
     * Button tool-tips
     */
    protected static final String customerTooltip = "Click this button to start the SelfCheckOut application";
    protected static final String employeeTooltip = "Click this button to start the login application for employees";
    protected static final String adminTooltip = "Click this button to start the login application for administrators";

    /**
     * The constructor of StarterGUI. Instantiates all the elements within
     * and shows the main-menu window centered on screen.
     */
    public StarterGUI() {
        super(windowTitle);
        setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        add(addPanel());
        // Display the window
        setLocationRelativeTo(null); // center frame
        setIconImage(Toolkit.getDefaultToolkit().getImage(HomeIconPath)); // add custom icon
        pack();
        setSize(500, 200);
        setVisible(true);
    }

    /**
     * Build the panel holding the three role buttons, laid out in a single
     * GridBag row.
     *
     * @return the populated main panel.
     */
    private JPanel addPanel() {
        instantiateButtons();
        JPanel mainPane = new JPanel(new BorderLayout());
        GridBagLayout gridbag = new GridBagLayout();
        GridBagConstraints c = new GridBagConstraints();
        mainPane.setLayout(gridbag);
        c.anchor = GridBagConstraints.EAST;
        c.insets = new Insets(10, 10, 10, 10);
        c.weightx = GridBagConstraints.NONE;
        c.gridx = 0;
        c.gridy = 1;
        mainPane.add(customerButton, c);
        c.gridx = 1;
        c.gridy = 1;
        mainPane.add(employeeButton, c);
        c.gridx = 2;
        c.gridy = 1;
        mainPane.add(adminButton, c);
        return mainPane;
    }

    /*
     * Helper that instantiates the buttons and wires their action commands
     * to this listener.
     */
    private void instantiateButtons() {
        // Customer Button
        customerButton = new JButton("Customer");
        customerButton.setVerticalTextPosition(AbstractButton.BOTTOM);
        customerButton.setHorizontalTextPosition(AbstractButton.CENTER);
        customerButton.setToolTipText(customerTooltip);
        customerButton.setActionCommand("customer");
        customerButton.addActionListener(this);
        // Employee Button
        employeeButton = new JButton("Employee");
        employeeButton.setVerticalTextPosition(AbstractButton.BOTTOM);
        employeeButton.setHorizontalTextPosition(AbstractButton.CENTER);
        employeeButton.setToolTipText(employeeTooltip);
        employeeButton.setActionCommand("employee");
        employeeButton.addActionListener(this);
        // Administrator Button
        adminButton = new JButton("Administrator");
        adminButton.setVerticalTextPosition(AbstractButton.BOTTOM);
        adminButton.setHorizontalTextPosition(AbstractButton.CENTER);
        adminButton.setToolTipText(adminTooltip);
        adminButton.setActionCommand("administrator");
        adminButton.addActionListener(this);
        // NOTE(review): "Helvitica" is misspelled ("Helvetica"), so Swing
        // silently substitutes a default font — confirm before changing,
        // since fixing the name would alter the rendered look.
        Font font = new Font("Helvitica", Font.PLAIN, 18);
        customerButton.setFont(font);
        employeeButton.setFont(font);
        adminButton.setFont(font);
    }

    /**
     * Define the behavior of the buttons: dispose of this window and open
     * the screen matching the pressed button's action command.
     */
    @Override
    public void actionPerformed(ActionEvent e) {
        try {
            // Customer action
            if ("customer".equals(e.getActionCommand())) {
                this.dispose(); // dispose of this window
                instantiateScoGUI(); // create new Self Check Out window
            }
            // Employee action
            else if ("employee".equals(e.getActionCommand())) {
                this.dispose(); // dispose of this window
                instantiateEmployeeLogin(); // create new Login window
            }
            // Administrator action
            else if ("administrator".equals(e.getActionCommand())) {
                this.dispose(); // dispose of this window
                instantiateAdminLogin(); // create new Login window
            }
        } catch (Exception ex) {
            // BUG FIX: this catch block used to be empty, silently hiding
            // any failure to launch the follow-up window. Surface it so
            // the failure is at least diagnosable.
            ex.printStackTrace();
        }
    }

    /**
     * Create the SelfCheckOut GUI and show it.
     */
    private void instantiateScoGUI() {
        // Create and set up the window
        new SelfCheckOutGUI();
    }

    /*
     * Create the login GUI for administrator.
     */
    private void instantiateAdminLogin() {
        // Create and set up the window
        JFrame frame = new JFrame("Self Check Out");
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        // Add content to the window
        frame.add(new Adminlogin());
        // Display the window
        frame.pack();
        frame.setVisible(true);
    }

    /*
     * Create the login GUI for Employees.
     */
    private void instantiateEmployeeLogin() {
        // Create and set up the window
        JFrame frame = new JFrame("Self Check Out");
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        // Add content to the window
        frame.add(new EmployeeLogin());
        // Display the window
        frame.pack();
        frame.setVisible(true);
    }

    /**
     * Show the Graphical User Interface for the main menu application
     */
    public static void main(String[] args) {
        new StarterGUI();
    }
}
<file_sep>/src/ca/utoronto/csc301/SelfCheckOut/Devices/PaymentCollectorTest.java
package ca.utoronto.csc301.SelfCheckOut.Devices;
import static org.junit.Assert.*;
import org.junit.Before;
//import org.junit.BeforeClass;
import org.junit.Test;
import ca.utoronto.csc301.SelfCheckOut.App.SelfCheckOut;
//import ca.utoronto.csc301.SelfCheckOut.Exceptions.InvalidCardException;
//import ca.utoronto.csc301.SelfCheckOut.Exceptions.NegativePayAmountException;
//import ca.utoronto.csc301.SelfCheckOut.Exceptions.UnrecognizedPayTypeException;
/**
 * Unit tests for PaymentCollector: payment collection by cash, debit,
 * credit and gift card, plus the simple getters and setters.
 * <p>
 * Every test starts from a collector owing 6.0 with nothing paid yet
 * (see {@link #setUp()}).
 */
public class PaymentCollectorTest {

    SelfCheckOut checkOut;
    PaymentCollector payCol;
    double EPSILON = 1e-15; // a very small number

    /** Fresh collector per test: 6.0 owed, 0.0 paid so far. */
    @Before
    public void setUp() throws Exception {
        payCol = new PaymentCollector(false);
        payCol.setTotalCost(6.0);
        payCol.setTotalPaid(0.0);
        checkOut = new SelfCheckOut();
    }

    /**
     * Test for cash payment: paying 20.0 on a 6.0 bill must yield 14.0
     * change.
     *
     * @throws Exception
     */
    @Test
    public void testCollectCash() throws Exception {
        checkOut.payForGroceries(false, "", 20.0, 1);
        payCol.collect("", 20.0, 1);
        assertEquals(payCol.getChange(), 14.0, EPSILON);
    }

    /**
     * Test for debit payment.
     * <p>
     * BUG FIX: the original called {@code equals(payCol.isFinishedPaying())},
     * a discarded Object.equals comparison that asserts nothing. Replaced
     * with a real assertion on the payment state.
     *
     * @throws Exception
     */
    @Test
    public void testCollectDebit() throws Exception {
        checkOut.payForGroceries(false, "1234567890123456", 20.0, 2);
        payCol.collect("1234567890123456", 20.0, 2);
        assertTrue(payCol.isFinishedPaying());
    }

    /**
     * Test for Credit payment. (Same no-op {@code equals(...)} bug fixed
     * here as in {@link #testCollectDebit()}.)
     *
     * @throws Exception
     */
    @Test
    public void testCollectCredit() throws Exception {
        checkOut.payForGroceries(false, "1234567890123456", 20, 3);
        payCol.collect("1234567890123456", 20, 3);
        assertTrue(payCol.isFinishedPaying());
    }

    /**
     * Test for Gift card payment. (Same no-op {@code equals(...)} bug fixed
     * here as in {@link #testCollectDebit()}.)
     *
     * @throws Exception
     */
    @Test
    public void testCollectGift() throws Exception {
        checkOut.payForGroceries(false, "1234567890123456", 20, 4);
        payCol.collect("1234567890123456", 20, 4);
        assertTrue(payCol.isFinishedPaying());
    }

    /**
     * Test for when the exact amount on bill is paid: no change due.
     */
    @Test
    public void testMakePaymentExactAmount() {
        assertEquals(payCol.makePayment(6.0, true), 0.0, EPSILON);
    }

    /**
     * Test for when more than amount on bill is paid: overpayment comes
     * back as change.
     */
    @Test
    public void testMakePaymentOverpay() {
        assertEquals(payCol.makePayment(10.0, true), 4.0, EPSILON);
    }

    /**
     * Test for when less than amount on bill is paid: no change yet.
     */
    @Test
    public void testMakePaymentUnderpay() {
        assertEquals(payCol.makePayment(4.0, true), 0.0, EPSILON);
    }

    /**
     * Test setter function for totalCost.
     */
    @Test
    public void testSetTotalCost() {
        payCol.setTotalCost(30);
        assertEquals(payCol.getTotalCost(), 30, EPSILON);
    }

    /**
     * Test for getter function for totalCost (6.0 comes from setUp).
     */
    @Test
    public void testGetTotalCost() {
        assertEquals(payCol.getTotalCost(), 6.0, EPSILON);
    }

    /**
     * Test for setter function totalPaid.
     */
    @Test
    public void testSetTotalPaid() {
        payCol.setTotalPaid(30);
        assertEquals(payCol.getTotalPaid(), 30, EPSILON);
    }

    /**
     * Test for getter function totalPaid.
     * <p>
     * BUG FIX: the original asserted {@code getTotalCost()} here (a
     * copy-paste slip), so the totalPaid getter was never exercised. The
     * expected value is 0.0, as set in {@link #setUp()}.
     */
    @Test
    public void testGetTotalPaid() {
        assertEquals(payCol.getTotalPaid(), 0.0, EPSILON);
    }

    /**
     * Test for setter function change.
     */
    @Test
    public void testSetChange() {
        payCol.setChange(5.0);
        assertEquals(payCol.getChange(), 5.0, EPSILON);
    }

    /**
     * Test for getter function change. (Method name kept as-is — renaming
     * the public test method would be an interface change.)
     */
    @Test
    public void testsGetChange() {
        payCol.setChange(5.0);
        assertEquals(payCol.getChange(), 5.0, EPSILON);
    }
}
| 78b5ca27b642697a9632b9c537b36958156b790c | [
"Java",
"Text",
"SQL"
] | 25 | Text | pnisarg/selfCheckOutSystem | df5064149193a9b38fc9b9902b59ac6ff219254d | cab4720c4d6e39f421011a8f910be6fad9a6231e |
refs/heads/master | <repo_name>Marlon-Santos/Desafio-Mattos-GFT-Pattern-Factory<file_sep>/src/br/com/gft/factoryInterface/Mensagem.java
package br.com.gft.factoryInterface;
public interface Mensagem {
public void enviar(String msg);
}
| 89730912cfebf7f2b187e5076b49ad2a632d654f | [
"Java"
] | 1 | Java | Marlon-Santos/Desafio-Mattos-GFT-Pattern-Factory | 10dc1687949d86bc9c18a8331f2e31c2b86065c8 | f9146a8279bb515fe0e2978ce82640f0389a71b2 |
refs/heads/master | <file_sep>#pipenv: /home/lager/.local/share/virtualenvs/mailgen-wnDkhgxz
import random, string, argparse
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def generated():
    """Build a random first/last name pair, print the resulting credentials,
    pre-fill the vfemail registration form with them, and record the
    username/password pair in mail.txt.

    Relies on the module-level ``password_result`` global, which is assigned
    before this function is ever called.
    """
    def random_letters(count):
        # Pseudo-random ASCII-letter string of the requested length.
        return "".join(random.choice(string.ascii_letters) for _ in range(count))

    firstname_result = random_letters(5)
    lastname_result = random_letters(10)
    print("This is your username : ",
          firstname_result + '.' + lastname_result + '@<EMAIL>'
          "\nThis is your password : ", password_result)
    driver = webdriver.Firefox(
        executable_path='/home/lager/PycharmProjects/Generators/mailgen/geckodriver')
    driver.get('https://vfemail.net/regnorm/')
    # Fill the registration form with the generated credentials.
    driver.find_element_by_id('first_name').send_keys(firstname_result)
    driver.find_element_by_id('last_name').send_keys(lastname_result)
    #user_name = driver.find_element_by_id('user_name')
    #user_name.send_keys(username_result)
    driver.find_element_by_id('passwd_1').send_keys(password_result)
    driver.find_element_by_id('passwd_2').send_keys(password_result)
    mail_coll = ("This is your username: " + firstname_result + '.' + lastname_result
                 + '@vf<EMAIL>.net' + "\n" + "This is your password: " + password_result)
    with open('mail.txt', 'w') as mail_doc:
        mail_doc.write(str(mail_coll))
#Description of the program
# Command-line interface: -f/-l take an explicit name pair, -r generates one.
parser = argparse.ArgumentParser(prog='MailGen',
    description='',
    usage="Opens up a website with pre-filled info for creating dummy email accounts \
\n Type in username with format mail_gen.py -f FIRSTNAME -l LASTNAME \
\n Your credentials will appear in the mail.txt file \
\n Generate a username with -r or --random \
\n Username format is <EMAIL> ")
#Positional arguments
parser.add_argument("-f","--firstname",
    help="select your own username",
    action="store")
parser.add_argument("-l","--lastname",
    help="select your lastname",
    action="store")
#Optional argument for randomizing username
parser.add_argument("-r", "--random",
    help="randomize username",
    action="store_true")
args = parser.parse_args()
# Generate a 20-character password from letters, digits and punctuation.
# This must happen before generated() is called below, since that function
# reads the password_result global.
pass_min_char = 20
pass_allchar = string.ascii_letters + string.digits + string.punctuation
password = ''.join(random.choice(pass_allchar) for x in range(pass_min_char))
password_result = password
if args.random:
    generated()
# Explicit-name path: duplicates the form-filling logic of generated(),
# but uses the names supplied on the command line.
# NOTE(review): giving only one of -f/-l silently does nothing — confirm
# whether that is intended.
if args.firstname and args.lastname:
    driver = webdriver.Firefox(executable_path='/home/lager/PycharmProjects/Generators/mailgen/geckodriver')
    driver.get('https://vfemail.net/regnorm/')
    first_name = driver.find_element_by_id('first_name')
    first_name.send_keys(args.firstname)
    last_name = driver.find_element_by_id('last_name')
    last_name.send_keys(args.lastname)
    passw1 = driver.find_element_by_id('passwd_1')
    passw1.send_keys(password_result)
    passw2 = driver.find_element_by_id('passwd_2')
    passw2.send_keys(password_result)
    mail_coll = "This is your username: " + args.firstname + '.' + args.lastname + '@vfemail.net' + "\n" + "This is your password : " + password_result
    mail_doc = open('mail.txt','w')
    mail_doc.write(str(mail_coll))
    mail_doc.close()
<file_sep>Mailgen is a python script which opens up the website https://vfemail.net/regnorm/ for creating temporary or dummy email accounts.
The username can be typed in with the flags `mail_gen.py -f FIRSTNAME -l LASTNAME`; the first and last name are combined to form the address.
To randomize the username, just run mail_gen.py -r.
The username and the password is stored in a txt file in the mailgen folder.
To install mailgen:
1. download folder
2. cd into folder
3. mkdir .venv
4. $ pipenv install
| f26ab2968c863082ebb25b5a0431d538fa6a1f6a | [
"Python",
"Text"
] | 2 | Python | LagerCode/EmailGenerator | 87aca39f04c0eef2eb3f7ad5b85a38018126d270 | 827b522edb6179999651f1ac9662df6d8e561e25 |
refs/heads/master | <file_sep>/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package com.kwa.pvym;
import com.kwa.beanupvym.Controller.PenggunaJpaController;
import com.kwa.beanupvym.Controller.PesanJpaController;
import com.kwa.beanupvym.Pesan;
import com.kwa.beanupvym.PesanPK;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.logging.Level;
import javax.persistence.FlushModeType;
import org.apache.log4j.Logger;
import org.openymsg.network.FireEvent;
import org.openymsg.network.ServiceType;
import org.openymsg.network.Session;
import org.openymsg.network.YahooProtocol;
import org.openymsg.network.event.SessionAuthorizationEvent;
import org.openymsg.network.event.SessionEvent;
import org.openymsg.network.event.SessionListener;
/**
*
* @author ibung
*/
/**
 * Console batch bot for Yahoo! Messenger: logs an account in, drains
 * queued messages from the database and delivers them, and (via the
 * session callback) auto-handles friend requests and stores/forwards
 * incoming messages. Non-admin traffic is mirrored to every active admin.
 * <p>
 * Usage: {@code YMBatch <ymId> <password>}.
 */
public class YMBatch implements SessionListener {

    private Logger logger = Logger.getLogger(YMBatch.class);

    /** Live YM session; logged in from the constructor. */
    private Session session = new Session();

    /** Parses the PesanPK tanggal/waktu pair ("yyMMdd" + "hhmmssSSS"). */
    private DateFormat dateFormat = new SimpleDateFormat("yyMMdd hhmmssSSS");

    /**
     * Entry point: expects the YM id and password as the two arguments.
     */
    public static void main(String[] args) {
        try {
            if (args.length < 2) {
                // Guard added: a missing argument previously surfaced as an
                // ArrayIndexOutOfBoundsException.
                System.err.println("Usage: YMBatch <ymId> <password>");
                return;
            }
            YMBatch yb = new YMBatch(args[0], args[1]);
            yb.initYM();
        } catch (Exception ex) {
            java.util.logging.Logger.getLogger(YMBatch.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /** Logs the bot account into Yahoo! Messenger. */
    public YMBatch(String id, String pwd) throws Exception {
        session.login(id, pwd);
    }

    /**
     * Main delivery loop: registers this object as the session listener,
     * then repeatedly drains undelivered messages from the database and
     * sends them out.
     * <p>
     * NOTE(review): the while(true) loop polls without any sleep, so this
     * thread busy-waits whenever the queue is empty — a short delay is
     * almost certainly intended; confirm before adding one.
     */
    public void initYM() throws Exception {
        session.addSessionListener(this);
        // Load the list of active administrator ids once, up front.
        PenggunaJpaController user = new PenggunaJpaController(null, null);
        String[] adminList = user.findAdminAktif();
        while (true) {
            PesanJpaController pesan = new PesanJpaController(user.getEmf(), user.getEm());
            List<Pesan> result = pesan.getUndeliveredMesg();
            if (result.isEmpty()) {
                continue;
            }
            pesan.initTrx();
            for (int i = 0; i < result.size(); i++) {
                Pesan pes = result.get(i);
                PesanPK ppk = pes.getPesanPK();
                // Check whether the user is still valid; delete the queued
                // message if not.
                if (!user.isPenggunaValid(ppk.getYmid())) {
                    pesan.destroy(ppk);
                    continue;
                }
                // Messages carry a scheduled date/time; skip any that are
                // still in the future (this message needs no processing yet).
                Date dt = dateFormat.parse(ppk.getTanggal() + " " + ppk.getWaktu());
                Date date = new Date();
                if (date.before(dt)) {
                    continue;
                }
                try {
                    session.sendMessage(ppk.getYmid(), pes.getPesan());
                    // If the sender is not an admin, broadcast the message to
                    // every admin.
                    if (!user.isPenggunaAdmin(adminList, ppk.getYmid())) {
                        for (int j = 0; j < adminList.length; j++) {
                            session.sendMessage(adminList[j], ppk.getYmid() + " : " + pes.getPesan());
                        }
                    }
                    pesan.destroy(ppk);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            } // end for i
            pesan.commitTrx();
            pesan.getEm().close();
        }
    }

    /**
     * Session callback: auto-accepts friend requests from valid users
     * (rejecting everyone else), and persists/forwards incoming messages
     * from valid users.
     */
    public void dispatch(FireEvent fe) {
        ServiceType type = fe.getType();
        SessionEvent sessionEvent = fe.getEvent();
        if (type == ServiceType.Y7_AUTHORIZATION) {
            try {
                PenggunaJpaController user = new PenggunaJpaController(null, null);
                if (user.isPenggunaValid(sessionEvent.getFrom())) {
                    session.acceptFriendAuthorization(sessionEvent.getFrom(), YahooProtocol.YAHOO);
                } else {
                    session.rejectFriendAuthorization((SessionAuthorizationEvent) sessionEvent, sessionEvent.getFrom(), "Please contact admin!");
                    System.out.println(sessionEvent.getFrom() + "rejected!");
                }
            } catch (Exception e) {
                logger.error(e, e);
            }
            return;
        }
        if (type == ServiceType.MESSAGE) {
            try {
                PenggunaJpaController user = new PenggunaJpaController(null, null);
                if (!user.isPenggunaValid(sessionEvent.getFrom())) {
                    // Unknown sender: log to stdout and ignore.
                    System.out.println(sessionEvent.getFrom() + " : " + sessionEvent.getMessage());
                    user.getEm().close();
                    user.getEmf().close();
                    return;
                }
                DateFormat dateFormat = new SimpleDateFormat("yyMMdd");
                DateFormat timeFormat = new SimpleDateFormat("hhmmssSSS");
                // Get current date time with Date(); it becomes part of the
                // message's composite key.
                Date date = new Date();
                PesanPK ppk = new PesanPK(sessionEvent.getFrom(), dateFormat.format(date), timeFormat.format(date));
                Pesan pes = new Pesan(ppk, sessionEvent.getMessage(), 0, 1);
                String[] adminList = user.findAdminAktif();
                for (int j = 0; j < adminList.length; j++) {
                    // BUG FIX: the original compared the ids with ==, which
                    // tests String identity rather than content, so the
                    // "don't echo back to the sending admin" filter never
                    // fired.
                    if (adminList[j].equals(ppk.getYmid())) {
                        continue;
                    }
                    session.sendMessage(adminList[j], ppk.getYmid() + " : " + pes.getPesan());
                }
                PesanJpaController pesan = new PesanJpaController(user.getEmf(), user.getEm());
                pesan.initTrx();
                pesan.create(pes);
                pesan.commitTrx();
                pesan.getEm().close();
                pesan.getEmf().close();
                // log request message
                logger.debug("message from " + sessionEvent.getFrom() + " \nmessage " + sessionEvent.getMessage());
            } catch (Exception e) {
                logger.error(e, e);
            }
        }
    }
}
<file_sep>/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package com.kwa.beanupvym;
import java.io.Serializable;
import javax.persistence.*;
import javax.xml.bind.annotation.XmlRootElement;
/**
*
* @author ibung
*/
@Entity
@Table(name = "pengguna", catalog = "PvYM", schema = "")
@XmlRootElement
@NamedQueries({
    @NamedQuery(name = "Pengguna.findAll", query = "SELECT p FROM Pengguna p"),
    @NamedQuery(name = "Pengguna.findByYmid", query = "SELECT p FROM Pengguna p WHERE p.ymid = :ymid"),
    @NamedQuery(name = "Pengguna.findByNama", query = "SELECT p FROM Pengguna p WHERE p.nama = :nama"),
    @NamedQuery(name = "Pengguna.findByTipe", query = "SELECT p FROM Pengguna p WHERE p.tipe = :tipe"),
    @NamedQuery(name = "Pengguna.findByPlafon", query = "SELECT p FROM Pengguna p WHERE p.plafon = :plafon"),
    @NamedQuery(name = "Pengguna.findPenggunaAktif", query = "SELECT p FROM Pengguna p WHERE p.ymid = :ymid and p.status = 1"),
    @NamedQuery(name = "Pengguna.findAdminAktif", query = "SELECT p FROM Pengguna p WHERE p.tipe = 2 and p.status = 1"),
    @NamedQuery(name = "Pengguna.findByStatus", query = "SELECT p FROM Pengguna p WHERE p.status = :status")})
// JPA entity for the `pengguna` (user) table. Equality is defined purely
// by the ymid primary key, matching the generated hashCode/equals below.
public class Pengguna implements Serializable {

    private static final long serialVersionUID = 1L;
    // Primary key: the user's Yahoo! Messenger id (max 20 chars).
    @Id
    @Basic(optional = false)
    @Column(name = "ymid", nullable = false, length = 20)
    private String ymid;
    // Display name (max 45 chars).
    @Basic(optional = false)
    @Column(name = "nama", nullable = false, length = 45)
    private String nama;
    // User type code; the findAdminAktif query treats tipe = 2 as "admin".
    @Basic(optional = false)
    @Column(name = "tipe", nullable = false)
    private int tipe;
    // Spending ceiling; units/currency are not visible here (TODO confirm).
    @Basic(optional = false)
    @Column(name = "plafon", nullable = false)
    private double plafon;
    // Account status; the "Aktif" queries treat status = 1 as "active".
    @Basic(optional = false)
    @Column(name = "status", nullable = false)
    private int status;

    // No-arg constructor required by JPA.
    public Pengguna() {
    }

    // Key-only constructor.
    public Pengguna(String ymid) {
        this.ymid = ymid;
    }

    // Full constructor covering every column.
    public Pengguna(String ymid, String nama, int tipe, double plafon, int status) {
        this.ymid = ymid;
        this.nama = nama;
        this.tipe = tipe;
        this.plafon = plafon;
        this.status = status;
    }

    public String getYmid() {
        return ymid;
    }

    public void setYmid(String ymid) {
        this.ymid = ymid;
    }

    public String getNama() {
        return nama;
    }

    public void setNama(String nama) {
        this.nama = nama;
    }

    public int getTipe() {
        return tipe;
    }

    public void setTipe(int tipe) {
        this.tipe = tipe;
    }

    public double getPlafon() {
        return plafon;
    }

    public void setPlafon(double plafon) {
        this.plafon = plafon;
    }

    public int getStatus() {
        return status;
    }

    public void setStatus(int status) {
        this.status = status;
    }

    // Hash derived solely from the primary key, consistent with equals().
    @Override
    public int hashCode() {
        int hash = 0;
        hash += (ymid != null ? ymid.hashCode() : 0);
        return hash;
    }

    @Override
    public boolean equals(Object object) {
        // TODO: Warning - this method won't work in the case the id fields are not set
        if (!(object instanceof Pengguna)) {
            return false;
        }
        Pengguna other = (Pengguna) object;
        if ((this.ymid == null && other.ymid != null) || (this.ymid != null && !this.ymid.equals(other.ymid))) {
            return false;
        }
        return true;
    }

    @Override
    public String toString() {
        return "com.kwa.beanupvym.Pengguna[ ymid=" + ymid + " ]";
    }
}
<file_sep>CREATE DATABASE IF NOT EXISTS `pvym` /*!40100 DEFAULT CHARACTER SET utf8 */;
USE `pvym`;
-- MySQL dump 10.13 Distrib 5.5.16, for Win32 (x86)
--
-- Host: localhost Database: pvym
-- ------------------------------------------------------
-- Server version 5.5.19
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
--
-- Table structure for table `saldo`
--
DROP TABLE IF EXISTS `saldo`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `saldo` (
`ymid` varchar(20) NOT NULL,
`jumlah` double NOT NULL,
PRIMARY KEY (`ymid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `saldo`
--
LOCK TABLES `saldo` WRITE;
/*!40000 ALTER TABLE `saldo` DISABLE KEYS */;
/*!40000 ALTER TABLE `saldo` ENABLE KEYS */;
UNLOCK TABLES;
--
-- Table structure for table `pengguna`
--
DROP TABLE IF EXISTS `pengguna`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `pengguna` (
`ymid` varchar(20) NOT NULL,
`nama` varchar(45) NOT NULL,
`tipe` int(1) NOT NULL COMMENT '0 - server, 1 - gateway, 2 - admin, 3 - user, 4 - dummy user',
`plafon` double NOT NULL,
`status` int(1) NOT NULL COMMENT '0 = inactive, 1 = active',
PRIMARY KEY (`ymid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `pengguna`
--
LOCK TABLES `pengguna` WRITE;
/*!40000 ALTER TABLE `pengguna` DISABLE KEYS */;
INSERT INTO `pengguna` VALUES ('arinegara','wendra',2,1000000,1);
/*!40000 ALTER TABLE `pengguna` ENABLE KEYS */;
UNLOCK TABLES;
--
-- Table structure for table `pesan`
--
DROP TABLE IF EXISTS `pesan`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `pesan` (
`ymid` varchar(20) NOT NULL,
`tanggal` varchar(6) NOT NULL COMMENT 'YYMMDD',
`waktu` varchar(9) NOT NULL COMMENT 'hhmmss',
`pesan` varchar(45) NOT NULL,
`tipe` int(1) NOT NULL COMMENT '0: incoming, 1: outgoing',
`status` int(1) NOT NULL COMMENT '0: processed(send/received), 1:to be processed',
PRIMARY KEY (`ymid`,`tanggal`,`waktu`),
UNIQUE KEY `unique` (`ymid`,`tanggal`,`waktu`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `pesan`
--
LOCK TABLES `pesan` WRITE;
/*!40000 ALTER TABLE `pesan` DISABLE KEYS */;
INSERT INTO `pesan` VALUES ('arinegara','120429','044659500','Pesen pulsa dong, boleh?',0,1);
/*!40000 ALTER TABLE `pesan` ENABLE KEYS */;
UNLOCK TABLES;
--
-- Table structure for table `produk`
--
DROP TABLE IF EXISTS `produk`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `produk` (
`kode` varchar(10) NOT NULL,
`harga` double NOT NULL,
`deskripsi` varchar(10) NOT NULL,
PRIMARY KEY (`kode`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `produk`
--
LOCK TABLES `produk` WRITE;
/*!40000 ALTER TABLE `produk` DISABLE KEYS */;
/*!40000 ALTER TABLE `produk` ENABLE KEYS */;
UNLOCK TABLES;
--
-- Table structure for table `transaksi`
--
DROP TABLE IF EXISTS `transaksi`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `transaksi` (
`ymid` varchar(20) NOT NULL,
`tipe` varchar(10) NOT NULL COMMENT 'product id (XL10, PLN10, ....)',
`nomor` varchar(15) NOT NULL,
`kounter` varchar(3) NOT NULL DEFAULT '',
`tanggal` varchar(6) NOT NULL COMMENT 'YYMMDD',
`waktu` varchar(6) NOT NULL,
`harga` double NOT NULL,
`saldo` double NOT NULL,
`status` int(11) NOT NULL COMMENT '0: gagal, 1:in progress, 2:OK',
PRIMARY KEY (`ymid`,`tipe`,`nomor`,`kounter`,`tanggal`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `transaksi`
--
LOCK TABLES `transaksi` WRITE;
/*!40000 ALTER TABLE `transaksi` DISABLE KEYS */;
/*!40000 ALTER TABLE `transaksi` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2012-05-09 6:27:59
<file_sep>/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package com.kwa.beanupvym.Controller;
import com.kwa.beanupvym.Pengguna;
import com.kwa.core.GenericController;
import com.kwa.core.KWAMesg;
import com.kwa.core.Util;
import java.util.List;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Query;
import javax.persistence.criteria.CriteriaQuery;
import javax.persistence.criteria.Root;
/**
*
* @author ibung
*/
public class PenggunaJpaController extends GenericController {
public PenggunaJpaController(EntityManagerFactory emf, EntityManager em) throws Exception {
super(emf,em);
}
private boolean isTipeValid(int i) {
//if(i==null) return false;
if (i == 0 || i == 1 || i == 2 || i == 3 || i == 4) {
return true;
}
return false;
}
private boolean isStatusValid(int i) {
//if(i==null) return false;
if (i == 0 || i == 1) {
return true;
}
return false;
}
public boolean isPenggunaValid(String ymid){
Pengguna p = findPengguna(ymid);
if(p==null) return false;
if(p.getStatus()==0) return false;
return true;
}
public KWAMesg create(Pengguna pengguna) throws Exception {
checkConnection();
setError("unknown", "unknownError");
if (Util.isNullOrSpaces(pengguna.getYmid())) {
return setError("YmID", "Entity is null or spaces");
}
if (Util.isNullOrSpaces(pengguna.getNama())) {
return setError("Nama", "Entity is null or spaces");
}
if (!isTipeValid(pengguna.getTipe())) {
return setError("Tipe", "tipe invalid");
}
if (!isStatusValid(pengguna.getStatus())) {
return setError("Status", "Status invalid");
}
if (pengguna.getPlafon() <= 0) {
return setError("Plafon", "Plafon kurang dari 0");
}
getEm().persist(pengguna);
return setOK("Entry Created");
}
public KWAMesg edit(Pengguna pengguna) throws Exception {
checkConnection();
setError("unknown", "unknownError");
if (Util.isNullOrSpaces(pengguna.getYmid())) {
return setError("YmID", "Entity is null or spaces");
}
if (findPengguna(pengguna.getYmid()) == null) {
return setError("Primary Key", "Entry doesn't exist");
}
if (Util.isNullOrSpaces(pengguna.getNama())) {
return setError("Nama", "Entity is null or spaces");
}
if (!isTipeValid(pengguna.getTipe())) {
return setError("Tipe", "tipe invalid");
}
if (!isStatusValid(pengguna.getStatus())) {
return setError("Status", "Status invalid");
}
if (pengguna.getPlafon() <= 0) {
return setError("Plafon", "Plafon kurang dari 0");
}
getEm().merge(pengguna);
return setOK("Entry Modified");
}
public KWAMesg destroy(String id) throws Exception {
checkConnection();
setError("unknown", "unknownError");
if (Util.isNullOrSpaces(id)) {
return setError("YmID", "Entity is null or spaces");
}
if (findPengguna(id) == null) {
return setError("Primary Key", "Entry doesn't exist");
}
Pengguna pengguna = getEm().getReference(Pengguna.class, id);
getEm().remove(pengguna);
return setOK("Entry Deleted");
}
public String[] findAdminAktif(){
checkConnection();
Query q = getEm().createNamedQuery("Pengguna.findAdminAktif", Pengguna.class);
List<Pengguna> result = q.getResultList();
int nbPengguna = 0;
if(result!=null){
nbPengguna = result.size();
}
String[] listAdmin = new String[nbPengguna];
for(int i=0;i<result.size();i++){
listAdmin[i] = result.get(i).getYmid();
}
return listAdmin;
}
public boolean isPenggunaAdmin(String[] ymids, String ymid){
if(ymids == null) return false;
boolean ketemu = false;
int cntr = 0;
while(!ketemu && cntr<ymids.length){
if(ymids[cntr].trim().equalsIgnoreCase(ymid)){
ketemu = true;
}else{
cntr=cntr+1;
}
}
return ketemu;
}
public List<Pengguna> findPenggunaEntities() {
return findPenggunaEntities(true, -1, -1);
}
public List<Pengguna> findPenggunaEntities(int maxResults, int firstResult) {
return findPenggunaEntities(false, maxResults, firstResult);
}
private List<Pengguna> findPenggunaEntities(boolean all, int maxResults, int firstResult) {
checkConnection();
CriteriaQuery cq = getEm().getCriteriaBuilder().createQuery();
cq.select(cq.from(Pengguna.class));
Query q = getEm().createQuery(cq);
if (!all) {
q.setMaxResults(maxResults);
q.setFirstResult(firstResult);
}
return q.getResultList();
}
public Pengguna findPengguna(String id) {
checkConnection();
return getEm().find(Pengguna.class, id);
}
public int getPenggunaCount() {
checkConnection();
CriteriaQuery cq = getEm().getCriteriaBuilder().createQuery();
Root<Pengguna> rt = cq.from(Pengguna.class);
cq.select(getEm().getCriteriaBuilder().count(rt));
Query q = getEm().createQuery(cq);
return ((Long) q.getSingleResult()).intValue();
}
}
<file_sep>package com.kwa.beanupvym;
import javax.annotation.Generated;
import javax.persistence.metamodel.SingularAttribute;
import javax.persistence.metamodel.StaticMetamodel;
@Generated(value="EclipseLink-2.3.0.v20110604-r9504", date="2012-04-29T00:15:50")
@StaticMetamodel(Pengguna.class)
public class Pengguna_ {
public static volatile SingularAttribute<Pengguna, String> ymid;
public static volatile SingularAttribute<Pengguna, Integer> status;
public static volatile SingularAttribute<Pengguna, Double> plafon;
public static volatile SingularAttribute<Pengguna, Integer> tipe;
public static volatile SingularAttribute<Pengguna, String> nama;
}<file_sep>/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package com.kwa.core;
import java.io.Serializable;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
/**
*
* @author ibung
*/
public class GenericController implements Serializable{
private EntityManagerFactory emf;
public EntityManagerFactory getEmf() {
return emf;
}
public void setEmf(EntityManagerFactory emf) {
this.emf = emf;
}
private EntityManager em;
private KWAMesg pmesg;
private static final String connName = "NBPvYMPU";
public KWAMesg getPmesg() {
return pmesg;
}
public void checkConnection(){
if(this.emf==null) this.emf = Persistence.createEntityManagerFactory(connName);
if(!this.emf.isOpen()) this.emf = Persistence.createEntityManagerFactory(connName);
if(this.em==null) this.em = this.emf.createEntityManager();
if(!this.em.isOpen()) this.em = this.emf.createEntityManager();
}
public void setPmesg(KWAMesg pmesg) {
this.pmesg = pmesg;
}
public EntityManager getEm() {
return em;
}
public void setEm(EntityManager em) {
this.em = em;
}
public GenericController(EntityManagerFactory emf, EntityManager em){
this.emf = emf;
this.em = em;
checkConnection();
}
public void initTrx(){
em.getTransaction().begin();
}
public void commitTrx(){
em.getTransaction().commit();
}
public KWAMesg setError(String fname, String mesg){
pmesg = new KWAMesg();
pmesg.setError(fname, mesg);
return pmesg;
}
public KWAMesg setOK(String mesg){
pmesg = new KWAMesg();
pmesg.setOK(mesg);
return pmesg;
}
}
<file_sep>/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package com.kwa.core;
/**
*
* @author ibung
*/
public class KWAMesg {
private String mesg;
private boolean isError;
private String fieldname;
private boolean isOK;
private boolean isWarning;
public boolean isIsOK() {
return isOK;
}
public void setIsOK(boolean isOK) {
this.isOK = isOK;
}
public boolean isIsWarning() {
return isWarning;
}
public void setIsWarning(boolean isWarning) {
this.isWarning = isWarning;
}
public String getFieldname() {
return fieldname;
}
public void setFieldname(String fieldname) {
this.fieldname = fieldname;
}
public boolean isIsError() {
return isError;
}
public void setIsError(boolean iserror) {
this.isError = iserror;
}
public String getMesg() {
return mesg;
}
public void setMesg(String mesg) {
this.mesg = mesg;
}
public KWAMesg(){
mesg = "";
isError = false;
fieldname = "";
isOK = false;
isWarning = false;
}
public void setError(String fname, String mesg){
isError = true;
isOK = false;
isWarning = false;
this.mesg = mesg;
this.fieldname = fname;
}
public void setOK(String mesg){
isError = false;
isWarning = false;
isOK = true;
this.mesg = mesg;
this.fieldname = "";
}
}
<file_sep>PvYM
====<file_sep>/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package com.kwa.beanupvym;
import java.io.Serializable;
import javax.persistence.*;
import javax.xml.bind.annotation.XmlRootElement;
/**
*
* @author ibung
*/
@Entity
@Table(name = "pesan", catalog = "PvYM", schema = "", uniqueConstraints = {
@UniqueConstraint(columnNames = {"ymid", "tanggal", "waktu"})})
@XmlRootElement
@NamedQueries({
@NamedQuery(name = "Pesan.findAll", query = "SELECT p FROM Pesan p"),
@NamedQuery(name = "Pesan.findByYmid", query = "SELECT p FROM Pesan p WHERE p.pesanPK.ymid = :ymid"),
@NamedQuery(name = "Pesan.findByTanggal", query = "SELECT p FROM Pesan p WHERE p.pesanPK.tanggal = :tanggal"),
@NamedQuery(name = "Pesan.findByWaktu", query = "SELECT p FROM Pesan p WHERE p.pesanPK.waktu = :waktu"),
@NamedQuery(name = "Pesan.findByPesan", query = "SELECT p FROM Pesan p WHERE p.pesan = :pesan"),
@NamedQuery(name = "Pesan.findByTipe", query = "SELECT p FROM Pesan p WHERE p.tipe = :tipe"),
@NamedQuery(name = "Pesan.findByToBeDelivered", query = "SELECT p FROM Pesan p WHERE p.tipe = 1 and p.status = 1"),
@NamedQuery(name = "Pesan.findByStatus", query = "SELECT p FROM Pesan p WHERE p.status = :status")})
public class Pesan implements Serializable {
private static final long serialVersionUID = 1L;
@EmbeddedId
protected PesanPK pesanPK;
@Basic(optional = false)
@Column(name = "pesan", nullable = false, length = 45)
private String pesan;
@Basic(optional = false)
@Column(name = "tipe", nullable = false)
private int tipe;
@Basic(optional = false)
@Column(name = "status", nullable = false)
private int status;
public Pesan() {
}
public Pesan(PesanPK pesanPK) {
this.pesanPK = pesanPK;
}
public Pesan(PesanPK pesanPK, String pesan, int tipe, int status) {
this.pesanPK = pesanPK;
this.pesan = pesan;
this.tipe = tipe;
this.status = status;
}
public Pesan(String ymid, String tanggal, String waktu) {
this.pesanPK = new PesanPK(ymid, tanggal, waktu);
}
public PesanPK getPesanPK() {
return pesanPK;
}
public void setPesanPK(PesanPK pesanPK) {
this.pesanPK = pesanPK;
}
public String getPesan() {
return pesan;
}
public void setPesan(String pesan) {
this.pesan = pesan;
}
public int getTipe() {
return tipe;
}
public void setTipe(int tipe) {
this.tipe = tipe;
}
public int getStatus() {
return status;
}
public void setStatus(int status) {
this.status = status;
}
@Override
public int hashCode() {
int hash = 0;
hash += (pesanPK != null ? pesanPK.hashCode() : 0);
return hash;
}
@Override
public boolean equals(Object object) {
// TODO: Warning - this method won't work in the case the id fields are not set
if (!(object instanceof Pesan)) {
return false;
}
Pesan other = (Pesan) object;
if ((this.pesanPK == null && other.pesanPK != null) || (this.pesanPK != null && !this.pesanPK.equals(other.pesanPK))) {
return false;
}
return true;
}
@Override
public String toString() {
return "entity.Pesan[ pesanPK=" + pesanPK + " ]";
}
}
| ea425f491891fc6386cf36bc7f401137323e08e2 | [
"Markdown",
"Java",
"SQL"
] | 9 | Java | kwaTIr/PvYM | 45548c98ce3694d58af88760c0078b71d1c6fa6f | 8049f44dfbfac1e23c757f3fadbfc2d82247c7dc |
refs/heads/master | <repo_name>mugabits/mongo-university<file_sep>/M101P/week3/lowest.py
import pymongo
from bson.son import SON
connection = pymongo.MongoClient("mongodb://localhost")
db = connection.school
school = db.students
pipeline = {}
pipeline.update(
{"$unwind": "$scores"},
{"$match": {"scores.type":"homework"}},
{"$group": {"_id":"$_id","score": {"$min" : "$scores.score"}},
{"$sort": {"$scores.score":1}}
)
print list(school.aggregate(pipeline))
# In the Mongo Shell:
# db.students.aggregate(
# {'$unwind': '$scores'}
# ,{'$match': {'scores.type':'homework'}}
# ,{'$group': {'_id':'$_id'
# ,'score':'$scores.score'}}
# , {'$sort': {"scores.score":1 }}
# ,{'$limit': 1}
#
# for i in result_set:
# print i
# print len(result_set)
<file_sep>/M101P/week2/hw2-1/pipeline_and_aggregators.js
//in JavaScript
db.grades.aggregate([
{'$match':{'score':{'$gte':65}}},
{'$sort':{'score': 1}},
{'$project': {'student_id':true ,'score':true, '_id':false}},
{'$limit':1}])
<file_sep>/M101P/week2/hw2-1/remove_lowest_grade.py
import pymongo
import sys
from bson.son import SON
# connnecto to the db on standard port
connection = pymongo.MongoClient("mongodb://localhost")
#handles
db = connection.students
scores = db.grades
#find homework grades
def find_homework_grades():
#in JavaScript
# db.grades.aggregate([
# {'$match':{'score':{'$gte':65}}},
# {'$sort':{'score': 1}},
# {'$project': {'student_id':true ,'score':true, '_id':false}},
# {'$limit':1}])
scores
#removes homework grades
def remove_grade(grade_type):
try:
scores.aggregate({$group:{'_id':}})
result = scores.delete_many({'type':grade_type})
print "num removed: ", result.deleted_count
except Exception as e:
print "Exception: ", type(e), e
##def find_student_data():
##
## #handles
## db = connection.students
## scores = db.grades
##
#### print "Searching for student data for student with id = "
##
## try:
##
## docs = scores.find().sort( { 'score' : -1 } ).skip( 100 ).limit( 1 )
#### for doc in docs:
#### print doc
##
## except Exception as e:
## print "Exception: " , type(e), e
remove_grade("homework")
#find_student_data()
| 0675960b64b421a424f7d93edadeb78def184d9f | [
"JavaScript",
"Python"
] | 3 | Python | mugabits/mongo-university | 5b9354beb6814e8a5c92d2f194afecd7e7dee554 | daf545f2b9af0d76f50d6113f114800e95e34d78 |
refs/heads/master | <file_sep><!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
<title>Document</title>
</head>
<?php
$servername = "localhost";
$username = "admin";
$password = "<PASSWORD>";
$dbname = "pronostics";
// Create connection
$conn = new mysqli($servername, $username, $password, $dbname);
// Check connection
if ($conn->connect_error) {
die("Connection failed: " . $conn->connect_error);
}
if(isset($_POST['nom'])) $nom=$_POST['nom'];
else $nom="";
if(isset($_POST['pseudo'])) $pseudo=$_POST['pseudo'];
else $pseudo="";
{
$sql = "INSERT INTO pronostics (prono,column_pseudo)
VALUES ('$nom','$pseudo')";
if ($conn->query($sql) === TRUE) {
} else {
echo "Error: " . $sql . "<br>" . $conn->error;
}
}
$conn->close();
?>
<header>
<img src="https://upload.wikimedia.org/wikipedia/fr/thumb/f/f2/Premier_League_Logo.svg/1200px-Premier_League_Logo.svg.png"/>
<br>
<p>
</p>
</header>
<body>
<div id="match" style="text-align:center">
</div>
</body>
<script>
let div = document.querySelector('#match');
let options2 = {
url: 'premierleague.json',
callback: function(reponse) {
let objet = JSON.parse(reponse);
for (let journée of objet.rounds) {
let h2 = document.createElement('h2');
let journee = '<h2 class="jumbotron text-center"> ' + journée.name + '</h2>';
h2.innerHTML = journee;
div.appendChild(h2);
for (let match_journée of journée.matches) {
let p = document.createElement('p');
let match = '<p> ' + match_journée.team1.name + ' contre ' + match_journée.team2.name + ' <p>';
p.innerHTML = match;
div.appendChild(p);
let article = document.createElement('article');
let result = '<article> <form method="POST" action="add.php">'+ '<input type="hidden" name="equipe1" value="'+match_journée.team1.name+'"/>'+'<input type="hidden" name="equipe2" value="'+match_journée.team2.name+'"/>' +'<input type="hidden" name="date" value="'+match_journée.date+'"/>' +'<input type="text" name="nom" size="20" value="1/N/2" maxlength="35" >' + '<input type="text" name="pseudo" size="20" value="pseudo" maxlength="35" >' + '<INPUT TYPE="submit" NAME="ok" > </form></article><br>';
//'<input type="hidden" name="equipe1" value="'+match_journée.team2.name+'"/>';
article.innerHTML = result;
div.appendChild(article);
//let pseudo = document.createElement('article');
// let resul = '<article> <form method="POST" action="addpseudo.php">' + "pseudo" + '<input type="text" name="pseudo" size="20" value="1/N/2" maxlength="35" >' + '<INPUT TYPE="submit" NAME="ok" > </form></article>';
// pseudo.innerHTML = resul;
// div.appendChild(pseudo);
let articl = document.createElement('article');
let resultat = '<article> <form method="POST" action="prono.php" >' + '<INPUT TYPE="submit" NAME="ok"value="Vos paris" > </form></article>';
articl.innerHTML = resultat;
div.appendChild(articl);
}
}
}
}
function doAjax(options) {
let defaults = {
url: '',
method: 'GET',
async: true,
args: '',
callback: function() {},
callbackError: function() {}
};
assignArgs(options, defaults);
let ajax = new XMLHttpRequest();
ajax.onreadystatechange = function() {
if (ajax.readyState === 4) {
if (ajax.status === 200 || ajax.status === 304) {
defaults.callback(ajax.response);
} else {
defaults.callbackError();
}
}
}
ajax.open(defaults.method, defaults.url, defaults.async);
ajax.send(defaults.args);
}
function assignArgs(source, target) {
for (let clef in source) {
if (target.hasOwnProperty(clef)) {
target[clef] = source[clef];
}
}
};
doAjax(options2);
</script>
</html><file_sep>DROP DATABASE IF EXISTS `pronos`;
CREATE DATABASE `prono` CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
GRANT ALL PRIVILEGES ON `pronos`.* TO 'pronos'@'localhost' IDENTIFIED BY 'We Love SQL API!';
USE `prono`;
CREATE TABLE `pronostics` (
`id` INT AUTO_INCREMENT PRIMARY KEY,
`timestamp` TIMESTAMP NOT NULL,
`prono`TEXT NOT NULL
);<file_sep><?php
try
{
// On se connecte à MySQL
$bdd = new PDO('mysql:host=localhost;dbname=pronostics;charset=utf8', 'admin', 'simplon');
}
catch(Exception $e)
{
// En cas d'erreur, on affiche un message et on arrête tout
die('Erreur : '.$e->getMessage());
}
// Si tout va bien, on peut continuer
// On récupère tout le contenu de la table jeux_video
$reponse = $bdd->query('SELECT * FROM pronostics');
// On affiche chaque entrée une à une
while ($donnees = $reponse->fetch())
{
//Appel de mon JSON
//$json_source = file_get_contents('premierleague.json');
// Décode le JSON
//$json_data = json_decode($json_source, TRUE);
// Affiche la valeur des attributs du JSON
//echo $json_data["rounds"][0]["name"];
?>
<p>
<strong>pronostics</strong> : <?php echo $donnees['column_pseudo']; ?><br />
<?php echo $donnees['column_pseudo']; ?>
a jouer <?php echo $donnees['prono']; ?>,
<?php
}
$reponse->closeCursor(); // Termine le traitement de la requête
?>
<file_sep> <?php
$servername = "localhost";
$username = "admin";
$password = "<PASSWORD>";
$dbname = "pronostics";
// Create connection
$conn = new mysqli($servername, $username, $password, $dbname);
// Check connection
if ($conn->connect_error) {
die("Connection failed: " . $conn->connect_error);
}
if(isset($_POST['nom'])) $nom=$_POST['nom'];
else $nom="";
if(isset($_POST['pseudo'])) $pseudo=$_POST['pseudo'];
else $pseudo="";
if(isset($_POST['date'])) $date=$_POST['date'];
else $date="";
if(isset($_POST['equipe1'])) $equipe1=$_POST['equipe1'];
else $equipe1="";
if(isset($_POST['equipe2'])) $equipe2=$_POST['equipe2'];
else $equipe2="";
{
$sql = "INSERT INTO pronostics (prono,column_pseudo,date,equipe1,equipe2)
VALUES ('$nom','$pseudo','$date','$equipe1','$equipe2')";
if ($conn->query($sql) === TRUE) {
echo "$pseudo à jouer $nom pour le match $equipe1 vs $equipe2";
} else {
echo "Error: " . $sql . "<br>" . $conn->error;
}
}
$conn->close();
?>
<file_sep># PronoPremierLeague
Paris sur la PremierLeague 2017/2018<file_sep> <?php
$servername = "localhost";
$username = "admin";
$password = "<PASSWORD>";
$dbname = "pronostics";
// Create connection
$conn = new mysqli($servername, $username, $password, $dbname);
// Check connection
if ($conn->connect_error) {
die("Connection failed: " . $conn->connect_error);
}
if(isset($_POST['pseudo'])) $nom=$_POST['pseudo'];
else $nom="";
{
$sql = "INSERT INTO pronostics (column_pseudo)
VALUES ('$nom')";
if ($conn->query($sql) === TRUE) {
echo "Pronostic enregistré $pseudo";
} else {
echo "Error: " . $sql . "<br>" . $conn->error;
}
}
$conn->close();
?>
| e4e4cf94d8291b330648a834c4094d5a5e5a7f4f | [
"Markdown",
"SQL",
"PHP"
] | 6 | PHP | VachetVirginie/PronoPremierLeague | ce4f3385147f5c04283f0dc5c475ddca1e101d59 | 883aaa4d8dd021b526d2fa42a922a89d3be62383 |
refs/heads/master | <file_sep>`go get github.com/gorilla/mux`
build and run and go to `localhost:8080`
routes are:
* GET /todos
* GET /todos/{id}
* POST /todos<file_sep>package main
import (
"database/sql"
"github.com/go-martini/martini"
_ "github.com/go-sql-driver/mysql"
)
func checkError(err error) {
if err != nil {
panic(err.Error())
}
}
func main() {
db, err := sql.Open("mysql", "root@/dev_main?charset=utf8")
checkError(err)
defer db.Close()
stmtOut, err := db.Prepare("SELECT title FROM article WHERE id = ?")
checkError(err)
defer stmtOut.Close()
m := martini.Classic()
m.Get("/", func() string {
return "Hello world!"
})
m.Get("/article", func() string {
return "<a href='/article/1'>Новая Новость!</a>"
})
m.Get("/article/:id", func(params martini.Params) string {
var title string
err = stmtOut.QueryRow(params["id"]).Scan(&title)
if err != nil {
return "404"
}
return title
})
m.NotFound(func() string {
return "404"
})
m.Run()
}
| 67cdbadd243260178bf38956b62243af7d96a56d | [
"Markdown",
"Go"
] | 2 | Markdown | alexdevid/go-blog-server | 605a73a417ed637b66583107f80dc6b14adfc1da | 50b77032d2cd58f3e9119c16c4e709bd90658027 |
refs/heads/main | <repo_name>aioxzp/user-details<file_sep>/src/App.js
import logo from './logo.svg';
import './App.css';
import { useEffect, useState } from 'react';
import Users from './components/Users/Users';
import fakeData from './fakeData/profile.json';
function App() {
const userInfo = fakeData;
const [users, setUser] = useState(userInfo);
// useEffect(() => {
// fetch('https://jsonplaceholder.typicode.com/users')
// .then(res => res.json())
// .then(data => setUser(data))
// }, [])
return (
<div className="App">
<h1>User Information</h1>
{
users.map(user => <Users user={user} key={user.id}></Users>)
}
</div>
);
}
export default App;
| 94e21039ef2de5ed0ab2dbe923e304d9abf80f07 | [
"JavaScript"
] | 1 | JavaScript | aioxzp/user-details | 090aeb9b548ef249dd93772ee85da19baba496f1 | 290e43ffe81235e62f05bb316bae0b72e3e0215d |
refs/heads/master | <repo_name>INFO-4602-5602/project1-zayo-project1<file_sep>/README.md
# Project 1 : Zayo
Welcome to our Zayo project page :smile::raised_hands:. All the visualizations are based on D3 (**version 4**).
## Catalog
* [Team members & Contributions](#team-members-and-contributions)
* [How to run](#how-to-run)
* [Information used](#information-used)
* [Design Process](#design-process)
* [Checking List](#checking-list)
* [Unique Visualizations](#unique-visualizations)
* [Visualization 1 : Map and Filter](#visualization-1-map-and-filter)
* [Visualization 2: Monthly Revenue and CPQ Analysis](#visualization-2-monthly-revenue-and-cpq-analysis)
* [Visualization 3: Revenue Analysis](#visualization-3-revenue-analysis)
* [Visualization 4: Group Performance Analysis](#visualization-4-group-performance-analysis)
* [Visualization 5: Tabular Exploration](#visualization-5-tabular-exploration)
* [Visualization 6: Not used](#visualization-6-not-used-due-to-linear-data)
* [References](#references)
## Team members and contributions
* <NAME>:
* Participate in conceptualizing the visualizations
* 6th Visualization
* Added Responsive Bounded View Functionality to 1st Visualization
* Documentation
* <NAME>:
* Data Preprocessing: wrote python script to extract and calculate data from multiple tables.
* Participate in building visualization for 3rd & 4th Visualizations.
* Documentation: wrote and formatted markdown documentations.
* <NAME>:
* Worked on the scatter plot.
* Helped with conceptualizing the visualizations.
* Documentation.
* <NAME>:
* 1st & 5th Visualization & Dashboard
* Data Preprocessing
* Documentation
* <NAME>:
* Data Preprocessing: wrote a Python script to extract certain attributes and change the format so that it can be used for visualization
* 2nd Visualization
* Documentation
## How to run
* Start server
```bash
http-server -p 8088
```
or
```bash
python -m SimpleHTTPServer 8081
```
## Information Used
**We converted the CSV files to TSV files to avoid splitting errors raised by commas inside “numbers”. The tables we used are listed below:**
1. ZayoHackathonData_Buildings.csv and tsv:
Latitude and Longitude are used to locate buildings on the map. Street Address, City and Postal Code are visualized as tooltips. Market is used to filter map and analyze the customer distribution.
1. ZayoHackathonData_Accounts.csv:
The relation between Industry and Annual Revenue is visualized in Visualization 3. From the relation between these two attributes, we can see which industries generate more annual revenue; clients from those industries are more likely to be potential target customers.
1. ZayoHackathonData_CPQs.csv
In this dataset, X36 NPV List is used and visualized in Visualization 2
1. ZayoHackathonData_Services.csv
We evaluate the efficiency of each product group: we sum the revenue for each group and create a bubble chart based on the results.
## Design Process
At the beginning of the project, our team analyzed the dataset Zayo provided in light of the problem Zayo posed, and decided that the goal of this project was to help Zayo pick the most important features that can be used to detect potential target customers.
After reading the dataset, we found that customer detection can be approached from two different views — spatial and non-spatial. For the spatial part, the visualization design is straightforward since we have latitude and longitude. Usually, more customers in an area means a higher probability of finding quality customers, and all the attributes needed for the spatial visualization are in the same table, so no data pre-processing is required for this part. We also noticed that money-based attributes like annual revenue, MRR, and total benefit over time are the most direct factors for evaluating a customer's value. In addition, knowing which product group in Zayo is the busiest can indicate which demand is the most popular, and the relation between customers' industries and the stage they are at can indicate which industries these clients most likely come from. However, some of these features are located in different datasets, so we used data pre-processing with a Python script to combine them into one dataset. Since the customer prediction is mainly based on three markets, we also filtered out the data outside these three areas.
Then, we separated the project into several parts, and each part was assigned to one or more people. To avoid overlapping designs, we checked the project's progress at the end of each day to make sure everyone was on the same page. The visualizations we chose are all very basic but have interesting interactions: the map and the table can both be filtered by the market filter, and for annual revenue, clicking an industry filters out that industry's arc, so that some industries' arcs won't be squeezed together.
Finally, we combined all these visualizations into a systematic **dashboard**. Although our visualizations cannot output a list of predicted target customers directly, after analyzing all these visualizations jointly, some customer features can be shown to be meaningful for the prediction process.
## Checking List
- [x] Include a README.md file
- [x] Include at least three unique visualizations: we have 6.
- [x] Be able to work with any dataset of this format: automatically either read from CSV file or provided by python scripts.
- [x] Dashboarding: Show all three visualizations as part of the same screen.
- [x] Provide more than three visualizations: We provide 6 unique visualizations.
- [x] Dynamic Queries: We utilized filters (Map and Pie charts) to allow dynamic transitions and changing.
- [x] Missing Data: please check the Python scripts in the `utils` folder. We handle missing data in the Python scripts.
- [x] Coordinated Views: Visualization 5: Tabular Exploration can be dynamically changed by updating Visualization 1 : Map and Filter.
- [x] Overview+Detail: The visualizations start with overview perspective. We build filters to allow users to check details. For the map, it allows users to zoom in or out.
- [x] Style: We used each panel to show different visualization. Each panel shares the same style. For example, there is a panel that shows filter visualization and panel that shows analysis of the revenue.
## Unique Visualizations
### Visualization 1: Map and Filter
The first visualization is combined by two parts - Map and filter. For the map, we visualize the locations of each account with steel blue dots. It is easy to tell almost all of customers are clustered around three main markets which are Denver, Atlanta and Dallas. Because there is a possibility that one building may contain several customers, we set the opacity of dots to represent how many users are in building. When hovering on each dot, a tooltip that includes the building address, city and postal code will show. The donut-looking filter beside the map is used to filter the dots on the map based on markets. When change selection by filter, the map will zoom to the most appropriate level.
### Visualization 2: Monthly Revenue and CPQ Analysis
The first bar chart represents how much monthly revenue was received from the three states: Colorado, Texas and Georgia. We used a bar chart to represent this data. According to the data and the visualization, Atlanta makes the biggest monthly revenue out of the three states, and Dallas scores 2nd. The 2nd bar chart shows the relationship between Industry and CPQ. In the data file we used the “X36 NPV List” attribute, as it is the sum of the “X36 MRC List” and “X36 NPV List” values. With this data the visualization represents CPQ by each industry. Telecommunications has the highest CPQ out of all the industries, with IT Infrastructure and Finance 2nd and 3rd respectively. Other industries besides the top 3 are mostly low in CPQ.
### Visualization 3: Revenue Analysis
This visualization represented how much revenue is generated by each industry. We thought it was best to use a pie chart because it would be the best way to visualize which industry was more profitable. When you click on a color on the pie chart it corresponds to one industry. When you click on the color desired it tells you what industry it is, how much they made in a year and based on how much it is used compared to the other industries using a percentage.
### Visualization 4: Group Performance Analysis
This part represented the different networks used by Zayo. Each circle represented a different network and the size of the circle corresponded to how much income was made each month. Therefore, larger the circle the more income there is. Once the circle is clicked, it enlarges the circle, and it tells you which network it is and how much income it had made.
### Visualization 5: Tabular Exploration
This is a table looking visualization. It shows Building ID, Account ID, StageName, Market and Industry in the record format. This table can also be filtered by the map market filter. According to this part, we can see the distribution of StageName and Industry on different market. For example, if we see that for Denver, most of accounts are on success stage, it may indicate that this market has more potential target customers. Or for Dallas, customers in Telecom industry are the most, and based on other visualizations, Telecom customers can provide the most benefits, then it may also indicate more target clients are in this area.
### Visualization 6: Scatter Plot
We did not incorporate this visualization because it presented no valuable information. We learned that proximity networking in Colorado was more expensive than the other two states. The price was expensive with large buildings (e.g. hospitals) with On Zayo Networks but overall, as the graph is linear because Zayo does not use any other variables other than proximity to the network.
## References
* For the bootstrap page:
[Bootstrap CSS](http://getbootstrap.com/css/)
[Bootstrap Components](http://getbootstrap.com/components/)
[Bootstrap Javascript](http://getbootstrap.com/javascript/)
* For the 3rd Visualization: [d3-js-step-by-step](https://github.com/zeroviscosity/d3-js-step-by-step/blob/master/step-6-animating-interactivity.html)
* For the 4th Visualization: [Nau Technologies Stack](https://naustud.io/tech-stack/)
* For the 2nd Visualization : [Simple d3.js barchart](http://bl.ocks.org/d3noob/8952219)
* For the 1st Visualization:
[dcjs-leaflet-untappd](https://github.com/austinlyons/dcjs-leaflet-untappd);
[leaflet Map](http://leafletjs.com/)(http://viz.hedaro.com/);
[dc.js PieChart](https://github.com/dc-js/dc.js/blob/master/web/examples/pie.html)
* For the 5th Visualization:
[dc.js DataTable](https://dc-js.github.io/dc.js/docs/html/dc.dataTable.html)
<file_sep>/utils/sample.sh
python3 preprocess.py ../data/ZayoHackathonData_Accounts.csv ../data/ZayoHackathonData_Services.csv
<file_sep>/results/3rdidea_tmp.py
# Normalise the per-group totals in results_3rd.csv to a 0-1000 scale
# and print them as "<group>\t<scaled value>" lines.
with open('results_3rd.csv') as datafile:
    rows = datafile.readlines()

# Grand total over the second (value) column.
total = sum(float(row.split(',')[1]) for row in rows)

for row in rows:
    fields = row.split(',')
    print(fields[0] + '\t' + str(float(fields[1]) / total * 1000))
<file_sep>/utils/preprocess.py
"""
This file is for preprocess csv files for 2nd idea
"""
import json
import re
def cal_data(acct_file, ser_file, ind_filter=None, acc_filter=None, writepath='../results/results_2nd.csv'):
    """
    Combine information from Services.csv and Accounts.csv and compute how
    much each industry spends on our services.

    The aggregation runs in two passes:
      1. Services.csv: accumulate the dollar amount of every service row
         per account id.
      2. Accounts.csv: map each account id to its industry and roll the
         per-account totals up per industry.

    Parameters:
        acct_file: the file path of Accounts.csv (comma separated)
        ser_file: the file path of Services.csv (tab separated)
        ind_filter: optional set of industry names to exclude
        acc_filter: optional set of account ids to exclude
        writepath: path where the "industry,total" CSV is saved
    """
    # Account id -> accumulated service fee.
    acct_ser_dict = dict()
    # Industry -> accumulated fees over all of that industry's accounts.
    ind_prof_dict = dict()
    # Pass 1: accumulate service fees per account.
    with open(ser_file) as serf:
        serf.readline()  # skip the header row
        for line in serf:
            infos = line.split('\t')
            # The fee column looks like "$ 1,234.56"; keep the part after '$'.
            tmp = infos[2].split('$')[1].strip()
            if len(tmp) < 2:
                continue  # empty / placeholder amount
            try:
                # FIX: strip thousands separators before parsing so that
                # "$ 1,234.56" is read as 1234.56. The old regex-based parse
                # returned only the digits before the first comma (i.e. 1.0).
                profits = float(tmp.replace(',', ''))
            except ValueError:
                print(tmp)
                continue
            acc_id = infos[1]
            # FIX: acc_filter was documented but never applied.
            if acc_filter and acc_id in acc_filter:
                continue
            acct_ser_dict[acc_id] = acct_ser_dict.get(acc_id, 0.0) + profits
    # Pass 2: roll the per-account totals up to industries.
    with open(acct_file) as acctf:
        acctf.readline()  # skip the header row
        for line in acctf:
            infos = line.split(',')
            acc_id = infos[0].strip()
            if acc_id in acct_ser_dict:  # only accounts that bought services
                ind_name = infos[1].strip()  # industry name
                # FIX: ind_filter was documented but never applied.
                if ind_filter and ind_name in ind_filter:
                    continue
                ind_prof_dict[ind_name] = ind_prof_dict.get(ind_name, 0.0) + acct_ser_dict[acc_id]
    # Persist the industry totals as CSV.
    write2csv(writepath, ind_prof_dict)
def write2csv(path, object1, separation=','):
    """
    Write a dictionary to *path* as one "key<separation>value" line per
    entry.

    Parameters:
        path - the path to save the file
        object1 - the dictionary object to serialise
        separation - field separator placed between key and value
    """
    with open(path, 'w') as outfile:
        for key, value in object1.items():
            outfile.write('{0}{1}{2}\n'.format(key, separation, value))
def write2json(path, object1):
    """Serialise *object1* to *path* as pretty-printed, sorted UTF-8 JSON."""
    payload = json.dumps(object1,
                         ensure_ascii=False, sort_keys=True,
                         indent=4)
    with open(path, 'wb') as outfile:
        outfile.write(payload.encode('utf-8', 'replace'))
def cal_group(ser_file, group_filter=None, writepath='../results/results_3rd.csv'):
    """
    Calculate how much money each product group earns.

    Parameters:
        ser_file - Services.csv path (tab separated)
        group_filter - optional set of group ids to exclude
        writepath - the file where the "group,total" CSV will be saved
    """
    group_profits = dict()
    with open(ser_file) as datafile:
        datafile.readline()  # skip the header row
        for line in datafile:
            infos = line.split('\t')
            # The fee column looks like "$ 1,234.56"; keep the part after '$'.
            tmp = infos[2].split('$')[1].strip()
            if len(tmp) < 2:
                continue  # empty / placeholder amount
            try:
                # FIX: strip thousands separators before parsing so that
                # "$ 1,234.56" is read as 1234.56. The old regex-based parse
                # returned only the digits before the first comma (i.e. 1.0).
                profits = float(tmp.replace(',', ''))
            except ValueError:
                print(tmp)
                continue
            group_id = infos[4].strip()
            if len(group_id) < 3:
                continue  # blank / malformed group id
            # FIX: group_filter was documented but never applied.
            if group_filter and group_id in group_filter:
                continue
            group_profits[group_id] = group_profits.get(group_id, 0.0) + profits
    # Save to file and echo the totals for quick inspection.
    write2csv(writepath, group_profits)
    print(group_profits)
def built_cost(blt_file, state_filter=set(['GA', 'TX', 'CO']), writepath='../results/results_4rd.tsv'):
    """
    Extract the (state, status, network proximity, cost) columns for
    buildings in the given states and write them to *writepath* as TSV.

    Parameters:
        blt_file: Buildings.csv path (tab separated)
        state_filter: states to keep; pass a falsy value to keep all states.
            The default set is never mutated, so sharing it between calls
            is safe.
        writepath: output TSV path
    """
    # NOTE: the original also built an unused `blt_cost` dict; removed.
    with open(writepath, 'w') as writefile:
        with open(blt_file) as datafile:
            datafile.readline()  # skip the header row
            for line in datafile:
                infos = line.split('\t')
                state = infos[4].strip()
                if state_filter and state not in state_filter:
                    continue  # building outside the markets of interest
                status = infos[8].strip()
                net_prxi = infos[11].strip()  # distance to the network
                cost = infos[12].strip()
                writefile.write(state + '\t' + status + '\t' + net_prxi + '\t' + cost + '\n')
if __name__ == '__main__':
    import sys
    # Alternative entry points, kept for reference (enable as needed):
    #cal_data(sys.argv[1], sys.argv[2])
    #cal_group(sys.argv[1])
    # Default entry point: extract building-cost columns from argv[1].
    built_cost(sys.argv[1]) | f307f1617dcba8069abba9d28908145098201ef6 | [
"Markdown",
"Python",
"Shell"
] | 4 | Markdown | INFO-4602-5602/project1-zayo-project1 | 953f611de785038196f45684baf4a45d511eb420 | c8b1416bfe1407df9411d9631e75604a30503fd9 |
refs/heads/master | <file_sep>import os
from flask import Flask, render_template, make_response, request
app = Flask(__name__)
@app.route("/")
def hello():
    # Landing page: render the dashboard template.
    return render_template("dashboard.html")
@app.route('/data_update', methods=['GET'])
def system_parameters():
    """Serve the newest data file for the requested data series.

    The ``data`` query parameter selects the series; the newest file is
    the lexicographically greatest name with that prefix (names embed a
    sortable timestamp).
    """
    # Map of ?data=... values to on-disk file name prefixes (replaces
    # the original if/elif chain).
    prefixes = {
        "system_parameters": "erc_system_parameters_",
        "adequacy": "erc_adequacy_table_",
        "price": "erc_price_graph_",
        "load": "erc_load_graph_",
        "wind": "erc_wind_graph_",
    }
    data_type = request.args.get("data")
    if data_type not in prefixes:
        # ValueError is more precise than a bare Exception and is still
        # caught by any existing ``except Exception`` handler.
        raise ValueError("Invalid type: %s" % data_type)
    filename_prefix = prefixes[data_type]
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    last_file_name = sorted([f for f in os.listdir(data_dir) if f.startswith(filename_prefix)])[-1]
    # FIX: ``with`` closes the file handle (the original leaked it).
    with open(os.path.join(data_dir, last_file_name)) as data_file:
        response = make_response(data_file.read())
    response.headers["Content-type"] = "text/plain"
    return response
@app.route('/adequacy', methods=['GET'])
def adequacy():
    """Serve the newest adequacy table file as plain text."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    # Newest file wins: the names embed a sortable timestamp.
    last_file_name = sorted([f for f in os.listdir(data_dir) if f.startswith("erc_adequacy_table_")])[-1]
    # FIX: ``with`` closes the file handle (the original leaked it).
    with open(os.path.join(data_dir, last_file_name)) as data_file:
        response = make_response(data_file.read())
    response.headers["Content-type"] = "text/plain"
    return response
if __name__ == "__main__":
    # Honour the PORT environment variable (e.g. set by the hosting
    # platform); fall back to 5000 for local development.
    port = int(os.environ.get('PORT', 5000))
    app.run("0.0.0.0", port, debug=True)  # run app
| b375a73cc9301f3655eb6b534e707efe1cdabc63 | [
"Python"
] | 1 | Python | eprikazc/d3-dashboard | 685a6351270b5324cd786e97ed063017991b88fd | 72862284aa237934027ecb5f1675da487b3ac348 |
refs/heads/main | <repo_name>theo-nejm/CasinoBOT<file_sep>/src/actions/cardAction.js
const { MessageEmbed } = require("discord.js");
const decreaseAmount = require("../services/decreaseAmount");
const getPlayerByDcId = require("../services/getPlayerByDcId");
const increaseAmount = require("../services/increaseAmount");
const randomInArray = require("../utils/randomInArray");
const randomInt = require("../utils/randomInt");
// Handle "$card [@opponent] <amount>": start a blackjack game, either
// against the bot (no mention) or against another registered player
// (mention + 30 s accept window). `amount` is the bet in in-game dollars.
module.exports = async msg => {
  // Everything after the command name: [mention?, amount].
  const params = msg.content.replace(`${process.env.PREFIX}card`, '').split(' ');
  const amount = Number(params[1]);
  if(msg.mentions.users.size === 0) {
    // --- Solo game against the bot ---
    const player = await getPlayerByDcId(msg.author.id);
    if(player.balance < amount) {
      msg.reply(`você não pode apostar um valor maior que o que você tem. Seu saldo é $${player.balance},00`);
      return;
    }
    if(amount < 0) {
      msg.reply('você não pode apostar um valor negativo.');
      return;
    }
    const game = {
      isRunning: true,
      round: 0,
      isAgainstBot: true,
      whoseTurn: 'player1',
      player1: {
        total: 0,
        name: player.name,
        id: player.id,
      },
      player2: {
        // The bot's hand is pre-rolled: a 12-16 "stand" base plus 0-11.
        total: randomInt(12, 16) + randomInt(0, 11),
        name: 'CasinoBOT',
        id: 1,
      },
      amount: amount || 0,
    }
    chooseCard(msg, game);
  } else {
    // --- Player-vs-player game ---
    const player1 = await getPlayerByDcId(msg.author.id);
    const player2 = await getPlayerByDcId(msg.mentions.users.first().id);
    if(!player2) {
      msg.reply('a pessoa que você desafiou não está no jogo ainda. Fala pra ela entrar! $entrar');
      return;
    }
    if(player1.balance < amount) {
      msg.reply(`você não pode apostar um valor maior que o que você tem. Seu saldo é: $${player1.balance},00`);
      return;
    }
    if (player2.balance < amount) {
      msg.reply(`você não pode apostar um valor maior que o saldo do seu oponente. O saldo dele é: $${player2.balance},00`);
      return;
    }
    const game = {
      isRunning: true,
      round: 0,
      amount: amount || 0,
      isAgainstBot: false,
      whoseTurn: 'player1',
      player1: {
        total: 0,
        id: player1.id,
        name: player1.name,
        isFinished: false,
      },
      player2: {
        total: 0,
        id: player2.id,
        name: player2.name,
        isFinished: false,
      }
    }
    // NOTE(review): this embed is built but never sent (the challenge is
    // sent as plain text below) — confirm whether it should be used.
    const embed = new MessageEmbed()
      .setColor([35, 23, 45])
      .setTitle('Vai aceitar o desafio?')
    // Challenge message; the opponent answers via ✅/❌ reactions.
    msg.channel.send(`<@${player2.id}>, o ${player1.name} te desafiou para um blackjack. [Você tem 30s para aceitar]`)
      .then(embedMsg => {
        const possibleReactions = ['✅', '❌']
        possibleReactions.forEach((reaction, user) => embedMsg.react(reaction));
        // Only the challenged player's reaction counts.
        const filter = (reaction, user) => {
          return possibleReactions.includes(reaction.emoji.name) && user.id === player2.id;
        }
        embedMsg.awaitReactions(filter, { max: 1, time: 30000, errors: ['time'] })
          .then(reactions => {
            const reaction = reactions.first();
            const choosenOption = possibleReactions.indexOf(reaction.emoji.name);
            if(choosenOption === 0) {
              // Accepted: start dealing cards.
              chooseCard(msg, game);
            } else {
              msg.channel.send(`<@${player1.id}>, ${player2.name} rejeitou seu desafio.`);
            }
          }).catch(e => {
            // 30 s accept window expired.
            console.log(e);
            msg.channel.send(`Infelizmente, <@${player1.id}>, o tempo de aceitar/rejeitar o desafio expirou.`);
          })
      });
  }
}
/**
 * Deal one card to the player whose turn it is, then wait for their
 * reaction: 🃏 = hit (draw again), ❌ = stand. The game object is mutated
 * in place and play continues recursively until both players stand or
 * someone reaches 21+.
 */
function chooseCard(msg, game) {
  if(!game.isRunning) return;
  // Blackjack card values; aces always count as 11 (no soft/hard logic).
  const cards = {
    'A': 11,
    'J': 10,
    'Q': 10,
    'K': 10,
    '10': 10, // FIX: the ten was missing from the original deck
    '9': 9,
    '8': 8,
    '7': 7,
    '6': 6,
    '5': 5,
    '4': 4,
    '3': 3,
    '2': 2,
  }
  const naipes = ['copas', 'ouro', 'espada', 'paus'];
  const naipe = randomInArray(naipes);
  // randomInt's upper bound is exclusive (see randomInArray usage).
  const cardIndex = randomInt(0, Object.keys(cards).length);
  const carta = Object.keys(cards)[cardIndex];
  const cardValue = Object.values(cards)[cardIndex];
  game[game.whoseTurn].total += cardValue;
  game.round += 1;
  const embed = new MessageEmbed()
    .setColor([35, 23, 45])
  if(game.round === 1) {
    // First card of the match: include the welcome header.
    embed.setTitle(`${msg.author.username}, bem-vindo ao blackjack! Você está jogando contra ${game.isAgainstBot ? 'um bot' : game.player2.name}!`);
    embed.addField('Você recebeu um:', `${carta} de ${naipe}!`, true);
    embed.addField('Pontuação total:', game.player1.total, true);
  } else {
    embed.addField(`${game[game.whoseTurn].name} recebeu um:`, `${carta} de ${naipe}!`, true);
    embed.addField('Pontuação total:', game[game.whoseTurn].total, true);
    // Bust or 21 ends the game immediately.
    if(game[game.whoseTurn].total >= 21) {
      finishGame(msg, game);
      msg.channel.send(embed);
      return;
    }
  }
  msg.channel.send(embed).then(embedMsg => {
    const possibleReactions = ['🃏', '❌'];
    possibleReactions.forEach((reaction) => embedMsg.react(reaction));
    // Only the current player's reaction counts.
    const filter = (reaction, user) => {
      return possibleReactions.includes(reaction.emoji.name) && user.id === game[game.whoseTurn].id;
    }
    embedMsg.awaitReactions(filter, { max: 1, time: 15000, errors: ['time'] })
      .then(reactions => {
        const reaction = reactions.first();
        const choosenOption = possibleReactions.indexOf(reaction.emoji.name);
        if(choosenOption === 0) {
          // Hit: in PvP pass the turn, skipping a player who already stood.
          if(!game.isAgainstBot) game.whoseTurn = game.whoseTurn === 'player1' ? 'player2' : 'player1';
          if(game[game.whoseTurn].isFinished) game.whoseTurn = game.whoseTurn === 'player1' ? 'player2' : 'player1';
          chooseCard(msg, game);
        } else {
          // Stand.
          game[game.whoseTurn].isFinished = true;
          if(game.isAgainstBot && game.player1.isFinished) {
            finishGame(msg, game);
            return;
          }
          if(game.player1.isFinished && game.player2.isFinished) {
            finishGame(msg, game);
            return;
          } else {
            game.whoseTurn = game.whoseTurn === 'player1' ? 'player2' : 'player1';
            chooseCard(msg, game);
          }
        }
      })
      .catch(() => {
        // FIX: the original had no rejection handler, so a 15 s reaction
        // timeout left the game hanging with an unhandled rejection.
        // Treat a timeout as "stand" and settle the game.
        game[game.whoseTurn].isFinished = true;
        finishGame(msg, game);
      });
  })
}
// Settle a finished blackjack game: decide the winner, move the bet
// between the players (the bot's bank is implicit — money is only
// created/destroyed on the human side), and announce the result.
async function finishGame(msg, game) {
  const newEmbed = new MessageEmbed();
  game.isRunning = false;
  // Draw: both players bust, or equal totals. No money changes hands.
  if(game.player2.total > 21 && game.player1.total > 21 || game.player1.total == game.player2.total) {
    newEmbed.setColor([200, 70, 10]);
    newEmbed.setTitle('EMPATE!');
  } else if((game.player2.total > game.player1.total && game.player2.total <= 21) || game.player1.total > 21) {
    // Player 2 wins: charge the author; credit the opponent (humans only).
    if(await decreaseAmount(msg.author.id, game.amount)) {
      !game.isAgainstBot && await increaseAmount(game.player2.id, game.amount);
      newEmbed.setColor([110, 35, 35]);
      newEmbed.setTitle(`${game.player2.name} venceu!`);
      newEmbed.addField(`${game.player2.name}, você ganhou`, `$${game.amount},00`);
      newEmbed.addField(`${game.player1.name}, você perdeu`, `$${game.amount},00`);
    } else {
      // The balance update failed; report instead of announcing a winner.
      newEmbed.setColor([65, 23, 45]);
      newEmbed.setTitle('Ocorreu um erro.');
    }
  } else {
    // Player 1 wins: credit the author; charge the opponent (humans only).
    if(await increaseAmount(msg.author.id, game.amount)) {
      !game.isAgainstBot && await decreaseAmount(game.player2.id, game.amount);
      newEmbed.setColor([35, 110, 35]);
      newEmbed.setTitle(`${game.player1.name} venceu!`);
      newEmbed.addField(`${game.player1.name}, você ganhou`, `$${game.amount},00`);
      newEmbed.addField(`${game.player2.name}, você perdeu`, `$${game.amount},00`);
    } else {
      newEmbed.setColor([65, 23, 45]);
      newEmbed.setTitle('Ocorreu um erro.')
    }
  }
  // Always show the final scores.
  newEmbed.addField(`Pontuação de ${game.player1.name}: `, game.player1.total, true);
  newEmbed.addField(`Pontuação de ${game.player2.name}: `, game.player2.total, true);
  msg.reply(newEmbed);
}<file_sep>/src/utils/randomInArray.js
const randomInt = require("./randomInt")
// Return a uniformly random element of `array`. randomInt's upper bound
// is exclusive here, so indices stay within 0..length-1.
module.exports = (array) => {
  return array[randomInt(0, array.length)];
}<file_sep>/src/services/getPlayerByDcId.js
const { QueryTypes } = require('sequelize');
const sequelize = require('../config/db');
module.exports = async discord_id => {
const sql = `SELECT * FROM players WHERE id = ${discord_id};`
const exists = await sequelize.query(sql, { type: QueryTypes.SELECT });
return exists.length > 0 ? exists[0] : false;
}<file_sep>/src/actions/duelomortalAction.js
const { MessageEmbed } = require("discord.js");
const decreaseAmount = require("../services/decreaseAmount");
const getPlayerByDcId = require("../services/getPlayerByDcId");
const increaseAmount = require("../services/increaseAmount");
const randomInArray = require("../utils/randomInArray");
const randomInt = require("../utils/randomInt");
const duelomortalService = require("../services/duelomortalService");
module.exports = async msg => {
const player1 = await getPlayerByDcId(msg.author.id);
const player2 = await getPlayerByDcId(msg.mentions.users.first().id);
if(msg.mentions.users.size === 0) {
msg.reply('você precisa desafiar alguém para o duelo mortal.');
return;
}
if(!player2) {
msg.reply('a pessoa que você desafiou não está no jogo ainda. Fala pra ela entrar usando o $entrar!');
return;
}
const embed = new MessageEmbed()
.setColor([35, 23, 45])
.setTitle('Vai aceitar o desafio?')
msg.channel.send(`<@${player2.id}>, o ${player1.name} te desafiou para um Duelo Mortal. [Você tem 30s para aceitar]`)
.then(embedMsg => {
const possibleReactions = ['✅', '❌']
possibleReactions.forEach((reaction, user) => embedMsg.react(reaction));
const filter = (reaction, user) => {
return possibleReactions.includes(reaction.emoji.name) && user.id === player2.id;
}
embedMsg.awaitReactions(filter, { max: 1, time: 30000, errors: ['time'] })
.then(reactions => {
const reaction = reactions.first();
const choosenOption = possibleReactions.indexOf(reaction.emoji.name);
if(choosenOption === 0) {
const playersduelo = [player1, player2];
const vencedor = randomInArray(playersduelo);
msg.channel.send(`Vencedor: ${vencedor.name}`)
duelomortalService(vencedor.id, playersduelo.find(player => player.id !== vencedor.id).id);
} else {
msg.channel.send(`<@${player1.id}>, ${player2.name} rejeitou seu desafio.`);
}
}).catch(e => {
console.log(e);
msg.channel.send(`Infelizmente, <@${player1.id}>, o tempo de aceitar/rejeitar o desafio expirou.`);
})
});
}
<file_sep>/src/services/checkRouletteCooldown.js
const { QueryTypes } = require('sequelize');
const sequelize = require('../config/db');
module.exports = async discord_id => {
const sql = `SELECT lastRouletteWithdraw FROM players WHERE id = ${discord_id};`
const [ { lastRouletteWithdraw } ] = await sequelize.query(sql, { type: QueryTypes.SELECT });
if(!lastRouletteWithdraw) return false;
const cdDate = new Date(lastRouletteWithdraw).getTime();
const now = new Date().getTime();
const timePast = now - cdDate;
if(timePast > 32400000) return false;
else return 32400000 - timePast;
}<file_sep>/src/actions/ajudaAction.js
const { MessageEmbed } = require("discord.js");
const getPlayerByDcId = require("../services/getPlayerByDcId")
module.exports = async msg => {
const player = await getPlayerByDcId(msg.author.id);
const params = msg.content.replace(`${process.env.PREFIX}ajuda`, '').trim().split(' ');
const category = params[0];
const subcategory = params[1];
const validCategories = [
'game',
'roleta',
'perfil',
'entrar'
]
const stringValidCategories = validCategories.join(', ');
const validGames = [
'card',
'dado',
'duelomortal'
]
const embed = new MessageEmbed()
.setColor([120, 120, 200])
.setAuthor('AJUDA!')
.setTitle('Aqui você pode ver os comandos e como usá-los.')
.addField('$ajuda', 'Mostra para você os comandos básicos e um pouco de como usá-los.', false)
.addField('$ajuda <categoria> (ex.: $ajuda game)', 'Mostra para você os comandos básicos da categoria escolhida.', false)
.addField('$ajuda <categoria> <subcategoria> (ex.: $ajuda game card)', 'Explica como funciona a subcategoria, no caso do exemplo, o jogo.', false)
.setDescription('\n**Categorias**: ' + stringValidCategories + '.')
if(category) {
if(!validCategories.includes(category)) {
embed.addField('Você precisa selecionar uma categoria válida', `Categorias: ${stringValidCategories}.`);
}else{
switch(category) {
case 'perfil':
embed.addField('game explanation', '.');
break;
case 'roleta':
embed.addField('dado explanation', '.');
break;
case 'game':
embed.addField('game explanation', '.');
break;
case 'entrar':
embed.addField('entrar explanation', '.');
break;
}
}
}
if(subcategory) {
if(!validGames.includes(subcategory)) {
embed.addField('você precisa selecionar um jogo válido.')
} else{
switch(subcategory) {
case 'card':
embed.addField('card explanation', '.');
break;
case 'dado':
embed.addField('dado explanation', '.');
break;
}
}
}
msg.channel.send(embed);
}<file_sep>/src/bot.js
require('dotenv/config');
const { Client } = require('discord.js');
const actions = require('./actions/index');

// Discord client: log readiness and forward every incoming message to
// the command dispatcher in actions/index.js.
const client = new Client();
client.on('ready', () => {
  console.log(`Bot started ${client.user.tag}`);
});
client.on('message', msg => {
  actions(msg);
});

// One-off database bootstrap kept for reference: syncs the schema and
// seeds a dummy player row. Enable only when (re)creating the database.
// ;(
//   async () => {
//     const Player = require('./models/Player');
//     const db = require('./config/db');
//     try {
//       const result = await db.sync();
//       await Player.create({
//         id: '1',
//         name: 'a',
//         balance: 0,
//         lastRouletteWithdraw: null,
//       })
//     } catch (e) {
//       console.log(e);
//     }
//   }
// )();

// BOT_TOKEN comes from .env via dotenv/config above.
client.login(process.env.BOT_TOKEN);
<file_sep>/src/models/Player.js
const Sequelize = require('sequelize');
const db = require('../config/db');
// Sequelize model for a casino player.
const Player = db.define('player', {
  // Discord user id (snowflake), stored as a string primary key.
  id: {
    type: Sequelize.STRING,
    allowNull: false,
    primaryKey: true,
  },
  // Discord username captured when the player joined.
  name: {
    type: Sequelize.STRING,
    allowNull: false,
  },
  // Current balance in in-game dollars.
  balance: {
    type: Sequelize.DOUBLE,
    allowNull: false,
  },
  // Timestamp of the last roulette reward; null until the first spin.
  lastRouletteWithdraw: {
    type: Sequelize.DATE,
    allowNull: true,
  }
})
module.exports = Player;<file_sep>/src/utils/verifyAmount.js
module.exports = (amount, player1, player2) => {
if(player1.balance < amount) return `você não tem saldo suficiente para esse desafio.`;
if(player2 && player2.balance < amount) return `${player2.name} não tem saldo suficiente para esse desafio.`;
if(amount < 0) return `você não pode apostar um valor negativo.`;
if(String(amount).includes('.') || !Number(amount)) return `você só pode apostar valores inteiros.`;
if(typeof amount !== 'number') return `você deve apostar um valor numérico.`;
}<file_sep>/src/services/duelomortalService.js
const { QueryTypes } = require('sequelize');
const sequelize = require('../config/db');
const getPlayerByDcId = require('./getPlayerByDcId');
module.exports = async (id1, id2) => {
try {
const player1 = await getPlayerByDcId(id1);
const player2 = await getPlayerByDcId(id2);
const updateQuery = `UPDATE players
SET balance = ${player1.balance + player2.balance}
WHERE id = ${id1};`
const deleteQuery = `DELETE FROM players WHERE id = ${id2}`;
await sequelize.query(updateQuery, { type: QueryTypes.UPDATE });
await sequelize.query(deleteQuery, { type: QueryTypes.DELETE });
return true;
} catch (e) {
console.log(e);
return false;
}
}<file_sep>/src/actions/index.js
const globalFilters = require('../filters/globalFilters');
const playerExists = require('../services/playerExists');
const ajudaAction = require('./ajudaAction');
const cardAction = require('./cardAction');
const dadoAction = require('./dadoAction');
const duelomortalAction = require('./duelomortalAction');
const entrarAction = require('./entrarAction');
const parImparAction = require('./parImparAction');
const profileAction = require('./profileAction');
const roletaAction = require('./roletaAction');
const prefix = process.env.PREFIX;
module.exports = async function messageAction(msg) {
const isPlayer = await playerExists(msg.author.id);
if(await globalFilters(msg)) return;
if(!(msg.content.split(' ')[0] === `${prefix}entrar` || msg.content.split(' ')[0] === `${prefix}ajuda`) && !isPlayer) {
msg.reply(' você precisa estar no jogo para usar esse comando. Digite $entrar');
return;
}
switch(msg.content.split(' ')[0]) {
case `${prefix}dado`:
dadoAction(msg);
break;
case `${prefix}random`:
randomAction(msg);
break;
case `${prefix}card`:
cardAction(msg);
break;
case `${prefix}entrar`:
await entrarAction(msg);
break;
case `${prefix}parimpar`:
await parImparAction(msg);
break;
case `${prefix}perfil`:
await profileAction(msg);
break;
case `${prefix}roleta`:
await roletaAction(msg);
break;
case `${prefix}duelomortal`:
await duelomortalAction(msg);
break;
case `${prefix}ajuda`:
await ajudaAction(msg);
break;
}
}
<file_sep>/src/actions/parImparAction.js
const { MessageEmbed } = require("discord.js");
const randomInArray = require("../utils/randomInArray");
const randomInt = require("../utils/randomInt");
// Handle "$parimpar": start a marble-count game between the author and
// the bot. Each side starts with 10 "bolas"; rounds run in playerTime()
// until one side hits zero (see isFinishGame).
module.exports = msg => {
  const game = {
    isRunning: true,
    bolasPlayer: 10,
    bolasBot: 10,
  }
  playerTime(msg, game);
}
// One round: the bot secretly picks odd/even, the player picks 1-10 via
// number reactions, and marbles move depending on the parity match.
function playerTime(msg, game) {
  const botChooseOdd = randomInArray([true, false]);
  const embed = new MessageEmbed()
    .setColor([35, 23, 45])
    .setTitle('Escolha um número: ')
    // NOTE(review): placeholder description left in by the author —
    // replace with real instructions before release.
    .setDescription('penis')
  msg.channel.send(embed).then(embedMsg => {
    const possibleReactions = ['1️⃣','2️⃣','3️⃣','4️⃣','5️⃣','6️⃣','7️⃣','8️⃣','9️⃣','🔟']
    possibleReactions.forEach((reaction, user) => embedMsg.react(reaction));
    // Only the author's reaction counts.
    const filter = (reaction, user) => {
      return possibleReactions.includes(reaction.emoji.name) && user.id === msg.author.id;
    }
    embedMsg.awaitReactions(filter, { max: 1, time: 15000, errors: ['time'] })
      .then(reactions => {
        const reaction = reactions.first();
        const newEmbed = new MessageEmbed();
        // Reaction index is 0-based; the chosen number is 1-10.
        const choosenOption = possibleReactions.indexOf(reaction.emoji.name) + 1;
        // NOTE(review): `% 2 === 0` is an EVEN test, so the name `isOdd`
        // is inverted — confirm the intended parity rule before relying
        // on the win/lose branches below.
        const isOdd = choosenOption % 2 === 0;
        if(isOdd && botChooseOdd){
          // Player loses `choosenOption` marbles to the bot.
          game.bolasBot += choosenOption;
          game.bolasPlayer -= choosenOption;
          newEmbed.setColor([35, 110, 35]);
          newEmbed.setTitle(`${msg.author.username} perdeu ${choosenOption} bolas!`);
          isFinishGame(msg, game);
        } else {
          // Player takes `choosenOption` marbles from the bot.
          game.bolasBot -= choosenOption;
          game.bolasPlayer += choosenOption;
          newEmbed.setColor([35, 110, 35]);
          newEmbed.setTitle(`${msg.author.username} ganhou ${choosenOption} bolas!`);
          isFinishGame(msg, game);
        }
      })
  })
}
// Decide whether the game is over (either side at <= 0 marbles); if so
// announce the winner, otherwise start another round.
async function isFinishGame(msg, game) {
  const newEmbed = new MessageEmbed();
  if (game.bolasPlayer <= 0 || game.bolasBot <= 0){
    game.isRunning = false;
  }else{
    game.isRunning = true;
  }
  if(!game.isRunning) {
    // NOTE(review): MessageEmbed#addField requires BOTH a name and a
    // value — these single-argument calls throw in discord.js; confirm
    // and supply a value argument.
    if(game.bolasPlayer > game.bolasBot){
      newEmbed.addField(`${msg.author.username} venceu!`);
    }else{
      newEmbed.addField(`Bot venceu!`);
    }
    msg.reply(newEmbed);
  }else{
    console.log('continua essa porra')
    playerTime(msg, game)
  }
}<file_sep>/src/actions/entrarAction.js
const { MessageEmbed } = require("discord.js");
const Player = require("../models/Player");
const playerExists = require("../services/playerExists");
module.exports = async msg => {
try {
if (await playerExists(msg.author.id)) {
msg.reply(' você já está no jogo.');
return;
}
await Player.create({
id: msg.author.id,
name: msg.author.username,
balance: 100,
});
const embed = new MessageEmbed()
.setAuthor('CasinoBANK')
.setColor([65, 225, 65])
.setDescription(msg.author.username + 'você acaba de entrar no jogo!')
.setFooter('Seu saldo é: $100,00');
msg.reply(embed);
} catch (e) {
console.log(e);
msg.reply(' não deu pra entrar no jogo.');
}
}
| 9e3d654225e22e85619b67c8e77e2caec14c71b8 | [
"JavaScript"
] | 13 | JavaScript | theo-nejm/CasinoBOT | f4c62432454863405aadbbd297305c1701c6d8f7 | b9f0c05aa456becadf45ded22cd0918c8f768681 |
refs/heads/master | <repo_name>tKoeck73/OxRAM-BarMixvah<file_sep>/pump_controller.ino
boolean pumps[5];
// Configure the five pump-control pins (8-12) as outputs and open the
// serial link used to receive pump commands.
void setup(){
  pinMode(8, OUTPUT);
  pinMode(9, OUTPUT);
  pinMode(10, OUTPUT);
  pinMode(11, OUTPUT);
  pinMode(12, OUTPUT);
  Serial.begin(9600);
  while (!Serial);  // wait for the serial port to be ready
  Serial.println("Pump controller");
}
// Count the occurrences of splitChar in text.
int countSplitCharacters(String text, char splitChar) {
  int returnValue = 0;
  // FIX: the original condition `i <= text.length() - 1` underflows for
  // an empty String (length() is unsigned, so 0 - 1 wraps around) and
  // scanned billions of out-of-range indices. Compare with `<` instead.
  for(unsigned int i = 0; i < text.length(); i++){
    if(text.charAt(i)==splitChar){
      returnValue++;
    }
  }
  return returnValue;
}
// Return the index-th field of `data` split on `separator`, or an empty
// String when there are not enough fields. (The common Arduino
// split-string helper pattern.)
String getValue(String data, char separator, int index){
  int found = 0;
  // strIndex[0]/strIndex[1] bracket the current field: [start, end).
  int strIndex[] = {0, -1};
  int maxIndex = data.length()-1;
  for(int i=0; i<=maxIndex && found<=index; i++){
    if(data.charAt(i)==separator || i==maxIndex){
      found++;
      strIndex[0] = strIndex[1]+1;
      strIndex[1] = (i == maxIndex) ? i+1 : i;
    }
  }
  return found>index ? data.substring(strIndex[0], strIndex[1]) : "";
}
// Flip the remembered state of the pump on pin `i` (8-12) and drive the
// pin accordingly.
void togglePump(int i){
  // FIX: guard against pin numbers outside 8-12 — the original indexed
  // pumps[i - 8] unchecked, so garbage serial input (e.g. toInt()
  // returning 0) wrote outside the array.
  if (i < 8 || i > 12) return;
  pumps[i-8] = not pumps[i-8];
  digitalWrite(i,pumps[i-8]);
}
// Block for t seconds.
void snooze(int t){
  // FIX: reject non-positive durations — a negative t previously
  // converted to a huge unsigned delay.
  if (t <= 0) return;
  // FIX: compute the millisecond count in unsigned long. `1000 * t` was
  // evaluated in 16-bit int on AVR boards and overflowed for t >= 33.
  delay(1000UL * (unsigned long)t);
}
// Return the smallest strictly-positive entry of the five-element array,
// or the sentinel 31000 when no entry is positive.
int getMin(int arr[]){
  int best = 31000;
  for (int i = 0; i < 5; i++) {
    int v = arr[i];
    if (v > 0 && v < best) {
      best = v;
    }
  }
  return best;
}
// True when every one of the five entries of arr is zero.
boolean isZero(int arr[]){
  for (int i = 0; i < 5; i++) {
    if (arr[i] != 0) {
      return false;
    }
  }
  return true;
}
// Main serial command loop. Comma-separated protocol:
//   "P"                    -> toggle the pump on pin P
//   "P,T"                  -> run pump P for T seconds
//   "T8,T9,T10,T11,T12"    -> run all five pumps for their own durations
void loop() {
  if (Serial.available()){
    String s = Serial.readString();
    //Serial.println(String(s.length()));
    // Number of commas decides which command form this is.
    int splits = countSplitCharacters(s,',');
    //Serial.println(String(splits));
    int t;
    int id;
    switch (splits){
      case 0:
        // Single number: toggle that pin.
        Serial.print("toggled pump " + s);
        togglePump(s.toInt());
        break;
      case 1:
        // "pin,seconds": pulse one pump for the given duration.
        t = getValue(s,',',1).toInt();
        id = getValue(s,',',0).toInt();
        Serial.println("toggle pump " + String(id) + " on for " + String(t) + "seconds" );
        togglePump(id);
        snooze(t);
        togglePump(id);
        break;
      case 4:
        // Five durations: start every pump with a non-zero time, then
        // repeatedly sleep for the shortest remaining duration and turn
        // off pumps whose time has elapsed.
        int times[5];
        for(int i=8; i<=12; i++){
          times[i-8] = getValue(s,',',i-8).toInt();
          pumps[i-8] = times[i-8];  // non-zero duration -> pump on
          digitalWrite(i,pumps[i-8]);
        }
        //Serial.println(getMin(times));
        //Serial.println(isZero(times));
        while (not(isZero(times))){
          t = getMin(times);  // shortest remaining run time
          snooze(t);
          Serial.println(t);
          for(int i=0; i<=4; i++){
            if (times[i]>=t){
              times[i]-=t;
            }
            // Pin stays HIGH while time remains (non-zero -> HIGH).
            digitalWrite(i+8,times[i]);
            pumps[i]=times[i];
          }
        }
    }
  }
}
| 0fdd92985ccb30eb38daaa6425d169339108153f | [
"C++"
] | 1 | C++ | tKoeck73/OxRAM-BarMixvah | 18b68c9bd7ea2a5792c901a5af2dc56a5ae45422 | ece3b789f618328349e2f18dcd47736485b207a6 |
refs/heads/master | <file_sep> $(document).ready(function(){
gapi.load('client:auth2', initClient);
mapRoomColors();
})
// Client ID and API key from the Developer Console
var CLIENT_ID = '640400674098-bsei1hk16prohqiuie6lapgs8267b0g0.apps.googleusercontent.com';
var API_KEY = '<KEY>';
// Array of API discovery doc URLs for APIs used by the quickstart
var DISCOVERY_DOCS = ["https://www.googleapis.com/discovery/v1/apis/calendar/v3/rest"];
// Authorization scopes required by the API; multiple scopes can be
// included, separated by spaces.
var SCOPES = "https://www.googleapis.com/auth/calendar.readonly";
var authorizeButton = document.getElementById('authorize_button');
var signoutButton = document.getElementById('signout_button');
/**
* On load, called to load the auth2 library and API client library.
*/
// Entry point for the Google API <script> onload hook: pulls in the
// client + auth2 modules, then hands control to initClient().
function handleClientLoad() {
  gapi.load('client:auth2', initClient);
}
/**
* Initializes the API client library and sets up sign-in state
* listeners.
*/
// Initialise the gapi client with our credentials, then wire up sign-in
// state tracking and the authorize / sign-out buttons.
function initClient() {
  var clientConfig = {
    apiKey: API_KEY,
    clientId: CLIENT_ID,
    discoveryDocs: DISCOVERY_DOCS,
    scope: SCOPES
  };
  gapi.client.init(clientConfig).then(
    function () {
      // Listen for sign-in state changes.
      gapi.auth2.getAuthInstance().isSignedIn.listen(updateSigninStatus);
      // Handle the initial sign-in state.
      updateSigninStatus(gapi.auth2.getAuthInstance().isSignedIn.get());
      authorizeButton.onclick = handleAuthClick;
      signoutButton.onclick = handleSignoutClick;
    },
    function (error) {
      console.log('ERROR', error);
    }
  );
}
/**
* Called when the signed in status changes, to update the UI
* appropriately. After a sign-in, the API is called.
*/
// Toggle the auth buttons to match the sign-in state; once signed in,
// kick off the room-status lookup.
function updateSigninStatus(isSignedIn) {
  if (!isSignedIn) {
    authorizeButton.style.display = 'block';
    signoutButton.style.display = 'none';
    return;
  }
  authorizeButton.style.display = 'none';
  signoutButton.style.display = 'block';
  determineRoomStatus(rooms);
}
/**
* Sign in the user upon button click.
*/
// Start the Google sign-in flow when the authorize button is clicked.
function handleAuthClick(event) {
  var authInstance = gapi.auth2.getAuthInstance();
  authInstance.signIn();
}
/**
* Sign out the user upon button click.
*/
// Sign the user out when the sign-out button is clicked.
function handleSignoutClick(event) {
  var authInstance = gapi.auth2.getAuthInstance();
  authInstance.signOut();
}
// A list of code room calendars mapped to room names
// Meeting-room registry: maps each room's Google Calendar id onto the name
// used as the CSS class of its shape in the floor-plan SVG.
// (Calendar addresses are redacted placeholders in this copy of the source.)
var rooms = [
  { name: 'SPOCK', id: '<EMAIL>3<EMAIL>' },
  { name: 'PAPER', id: '<EMAIL>' },
  { name: 'ROCK', id: '<EMAIL>' },
  { name: 'SCISSORS', id: '<EMAIL>' },
  { name: 'LIZARD', id: '<EMAIL>' },
  { name: 'MCROOMFACE', id: '<EMAIL>' },
  { name: 'MORTY', id: 'code.berlin_31333137303136343636<EMAIL>' },
  { name: 'RICK', id: '<EMAIL>' }
];
// For each room, query its next upcoming Calendar event and derive
// room.status ('booked' | 'not-booked').
//
// Fixes vs. the original:
//  * returns the rooms array instead of undefined — mapRoomColors() calls
//    .forEach on the return value and previously crashed;
//  * stray console.log debug output removed.
// NOTE(review): the gapi calls resolve asynchronously, so statuses are
// written to the room objects only after the promises settle — callers that
// read room.status synchronously may still see the previous value.
function determineRoomStatus(rooms) {
  rooms.forEach(function (room) {
    gapi.client.calendar.events.list({
      'calendarId': room.id,
      'timeMin': (new Date()).toISOString(),
      'showDeleted': false,
      'singleEvents': true,
      'maxResults': 1,
      'orderBy': 'startTime'
    }).then(function (response) {
      var events = response.result.items;
      if (events.length > 0) {
        var start = new Date(events[0].start.dateTime);
        var end = new Date(events[0].end.dateTime);
        var now = new Date();
        // Booked only when "now" falls inside the next event's window.
        room.status = (now > start && now < end) ? 'booked' : 'not-booked';
      } else {
        room.status = 'not-booked';
      }
    });
  });
  return rooms;
}
// Paint each meeting-room shape in the SVG according to its booking status
// and occupancy.
//
// Fixes vs. the original:
//  * declared with `var` instead of an implicit-global assignment (which
//    throws in strict mode / ES modules);
//  * determineRoomStatus() returned undefined, so calling .forEach on its
//    result crashed — fall back to the module-level rooms array.
// NOTE(review): room.occupation is never populated anywhere in this file,
// so roomColor() always takes its status-only fallthrough branches; confirm
// whether occupancy was meant to come from an external feed.
var mapRoomColors = function () {
  var currentRoomStatus = determineRoomStatus(rooms) || rooms;
  currentRoomStatus.forEach(function (room) {
    $("#meeting-rooms ." + room.name).css("fill", roomColor(room.status, room.occupation));
  });
};
// Map (status, occupancy) to a fill colour:
//   booked + occupied      -> red      (#ea4335)
//   booked + empty         -> yellow   (#fbbc05)
//   not booked + occupied  -> green    (#34a853)
//   anything else          -> blue     (#4285f4)
// Occupancy arrives as the strings 'true' / 'false'.
//
// Fix: declared with `var` instead of an implicit-global assignment, which
// throws a ReferenceError in strict mode / ES modules.
var roomColor = function (roomState, roomOccupancy) {
  if (roomState == "booked" && roomOccupancy == 'true') {
    return "#ea4335";
  } else if (roomState == "booked" && roomOccupancy == "false") {
    return "#fbbc05";
  } else if (roomState == "not-booked" && roomOccupancy == "true") {
    return "#34a853";
  } else {
    return "#4285f4";
  }
};
"JavaScript"
] | 1 | JavaScript | thdvbr/schrodingersroom | c0e0055eee1e276d99b2ee6a4a3e875e3e83e629 | 6a4850296bbda7f29c9634d798d4995916d49926 |
refs/heads/master | <file_sep>package com.algaworks.cursojsf2;
import javax.faces.bean.ManagedBean;
/**
 * Backing bean for the addition exercise page: holds the two operands typed
 * by the user and exposes {@link #soma()} as the form action that fills in
 * the read-only result field.
 */
@ManagedBean
public class ExercicioBean {

    // First operand entered on the form.
    private int valor1;
    // Second operand entered on the form.
    private int valor2;
    // Sum computed by soma(); rendered back to the page.
    private int resultado;

    public ExercicioBean() {
    }

    public int getValor1() {
        return valor1;
    }

    public void setValor1(int valor1) {
        this.valor1 = valor1;
    }

    public int getValor2() {
        return valor2;
    }

    public void setValor2(int valor2) {
        this.valor2 = valor2;
    }

    public int getResultado() {
        return resultado;
    }

    public void setResultado(int resultado) {
        this.resultado = resultado;
    }

    /** Form action: stores valor1 + valor2 in resultado. */
    public void soma(){
        resultado = this.getValor1()+ this.getValor2();
    }
}<file_sep>package com.algaworks.cursojsf2.converter;
import java.math.BigDecimal;
import javax.faces.application.FacesMessage;
import javax.faces.component.UIComponent;
import javax.faces.context.FacesContext;
import javax.faces.convert.Converter;
import javax.faces.convert.ConverterException;
import javax.faces.convert.FacesConverter;
/**
 * JSF converter for currency-like input fields. Accepts the textual
 * shortcuts "um milhão" (1,000,000) and "cem mil" (100,000) as well as any
 * plain decimal number, converting them to {@link BigDecimal}.
 */
@FacesConverter("com.algaworks.Valores")
public class ValoresConverter implements Converter {

    /**
     * Converts the submitted string into a {@link BigDecimal}.
     *
     * @return the parsed amount, or {@code null} for empty input
     * @throws ConverterException with a user-facing message when the value
     *         is neither a known shortcut nor a parseable decimal
     */
    @Override
    public Object getAsObject(FacesContext context, UIComponent component, String value) {
        if (value == null || value.isEmpty()) {
            return null;
        }
        if (value.equalsIgnoreCase("um milhão")) {
            return new BigDecimal(1000000);
        }
        if (value.equalsIgnoreCase("cem mil")) {
            return new BigDecimal(100000);
        }
        try {
            return new BigDecimal(value);
        } catch (Exception e) {
            FacesMessage msg = new FacesMessage(FacesMessage.SEVERITY_ERROR,
                    "Valor Incorreto!", "Informe um valor correto.");
            throw new ConverterException(msg);
        }
    }

    /**
     * Renders the model value back to the view.
     *
     * Fix: the original cast the value to String, which threw
     * ClassCastException whenever the model held the BigDecimal produced by
     * {@link #getAsObject}; toString() handles both cases, and null is
     * rendered as an empty string per JSF convention.
     */
    @Override
    public String getAsString(FacesContext context, UIComponent component, Object value) {
        return value == null ? "" : value.toString();
    }
}<file_sep>package com.algaworks.cursojsf2.financeiro.model;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Date;
/**
 * Domain entity for a single financial entry ("lançamento"): an income or
 * expense of a given amount, owed by/to a {@link Pessoa}, with a due date
 * and an optional payment record. Plain serializable POJO with no
 * persistence annotations.
 */
public class Lancamento implements Serializable {

    private static final long serialVersionUID = 1L;

    // Surrogate identifier.
    private Integer codigo;
    // Income or expense, per the TipoLancamento enum.
    private TipoLancamento tipo;
    // Person this entry belongs to.
    private Pessoa pessoa;
    // Free-text description shown in listings.
    private String descricao;
    // Monetary amount.
    private BigDecimal valor;
    // Date the entry falls due.
    private Date dataVencimento;
    // Whether the entry has been settled.
    private boolean pago;
    // Date of settlement; only meaningful when pago is true.
    private Date dataPagamento;

    public Lancamento() {
    }

    public Integer getCodigo() {
        return codigo;
    }

    public void setCodigo(Integer codigo) {
        this.codigo = codigo;
    }

    public TipoLancamento getTipo() {
        return tipo;
    }

    public void setTipo(TipoLancamento tipo) {
        this.tipo = tipo;
    }

    public Pessoa getPessoa() {
        return pessoa;
    }

    public void setPessoa(Pessoa pessoa) {
        this.pessoa = pessoa;
    }

    public String getDescricao() {
        return descricao;
    }

    public void setDescricao(String descricao) {
        this.descricao = descricao;
    }

    public BigDecimal getValor() {
        return valor;
    }

    public void setValor(BigDecimal valor) {
        this.valor = valor;
    }

    public Date getDataVencimento() {
        return dataVencimento;
    }

    public void setDataVencimento(Date dataVencimento) {
        this.dataVencimento = dataVencimento;
    }

    public boolean isPago() {
        return pago;
    }

    public void setPago(boolean pago) {
        this.pago = pago;
    }

    public Date getDataPagamento() {
        return dataPagamento;
    }

    public void setDataPagamento(Date dataPagamento) {
        this.dataPagamento = dataPagamento;
    }
}
<file_sep>#Mon Aug 13 11:41:04 BRT 2018
org.eclipse.core.runtime=2
org.eclipse.platform=4.7.3.v20180330-0640
<file_sep>javax.faces.converter.DateTimeConverter.DATE=Data inválida.
javax.faces.converter.DateTimeConverter.DATE_detail=O campo ''{2}'' não foi informado com uma data válida.
javax.faces.converter.IntegerConverter.INTEGER=Número inválido.
javax.faces.converter.IntegerConverter.INTEGER_detail=O campo ''{2}'' não foi informado com um número válido.
javax.faces.converter.BigDecimalConverter.DECIMAL=Número decimal inválido.
javax.faces.converter.BigDecimalConverter.DECIMAL_detail=O campo ''{2}'' não foi informado com um valor decimal válido.
<file_sep>javax.faces.component.UIInput.REQUIRED=Preencha o campo {0}.
javax.faces.component.DateTimeConverter.DATE={2} inválido.
javax.faces.component.NumberConverter.NUMBER={2} inválido.
javax.faces.validator.DoubleRangeValidator.MINIMUM={1} inválido.<file_sep>package com.algaworks.cursojsf2.financeiro.view;
import java.io.Serializable;
import javax.faces.bean.ManagedBean;
import javax.faces.bean.ViewScoped;
@ManagedBean
@ViewScoped
public class CadastroLancamentoBean implements Serializable {
private static final long serialVersionUID = 1L;
}
<file_sep>package com.algaworks.cursojsf2;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import javax.faces.bean.ManagedBean;
import javax.faces.bean.ViewScoped;
/**
 * View-scoped bean backing the match-tracking page: keeps the Partida
 * currently being edited plus the list of matches added during the view's
 * lifetime.
 */
@ManagedBean(name="AcompanhamentoPartidasBean")
@ViewScoped
public class AcompanhamentoPartidasBean implements Serializable {

    private static final long serialVersionUID = 7208025651315378403L;

    // Match currently being filled in by the form.
    private Partida partida;
    // Matches accumulated so far.
    private List<Partida> partidas;

    public AcompanhamentoPartidasBean() {
        partida = new Partida();
        partidas = new ArrayList<Partida>();
    }

    public Partida getPartida() {
        return partida;
    }

    public List<Partida> getPartidas() {
        return partidas;
    }

    /** Form action: appends the current match and resets the form object. */
    public void incluir() {
        this.partidas.add(this.partida);
        this.partida = new Partida();
    }
}
<file_sep>package com.algaworks.cursojsf2.visao;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import javax.faces.bean.ManagedBean;
import javax.faces.bean.ViewScoped;
import com.algaworks.curojsf2.dominio.Livro;
/**
 * View-scoped bean backing the book-catalogue page: holds the Livro being
 * edited plus the list of books added during the view's lifetime.
 */
@ManagedBean
@ViewScoped
public class CatalogoLivrosBean implements Serializable {

    private static final long serialVersionUID = 1L;

    // Book currently being filled in by the form.
    private Livro livro;
    // Books accumulated so far.
    private List<Livro> livros;

    public CatalogoLivrosBean() {
        this.livro = new Livro();
        this.livros = new ArrayList<Livro>();
    }

    public Livro getLivro() {
        return livro;
    }

    public void setLivro(Livro livro) {
        this.livro = livro;
    }

    public List<Livro> getLivros() {
        return livros;
    }

    public void setLivros(List<Livro> livros) {
        this.livros = livros;
    }

    /** Form action: appends the current book and resets the form object. */
    public void incluirLivro(){
        this.livros.add(this.livro);
        livro = new Livro();
    }
}
"Java",
"INI"
] | 9 | Java | Alexandre-Azvdo/Curso-AlgaWorks-Desenvolvimento-Web-com-JSF-2 | 52900819530294db1524ac70c3bf4fb26116f3f7 | 2e022272bd197693c1848c7843bcfb767a295b86 |
refs/heads/master | <repo_name>Max-Zviagintsev/Z-portfolio-client<file_sep>/src/pages/about-me.jsx
import React from 'react';
import styled from "styled-components";
import background from "../../assets/about-me.jpg";
import {Layout} from "antd";
import NavBar from "../components/NavBar/NavBar";
import FooterComponent from "../components/FooterComponent";
import GlobalStyle from "../shared/css/globalStyles";
import AboutMeComponent from "../components/AboutMeComponent";
import {Helmet} from "react-helmet";
// Full-viewport hero wrapper: fixed background photo behind the nav bar
// and the bio component.
const StyledTop = styled.div`
  background: url(${background}) rgba(28, 37, 44, 1) no-repeat center fixed;
  background-size: cover;
`;

/**
 * /about-me page: Helmet metadata, hero section (nav + bio), footer and the
 * app-wide global styles.
 */
const AboutMe = () => {
    return (
        <Layout>
            <Helmet>
                <meta charSet="utf-8" name="<NAME>." content="About me"/>
                <title>About me</title>
                <link rel="canonical" href="https://z-portfolio.tk/about-me/"/>
            </Helmet>
            <StyledTop>
                <NavBar/>
                <AboutMeComponent/>
            </StyledTop>
            <FooterComponent/>
            <GlobalStyle/>
        </Layout>
    );
};

export default AboutMe;
<file_sep>/wrap-with-provider.js
import React from "react"
import {theme} from "./src/shared/css/theme";
import {ThemeProvider} from "styled-components";
// eslint-disable-next-line react/display-name,react/prop-types
export default ({element}) => {
return (
<ThemeProvider theme={theme}>
{element}
</ThemeProvider>
);
}<file_sep>/src/shared/glabal_variables.js
// Deployment target switch: 'loc' points at the local Lando backend,
// 'prod' at the Pantheon dev environment.
const mode = 'prod'; // 'loc' or 'prod'

const BACKENDS = {
    loc: 'http://zportfolio.lndo.site',
    prod: 'http://dev-z-portfolio.pantheonsite.io',
};

// Base URL of the Drupal backend used throughout the app.
export const URL = mode === 'loc' ? BACKENDS.loc : BACKENDS.prod;
import React, {Component} from 'react';
import Coverflow from 'react-coverflow';
import {URL} from "../../shared/glabal_variables";
import Loader from 'react-loader-spinner';
import styled from "styled-components";
import CurrentProject from "./CurrentProject";
import {Spring, Transition} from 'react-spring/renderprops';
import {IconContext} from "react-icons";
import {IoIosArrowRoundUp} from 'react-icons/io';
import {colorPrimary} from '../../shared/css/theme.js';
// CSS starts
// Centers the loading spinner shown before the project data is ready.
const StyledWrapper = styled.div`
  text-align: center;
`;

// Placeholder area shown until a project cover is clicked.
const HelperWrapper = styled.div`
  text-align: center;
  min-height: 40vh;
`;

// "Click for project details" hint under the arrow icon.
const HelperText = styled.div`
  text-align: center;
  font-family: "Exo 2.0";
  color: ${(props) => props.theme.colorPrimary};
  font-size: 20px;
  text-shadow: ${(props) => props.theme.textShadowOnWhite};
`;
// CSS ends
/**
 * Coverflow carousel of portfolio projects. Receives Gatsby GraphQL data via
 * props.data.allNodeProject and renders one cover image per project; clicking
 * a cover reveals the matching CurrentProject detail pane below.
 */
class PortfolioCoverflow extends Component {
    constructor(props) {
        super(props);
        this.state = {
            currentProjectIndex: null,   // index of the clicked cover
            currentProject: null,        // GraphQL edge of the clicked project
            isLoading: true,             // gates the spinner vs. the carousel
            fetchedData: [],             // allNodeProject edges copied from props
            src: [],
        };
    }

    // Promise wrapper around setState so componentDidMount can await it.
    setStateAsync(state) {
        return new Promise((resolve) => {
            this.setState(state, resolve)
        });
    }

    // Copies the statically-queried project edges into state and clears the
    // loading flag. NOTE(review): the data is already available in props, so
    // the async copy is presumably only for the spinner transition.
    async componentDidMount() {
        await this.setStateAsync({
            fetchedData: this.props.data.allNodeProject.edges,
            isLoading: false
        });
    }

    // Click handler for a cover image: selects that project for the detail pane.
    handleCurrentProject = (item, index) => {
        this.setState({
            currentProjectIndex: index,
            currentProject: item
        });
    };

    render() {
        // Element is created eagerly but only mounted inside the Transition
        // below once currentProject is non-null, so the null prop is never
        // rendered.
        const show = <CurrentProject projectData={this.state.currentProject}/>;
        return (
            // react-coverflow touches `window`, so skip it during SSR.
            !this.state.isLoading && typeof window !== "undefined" ?
                <React.Fragment>
                    <Spring from={{opacity: 0}}
                            to={{opacity: 1}}
                            config={{tension: 10, friction: 10, delay: 500}}>
                        {styles => <div style={styles}>
                            <Coverflow
                                width={960}
                                height={480}
                                displayQuantityOfSide={1}
                                enableHeading={false}
                            >
                                {this.state.fetchedData.map((item, index) =>
                                    <img src={`${URL}${item.node.relationships.field_project_cover.uri.url}`}
                                         alt={'Portfolio item ' + index}
                                         onClick={() => this.handleCurrentProject(item, index)}
                                         key={index}/>
                                )}
                            </Coverflow>
                        </div>}
                    </Spring>
                    {this.state.currentProject != null ?
                        <Transition
                            items={show}
                            from={{opacity: 0}}
                            enter={{opacity: 1}}
                            leave={{opacity: 0}}
                            config={{tension: 10, friction: 10}}>
                            {show => show && (props => <div style={props}>
                                {show}
                                ️</div>)}
                        </Transition>
                        :
                        <Spring from={{opacity: 0}}
                                to={{opacity: 1}}
                                config={{tension: 10, friction: 10, delay: 1000}}>
                            {styles => <HelperWrapper style={styles}>
                                <IconContext.Provider value={{color: colorPrimary, size: '72px'}}>
                                    <IoIosArrowRoundUp/>
                                </IconContext.Provider>
                                <HelperText>Click for project details</HelperText>
                            </HelperWrapper>}
                        </Spring>
                    }
                </React.Fragment>
                :
                <StyledWrapper>
                    <Loader
                        type="Triangle"
                        color={colorPrimary}
                        height="100"
                        width="100"
                    />
                </StyledWrapper>
        );
    }
}
export default PortfolioCoverflow;<file_sep>/src/pages/gallery.jsx
import React from 'react';
import {Layout} from "antd";
import NavBar from "../components/NavBar/NavBar";
import styled from 'styled-components';
import GlobalStyle from '../shared/css/globalStyles';
import GalleryComponent from "../components/GalleryComponent";
import {graphql} from 'gatsby';
import {Spring} from 'react-spring/renderprops';
import FooterComponent from "../components/FooterComponent";
import {colorPrimary, textColorOnWhite} from '../shared/css/theme.js';
import {Helmet} from "react-helmet";
// Page query: fetch every gallery image URL from the Drupal backend.
export const query = graphql`
    query{
        allNodeGallery {
            edges{
                node{
                    relationships{
                        field_gallery_image{
                            uri {
                                url
                            }
                        }
                    }
                }
            }
        }
    }
`;

const {Header, Content} = Layout;

// CSS starts
const StyledHeader = styled(Header)`
  height: 115px;
  background-color: rgba(20, 20, 20, 0.8);
  box-shadow: 0 0 6px rgba(20, 20, 20, 0.9);
`;

const StyledH1 = styled.h1`
  font-family: "Ailerons-Typeface";
  font-size: ${(props) => props.theme.fontSizeHeading};
  text-shadow: ${(props) => props.theme.textShadowOnWhite};
  text-align: center;
  margin: 46px 15px;
`;
// CSS ends

/**
 * /gallery page: animated "My work" heading plus the image grid fed by the
 * GraphQL query above.
 */
const Gallery = ({data}) => {
    return (
        <Layout>
            <Helmet>
                <meta charSet="utf-8" name="Max Z." content="Gallery"/>
                <title>Gallery</title>
                <link rel="canonical" href="https://z-portfolio.tk/gallery/"/>
            </Helmet>
            <StyledHeader>
                <NavBar/>
            </StyledHeader>
            <Content>
                <Spring
                    from={{color: textColorOnWhite}}
                    to={{color:colorPrimary}}
                    config={{tension: 10, friction: 10, delay: 500}}>
                    {props => <StyledH1 style={props}>My work</StyledH1>}
                </Spring>
                <GalleryComponent data={data}/>
            </Content>
            <FooterComponent />
            <GlobalStyle/>
        </Layout>
    );
};
};
export default Gallery;<file_sep>/gatsby-node.js
// Configure babel-plugin-import so antd components are imported piecemeal
// together with their styles (keeps the bundle small).
exports.onCreateBabelConfig = ({ actions }) => {
  actions.setBabelPlugin({
    name: 'babel-plugin-import',
    options: {
      libraryName: 'antd',
      style: true
    }
  })
};

// During Gatsby's server-side HTML build, replace react-coverflow with a
// null loader — presumably because it references browser globals at import
// time, which breaks SSR (standard Gatsby workaround; confirm upstream).
exports.onCreateWebpackConfig = ({ stage, loaders, actions }) => {
  if (stage === "build-html") {
    actions.setWebpackConfig({
      module: {
        rules: [
          {
            test: /react-coverflow/,
            use: loaders.null(),
          },
        ],
      },
    })
  }
};<file_sep>/src/components/PortfolioComponent/CurrentProject.jsx
import React from 'react';
import {Row, Col} from 'antd';
import styled from "styled-components";
import PortfolioGalleryComponent from "./PortfolioGalleryComponent";
import {Spring} from 'react-spring/renderprops';
// CSS starts
// White detail pane shown below the coverflow carousel.
const StyledWrapper = styled.div`
  background-color: white;
  font-family: "Exo 2.0";
  font-size: ${(props) => props.theme.fontSizeRegular};
`;

// Tech-badge column (wraps the StyledTech chips).
const StyledRightCol = styled.div`
  padding: 0 15px 15px 0;
  display: flex;
  flex-wrap: wrap;
`;

// Project-description column.
const StyledLeftCol = styled.div`
  padding: 0 15px 15px 15px;
`;

// Single technology chip.
const StyledTech = styled.div`
  padding: 5px 10px;
  margin: 5px;
  background-color: ${(props) => props.theme.textColorOnWhite};
  color: ${(props) => props.theme.colorPrimary};
  border: 1px solid ${(props) => props.theme.colorPrimary};
  border-radius: 4px;
  box-shadow: 1px 1px 4px rgba(20, 20, 20, 0.4);
`;

// Section heading ("Description" / "Tech" / "Gallery").
const StyledH2 = styled.h1`
  font-family: "Ailerons-Typeface";
  color: ${(props) => props.theme.textColorOnWhite};
  font-size: ${(props) => props.theme.fontSizeSubHeading};
  text-shadow: ${(props) => props.theme.textShadowOnWhite};
  text-align: center;
  margin: 30px 15px;
`;
// CSS ends
const CurrentProject = (props) => {
const usedTech = props.projectData.node.field_used_tech.map(item => <StyledTech key={item}>{item}</StyledTech>);
return (
<StyledWrapper>
<Row>
<Col span={16}>
<Spring
from={{color: 'rgba(90, 90, 90, 1)'}}
to={{color: 'rgba(250, 65, 0, 1)'}}
config={{tension: 10, friction: 60, delay: 1000}}>
{props => <StyledH2 style={props}>Description</StyledH2>}
</Spring>
</Col>
<Col span={8}>
<Spring
from={{color: 'rgba(90, 90, 90, 1)'}}
to={{color: 'rgba(250, 65, 0, 1)'}}
config={{tension: 10, friction: 60, delay: 1000}}>
{props => <StyledH2 style={props}>Tech</StyledH2>}
</Spring>
</Col>
</Row>
<Row>
<Col span={16}>
<StyledLeftCol
dangerouslySetInnerHTML={{__html: props.projectData.node.field_project_description.value}}/>
</Col>
<Col span={8}> <StyledRightCol> {usedTech} </StyledRightCol>
{props.projectData.node.field_project_url !== null ?
<a href={props.projectData.node.field_project_url.uri} target="_blank" rel="noopener noreferrer"> Project link</a>
:
null}
</Col>
</Row>
<Spring
from={{color: 'rgba(90, 90, 90, 1)'}}
to={{color: 'rgba(250, 65, 0, 1)'}}
config={{tension: 10, friction: 60, delay: 1000}}>
{props => <StyledH2 style={props}>Gallery</StyledH2>}
</Spring>
<PortfolioGalleryComponent projectData={props}/>
</StyledWrapper>
);
};
export default CurrentProject;
<file_sep>/src/pages/portfolio.jsx
import React from 'react';
import PortfolioCoverflow from "../components/PortfolioComponent/PortfolioCoverflow";
import {graphql} from "gatsby";
import {Layout} from "antd";
import NavBar from "../components/NavBar/NavBar";
import GlobalStyle from "../shared/css/globalStyles";
import styled from "styled-components";
import {Spring} from 'react-spring/renderprops';
import FooterComponent from "../components/FooterComponent";
import {colorPrimary, textColorOnWhite} from '../shared/css/theme.js';
import {Helmet} from "react-helmet";
// Page query: every project with its description, optional link, tech list,
// cover image and gallery. (field_project_gallery appears twice inside
// relationships in the original query; GraphQL deduplicates it.)
export const query = graphql`
    query {
        allNodeProject {
            edges {
                node {
                    title
                    field_project_description {
                        value
                    }
                    field_project_url {
                        uri
                    }
                    field_used_tech
                    relationships {
                        field_project_gallery {
                            uri{
                                url
                            }
                        }
                        field_project_cover {
                            uri{
                                url
                            }
                        }
                        field_project_gallery {
                            uri{
                                url
                            }
                        }
                    }
                }
            }
        }
    }
`;

const {Header, Content} = Layout;

// CSS starts
const StyledHeader = styled(Header)`
  height: 115px;
  background-color: rgba(20, 20, 20, 0.8);
  box-shadow: 0 0 6px rgba(20, 20, 20, 0.9);
`;

const StyledH1 = styled.h1`
  font-family: "Ailerons-Typeface";
  color: ${(props) => props.theme.textColorOnWhite};
  font-size: ${(props) => props.theme.fontSizeHeading};
  text-shadow: ${(props) => props.theme.textShadowOnWhite};
  text-align: center;
  margin: 46px 15px;
`;
// CSS ends

/**
 * /portfolio page: animated "My Projects" heading plus the project
 * coverflow fed by the GraphQL query above.
 */
const Portfolio = ({data}) => {
    return (
        <Layout>
            <Helmet>
                <meta charSet="utf-8" name="<NAME>." content="Portfolio"/>
                <title>Projects</title>
                <link rel="canonical" href="https://z-portfolio.tk/portfolio/"/>
            </Helmet>
            <StyledHeader>
                <NavBar/>
            </StyledHeader>
            <Content>
                <Spring
                    from={{color: textColorOnWhite}}
                    to={{color: colorPrimary}}
                    config={{tension: 10, friction: 10, delay: 500}}>
                    {props => <StyledH1 style={props}>My Projects</StyledH1>}
                </Spring>
                <PortfolioCoverflow data={data}/>
            </Content>
            <FooterComponent/>
            <GlobalStyle/>
        </Layout>
    );
};

export default Portfolio;
<file_sep>/src/components/MySkillsComponent.jsx
import React, {Component} from 'react';
import {Animated} from "react-animated-css";
import styled from "styled-components";
import {Spring} from 'react-spring/renderprops';
import {Waypoint} from 'react-waypoint';
import {colorPrimary, textColorOnWhite} from "../shared/css/theme";
import SkillsBackground from '../../assets/Skills_Background.png';
import {
IoLogoHtml5,
IoLogoCss3,
IoLogoNodejs,
IoMdLeaf,
IoIosSpeedometer,
IoIosTrendingUp,
IoLogoUsd,
IoIosGitNetwork,
IoIosRocket
} from 'react-icons/io';
import {FaReact, FaDrupal, FaDatabase, FaPhp} from 'react-icons/fa';
// Dark fixed-background section that hosts the skills heading and chip rows.
const SkillsWrapper = styled.div`
  background: url(${SkillsBackground}) rgba(22, 22, 24, 1) no-repeat fixed center;
  background-size: cover;
  padding: 5px 15px 50px 15px;
  min-height: 503px;
`;

// "My skills" heading; its colour is animated via an inline style prop.
const StyledH1 = styled.h1`
  font-family: "Ailerons-Typeface";
  color: ${(props) => props.theme.textColorOnWhite};
  font-size: ${(props) => props.theme.fontSizeHeading};
  text-shadow: ${(props) => props.theme.textShadowOnWhite};
  text-align: center;
  margin: 30px 15px;
`;

// Single skill chip (icon + label).
const StyledTech = styled.div`
  padding: 5px 10px;
  margin: 15px 5px;
  @media(min-width: 540px) {
    margin: 15px;
  }
  background-color: rgba(39, 40, 35, 1);
  color: ${(props) => props.theme.colorPrimary};
  border: 1px solid ${(props) => props.theme.colorPrimary};
  border-radius: 4px;
  box-shadow: 1px 1px 4px rgba(20, 20, 20, 0.4);
  font-family: "Exo 2.0";
  font-size: ${(props) => props.theme.fontSizeRegular};
  display: flex;
  justify-content: center;
  align-items: center;
`;

// Horizontally centered, wrapping row of skill chips.
const SkillRow = styled.div`
  display: flex;
  flex-wrap: wrap;
  justify-content: center;
  align-items: center;
`;
class MySkillsComponent extends Component {
constructor(props) {
super(props);
this.state = {
showMySkills: textColorOnWhite,
showFirstRow: false,
showSecondRow: false,
showThirdRow: false,
showFourthRow: false,
showFifthRow: false
};
}
showMySkills = () => {
this.setState({
showMySkills: colorPrimary
});
};
showFirstRow = () => {
this.setState({
showFirstRow: true
});
};
showSecondRow = () => {
this.setState({
showSecondRow: true
});
};
showThirdRow = () => {
this.setState({
showThirdRow: true
});
};
showFourthRow = () => {
this.setState({
showFourthRow: true
});
};
showFifthRow = () => {
this.setState({
showFifthRow: true
});
};
render() {
return (
<SkillsWrapper>
<Waypoint onEnter={this.showMySkills}/>
<Spring
from={{color: textColorOnWhite}}
to={{color: `${this.state.showMySkills}`}}
config={{tension: 10, friction: 10}}>
{props => <StyledH1 style={props}>My skills</StyledH1>}
</Spring>
<Waypoint onEnter={this.showFirstRow}/>
{this.state.showFirstRow ?
<SkillRow>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech>TypeScript</StyledTech>
</Animated>
</SkillRow> : null
}
<Waypoint onEnter={this.showSecondRow}/>
{this.state.showSecondRow ?
<SkillRow>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <IoLogoHtml5/> HTML</StyledTech>
</Animated>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <IoLogoCss3/> CSS</StyledTech>
</Animated>
</SkillRow> : null
}
<Waypoint onEnter={this.showThirdRow}/>
{this.state.showThirdRow ?
<SkillRow>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <FaReact/> React</StyledTech>
</Animated>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <IoLogoNodejs/> Node.js</StyledTech>
</Animated>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <FaDrupal/> Drupal</StyledTech>
</Animated>
</SkillRow> : null
}
<Waypoint onEnter={this.showFourthRow}/>
{this.state.showFourthRow ?
<SkillRow>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <IoMdLeaf/> MongoDB</StyledTech>
</Animated>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <IoIosSpeedometer/> Express.js</StyledTech>
</Animated>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <FaDatabase/> MySQL</StyledTech>
</Animated>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech><FaPhp/> php</StyledTech>
</Animated>
</SkillRow> : null
}
<Waypoint onEnter={this.showFifthRow}/>
{this.state.showFifthRow ?
<SkillRow>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <IoIosTrendingUp/> SEO</StyledTech>
</Animated>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <IoLogoUsd/> Drupal Commerce</StyledTech>
</Animated>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <IoIosGitNetwork/> GraphQL</StyledTech>
</Animated>
<Animated animationIn="bounceInDown" animationOut="fadeOut" isVisible={true}>
<StyledTech> <IoIosRocket/> Gatsby.js</StyledTech>
</Animated>
</SkillRow> : null
}
</SkillsWrapper>
);
}
}
export default MySkillsComponent;<file_sep>/src/pages/index.jsx
import React from 'react';
import {Layout} from 'antd';
import NavBar from "../components/NavBar/NavBar";
import styled from 'styled-components';
import GlobalStyle from '../shared/css/globalStyles';
import HomeHeroComponent from "../components/HomeHeroComponent";
import background from '../../assets/home_hero.jpg';
import MyTechComponent from "../components/MyTechComponent";
import MySkillsComponent from "../components/MySkillsComponent";
import TestimonialsComponent from "../components/TestimonialsComponent";
import {graphql} from "gatsby";
import FooterComponent from "../components/FooterComponent";
import {Helmet} from "react-helmet";
// Page query: all client testimonials shown in the TestimonialsComponent.
export const query = graphql`
    query {allNodeTestimonials{
        edges{
            node{
                field_client,
                field_testimonial
            }
        }
    }
    }
`;

const {Header, Content} = Layout;

// CSS starts
// Full-height hero header with a fixed background photo.
const StyledHeader = styled(Header)`
  height: 800px;
  background: url(${background}) rgba(15, 36, 39, 1) no-repeat center fixed;
  background-size: cover;
  @media(min-width: 1600px) {
    height: 1080px;
  }
`;
// CSS ends

/**
 * Home page: hero header, tech/skills sections and the testimonials fed by
 * the GraphQL query above.
 */
const Home = ({data}) => {
    return (
        <Layout>
            <Helmet>
                <meta charSet="utf-8" name="Max Z." content="Max Z. Web Developer Portfolio"/>
                <title>Portfolio</title>
                <link rel="canonical" href="https://z-portfolio.tk"/>
            </Helmet>
            <StyledHeader>
                <NavBar/>
                <HomeHeroComponent/>
            </StyledHeader>
            <Content>
                <MyTechComponent/>
                <MySkillsComponent/>
                <TestimonialsComponent data={data}/>
            </Content>
            <FooterComponent/>
            <GlobalStyle/>
        </Layout>
    );
};
export default Home;<file_sep>/src/components/MyTechComponent.jsx
import React, {Component} from 'react';
import styled from "styled-components";
import {Spring} from 'react-spring/renderprops';
import {Waypoint} from 'react-waypoint';
import Loader from 'react-loader-spinner';
import Typist from 'react-typist';
import {colorPrimary, textColorOnWhite} from '../shared/css/theme.js';
import ReactLogo from '../../assets/react_logo_240.png';
import DrupalLogo from '../../assets/drupal_logo_215.png';
import FacebookLogo from '../../assets/Facebook_50.png';
import InstagramLogo from '../../assets/Instagram_50.png';
import PaypalLogo from '../../assets/Paypal_50.png';
import NetflixLogo from '../../assets/Netflix_50.png';
import NYTimesLogo from '../../assets/The-New-York-Times_50.png';
import TeslaLogo from '../../assets/Tesla_43.png';
import NasaLogo from '../../assets/Nasa_55.png';
import NokiaLogo from '../../assets/Nokia_70.png';
import VerizonLogo from '../../assets/Verizon_70.png';
import PinterestLogo from '../../assets/Pinterest_50.png';
// CSS starts
// Section wrapper setting the body font for the "My Tech" block.
const MyTechWrapper = styled.div`
  font-family: "Exo 2.0";
  font-size: ${(props) => props.theme.fontSizeRegular};
`;

// "My Tech" main heading (colour animated via inline style).
const StyledH1 = styled.h1`
  font-family: "Ailerons-Typeface";
  color: ${(props) => props.theme.textColorOnWhite};
  font-size: ${(props) => props.theme.fontSizeHeading};
  text-shadow: ${(props) => props.theme.textShadowOnWhite};
  text-align: center;
  margin: 30px 15px 10px 15px;
`;

// Column headings ("React" / "Drupal").
const StyledH2 = styled.h1`
  font-family: "Ailerons-Typeface";
  color: ${(props) => props.theme.textColorOnWhite};
  font-size: ${(props) => props.theme.fontSizeSubHeading};
  text-shadow: ${(props) => props.theme.textShadowOnWhite};
  text-align: center;
  margin: 30px 15px;
`;

// "Used by:" sub-headings.
const StyledH3 = styled.h1`
  font-family: "Ailerons-Typeface";
  color: ${(props) => props.theme.textColorOnWhite};
  font-size: ${(props) => props.theme.fontSizeH3};
  text-shadow: ${(props) => props.theme.textShadowOnWhite};
  text-align: center;
  margin: 30px 15px;
`;

// Two-column layout that collapses to a single column below 1000px.
const StyledInner = styled.div`
  display: block;
  padding: 0 15px 30px 15px;
  @media(min-width: 1000px) {
    display: flex;
  }
`;

const StyledLeftDiv = styled.div`
  display: flex;
  flex-direction: column;
  flex: 1 1 0;
  align-items: center;
`;

const StyledRightDiv = styled.div`
  display: flex;
  flex-direction: column;
  flex: 1 1 0;
  align-items: center;
`;

const ImageContainerRight = styled.div`
  display: flex;
  justify-content: center;
  margin-bottom: 15px;
`;

const ImageContainerLeft = styled.div`
  display: flex;
  justify-content: center;
  margin-top: 28px;
  margin-bottom: 15px;
`;

// 1px spacer between the two columns; hidden when stacked.
const MiddleDiv = styled.div`
  flex: 1;
  max-width: 1px;
  @media(max-width: 1000px) {
    display: none;
  }
`;

// Bullet line (spinner icon + text) in the feature lists.
const TechRow = styled.div`
  display: flex;
`;

// Wrapping row of "used by" company logos.
const UsedByWrapper = styled.div`
  display: flex;
  flex-wrap: wrap;
  justify-content: center;
  align-items: center;
  margin-bottom: 10px;
`;

const UsedByImage = styled.img`
  margin: 10px;
`;
// CSS ends
class MyTechComponent extends Component {
constructor(props) {
super(props);
this.state = {
showBorder: textColorOnWhite,
showMyTech: textColorOnWhite
};
}
ShowBorder = () => {
this.setState({
showBorder: colorPrimary
});
};
HideBorder = () => {
this.setState({
showBorder: textColorOnWhite
});
};
ShowMyTech = () => {
this.setState({
showMyTech: colorPrimary
});
};
HideMyTech = () => {
this.setState({
showMyTech: textColorOnWhite
});
};
render() {
return (
<MyTechWrapper>
<Waypoint onEnter={this.ShowMyTech}
onLeave={this.HideMyTech}
/>
<Spring
from={{color: textColorOnWhite}}
to={{color: `${this.state.showMyTech}`}}
config={{tension: 10, friction: 10, delay: 500}}>
{props => <StyledH1 style={props}>My Tech</StyledH1>}
</Spring>
<StyledInner>
<StyledLeftDiv>
<StyledH2>React</StyledH2>
<ImageContainerLeft>
<img src={ReactLogo} alt="React logo" width="240px"/>
</ImageContainerLeft>
<StyledH3>Used by:</StyledH3>
<UsedByWrapper>
<a href="https://www.facebook.com/">
<UsedByImage src={FacebookLogo} alt="Facebook logo"
width='50px' height='auto'/></a>
<a href="https://www.instagram.com/?hl=en">
<UsedByImage src={InstagramLogo}
alt="Instagram logo" width='50px'
height='auto'/></a>
<a href="https://www.paypal.com">
<UsedByImage src={PaypalLogo} alt="Paypal logo"
width='50px' height='auto'/></a>
<a href="https://www.nytimes.com/">
<UsedByImage src={NYTimesLogo} alt="New York Times logo"
width='50px' height='auto'/></a>
<a href="https://www.netflix.com">
<UsedByImage src={NetflixLogo} alt="Netflix logo"
width='50px' height='auto'/></a>
</UsedByWrapper>
<Typist cursor={{show: false}} avgTypingDelay={1} stdTypingDelay={1}>
<TechRow><Loader
type="Triangle"
color={colorPrimary}
height="15"
width="15"
/> World №1 JavaScript framework for Front-End development;</TechRow>
<TechRow><Loader
type="Triangle"
color={colorPrimary}
height="15"
width="15"
/> Developed by Facebook;</TechRow>
<TechRow><Loader
type="Triangle"
color={colorPrimary}
height="15"
width="15"
/> Lightning fast Virtual DOM technology;</TechRow>
<TechRow><Loader
type="Triangle"
color={colorPrimary}
height="15"
width="15"
/> Fits any project due to amazing flexibility and tiny ~100Kb size;</TechRow>
<TechRow><Loader
type="Triangle"
color={colorPrimary}
height="15"
width="15"
/> Supports TypeScript for corporate-level code quality and
scalability</TechRow>
</Typist>
</StyledLeftDiv>
<Waypoint onEnter={this.ShowBorder}
onLeave={this.HideBorder}
/>
<Spring
from={{backgroundColor: textColorOnWhite}}
to={{backgroundColor: `${this.state.showBorder}`}}
config={{tension: 10, friction: 10, delay: 500}}>
{props =>
<MiddleDiv style={props}> </MiddleDiv>}
</Spring>
<StyledRightDiv>
<StyledH2>Drupal</StyledH2>
<ImageContainerRight>
<img src={DrupalLogo} alt="Drupal logo" width="215px"/>
</ImageContainerRight>
<StyledH3>Used by:</StyledH3>
<UsedByWrapper>
<a href="https://products.internetservices.verizon.com/">
<UsedByImage src={VerizonLogo}
alt="Verizon logo"
width='70px'
height='auto'/></a>
<a href="https://www.tesla.com/">
<UsedByImage src={TeslaLogo} alt="Tesla logo" width='43px'
height='auto'/></a>
<a href="https://www.nasa.gov/">
<UsedByImage src={NasaLogo} alt="NASA logo" width='50px'
height='auto'/></a>
<a href="https://business.pinterest.com/en">
<UsedByImage src={PinterestLogo}
alt="Pinterest logo" width='50px'
height='auto'/></a>
<a href="https://www.nokia.com">
<UsedByImage src={NokiaLogo} alt="Nokia logo" width='70px'
height='auto'/></a>
</UsedByWrapper>
<Typist cursor={{show: false}} avgTypingDelay={1} stdTypingDelay={1}>
<TechRow><Loader
type="Triangle"
color={colorPrimary}
height="15"
width="15"
/> The most advanced Content Management System;</TechRow>
<TechRow><Loader
type="Triangle"
color={colorPrimary}
height="15"
width="15"
/> Perfect for Headless apps due to API-first philosophy;</TechRow>
<TechRow><Loader
type="Triangle"
color={colorPrimary}
height="15"
width="15"
/> Manage your content for any App using Drupal as a server;</TechRow>
<TechRow><Loader
type="Triangle"
color={colorPrimary}
height="15"
width="15"
/> Corporate-level architecture based on Symfony framework;</TechRow>
<TechRow><Loader
type="Triangle"
color={colorPrimary}
height="15"
width="15"
/> Top-notch performance and security;</TechRow>
</Typist>
</StyledRightDiv>
</StyledInner>
</MyTechWrapper>
);
};
}
export default MyTechComponent;
<file_sep>/src/components/PortfolioComponent/PortfolioGalleryComponent.jsx
import React, {Component} from 'react';
import Gallery from 'react-photo-gallery';
import Lightbox from 'react-images';
import styled from 'styled-components';
import Loader from 'react-loader-spinner';
import Measure from 'react-measure';
import {URL} from "../../shared/glabal_variables";
import {Spring} from 'react-spring/renderprops';
// CSS starts
// Centered container used while the gallery data is still loading
// (wraps the spinner in render()).
const StyledWrapper = styled.div`
text-align: center;
`;
// CSS ends
class PortfolioGalleryComponent extends Component {
constructor() {
super();
this.state = {
isLoading: true,
currentImage: 0,
imageLinks: [],
photos: [],
width: -1
};
}
setStateAsync(state) {
return new Promise((resolve) => {
this.setState(state, resolve)
});
}
openLightbox = (event, obj) => {
this.setState({
currentImage: obj.index,
lightboxIsOpen: true,
});
};
closeLightbox = () => {
this.setState({
currentImage: 0,
lightboxIsOpen: false,
});
};
gotoPrevious = () => {
this.setState({
currentImage: this.state.currentImage - 1,
});
};
gotoNext = () => {
this.setState({
currentImage: this.state.currentImage + 1,
});
};
async setPhotos() {
await this.setStateAsync({
imageLinks: this.props.projectData.projectData.node.relationships.field_project_gallery,
isLoading: false
});
const {imageLinks} = this.state;
const photos = imageLinks.map((item) => {
return (
{src: `${URL}${item.uri.url}`, width: 2, height: 1}
);
});
this.setState({photos: photos});
}
componentDidMount() {
this.setPhotos();
}
componentDidUpdate(prevProps) {
if (this.props !== prevProps) {
this.setState({isLoading: true});
this.setPhotos();
}
}
render = () => {
const {photos} = this.state;
const width = this.state.width;
return (
!this.state.isLoading ?
<Spring from={{opacity: 0}}
to={{opacity: 1}}
config={{tension: 10, friction: 10, delay: 1000}}>
{styles => <div style={styles}>
<Measure bounds onResize={(contentRect) => this.setState({width: contentRect.bounds.width})}>
{
({measureRef}) => {
if (width < 1) {
return <div ref={measureRef}></div>;
}
let columns = 1;
if (width >= 780) {
columns = 2;
}
return <div ref={measureRef}><Gallery photos={photos} columns={columns}
onClick={this.openLightbox}/>
<Lightbox images={photos}
onClose={this.closeLightbox}
onClickPrev={this.gotoPrevious}
onClickNext={this.gotoNext}
currentImage={this.state.currentImage}
isOpen={this.state.lightboxIsOpen}
/></div>
}
}
</Measure>
</div>}
</Spring>
:
<StyledWrapper>
<Loader
type="Triangle"
color="rgba(250, 65, 0, 1)"
height="100"
width="100"
/>
</StyledWrapper>
);
}
}
export default PortfolioGalleryComponent;
<file_sep>/gatsby-browser.js
import wrapWithProvider from "./wrap-with-provider"
import "./src/shared/css/animate.min.css"
export const wrapRootElement = wrapWithProvider<file_sep>/src/components/AboutMeComponent.jsx
import React from 'react';
import {Spring} from 'react-spring/renderprops';
import styled from "styled-components";
import portrait from "../../assets/cyb.jpg";
// Styled-components for the About Me card. The negative margin-top pulls
// the card up over the hero section above it.
const Wrapper = styled.div`
display: flex;
justify-content: center;
align-items: center;
margin-top: -150px;
margin-bottom: 80px;
`;
// Semi-transparent dark panel; its background colour is animated in by the
// Spring inside AboutMeComponent.
const Background = styled.div`
max-width: 800px;
padding: 15px;
@media(min-width: 800px){
padding: 25px;
}
border-radius: 5px;
`;
const Inner = styled.div`
text-shadow: ${(props) => props.theme.textShadow};
font-family: "Exo 2.0";
font-size: ${(props) => props.theme.fontSizeRegular};
color: white;
text-align: justify;
`;
// Header row: portrait on the left, intro lines centered on the right.
const TopWrapper = styled.div`
display: flex;
`;
const TopInner = styled.div`
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
text-align: center;
width: 100%;
font-family: "Ailerons-Typeface";
color: ${(props) => props.theme.colorPrimary};
font-size: 20px;
@media(min-width: 678px) {
font-size: 24px;
}
text-shadow: ${(props) => props.theme.textShadowOnWhite};
`;
const Portrait = styled.img`
margin-right: 15px;
`;
// Plain paragraph; the commented-out rule below is intentionally disabled.
const Text = styled.p`
//display: inline-flex;
`;
// About Me card: fades in a dark background panel, then fades in the text
// content on top of it (two nested react-spring Springs).
// NOTE: both Spring render-props use the name `styles`; the inner one
// shadows the outer — each is applied to its own element, so this works.
const AboutMeComponent = () => {
  return (
    <Wrapper>
      <Spring from={{backgroundColor: 'rgba(20, 20, 20, 0)'}}
              to={{backgroundColor: 'rgba(20, 20, 20, 0.6)'}}
              config={{tension: 10, friction: 20, delay: 1000}}>
        {styles => <Background style={styles}>
          <Spring
            from={{opacity: 0}}
            to={{opacity: 1}}
            config={{tension: 10, friction: 15, delay: 1000}}>
            {styles => <Inner style={styles}>
              <TopWrapper>
                <Portrait src={portrait} alt="MaxZ"
                          width='128px' height='144px'/>
                <TopInner>
                  <p>My name is Max</p>
                  <p>I create beautiful websites and apps</p>
                </TopInner>
              </TopWrapper>
              <Text>My primary role in the projects in which I participated in recent years is a React
                Engineer.
                I build any kinds of React apps from small SPA to large and complex apps with
                Redux-based complicated data structure.
                I fell in love with Typescript, so I use it as my main programming language.
                I'm using Redux-Saga as a middleware to send requests to the server simultaneously and
                control the dispatched actions.
                I’ve also worked with all popular UI Frameworks. My personal choice is Ant Design
                because of the most powerful API,
                the largest variety of components and regular updates.</Text>
              <Text> If you need a website or app, you probably want a Content Management System to
                edit your pages, products, articles and blogs without touching a code, otherwise you
                have to hire a developer for managing your content.</Text>
              <Text>After the years of experience with popular CMS including Wordpress and Joomla I found
                out that Drupal is the only system that allows client to conveniently manage a content
                without the risk of touching a Front-End or Logical layers. Drupal 8 is based on
                API-first philosophy and perfectly fits into the modern “headless” solutions where you
                have a single source of content for all websites and apps including mobile and
                desktop.</Text>
              <Text>Drupal gives a corporate-level quality and security just as is. No need to buy a
                premium theme and a dozen of premium plugins to make a professional website. It is
                developer-friendly and amazingly flexible. If you need something more complex than a
                personal blog or brochure and expect a heavy traffic load, there is no alternative to
                Drupal.</Text>
              <Text>I have a visual designer partner, so together we are able to complete both programming
                and graphics.</Text>
              <Text>In the past I held leading positions in local and international world-known companies
                for more than 10 years but I've decided to work on my own, so I'm looking forward to the
                interesting and creative projects.</Text>
            </Inner>}
          </Spring>
        </Background>
        }
      </Spring>
    </Wrapper>
  );
};
export default AboutMeComponent;
<file_sep>/src/components/TestimonialsComponent.jsx
import React, {Component} from 'react';
import styled from "styled-components";
import Slider from 'react-animated-slider';
import 'react-animated-slider/build/vertical.css';
import Loader from "react-loader-spinner";
import {colorPrimary, textColorOnWhite} from "../shared/css/theme";
import {Waypoint} from "react-waypoint";
import {Spring} from 'react-spring/renderprops';
import Interface from '../../assets/interface.gif';
// CSS starts
// Styled-components for the Testimonials section.
// Centered container for the loading spinner.
const StyledWrapper = styled.div`
text-align: center;
`;
const StyledH1 = styled.h1`
font-family: "Ailerons-Typeface";
color: ${(props) => props.theme.textColorOnWhite};
font-size: ${(props) => props.theme.fontSizeHeading};
text-shadow: ${(props) => props.theme.textShadowOnWhite};
text-align: center;
margin: 30px 15px;
`;
// Screenshot and slider stack vertically on narrow screens, sit side by
// side from 700px up.
const TestimonialsWrapper = styled.div`
display: block;
@media(min-width: 700px) {
display: flex;
justify-content: center;
align-items: center;
}
`;
// Animated interface screenshot shown next to the testimonials.
const StyledInterface = styled.img`
box-shadow: 1px 1px 15px rgba(90, 90, 90, 1);
max-width: 655px;
`;
const TestimonialsInner = styled.div`
font-family: "Exo 2.0";
font-size: ${(props) => props.theme.fontSizeRegular};
height: 100%;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
`;
const TestimonialsItem = styled.div`
flex: 1 1 0;
display: flex;
justify-content: center;
align-items: center;
margin: 15px;
`;
const Testimonial = styled.div`
font-family: "Exo 2.0, italic";
max-width: 650px;
text-align: justify;
`;
// Client attribution line, right-aligned under each testimonial.
const Client = styled.div`
font-family: "Exo 2.0, bold italic";
max-width: 650px;
margin-top: 10px;
text-align: right;
`;
// CSS ends
class TestimonialsComponent extends Component {
constructor(props) {
super(props);
this.state = {
fetchedData: [],
isLoading: true,
showHeading: textColorOnWhite
};
}
async componentDidMount() {
await this.setState({
fetchedData: this.props.data.allNodeTestimonials.edges,
isLoading: false
});
}
showHeading = () => {
this.setState({
showHeading: colorPrimary
});
};
hideHeading = () => {
this.setState({
showHeading: textColorOnWhite
});
};
render() {
return (
!this.state.isLoading ?
<React.Fragment>
<Waypoint onEnter={this.showHeading} onLeave={this.hideHeading}/>
<Spring
from={{color: textColorOnWhite}}
to={{color: `${this.state.showHeading}`}}
config={{tension: 10, friction: 10, delay: 500}}>
{props => <StyledH1 style={props}>Testimonials</StyledH1>}
</Spring>
<TestimonialsWrapper>
<TestimonialsItem>
<StyledInterface src={Interface} alt="Interface" width="100%"/>
</TestimonialsItem>
<TestimonialsItem>
<Slider direction="vertical">
{this.state.fetchedData.slice(0).reverse().map((item, index) => (
<TestimonialsInner key={index}>
<Testimonial>{item.node.field_testimonial}
<Client>{item.node.field_client}</Client>
</Testimonial>
</TestimonialsInner>
))}
</Slider>
</TestimonialsItem>
</TestimonialsWrapper>
</React.Fragment>
:
<StyledWrapper>
<Loader
type="Triangle"
color={colorPrimary}
height="100"
width="100"
/>
</StyledWrapper>
);
}
}
export default TestimonialsComponent;<file_sep>/src/components/HomeHeroComponent.jsx
import React from 'react';
import styled from 'styled-components';
import {Spring} from 'react-spring/renderprops';
// Hero text overlay, absolutely positioned over the home-page background;
// font size and position step up with viewport width.
const Hero = styled.div`
font-family: "Ailerons-Typeface";
color: white;
font-size: 24px;
line-height:30px;
text-shadow: ${(props) => props.theme.textShadow};
position: absolute;
top: 45%;
left: 8px;
@media (min-width: 456px) {
font-size: 28px;
}
@media (min-width: 512px) {
font-size: 32px;
}
@media (min-width: 626px) {
font-size: ${(props) => props.theme.fontSizeHeading};
}
@media (min-width: 1400px) {
top: 45%;
left: 18%;
}
`;
// Rounded panel behind the hero lines; its background colour is animated
// in by the Spring inside HomeHeroComponent.
const HeroText = styled.div`
border-radius: 5px;
max-width: 630px;
padding: 10px 10px 1px 10px;
@media (min-width: 456px) {
padding: 15px 15px 1px 15px;
}
@media (min-width: 626px) {
padding: 20px 20px 1px 20px;
}
@media (min-width: 1400px) {
padding: 30px 30px 1px 30px;
}
`;
// Home hero: fades in a dark panel, then fades in three greeting lines one
// after another with staggered delays.
// NOTE(review): `<NAME>` below looks like a redaction placeholder left by a
// data-anonymisation pass (JSX would treat it as a component) — restore the
// original name before building.
// NOTE: each nested Spring's render-prop is also called `styles`, shadowing
// the outer one; each is applied to its own element.
const HomeHeroComponent = () => {
  return (
    <Hero>
      <Spring from={{backgroundColor: 'rgba(20, 20, 20, 0)'}}
              to={{backgroundColor: 'rgba(20, 20, 20, 0.6)'}}
              config={{tension: 10, friction: 10}}>
        {styles => <HeroText style={styles}>
          <Spring from={{opacity: 0}}
                  to={{opacity: 1}}
                  config={{tension: 10, friction: 20, delay: 1000}}>
            {styles => <p style={styles}>Hi, I'm <NAME>.</p>}
          </Spring>
          <Spring from={{opacity: 0}} to={{opacity: 1}}
                  config={{tension: 10, friction: 20, delay: 2500}}>
            {styles => <p style={styles}>I'm a Web Developer</p>}
          </Spring>
          <Spring from={{opacity: 0}} to={{opacity: 1}}
                  config={{tension: 10, friction: 20, delay: 4000}}>
            {styles => <p style={styles}>Obsessed With Technology</p>}
          </Spring>
        </HeroText>}
      </Spring>
    </Hero>
  );
};
export default HomeHeroComponent;
| c00b1491311e4594e5a246db8c4c1d98fe372e9f | [
"JavaScript"
] | 16 | JavaScript | Max-Zviagintsev/Z-portfolio-client | f42cec5d905532d4d22fed77fc82e85049db7262 | 73c2c20b0e4813770a4f35d98ba101fcb360567c |
refs/heads/master | <file_sep>using System;
namespace Part_5___Hurricane_Project
{
class Program
{
static void Main(string[] args)
{
int hur_cat;
Console.WriteLine("Please enter a category of hurricane form 1-5.");
hur_cat switch (1)
{
};
}
}
}
| 44ee2ee405b813919ab11b2cc28f18900671e2a3 | [
"C#"
] | 1 | C# | NAGRich2347/Part-5---Hurricane-Project | 84f45c050f94528bf1d93335c95e75f96d42458d | c9e9734713f0f69689170119183fdf996d3a5cd0 |
refs/heads/master | <file_sep># Fast-Classification-Network
Implementation of the Fast Classification Network as described in
><NAME>. & <NAME>. (2002) 21: 207. https://doi.org/10.1007/s00034-002-2007-7
<file_sep>import matplotlib.pyplot as plt
import numpy as np
from FastClassificationNetwork import FCNetwork
WINDOW_SIZE = 4
SAMPLE_SIZE = 450
TEST_SIZE = 50
A = 1.4
B = 0.3
def HenonMap(xm1, xm2):
return 1. - A*(xm1**2.) + B*(xm2**2.)
#Get points of data
dataPoints = np.empty(WINDOW_SIZE + SAMPLE_SIZE + TEST_SIZE)
#Set initial conditions
dataPoints[0] = 0.3
dataPoints[1] = 0.1
for i in range(2, WINDOW_SIZE + SAMPLE_SIZE + TEST_SIZE):
dataPoints[i] = HenonMap(dataPoints[i-1], dataPoints[i-2])
#Assign data points to our sample and test arrays
inputSamp = np.empty((SAMPLE_SIZE, WINDOW_SIZE))
outputSamp = np.empty((SAMPLE_SIZE,1))
testData = np.empty((TEST_SIZE, WINDOW_SIZE))
testOut = np.empty(TEST_SIZE)
for i in range(0, SAMPLE_SIZE + WINDOW_SIZE + TEST_SIZE):
#Assign sample data
if i < SAMPLE_SIZE:
for j in range(0,WINDOW_SIZE):
inputSamp[i][j] = dataPoints[i+j]
if i >= WINDOW_SIZE and i < SAMPLE_SIZE+WINDOW_SIZE:
outputSamp[i-WINDOW_SIZE][0] = dataPoints[i]
#Assign test data
if i >= SAMPLE_SIZE and i < SAMPLE_SIZE + TEST_SIZE:
for j in range(0,WINDOW_SIZE):
testData[i-SAMPLE_SIZE][j] = dataPoints[i+j]
if i >= SAMPLE_SIZE + WINDOW_SIZE:
testOut[i-SAMPLE_SIZE-WINDOW_SIZE] = dataPoints[i]
#Create network and get network test output
henonNetwork = FCNetwork(inputSamp, outputSamp, 5)
netOut = np.empty(TEST_SIZE)
error = 0
for i in range(0, TEST_SIZE):
netOut[i] = henonNetwork.feedForward(testData[i])
error = (netOut[i]-testOut[i])**2
print("error = {}".format(error))
#Plot predicted and actual output
# plt.scatter(dataPoints[1:WINDOW_SIZE + SAMPLE_SIZE + TEST_SIZE],dataPoints[0:WINDOW_SIZE + SAMPLE_SIZE + TEST_SIZE-1])
plt.plot(np.arange(0,TEST_SIZE), netOut, color='r', marker='o');
plt.plot(np.arange(0,TEST_SIZE), testOut, color='b', marker='o');
plt.show()
<file_sep>import numpy as np
import sys
class FCNetwork:
    """Fast Classification Network: a one-pass, training-free classifier.

    Every training sample becomes one hidden node whose input weights are
    the sample itself.  A node "fires" (activation 0) when a query falls
    within its radius of generalization — half the distance to the nearest
    other training sample — in which case that node's stored target is
    returned exactly (1-NN).  Otherwise the k nearest nodes are blended
    with inverse-distance (triangular) membership grades.
    """

    def _euclidDistance(self, point1, point2):
        """Return the Euclidean distance between two equal-length vectors.

        Raises ValueError if the vectors have different dimensions.
        """
        if(len(point1) != len(point2)):
            raise ValueError("point1 and point2 must have the same dimensions (point1={}, point2={})".format(point1, point2))
        accum = 0
        for i in range(0, len(point1)):
            accum += (point1[i]-point2[i])**2
        return accum**0.5

    def _triangularMembership(self, distances, k):
        """Return membership grades (summing to 1) over all hidden nodes.

        Only the k smallest distances get non-zero grades, proportional to
        1/distance.  Callers must route zero distances through the 1-NN
        path first, otherwise this divides by zero.
        """
        # Find the closest k distances.
        kNearest = np.argsort(distances)[:k]
        # Denominator of the triangular membership function.
        denom = np.sum(1. / np.array([distances[i] for i in kNearest]))
        # Compute the membership grades.
        memberGrades = np.empty(len(distances))
        for i in range(0, len(distances)):
            memberGrades[i] = (1./distances[i])/denom if i in kNearest else 0
        gradeTotal = np.sum(memberGrades)
        assert (gradeTotal > 0.999 and gradeTotal < 1.001), "The sum of the memberGrades ({}) is not equal to 1.0.".format(gradeTotal)
        return memberGrades

    def __init__(self, trainIn, trainOut, k):
        """Build the network directly from training data (no iteration).

        trainIn  -- sequence of input vectors (one hidden node each)
        trainOut -- sequence of target vectors, same length as trainIn
        k        -- number of neighbours used by the membership blend

        Raises ValueError for empty or mismatched training data.
        """
        # TODO: Filter out duplicate samples
        self.hiddenLength = len(trainIn)
        if self.hiddenLength == 0:
            # BUG FIX: this previously raised NameError ("valueError").
            raise ValueError("There must be atleast one sample")
        if self.hiddenLength != len(trainOut):
            raise ValueError("Input length must equal output length")
        self.outLength = len(trainOut[0])
        self.inWeights = trainIn
        self.outWeights = np.array(trainOut)
        self.k = k
        self.radGen = np.full(self.hiddenLength, sys.maxsize, dtype=np.float32)
        # Radius of generalization of each node: half the distance to the
        # closest other training sample.
        for i in range(0, self.hiddenLength-1):
            for j in range(i+1, self.hiddenLength):
                dis = self._euclidDistance(trainIn[i], trainIn[j])/2
                if dis < self.radGen[i]:
                    self.radGen[i] = dis
                if dis < self.radGen[j]:
                    self.radGen[j] = dis
        # Activation: 0 inside a node's radius, otherwise the raw distance.
        self._actFunc = np.vectorize(lambda d, r: 0 if d <= r else d)

    def feedForward(self, input):
        """Return the network's output vector (length outLength) for one input."""
        d = [self._euclidDistance(self.inWeights[i], input) for i in range(0, self.hiddenLength)]
        h = self._actFunc(d, self.radGen)
        # A zero activation means the query is inside some node's radius.
        zInd = np.where(h == 0)[0]
        m = len(zInd)
        mu = np.empty(self.hiddenLength)
        if m >= 1:
            # 1-NN: copy the matching node's stored target exactly.
            mu.fill(0)
            mu[zInd[0]] = 1
        else:
            # k-NN inverse-distance blend of the stored targets.
            mu = self._triangularMembership(d, self.k)
        return np.array([np.dot(mu, self.outWeights[:,i]) for i in range(0, self.outLength)])
<file_sep>import numpy as np
from FastClassificationNetwork import FCNetwork
import matplotlib.pyplot as plt
def printPattern(pattern):
    """Render a 2-D grid of numeric cells to stdout, one text row per line.

    Each cell is rounded first: 0 prints as ' ', 1 prints as '#', and any
    other value prints as '!'.
    """
    for row in pattern:
        symbols = []
        for cell in row:
            rounded = round(cell)
            if rounded == 0:
                symbols.append(" ")
            elif rounded == 1:
                symbols.append("#")
            else:
                symbols.append("!")
        print("".join(symbols))
def createTrainingSamples(pattern, numberOfSamples):
    """Randomly sample `numberOfSamples` distinct (x, y) cells from a grid.

    pattern         -- 2-D grid of class labels (rows indexed by y)
    numberOfSamples -- how many distinct coordinates to draw

    Returns (trainingSamples, outputClass): an (n, 2) array of [x, y]
    coordinates and an (n, 1) array of the labels at those cells.

    FIXES: the grid size was hard-coded to 16x16 — it is now taken from
    `pattern` itself (backward compatible for 16x16 grids); and the
    duplicate check compared candidates against a *uninitialized*
    np.empty array, which could spuriously skip coordinates — a set of
    already-chosen cells is used instead.
    """
    height = len(pattern)
    width = len(pattern[0])
    trainingSamples = np.empty([numberOfSamples, 2])
    outputClass = np.empty([numberOfSamples, 1])
    chosen = set()  # coordinates drawn so far, to prevent duplicates
    s = 0
    while s < numberOfSamples:
        # Randomly choose a coordinate inside the grid.
        x = np.random.randint(width)
        y = np.random.randint(height)
        if (x, y) in chosen:
            continue
        chosen.add((x, y))
        trainingSamples[s] = [x, y]
        outputClass[s] = pattern[y][x]
        s += 1
    return (trainingSamples, outputClass)
#Create Input and output data
# 16x16 binary spiral pattern; 1 = inside the spiral, 0 = background.
spiral = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
          [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
          [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
          [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
          [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1],
          [0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
          [0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1],
          [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1],
          [0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
          [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
          [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
printPattern(spiral)
# Draw 32 distinct random cells from the pattern as labelled training data.
(inputSamp, outputSamp) = createTrainingSamples(spiral, 32)
#Create network
spiralNetwork = FCNetwork(inputSamp, outputSamp, 3)
#Record predicted output and error
# Query the network at every grid cell and accumulate the squared error
# against the true pattern.
result = np.empty([16,16])
error = 0
for y in range(0,16):
    for x in range(0, 16):
        result[y,x] = spiralNetwork.feedForward([x,y])[0]
        error += (result[y,x] - spiral[y][x])**2
printPattern(result)
# Mean squared error over all 256 cells.
print("error = {}".format(error/(16**2)))
#Display results and matplotlib
# Split training samples by class so they can be colour-coded on the plot.
oneSamps = np.array([inputSamp[i] for i in np.where(outputSamp==1)[0]])
zeroSamps = np.array([inputSamp[i] for i in np.where(outputSamp==0)[0]])
# Left axis: network prediction with the training points overlaid;
# right axis: the ground-truth spiral, on the same colour scale.
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='all', sharey='all')
im1 = ax1.imshow(result, cmap=plt.get_cmap('hot'), interpolation='bilinear', vmin=np.amin(result), vmax=np.amax(result))
im2 = ax2.imshow(spiral, cmap=plt.get_cmap('hot'), interpolation='bilinear', vmin=np.amin(result), vmax=np.amax(result))
ax1.scatter(oneSamps[:,0],oneSamps[:,1])
ax1.scatter(zeroSamps[:,0],zeroSamps[:,1], c='r')
plt.show()
| e4813c32fb4809e1a1b49577448cc039e64621fd | [
"Markdown",
"Python"
] | 4 | Markdown | nmemmott/Fast-Classification-Network | 51095f9fdbc140234763c87503a47facab4506dd | 3542fcc9c43496af8a865c6ddad00398faf87049 |
refs/heads/master | <file_sep>package com.samrt.carnavigation.Activity;
import android.app.AlertDialog;
import android.content.DialogInterface;
import android.content.Intent;
import android.os.Bundle;
import android.os.Handler;
import android.os.Message;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import android.webkit.WebSettings;
import android.webkit.WebView;
import android.webkit.WebViewClient;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
import android.widget.Toast;
import com.samrt.carnavigation.R;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.HttpClient;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;
import java.util.ArrayList;
import java.util.List;
import static com.samrt.carnavigation.Utils.Constants.loginUrl;
/**
 * "Personal info" screen: shows a WebView with the user's information,
 * plus Back / Register / Login buttons. Login opens a dialog, POSTs the
 * credentials to the server on a background thread, and renders the HTML
 * response into the WebView via a Handler on the UI thread.
 *
 * NOTE(review): "<PASSWORD>" below is a redaction placeholder left by a
 * data-anonymisation pass — it should be the `pass` variable read from
 * the dialog; restore before building.
 */
public class PersonalInfo extends AppCompatActivity {
    private Button btnBack;
    private Button btnRegister;
    private Button btnLogin;
    private WebView PresonInfoWV;     // (sic) WebView showing the server response
    private WebSettings webSettings;
    private TextView notice;          // placeholder text hidden once content loads
    HttpClient httpClient;
    // Receives the server's HTML (msg.obj) from the worker thread and
    // renders it on the UI thread.
    Handler handler = new Handler()
    {
        public void handleMessage(Message msg)
        {
            String msgInfo = (String) msg.obj;
            PresonInfoWV.loadDataWithBaseURL(null, msgInfo, "text/html", "utf-8", null);
            notice.setVisibility(View.GONE);
        }
    };

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_personal_info);
        setTitle("您的信息");
        init();
    }

    // Wires up the views, configures the WebView, and attaches the three
    // button listeners.
    private void init() {
        notice = (TextView) findViewById(R.id.notice);
        btnBack = (Button) findViewById(R.id.btnBack);
        btnRegister = (Button) findViewById(R.id.btnRegister);
        btnLogin = (Button) findViewById(R.id.btnLogin);
        PresonInfoWV = (WebView) findViewById(R.id.PresonInfoWV);
        webSettings = PresonInfoWV.getSettings();
        // Enable JavaScript
        webSettings.setJavaScriptEnabled(true);
        // Allow zooming
        webSettings.setSupportZoom(true);
        // Show the built-in zoom controls
        webSettings.setBuiltInZoomControls(true);
        // Use wide-viewport scaling
        webSettings.setUseWideViewPort(true);
        // Fit content to the screen
        //webSettings.setLayoutAlgorithm(WebSettings.LayoutAlgorithm.SINGLE_COLUMN);
        webSettings.setLoadWithOverviewMode(true);
        // Keep link navigation inside this WebView instead of the browser.
        PresonInfoWV.setWebViewClient(new WebViewClient() {
            @Override
            public boolean shouldOverrideUrlLoading(WebView view, String url) {
                view.loadUrl(url);
                return true;
            }
        });
        httpClient = new DefaultHttpClient();

        btnBack.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                finish();
            }
        });

        btnLogin.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) { // logs in against the server
                Toast.makeText(PersonalInfo.this, "登陆", Toast.LENGTH_SHORT).show();
                // Inflate the login form
                final View loginDialog = getLayoutInflater().inflate(
                        R.layout.login, null);
                // Show a dialog so the user can log in
                new AlertDialog.Builder(PersonalInfo.this)
                        .setTitle("登录系统")
                        .setView(loginDialog)
                        .setPositiveButton("确定",
                                new DialogInterface.OnClickListener()
                                {
                                    @Override
                                    public void onClick(DialogInterface dialog,
                                                        int which)
                                    {
                                        // Read the user name and password entered by the user
                                        final String name = ((EditText) loginDialog
                                                .findViewById(R.id.name)).getText()
                                                .toString();
                                        final String pass = ((EditText) loginDialog
                                                .findViewById(R.id.pass)).getText()
                                                .toString();
                                        // Network I/O must not run on the UI thread.
                                        new Thread() {
                                            @Override
                                            public void run()
                                            {
                                                try
                                                {
                                                    HttpPost post = new HttpPost(loginUrl);
                                                    // Wrap the request parameters as name/value pairs
                                                    List<NameValuePair> params = new ArrayList<>();
                                                    params.add(new BasicNameValuePair
                                                            ("name", name));
                                                    params.add(new BasicNameValuePair
                                                            ("password", <PASSWORD>));
                                                    // Set the POST body
                                                    post.setEntity(new UrlEncodedFormEntity(
                                                            params, "utf-8"));
                                                    // Send the POST request
                                                    HttpResponse response = httpClient
                                                            .execute(post);
                                                    // If the server responded successfully
                                                    if (response.getStatusLine()
                                                            .getStatusCode() == 200)
                                                    {
                                                        String sInfo = EntityUtils
                                                                .toString(response.getEntity());
                                                        // Hand the HTML to the UI thread for display.
                                                        Message msg = handler.obtainMessage();
                                                        msg.obj = sInfo;
                                                        handler.sendMessage(msg);
                                                        // login succeeded
                                                    }
                                                }
                                                catch (Exception e)
                                                {
                                                    e.printStackTrace();
                                                }
                                            }
                                        }.start();
                                    }
                                }).setNegativeButton("取消", null).show();
            }
        });

        btnRegister.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                // Close this screen and open the registration screen.
                finish();
                Intent intent = new Intent(PersonalInfo.this, PersonalInfoRegister.class);
                startActivity(intent);
            }
        });
    }
}
<file_sep>package com.samrt.carnavigation.Beans;
/**
* Created by xu on 2016/8/26.
*/
/**
 * Simple data holder for a person's profile.
 *
 * FIX: the original declared three private fields with no constructor or
 * accessors, so the class was unusable by callers; standard getters and
 * setters were added (backward compatible — the implicit no-arg
 * constructor is preserved).
 */
public class PersonBean {
    private String name; // person's name
    private String sex;  // person's sex
    private int age;     // person's age in years

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getSex() {
        return sex;
    }

    public void setSex(String sex) {
        this.sex = sex;
    }

    public int getAge() {
        return age;
    }

    public void setAge(int age) {
        this.age = age;
    }
}
<file_sep>package com.samrt.carnavigation.Utils;
import android.database.Cursor;
import android.net.Uri;
import android.provider.MediaStore;
import com.samrt.carnavigation.Beans.MusicBean;
import java.util.ArrayList;
/**
* Created by xu on 2016/8/13.
*/
/**
 * App-wide shared state and constants: music-player command codes, the
 * media-store query description, the shared cursor/position for playback,
 * and the personal-center server endpoints.
 */
public class Constants {
    // Music control actions (sent to MusicPlayService in the "action" extra)
    public static final int PAUSE_ACTION = 0x00;
    public static final int PREVIOUS_ACTION = 0x10;
    public static final int NEXT_ACTION = 0x01;
    public static final int PLAY_ACTION_APP = 0x11;  // play triggered at app level
    public static final int PLAY_ACTION_ITEM = 0x12; // play triggered from a list item
    // Columns to query from the media store
    public static String[] cursorCols = new String[]{
            MediaStore.Audio.Media.TITLE,     // track title
            MediaStore.Audio.Media.ARTIST,    // artist
            MediaStore.Audio.Media.SIZE,      // file size
            MediaStore.Audio.Media.DURATION,  // duration
            MediaStore.Audio.Media.MIME_TYPE, // MIME type
            MediaStore.Audio.Media.DATA       // file path
    };
    public static Uri uri = MediaStore.Audio.Media.EXTERNAL_CONTENT_URI;
    public static ArrayList<MusicBean> mbList ;
    public static Cursor cursor;   // query result set over the local music
    public static int currentPos;  // row index in `cursor` of the current track
    // Personal-center server endpoints (hard-coded LAN address)
    public static String loginUrl = "http://192.168.1.33:8080/CarNavigationServer/PersonLoginSlv";
    public static String registerUrl = "http://192.168.1.33:8080/CarNavigationServer/jsp/RegisterJsp.jsp";
}
<file_sep>package com.samrt.carnavigation.Net;
/**
* Created by xu on 2016/8/26.
*/
// Placeholder for a custom HTTP client wrapper; not yet implemented.
public class MyHttpClient{
    public MyHttpClient(){
    }
}
<file_sep>package com.samrt.carnavigation.Service;
import android.app.Service;
import android.content.Intent;
import android.media.MediaPlayer;
import android.net.Uri;
import android.os.Bundle;
import android.os.IBinder;
import android.os.Message;
import android.provider.MediaStore;
import android.util.Log;
import android.widget.Toast;
import com.samrt.carnavigation.Activity.MusicActivity;
import java.io.IOException;
import java.util.Timer;
import java.util.TimerTask;
import static com.samrt.carnavigation.Utils.Constants.NEXT_ACTION;
import static com.samrt.carnavigation.Utils.Constants.PAUSE_ACTION;
import static com.samrt.carnavigation.Utils.Constants.PLAY_ACTION_APP;
import static com.samrt.carnavigation.Utils.Constants.PLAY_ACTION_ITEM;
import static com.samrt.carnavigation.Utils.Constants.PREVIOUS_ACTION;
import static com.samrt.carnavigation.Utils.Constants.currentPos;
import static com.samrt.carnavigation.Utils.Constants.cursor;
public class MusicPlayService extends Service {
// Shared media player, static so other components can drive playback.
// NOTE(review): no `new MediaPlayer()` is visible in this class before
// onCreate attaches listeners — presumably initialized elsewhere; verify.
public static MediaPlayer player;
private Uri uri;            // unused in the code visible here
boolean pause = false;      // set after a pause command
private static Timer timer; // periodic task pushing progress to the UI
private String tilte;       // (sic) current track title
private String artist;      // current track artist
public boolean bCloseTimer ;// playback was triggered at app startup (suppress UI timer)
public static boolean fromPauseToPlay ;// resume-after-pause triggered from a list item
private String TAG = "MusicPlayService";
// Required no-arg constructor; the service is launched via startService().
public MusicPlayService() {
}
// This is a started (not bound) service; binding is unsupported.
@Override
public IBinder onBind(Intent intent) {
    throw new UnsupportedOperationException("Not yet implemented");
}
// Attaches the prepared/completion/error listeners to the shared player.
// FIX: `player` is a static field with no visible initialization anywhere
// in this class, so the listener calls below could throw a
// NullPointerException if the service was created first; create the
// shared MediaPlayer lazily when it is still null (no-op otherwise).
@Override
public void onCreate() {
    super.onCreate();
    if (player == null) {
        player = new MediaPlayer();
    }
    player.setOnPreparedListener(new MediaPlayer.OnPreparedListener() {
        @Override
        public void onPrepared(MediaPlayer mediaPlayer) {
            // Start the UI-progress timer unless playback was triggered
            // at app startup (bCloseTimer suppresses UI updates then).
            if (!bCloseTimer) {
                toTimer();
            }
        }
    });
    player.setOnCompletionListener(new MediaPlayer.OnCompletionListener() {
        @Override
        public void onCompletion(MediaPlayer mediaPlayer) {
            Log.i("come", "onCompletion: 该歌曲播放完毕!");
            // Advance to the next track when the current one finishes.
            toNext();
        }
    });
    player.setOnErrorListener(new MediaPlayer.OnErrorListener() {
        @Override
        public boolean onError(MediaPlayer mediaPlayer, int i, int i1) {
            // false = error not handled here; per the Android API the
            // OnCompletionListener will then be invoked.
            return false;
        }
    });
}
// Starts (at most once) a 500 ms periodic task that reads the current
// track's title/artist/duration and the player's position, then posts
// them to MusicActivity's handler so the playback UI can refresh.
private void toTimer() {
    if (timer == null) {
        timer = new Timer();
        timer.schedule(new TimerTask() {
            @Override
            public void run() {
                cursor.moveToPosition(currentPos);
                tilte = cursor.getString(cursor.getColumnIndexOrThrow(MediaStore.Audio.Media.TITLE));
                artist = cursor.getString(cursor.getColumnIndexOrThrow(MediaStore.Audio.Media.ARTIST));
                int totalTime = cursor.getInt(cursor.getColumnIndexOrThrow(MediaStore.Audio.Media.DURATION)); // total track length in ms
                int currentPosition = player.getCurrentPosition(); // current playback position in ms
                // Bundle everything up and hand it to the activity's handler.
                Bundle bundle = new Bundle();
                Message msg = MusicActivity.handler.obtainMessage();
                bundle.putString("tilte", tilte);
                bundle.putString("artist", artist);
                bundle.putInt("totalTime", totalTime);
                bundle.putInt("currentPosition", currentPosition);
                msg.obj = bundle;
                MusicActivity.handler.sendMessage(msg);
            }
        }, 500, 500);
    }
}
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
if (intent != null) {
int action = intent.getIntExtra("action",0);
bCloseTimer = intent.getBooleanExtra("mFromApp",false);//如果是随APP启动播放,则不更新播放界面的控制区,不启动定时器
if (!bCloseTimer) {//
Log.i(TAG,"onStartCommand: 打开计时器");
toTimer();
}
if (action == PLAY_ACTION_APP&&cursor.moveToFirst()) {
Log.i(TAG,"onStartCommand: 从APP启动开始播放");
toStart();
} else if (action == PAUSE_ACTION) {
toPause();
pause = true;
} else if (action == NEXT_ACTION) {
toNext();
} else if (action == PREVIOUS_ACTION) {
toPrevious();
}else if (action == PLAY_ACTION_ITEM) {
itemStart();
}
}
return super.onStartCommand(intent, flags, startId);
}
@Override
public void onDestroy() {
stopMusic();
super.onDestroy();
}
private void init() {
player.reset();
String dataSource = cursor.getString(cursor.getColumnIndex(MediaStore.Audio.Media.DATA));
Log.i(TAG,"init: ");
try {
player.setDataSource(dataSource);
player.prepare();
currentPos = cursor.getPosition();
Log.i(TAG,"init: "+currentPos);
player.start();
} catch (IOException e) {
e.printStackTrace();
}
}
private void toStart() {//非Item触发调用
if (player.isPlaying()) {
Log.i(TAG,"toStart: App启动播放音乐");
return;//此语句块,什么事也不做
}else {
init();
player.start();
pause = false;
fromPauseToPlay = false;
}
}
private void itemStart() {
if (cursor == null) {//如果所选歌曲正在播放
Toast.makeText(this, "没有找到本地歌曲", Toast.LENGTH_SHORT).show();
return;//此语句块,什么事也不做
}else if (fromPauseToPlay) {
try {
player.prepare();
player.start();
fromPauseToPlay = false;
} catch (IOException e) {
e.printStackTrace();
}
}else {
init();
pause = false;
}
}
private void toPause() {
if (player != null && player.isPlaying()) {
player.stop();
fromPauseToPlay = true;
Toast.makeText(this, fromPauseToPlay+"", Toast.LENGTH_SHORT).show();
pause = true;
}
}
private void toNext() {//OK
fromPauseToPlay = false;
if (cursor == null) {
Toast.makeText(this, "没有下一曲", Toast.LENGTH_SHORT).show();
return;
} else if (cursor.getCount() == cursor.getPosition()+1) {
cursor.moveToFirst();
}else {
cursor.moveToNext();
}
init();
}
private void toPrevious() {//OK
fromPauseToPlay = false;
if (cursor == null) {
return;
} else if (cursor.getPosition() == 0) {
cursor.moveToLast();
}else {
cursor.moveToPrevious();
}
init();
}
public static void stopMusic() {
player.release();
player =null;
if (timer != null) {
timer.cancel();
timer = null;
}
}
}
<file_sep>package com.samrt.carnavigation.Activity;
import android.content.Intent;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import android.webkit.WebSettings;
import android.webkit.WebView;
import android.webkit.WebViewClient;
import android.widget.Button;
import com.samrt.carnavigation.R;
import com.samrt.carnavigation.Utils.AndroidForJS;
import org.apache.http.NameValuePair;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
import java.util.List;
import static com.samrt.carnavigation.Utils.Constants.registerUrl;
/**
 * Registration screen: hosts a WebView that loads the remote registration
 * page (registerUrl) and exposes an AndroidForJS bridge object to it.
 */
public class PersonalInfoRegister extends AppCompatActivity{
    // NOTE(review): the form fields and the Apache HTTP members below are
    // declared but never used in this class — presumably leftovers from an
    // earlier native registration form; confirm before removing.
    private String name;
    private String sex;
    private String age;
    private String password;
    private String email;
    private Button btnSubmit;
    private Button btnBack;
    private Button btnReg;
    private WebView registerWV;
    private WebSettings webSettings;
    private HttpClient httpClient; // would execute POST requests (unused)
    private HttpPost httpPost;
    private List<NameValuePair> parmas;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_personal_info_register);
        setTitle("用户注册");
        init();
        registerWV.loadUrl(registerUrl); // load the remote registration page
    }

    /** Wires up the buttons and configures the WebView (JS, zoom, layout). */
    private void init(){
        btnBack =(Button)findViewById(R.id.btnBack);
        btnReg =(Button)findViewById(R.id.btnReg);
        registerWV =(WebView)findViewById(R.id.registerWV);
        webSettings = registerWV.getSettings();
        // enable JavaScript
        webSettings.setJavaScriptEnabled(true);
        // allow zooming
        webSettings.setSupportZoom(true);
        // show the built-in zoom controls
        webSettings.setBuiltInZoomControls(true);
        // use a wide viewport so the page can scale
        webSettings.setUseWideViewPort(true);
        // single-column layout, fitted to the screen
        webSettings.setLayoutAlgorithm(WebSettings.LayoutAlgorithm.SINGLE_COLUMN);
        webSettings.setLoadWithOverviewMode(true);
        //registerWV.addJavascriptInterface(new AndroidForJS(this),"loginObj");
        // "backObj" is the name the page's JavaScript uses to call back into the app
        registerWV.addJavascriptInterface(new AndroidForJS(this),"backObj");
        // keep navigation inside this WebView instead of the external browser
        registerWV.setWebViewClient(new WebViewClient(){
            @Override
            public boolean shouldOverrideUrlLoading(WebView view, String url) {
                view.loadUrl(url);
                return true;
            }
        });
        // Back: close this screen and open the personal-info screen.
        btnBack.setOnClickListener(new View.OnClickListener(){
            @Override
            public void onClick(View view){
                finish();
                Intent intent = new Intent(PersonalInfoRegister.this,PersonalInfo.class);
                startActivity(intent);
            }
        });
        // NOTE(review): this relaunches the same activity — looks like a
        // "reload" action; confirm it is not meant to open a different screen.
        btnReg.setOnClickListener(new View.OnClickListener(){
            @Override
            public void onClick(View view){
                finish();
                Intent intent = new Intent(PersonalInfoRegister.this,PersonalInfoRegister.class);
                startActivity(intent);
            }
        });
    }
}
<file_sep># CarNavigation
:fire:
##### QQ:交流群 :192268854

| ded8860930f71242bebdba4f108c1bf1395b0ef4 | [
"Markdown",
"Java"
] | 7 | Java | xubinbin1024/CarNavigation | 34b4760d8ca683e05452a786c81afa05c038367f | 0b5d5a9edbd5a2dd5a5acc273aef670310442103 |
refs/heads/master | <repo_name>lawchacon/nodejs-mysql-crud<file_sep>/README.md
# nodejs-mysql-crud
_Desarrollo proyecto 0 clase Desarrollo de soluciones Cloud_
## Construido con️
* [node.js](https://nodejs.org/en/)
* [mysql](https://www.mysql.com/)
* [EJS](https://ejs.co/)
## Creado por
* **<NAME>**
<file_sep>/src/index.js
// Application entry point: wires Express, sessions, Passport and the routers.
// NOTE: middleware registration order below is significant (session before
// passport.session(), body parsers before the routers, etc.).
const express = require('express');
const morgan = require('morgan');
const path = require('path');
const passport = require('passport');
const session = require('express-session');
const mySQLStore = require('express-mysql-session');
const {database} = require('./keys');
// Initialization: create the app and register the Passport strategies
// (lib/passport has side effects only, hence the bare require).
const app =express();
require('./lib/passport');
// Settings: port, EJS view engine and views directory.
app.set('port', process.env.PORT || 8080);
app.set('views', path.join(__dirname, 'views'));
app.set ('view engine','ejs');
// Middlewares: MySQL-backed sessions must come before passport.session().
app.use(session({
    secret: 'crudsession',
    resave: false,
    saveUninitialized: false,
    store: new mySQLStore(database)
}));
app.use(morgan('dev'));
app.use(express.urlencoded({extended: false}));
app.use(express.json());
app.use(passport.initialize());
app.use(passport.session());
// Global variables: expose the logged-in user to every EJS template.
app.use((req,res,next)=>{
    app.locals.usuario = req.user;
    next();
});
// Routes.
app.use(require('./routes'));
app.use('/auth',require('./routes/auth'));
app.use('/eventos',require('./routes/eventos'));
// Static assets.
app.use(express.static(path.join(__dirname,'public')));
// Start the server.
app.listen(app.get('port'),()=>{
    console.log('server on port', app.get('port'));
});<file_sep>/src/lib/sesion.js
module.exports = {
sesionActiva(req,res,next){
if(req.isAuthenticated()){
return next();
}
else
{
return res.redirect('/auth/login');
}
},
sesionInactiva(req,res,next){
if(!req.isAuthenticated()){
return next();
}
else
{
return res.redirect('/eventos');
}
}
};<file_sep>/src/lib/passport.js
// Passport configuration: registers the signup and login local strategies and
// the session (de)serializers. Required for its side effects from index.js.
// NOTE(review): the '<PASSWORD>'/<PASSWORD> tokens below look like redaction
// placeholders (the passwordField is normally 'password' and the object value
// the plain password) — restore the real values before running this file.
const passport = require('passport');
const strategy = require('passport-local').Strategy
const pool = require('../database');
const passwords = require('./passwords') ;
// Signup: hash the password, insert the user, and log them in via done().
passport.use('local.signup', new strategy({
    usernameField: 'email',
    passwordField: '<PASSWORD>',
    passReqToCallback: true
}, async(req, email, password,done)=>{
    let usuario = {
        email: email,
        password: <PASSWORD>
    };
    // Replace the placeholder with the bcrypt hash before persisting.
    usuario.password = await passwords.encriptar(password);
    const result= await pool.query('INSERT INTO usuarios SET ?', [usuario]);
    usuario.id =result.insertId;
    return done(null , usuario);
}));
// Login: look the user up by email and compare the bcrypt hash.
passport.use('local.login', new strategy({
    usernameField: 'email',
    passwordField: '<PASSWORD>',
    passReqToCallback: true
}, async (req, email, password,done) =>{
    const usuarios = await pool.query('SELECT * FROM usuarios WHERE email = ?',[email]);
    if(usuarios.length){
        const usuario = usuarios[0];
        const passValida = await passwords.comparar(password,usuario.password);
        if(passValida){
            done(null,usuario);
        }
        else{
            done(null, false); // wrong password
        }
    }
    else{
        done(null, false); // unknown email
    }
}));
// Only the user id is stored in the session cookie...
passport.serializeUser((user, done) => {
    done(null, user.id);
});
// ...and the full row is re-read from MySQL on every request.
passport.deserializeUser(async (id, done) => {
    const filas = await pool.query('SELECT * FROM usuarios WHERE id = ?', [id]);
    done(null, filas[0]);
});
<file_sep>/bd/bd.sql
-- Schema bootstrap for the events CRUD application.
-- IF EXISTS added so the script also succeeds on a server where the database
-- has never been created (the original unconditional DROP failed there).
DROP DATABASE IF EXISTS ABC;
CREATE DATABASE IF NOT EXISTS ABC;
USE ABC;
-- Registered accounts; password stores a bcrypt hash (see src/lib/passwords.js).
CREATE TABLE IF NOT EXISTS usuarios
(
    id INT NOT NULL AUTO_INCREMENT,
    email VARCHAR(200) NOT NULL,
    password VARCHAR(100) NOT NULL,
    PRIMARY KEY(id)
);
-- Events created by users; each row belongs to one usuario (fk_user).
CREATE TABLE IF NOT EXISTS eventos
(
    id INT AUTO_INCREMENT,
    user_id INT NOT NULL,
    nombre VARCHAR(200) NOT NULL,
    categoria VARCHAR(15) NOT NULL,
    lugar VARCHAR(200) NOT NULL,
    direccion VARCHAR(200) NOT NULL,
    fecha_inicio DATETIME,
    fecha_fin DATETIME,
    tipo_evento VARCHAR(10),
    PRIMARY KEY(id),
    CONSTRAINT fk_user FOREIGN KEY(user_id) REFERENCES usuarios(id)
);<file_sep>/src/routes/eventos.js
// Event CRUD routes, mounted at /eventos (see src/index.js). Every route is
// protected by the sesionActiva guard. All SQL is parameterized via mysql
// placeholders, so no string concatenation of user input happens here.
const express = require('express');
const router = express.Router();
const pool = require('../database');
const {sesionActiva} = require('../lib/sesion');

// GET /eventos/agregar — render the "new event" form.
router.get('/agregar' , sesionActiva, (req , res) => {
    res.render('eventos/agregar');
});

// POST /eventos/agregar — persist a new event owned by the logged-in user.
router.post('/agregar', sesionActiva, async(req,res) => {
    const {nombre, categoria, lugar,direccion,fechaInicio,fechaFin,tipo} = req.body;
    const evento = {
        user_id: req.user.id,
        nombre: nombre,
        categoria: categoria,
        lugar: lugar,
        direccion: direccion,
        fecha_inicio: fechaInicio,
        fecha_fin: fechaFin,
        tipo_evento: tipo
    };
    await pool.query('INSERT INTO eventos set ?',[evento]);
    res.redirect('/eventos');
});

// GET /eventos — list only the current user's events.
router.get('/', sesionActiva, async(req,res) => {
    const eventos = await pool.query('SELECT * FROM eventos WHERE user_id = ?',[req.user.id]);
    res.render('eventos/listar', {eventos});
});

// GET /eventos/eliminar/:id — delete an event.
// NOTE(review): a destructive action on a GET route is exposed to CSRF and
// link prefetching — consider POST/DELETE. The id is also not checked against
// req.user.id, so any logged-in user can delete any event. Confirm intent.
router.get('/eliminar/:id', sesionActiva, async(req,res) => {
    const { id } = req.params;
    // fix: the query result was bound to an unused variable before
    await pool.query('DELETE FROM eventos WHERE ID =?',[id]);
    res.redirect('/eventos');
});

// GET /eventos/editar/:id — render the edit form pre-filled with the event.
router.get('/editar/:id', sesionActiva, async(req,res) => {
    const { id } = req.params;
    const eventos = await pool.query('SELECT * FROM eventos WHERE ID =?',[id]);
    res.render('eventos/editar',{evento: eventos[0]});
});

// POST /eventos/editar/:id — apply the edited fields.
router.post('/editar/:id', sesionActiva, async(req,res) => {
    const { id } = req.params;
    const {nombre, categoria, lugar,direccion,fechaInicio,fechaFin,tipo} = req.body;
    const evento = {
        nombre: nombre,
        categoria: categoria,
        lugar: lugar,
        direccion: direccion,
        fecha_inicio: fechaInicio,
        fecha_fin: fechaFin,
        tipo_evento: tipo
    };
    // fix: the query result was bound to an unused variable before
    await pool.query('UPDATE eventos SET ? WHERE ID =?',[evento,id]);
    res.redirect('/eventos');
});

module.exports = router;
<file_sep>/src/lib/passwords.js
const bcrypt = require('bcryptjs');

// Thin helpers around bcryptjs. Both now rethrow after logging instead of
// silently resolving to undefined (the previous behaviour would have let a
// missing hash be stored or compared without anyone noticing).
const passwords = {};

/**
 * Hashes a plain-text password with a freshly generated salt.
 * @param {string} password - plain-text password
 * @returns {Promise<string>} bcrypt hash
 * @throws rethrows any bcrypt failure
 */
passwords.encriptar = async (password) => {
    try {
        const salt = await bcrypt.genSalt();
        const pass = await bcrypt.hash(password, salt);
        return pass;
    } catch (e) {
        console.error(e);
        throw e; // fix: error was swallowed, resolving to undefined
    }
};

/**
 * Compares a plain-text password against a stored bcrypt hash.
 * @param {string} password - candidate plain-text password
 * @param {string} passBd - bcrypt hash loaded from the database
 * @returns {Promise<boolean>} true when they match
 * @throws rethrows any bcrypt failure
 */
passwords.comparar = async (password, passBd) => {
    try {
        return await bcrypt.compare(password, passBd);
    } catch (e) {
        console.error(e);
        throw e; // fix: error was swallowed, resolving to undefined
    }
};
module.exports = passwords; | e3d1fcf7f8d0fb77973d31fa7acb9109f043ccd9 | [
"Markdown",
"SQL",
"JavaScript"
] | 7 | Markdown | lawchacon/nodejs-mysql-crud | a4d60758231d338165e79291ae1508bb8ae7b6aa | dfb451baa70ee7f92b15f10eb0d14bf70c058e5d |
refs/heads/master | <repo_name>fakaka/Game<file_sep>/Don't Starve Mod/esctemplate/scripts/prefabs/eardress.lua
-- Don't Starve prefab: "eardress", an equippable body item that acts as a
-- small insulated backpack (a 2x3 chilled container) and wears out over time.
local assets=
{
    Asset("ANIM", "anim/eardress.zip"),
    Asset("IMAGE", "images/inventoryimages/eardress.tex"),
    Asset("ATLAS", "images/inventoryimages/eardress.xml"),
}
local prefabs = {}

-- Equipping swaps the body art, routes picked-up items into this container,
-- starts the durability drain and opens the container UI for the wearer.
local function onequip(inst, owner)
    owner.AnimState:OverrideSymbol("swap_body", "eardress", "swap_body")
    owner.components.inventory:SetOverflow(inst)
    inst.components.fueled:StartConsuming()
    inst.components.container:Open(owner)
end

-- Unequipping undoes everything onequip set up.
local function onunequip(inst, owner)
    owner.AnimState:ClearOverrideSymbol("swap_body")
    owner.components.inventory:SetOverflow(nil)
    inst.components.fueled:StopConsuming()
    inst.components.container:Close(owner)
end

-- Two columns by three rows of widget slot positions.
local slotpos = {}
for y = 0, 2 do
    table.insert(slotpos, Vector3(-162, (1 - y) * 75,0))
    table.insert(slotpos, Vector3(-162 + 75, (1 - y) * 75,0))
end

-- Called when durability runs out: spill the contents and destroy the item.
local function onfinish(inst)
    inst.components.container:DropEverything()
    inst.components.container:Close()
    inst:Remove()
end

local function fn(Sim)
    local inst = CreateEntity()
    inst.entity:AddTransform()
    inst.entity:AddAnimState()
    MakeInventoryPhysics(inst)
    local minimap = inst.entity:AddMiniMapEntity()
    minimap:SetIcon("krampus_sack.png")
    inst.AnimState:SetBuild("eardress")
    inst.AnimState:SetBank("torso_rain")
    inst.AnimState:PlayAnimation("anim")
    -- "fridge"/"lowcool" tags make the container slow food spoilage.
    inst:AddTag("fridge")
    inst:AddTag("lowcool")
    inst:AddComponent("inspectable")
    inst:AddComponent("inventoryitem")
    inst.components.inventoryitem.atlasname = "images/inventoryimages/eardress.xml"
    -- Body-slot equippable: insulates and gives a medium sanity aura.
    inst:AddComponent("equippable")
    inst.components.equippable.equipslot = EQUIPSLOTS.BODY
    inst.components.equippable.insulated = true
    inst.components.equippable.dapperness = TUNING.DAPPERNESS_MED
    inst.components.equippable:SetOnEquip( onequip )
    inst.components.equippable:SetOnUnequip( onunequip )
    -- Durability: drains only while worn (USAGE fuel), 4800 units total.
    inst:AddComponent("fueled")
    inst.components.fueled.fueltype = "USAGE"
    inst.components.fueled:InitializeFuelLevel(4800)
    inst.components.fueled:SetDepletedFn(onfinish)
    -- 2x3 backpack-style container using the icepack UI.
    inst:AddComponent("container")
    inst.components.container:SetNumSlots(#slotpos)
    inst.components.container.widgetslotpos = slotpos
    inst.components.container.widgetanimbank = "ui_icepack_2x3"
    inst.components.container.widgetanimbuild = "ui_icepack_2x3"
    inst.components.container.widgetpos = Vector3(-5,-70,0)
    inst.components.container.side_widget = true
    inst.components.container.type = "pack"
    return inst
end

return Prefab( "common/inventory/eardress", fn, assets , prefabs)
<file_sep>/Cocos2d-js/hairpinrun/src/statusLayer.js
/**
 * HUD layer showing the coin counter (top-left) and the distance meter
 * (top-right). Updated every frame by the game layer.
 */
var StatusLayer = cc.Layer.extend({
    labelCoin: null,
    labelMeter: null,
    coins: 0,

    ctor: function () {
        this._super();
        this.init();
    },

    init: function () {
        var winSize = cc.director.getWinSize();

        var coinLabel = new cc.LabelTTF("Coins : 0", "Helvetica", 20);
        coinLabel.setColor(cc.color.WHITE);
        coinLabel.setPosition(80, winSize.height - 20);
        this.addChild(coinLabel);
        this.labelCoin = coinLabel;

        var meterLabel = new cc.LabelTTF(" 0 M", "Helvetica", 20);
        meterLabel.setColor(cc.color(0, 0, 0));
        meterLabel.setPosition(winSize.width - 80, winSize.height - 20);
        this.addChild(meterLabel);
        this.labelMeter = meterLabel;
    },

    // px: horizontal distance travelled in pixels; 10 px == 1 metre.
    updateMeter: function (px) {
        var meters = parseInt(px / 10);
        this.labelMeter.setString(meters + "M");
    },

    // num: how many coins were just collected.
    updateCoin: function (num) {
        this.coins = this.coins + num;
        this.labelCoin.setString("Coins:" + this.coins);
    }
})
<file_sep>/Cocos2d-js/hairpinrun/src/globals.js
/**
 * Created by mj on 2016/12/22.
 * Shared gameplay constants for the runner game.
 */
var g_groundHeight = 57;  // y coordinate of the ground surface, in px
var g_runnerStartX = 80;  // runner's fixed on-screen x position, in px

// Child tags for the three scene layers. The typeof guard keeps a
// re-executed script from clobbering an existing definition.
if (typeof TagOfLayer === "undefined") {
    var TagOfLayer = {
        Background: 0,
        Game: 1,
        Status: 2
    };
}

// Tags used to identify sprite kinds (e.g. during collision handling).
if (typeof SpriteTag === "undefined") {
    var SpriteTag = {
        runner: 0,
        coin: 1,
        rock: 2
    };
}
<file_sep>/Don't Starve Mod/README.md
# 夜王莉莉丝
人物原型 -> [李艺彤](http://weibo.com/u/3700233717)
## 三围
基本三围 100 150 100 √
不同季节环境变化
成长三围 200 250 200 √
## 特殊能力
人物移动速度1.2倍 √
最高20级 √
自愈(等级高时)
一开始比较黑等级高变白
### 可升级
吃肉 5% √
杀怪 5% √
变白
## 专属宠物
小海豹 用 **?** 占满箱子变成冰箱 √
## 技能
逆十字墓场
## 专属物品
* 武器
长刀 攻击最高60吧
水之魂
* 衣服
洛丽塔 (背包 保鲜?) √
木偶的服装
* 帽子
**白发卡** √
皇冠
* 道具
房子(帐篷)
黑色蕾丝伞
* 食物
咖喱饭 (鸡腿 + 土豆 + 种子)
红茶
## 人物状态
1. 饥饿大于90%时
2. 饥饿小于20%
3. 脑残高于?%
## 人物音效
* 吃东西时
* 砍树时
* 被怪打伤时
* 升级时
* 中毒时
* 。。。
## 人物语言
* 介绍自己 √
* 好好吃 √
* 好开心啊
* 我是不会和你好的哼!
* 点击专属物品的说话
* 。。。
## 外观
* 有2种外貌 明和暗


* 各种物品在建造栏 手上 地上的样子
* mod的图标
* 选择人物界面图片
* 动画动作的图片
## 工作人员
- 0
- 糊糊
- 河畔_迎风
- R酱
- 少女
- 盐酱
<file_sep>/Don't Starve Mod/esctemplate/modmain.lua
-- Mod entry point: registers the character, its items/pet prefabs and all
-- portrait/map-icon assets with the Don't Starve mod loader.
PrefabFiles = {
    "esctemplate",
    "yukarihat",
    "eardress",
    "mihobell",
    "miho",
}
Assets = {
    Asset( "IMAGE", "images/saveslot_portraits/esctemplate.tex" ),
    Asset( "ATLAS", "images/saveslot_portraits/esctemplate.xml" ),
    Asset( "IMAGE", "images/selectscreen_portraits/esctemplate.tex" ),
    Asset( "ATLAS", "images/selectscreen_portraits/esctemplate.xml" ),
    Asset( "IMAGE", "images/selectscreen_portraits/esctemplate_silho.tex" ),
    Asset( "ATLAS", "images/selectscreen_portraits/esctemplate_silho.xml" ),
    Asset( "IMAGE", "bigportraits/esctemplate.tex" ),
    Asset( "ATLAS", "bigportraits/esctemplate.xml" ),
    Asset( "IMAGE", "images/map_icons/esctemplate.tex" ),
    Asset( "ATLAS", "images/map_icons/esctemplate.xml" ),
    Asset( "IMAGE", "images/avatars/avatar_esctemplate.tex" ),
    Asset( "ATLAS", "images/avatars/avatar_esctemplate.xml" ),
    Asset( "IMAGE", "images/map_icons/yukarihat.tex" ),
    Asset( "ATLAS", "images/map_icons/yukarihat.xml" ),
}
-- Pull game globals into the mod environment.
local require = GLOBAL.require
local STRINGS = GLOBAL.STRINGS
-- The character select screen lines
STRINGS.CHARACTER_TITLES.esctemplate = "The Sample Character"
STRINGS.CHARACTER_NAMES.esctemplate = "Esc"
STRINGS.CHARACTER_DESCRIPTIONS.esctemplate = "*Perk 1\n*Perk 2\n*Perk 3"
STRINGS.CHARACTER_QUOTES.esctemplate = "\"Quote\""
-- The character's name as appears in-game
STRINGS.NAMES.ESCTEMPLATE = "Esc"
-- Helper: give a prefab a minimap icon named after its prefab name.
-- NOTE(review): not referenced in this file — presumably called from a
-- prefab script; confirm before removing.
function AddMap(inst)
    local minimap = inst.entity:AddMiniMapEntity()
    minimap:SetIcon( inst.prefab .. ".tex" )
end
AddMinimapAtlas("images/map_icons/yukarihat.xml")
-- Add mod character to mod character list. Also specify a gender. Possible genders are MALE, FEMALE, ROBOT, NEUTRAL, and PLURAL.
AddModCharacter("esctemplate", "FEMALE")
<file_sep>/Don't Starve Mod/esctemplate/README.md
# 饥荒人物mod (夜王莉莉丝)
人物原型 [李艺彤](http://weibo.com/u/3700233717)
## 三围
100 100 100
三围变化率 1.0 1.0 1.0(基于威尔逊)
不同季节环境变化
成长三围 300 200 200
## 特殊能力
人物移动速度1.2倍
自愈(等级高时)
### 可升级
吃肉 5%
杀怪 5%
## 专属宠物
小海豹 可以用冰占满箱子变成冰箱
## 专属物品
* 武器
长刀 攻击最高60吧
* 衣服
裙子 洛丽塔 (只是好看? 可以放东西?)
* 帽子
**白发卡**
* 道具
房子(帐篷)
* 食物
咖喱饭(A+B+C)
## 人物状态
1. 饥饿大于90%时
2. 饥饿小于20%
3. 脑残高于?%
## 人物音效
* 吃东西时
* 砍树时
* 被怪打伤时
* 升级时
* 中毒时
## 人物语言
* 介绍自己
* 好好吃
* 好开心啊
* 我是不会和你好的哼!
* 点击专属物品的说话
* 。。。
## 外观
* 至少有2种外貌 长发 马尾 中分
* 各种物品在建造栏 手上 地上的样子
* mod的图标

* 选择人物界面图片
* 动画动作的图片
## 工作人员
糊糊
[河畔_迎风](http://weibo.com/u/3060720340)
R酱
少女
盐酱
<file_sep>/Cocos2d-js/hairpinrun/src/gameLayer.js
// Runner states: on the ground, rising, or falling.
if (typeof RunnerStat == "undefined") {
    var RunnerStat = {
        running: 0,
        jumpUp: 1,
        jumpDown: 2
    }
}

/**
 * Main gameplay layer: owns the runner sprite, its Chipmunk body/shape, the
 * three frame animations, and the touch-to-jump listener. The Chipmunk space
 * is created by the scene and passed into the ctor.
 */
var GameLayer = cc.Layer.extend({
    spriteSheet: null,
    runningAction: null,
    jumpUpAction: null,
    jumpDownAction: null,
    sprite: null,
    space: null,
    body: null,
    shape: null,
    runnerStat: null,
    ctor: function (space) {
        this._super();
        this.space = space;
        this.runnerStat = RunnerStat.running;
        this.init();
    },
    init: function () {
        var size = cc.director.getWinSize();
        // this._debugNode = new cc.PhysicsDebugNode(this.space);
        // // Parallax ratio and offset
        // this.addChild(this._debugNode, 10);
        cc.spriteFrameCache.addSpriteFrames(res.runner_plist);
        this.spriteSheet = new cc.SpriteBatchNode(res.runner_png);
        this.addChild(this.spriteSheet);
        this.createAction();
        // Physics-backed runner sprite: body position drives the sprite.
        this.sprite = new cc.PhysicsSprite("#runner0.png");
        var contentSize = this.sprite.getContentSize();
        this.body = new cp.Body(1, cp.momentForBox(1, contentSize.width, contentSize.height));
        this.body.p = cc.p(g_runnerStartX, g_groundHeight + contentSize.height / 2);
        this.body.applyImpulse(cp.v(150, 0), cp.v(0, 0));//run speed
        this.space.addBody(this.body);
        // Shape slightly narrower than the art so near-misses don't collide.
        this.shape = new cp.BoxShape(this.body, contentSize.width - 14, contentSize.height);
        this.space.addShape(this.shape);
        this.sprite.setBody(this.body);
        this.sprite.runAction(this.runningAction);
        this.spriteSheet.addChild(this.sprite);
        // Any touch release triggers a jump attempt.
        cc.eventManager.addListener({
            event: cc.EventListener.TOUCH_ONE_BY_ONE,
            swallowTouches: true,
            onTouchBegan: function (touch, event) {
                var pos = touch.getLocation();
                // event.getCurrentTarget().recognizer.beginPoint(pos.x, pos.y);
                return true;
            },
            onTouchMoved: function (touch, event) {
                var pos = touch.getLocation();
                // event.getCurrentTarget().recognizer.movePoint(pos.x, pos.y);
            },
            onTouchEnded: function (touch, event) {
                // this.sprite.runAction(this.jumpUpAction);
                event.getCurrentTarget().jump();
            }
        }, this);
        this.scheduleUpdate();
    },
    // Builds the three looping animations. Each action is retained because it
    // is stored on the layer and re-run later, outside runAction's ownership.
    createAction: function () {
        // runningAction
        var animFrames = [];
        for (var i = 0; i < 8; i++) {
            var str = "runner" + i + ".png";
            var frame = cc.spriteFrameCache.getSpriteFrame(str);
            animFrames.push(frame);
        }
        var animation = new cc.Animation(animFrames, 0.1);
        this.runningAction = new cc.RepeatForever(new cc.Animate(animation));
        this.runningAction.retain();
        //jumpUpAction
        animFrames = [];
        for (var i = 0; i < 4; i++) {
            var str = "runnerJumpUp" + i + ".png";
            var frame = cc.spriteFrameCache.getSpriteFrame(str);
            animFrames.push(frame);
        }
        animation = new cc.Animation(animFrames, 0.2);
        this.jumpUpAction = new cc.RepeatForever(new cc.Animate(animation));
        // fix: previously retained runningAction a second time (copy-paste
        // error), leaving jumpUpAction unretained and leaking runningAction.
        this.jumpUpAction.retain();
        //jumpDownAction
        animFrames = [];
        for (var i = 0; i < 2; i++) {
            var str = "runnerJumpDown" + i + ".png";
            var frame = cc.spriteFrameCache.getSpriteFrame(str);
            animFrames.push(frame);
        }
        animation = new cc.Animation(animFrames, 0.3);
        this.jumpDownAction = new cc.RepeatForever(new cc.Animate(animation));
        this.jumpDownAction.retain();
    },
    // Distance travelled from the start position, in px (camera "eye" x).
    getEyeX: function () {
        return this.sprite.getPositionX() - g_runnerStartX;
    },
    // Jump only from the ground: apply an upward impulse and switch animation.
    jump: function () {
        cc.log("jump");
        if (this.runnerStat == RunnerStat.running) {
            cc.audioEngine.playEffect(res.jump_mp3);
            this.body.applyImpulse(cp.v(0, 250), cp.v(0, 0));
            this.runnerStat = RunnerStat.jumpUp;
            this.sprite.stopAllActions();
            this.sprite.runAction(this.jumpUpAction);
        }
    },
    // Per-frame: push the distance to the HUD and advance the jump state
    // machine based on the body's vertical velocity.
    update: function () {
        var sLayer = this.getParent().getParent().getChildByTag(TagOfLayer.Status);
        sLayer.updateMeter(this.getEyeX());
        var vel = this.body.getVel();
        if (this.runnerStat == RunnerStat.jumpUp) {
            if (vel.y < 0.1) {
                // apex reached: start falling
                this.runnerStat = RunnerStat.jumpDown;
                this.sprite.stopAllActions();
                this.sprite.runAction(this.jumpDownAction);
            }
        } else if (this.runnerStat == RunnerStat.jumpDown) {
            if (vel.y == 0) {
                // landed: back to running
                this.runnerStat = RunnerStat.running;
                this.sprite.stopAllActions();
                this.sprite.runAction(this.runningAction);
            }
        }
    }
});
<file_sep>/Arknights/README.md
# Arknights UI 明日方舟主界面
<!--  -->
## 参考原型
[mashirozx/arknights-ui](https://github.com/mashirozx/arknights-ui/)
## IDEA
本来想自己完全实现的,预研了3D透视技术并完成demo后就搁置了,没想到后来找到一个实现比较完整的项目,就在它的基础上修改了。
## TODO
- [x] 随机立绘
- [ ] 角色台词
- [ ] 点击悬浮特效
- [ ] 粒子效果
- [ ] 布局比例
- [ ] 盒子阴影
- [ ] 音效
- [ ] 加载时
- [ ] 人物语音
- [x] BGM
- [ ] 活动音乐
- [ ] ...
<file_sep>/Arknights/js/config.js
// Character voice-line configuration for the Arknights UI demo.
// NOTE(review): "chras" looks like a typo for "chars"; renaming would break
// any other script referencing it — confirm before changing.
const chras = [
    {
        name: 'chen',
        // Art asset paths — presumably filled in elsewhere or intentionally
        // blank for now; confirm against the loader code.
        pic: {
            normal: '',
            elite2: ''
        },
        // Lines the character can speak, paired with their audio clips.
        voices: [
            {
                text: 'arknights',
                voice: 'chen/arknights.mp3'
            },
            {
                text: '博士?睡着了?哼,真没紧张感。',
                voice: 'chen/sleep.mp3'
            }
        ]
    }
]
<file_sep>/Don't Starve Mod/esctemplate/scripts/prefabs/yukarihat.lua
-- Don't Starve prefab: "yukarihat", a simple head-slot hat with its own
-- build, inventory art and minimap icon.
local assets=
{
    Asset("ANIM", "anim/yukarihat.zip"),
    Asset("ANIM", "anim/yukarihat_swap.zip"),
    Asset("ATLAS", "images/inventoryimages/yukarihat.xml"),
}
prefabs = {}

-- Equipping swaps in the hat art and switches the wearer to the hat hair set.
local function onequiphat(inst, owner)
    owner.AnimState:OverrideSymbol("swap_hat", "yukarihat_swap", "swap_hat")
    owner.AnimState:Show("HAT")
    owner.AnimState:Show("HAT_HAIR")
    owner.AnimState:Hide("HAIR_NOHAT")
    owner.AnimState:Hide("HAIR")
end

-- Unequipping restores the bare-head hair layers.
local function onunequiphat(inst, owner)
    owner.AnimState:Hide("HAT")
    owner.AnimState:Hide("HAT_HAIR")
    owner.AnimState:Show("HAIR_NOHAT")
    owner.AnimState:Show("HAIR")
end

local function fn()
    local inst = CreateEntity()
    local trans = inst.entity:AddTransform()
    local anim = inst.entity:AddAnimState()
    MakeInventoryPhysics(inst)
    -- Shipwrecked DLC only: let the hat float on water.
    if IsDLCEnabled(CAPY_DLC) then
        MakeInventoryFloatable(inst, "idle", "idle")
    end
    anim:SetBuild("yukarihat")
    anim:SetBank("yukarihat")
    anim:PlayAnimation("idle")
    inst:AddComponent("inspectable")
    inst:AddComponent("inventoryitem")
    inst.components.inventoryitem.imagename = "yukarihat"
    inst.components.inventoryitem.atlasname = "images/inventoryimages/yukarihat.xml"
    inst.entity:AddMiniMapEntity()
    inst.MiniMapEntity:SetIcon("yukarihat.tex")
    -- Head-slot equippable with the equip/unequip art swaps above.
    inst:AddComponent("equippable")
    inst.components.equippable.equipslot = EQUIPSLOTS.HEAD
    inst.components.equippable:SetOnEquip(onequiphat)
    inst.components.equippable:SetOnUnequip(onunequiphat)
    return inst
end
return Prefab("yukarihat", fn, assets, prefabs)<file_sep>/Don't Starve Mod/esctemplate/scripts/prefabs/miho.lua
-- Don't Starve prefab: "miho", a following fox pet that doubles as a mobile
-- container. On a full-moon night, if every slot is filled with cutgrass, it
-- morphs into a larger form with a bigger (4x4) inventory and faster movement.
require "prefabutil"
-- Follower tuning distances. NOTE(review): neither constant is referenced in
-- this file — presumably read by brains/mihobrain; confirm before removing.
local WAKE_TO_FOLLOW_DISTANCE = 2
local SLEEP_NEAR_LEADER_DISTANCE = 2
local assets ={
    Asset( "ANIM", "anim/fox_miho_new.zip" ),
    Asset( "ANIM", "anim/miho.zip" ),
    Asset( "ANIM", "anim/mihoup.zip" ),
    Asset( "ANIM", "anim/ui_miho_3x4.zip"),
    Asset( "ANIM", "anim/ui_miho_4x4.zip"),
}
local prefabs ={
    "mihobell",
}
-- Combat retarget hook: the pet never holds on to a target.
local function ShouldKeepTarget(inst, target)
    return false
end
-- Container opened: cancel any pending morph and play the open animation.
local function OnOpen(inst)
    if inst.MorphTask then
        inst.MorphTask:Cancel()
        inst.MorphTask = nil
    end
    inst.sg:GoToState("open")
end
local function OnClose(inst)
    inst.sg:GoToState("close")
end
local function OnStopFollowing(inst)
    inst:RemoveTag("companion")
end
local function OnStartFollowing(inst)
    inst:AddTag("companion")
end
-- Container-widget slot layouts for the small (3x4) and grown (4x4) forms.
local slotpos_3x4 = {}
for y = 2.5, -.5, -1 do
    for x = 0, 2 do
        table.insert(slotpos_3x4, Vector3(75*x-75*2+75, 75*y-80*2+75,0))
    end
end
local slotpos_4x4 = {}
for y = 2.5, -.5, -1 do
    for x = 0, 3 do
        table.insert(slotpos_4x4, Vector3(75*x-93*2+75, 75*y-80*2+75,0))
    end
end
-- Switch to the grown form: 4x4 container, faster movement, larger scale.
-- NOTE(review): the dofx parameter is accepted but unused here.
local function MorphUpMiho(inst, dofx)
    inst.components.container:SetNumSlots(#slotpos_4x4)
    inst.components.container.widgetslotpos = slotpos_4x4
    inst.components.container.widgetanimbank = "ui_miho_4x4"
    inst.components.container.widgetanimbuild = "ui_miho_4x4"
    inst.components.container.widgetpos = Vector3(0,140,0)
    inst.components.container.widgetpos_controller = Vector3(0,140,0)
    inst.components.container.side_align_tip = 160
    inst.components.locomotor.walkspeed = 6.8
    inst.components.locomotor.runspeed = 9
    inst.Transform:SetScale(1.2,1.2,1.2)
    inst.MihoState = "UP"
end
-- Revert to the small form (scale and state only).
-- NOTE(review): not referenced in this file — possibly reserved for a future
-- de-morph path; confirm.
local function MorphNoMiho(inst, dofx)
    inst.Transform:SetScale(1,1,1)
    inst.MihoState = "NO"
end
-- Morph precondition: full-moon night, still in the small form, and every
-- container slot filled with cutgrass.
local function CanMorph(inst)
    local clock = GetWorld().components.clock
    if not clock:IsNight() or clock:GetMoonPhase() ~= "full" or inst.MihoState ~= "NO" then
        return false, false
    end
    local container = inst.components.container
    local canUP = true
    for i = 1, container:GetNumSlots() do
        local item = container:GetItemInSlot(i)
        if not item then
            canUP = false
            break
        end
        if item.prefab ~= "cutgrass" then
            canUP = false
        else
        end
    end
    return canUP
end
-- Perform the morph now: consume the cutgrass and grow.
local function MorphMiho(inst)
    local clock = GetWorld().components.clock
    if not clock:IsNight() or inst.MihoState ~= "NO" or clock:GetMoonPhase() ~= "full" then
        return
    end
    local container = inst.components.container
    local canUP = inst:CanMorph()
    if canUP then
        container:ConsumeByName("cutgrass", container:GetNumSlots())
        MorphUpMiho(inst, true)
    end
end
-- Schedule the morph transition animation shortly after conditions are met
-- (triggered on nightfall and whenever the container closes).
local function CheckForMorph(inst)
    local upmiho = inst:CanMorph()
    if upmiho then
        if inst.MorphTask then
            inst.MorphTask:Cancel()
            inst.MorphTask = nil
        end
        inst.MorphTask = inst:DoTaskInTime(2, function(inst)
            inst.sg:GoToState("transition")
        end)
    end
end
-- Persist / restore the grown state across save games.
local function OnSave(inst, data)
    data.MihoState = inst.MihoState
end
local function OnPreLoad(inst, data)
    if not data then return end
    if data.MihoState == "UP" then
        MorphUpMiho(inst)
    end
end
local function fn()
    local inst = CreateEntity()
    inst.entity:AddTransform()
    inst.Transform:SetFourFaced()
    inst.entity:AddAnimState()
    inst.entity:AddSoundEmitter()
    inst.entity:AddDynamicShadow()
    inst.DynamicShadow:SetSize( 1.3, .5 )
    inst.entity:AddMiniMapEntity()
    inst.MiniMapEntity:SetIcon("miho.tex")
    inst.AnimState:SetBank("fox_miho")
    inst.AnimState:SetBuild("fox_miho")
    inst.AnimState:PlayAnimation("idle_loop")
    inst:AddTag("companion")
    inst:AddTag("scarytoprey")
    inst:AddTag("noauradamage")
    inst:AddTag("notraptrigger")
    inst:AddTag("character")
    inst:AddTag("fox")
    inst:AddTag("miho")
    inst:AddTag("light")
    -- Shipwrecked DLC gets amphibious physics; otherwise standard character
    -- physics with an explicit collision mask.
    if IsDLCEnabled(CAPY_DLC) then
        MakeAmphibiousCharacterPhysics(inst, 75, .5)
    else
        MakeCharacterPhysics(inst, 75, .5)
        inst.Physics:SetCollisionGroup(COLLISION.CHARACTERS)
        inst.Physics:ClearCollisionMask()
        inst.Physics:CollidesWith(COLLISION.WORLD)
        inst.Physics:CollidesWith(COLLISION.OBSTACLES)
        inst.Physics:CollidesWith(COLLISION.CHARACTERS)
    end
    inst:AddComponent("inspectable")
    inst.components.inspectable:RecordViews()
    inst:AddComponent("locomotor")
    inst.components.locomotor.walkspeed = 8
    inst.components.locomotor.runspeed = 10
    inst:AddComponent("follower")
    inst:ListenForEvent("stopfollowing", OnStopFollowing)
    inst:ListenForEvent("startfollowing", OnStartFollowing)
    -- Effectively unkillable: invincible, fireproof, and self-regenerating.
    inst:AddComponent("health")
    inst.components.health:SetMaxHealth(100)
    inst.components.health:StartRegen(1, 1)
    inst.components.health.invincible = true
    inst.components.health.fire_damage_scale = 0
    inst:AddComponent("knownlocations")
    -- Starts in the small 3x4 container form.
    inst:AddComponent("container")
    inst.components.container.onopenfn = OnOpen
    inst.components.container.onclosefn = OnClose
    inst.components.container:SetNumSlots(#slotpos_3x4)
    inst.components.container.widgetslotpos = slotpos_3x4
    inst.components.container.widgetanimbank = "ui_miho_3x4"
    inst.components.container.widgetanimbuild = "ui_miho_3x4"
    inst.components.container.widgetpos = Vector3(0,140,0)
    inst.components.container.widgetpos_controller = Vector3(0,140,0)
    inst.components.container.side_align_tip = 160
    -- Small warm glow so the pet is visible at night.
    local light = inst.entity:AddLight()
    inst.Light:Enable(true)
    inst.Light:SetRadius(1)
    inst.Light:SetFalloff(.5)
    inst.Light:SetIntensity(.35)
    inst.Light:SetColour(150/255,150/255, 0/255)
    inst.MihoState = "NO"
    inst.CanMorph = CanMorph
    inst.MorphMiho = MorphMiho
    -- Re-check morph conditions at nightfall and whenever the pack closes.
    inst:ListenForEvent("nighttime", function() CheckForMorph(inst) end, GetWorld())
    inst:ListenForEvent("onclose", function() CheckForMorph(inst) end)
    inst.OnSave = OnSave
    inst.OnPreLoad = OnPreLoad
    inst:SetStateGraph("SGmiho_o")
    local brain = require "brains/mihobrain"
    inst:SetBrain(brain)
    if IsDLCEnabled(REIGN_OF_GIANTS) or IsDLCEnabled(CAPY_DLC) then
        inst:AddComponent("waterproofer")
        inst.components.waterproofer:SetEffectiveness(0.3)
    end
    -- The pet only exists while its summoning bell does: despawn shortly
    -- after spawn if no mihobell is found in the world.
    inst:DoTaskInTime(1, function(inst)
        if not TheSim:FindFirstEntityWithTag("mihobell") then
            inst:Remove()
        end
    end)
    return inst
end
return Prefab( "common/miho", fn, assets, prefabs) <file_sep>/Cocos2d-js/hairpinrun/src/gameOverLayer.js
/**
 * Semi-transparent black overlay shown when the run ends; offers a single
 * "restart" menu item that resumes the director and rebuilds the play scene.
 */
var GameOverLayer = cc.LayerColor.extend({
    ctor: function () {
        this._super(cc.color(0, 0, 0, 180));

        var winSize = cc.director.getWinSize();
        var onRestart = function (sender) {
            console.log("==>restart game");
            cc.director.resume();
            cc.director.runScene(new PlayScene());
        };
        var restartItem = new cc.MenuItemSprite(
            new cc.Sprite(res.restart_n_png),
            new cc.Sprite(res.restart_s_png),
            onRestart, this);

        var menu = new cc.Menu(restartItem);
        menu.setPosition(winSize.width / 2, winSize.height / 2);
        this.addChild(menu);
    }
});
<file_sep>/Cocos2d-js/template-v3.13/src/resource.js
// Central registry of asset paths for the template project.
var res = {
    HelloWorld_png: "res/HelloWorld.png"
};
// Flat list of everything to preload — presumably passed to
// cc.LoaderScene.preload in main.js; confirm against the bootstrap code.
var g_resource = [
    res.HelloWorld_png
];<file_sep>/Don't Starve Mod/esctemplate/scripts/prefabs/esctemplate.lua
local MakePlayerCharacter = require "prefabs/player_common"
-- Animation, sound and map-icon assets preloaded for this player character.
local assets = {
    Asset( "ANIM", "anim/player_basic.zip" ),
    Asset( "ANIM", "anim/player_idles_shiver.zip" ),
    Asset( "ANIM", "anim/player_actions.zip" ),
    Asset( "ANIM", "anim/player_actions_axe.zip" ),
    Asset( "ANIM", "anim/player_actions_pickaxe.zip" ),
    Asset( "ANIM", "anim/player_actions_shovel.zip" ),
    Asset( "ANIM", "anim/player_actions_blowdart.zip" ),
    Asset( "ANIM", "anim/player_actions_eat.zip" ),
    Asset( "ANIM", "anim/player_actions_item.zip" ),
    Asset( "ANIM", "anim/player_actions_uniqueitem.zip" ),
    Asset( "ANIM", "anim/player_actions_bugnet.zip" ),
    Asset( "ANIM", "anim/player_actions_fishing.zip" ),
    Asset( "ANIM", "anim/player_actions_boomerang.zip" ),
    Asset( "ANIM", "anim/player_bush_hat.zip" ),
    Asset( "ANIM", "anim/player_attacks.zip" ),
    Asset( "ANIM", "anim/player_idles.zip" ),
    Asset( "ANIM", "anim/player_rebirth.zip" ),
    Asset( "ANIM", "anim/player_jump.zip" ),
    Asset( "ANIM", "anim/player_amulet_resurrect.zip" ),
    Asset( "ANIM", "anim/player_teleport.zip" ),
    Asset( "ANIM", "anim/wilson_fx.zip" ),
    Asset( "ANIM", "anim/player_one_man_band.zip" ),
    Asset( "ANIM", "anim/shadow_hands.zip" ),
    Asset( "SOUND", "sound/sfx.fsb" ),
    Asset( "SOUND", "sound/wilson.fsb" ),
    Asset( "ANIM", "anim/beard.zip" ),
    -- Character-specific builds: normal form and full-moon ("evil") form.
    Asset( "ANIM", "anim/esctemplate.zip" ),
    Asset( "ANIM", "anim/wharangW.zip" ),
    Asset( "IMAGE", "images/map_icons/wharang_evil.tex" ),
    Asset( "ATLAS", "images/map_icons/wharang_evil.xml" ),
}

-- No extra prefabs need to be pre-loaded alongside this character.
local prefabs = {}

-- Custom starting items
local start_inv = {
    "yukarihat",
    "eardress",
    "mihobell"
}
-- Applies level-based stat upgrades: each level adds +5 max hunger,
-- sanity and health, capped at `max_upgrades` levels. Current stat
-- fill ratios are preserved across the change.
local function applyUpgrades(inst)
    local max_upgrades = 10

    -- Past the cap: clamp the level and change nothing else (stats were
    -- already maxed when the cap was first reached). The original also
    -- applied a redundant math.min() in the other branch, where the
    -- level is already known to be <= max_upgrades.
    if inst.level > max_upgrades then
        inst.level = max_upgrades
        return
    end

    local upgrades = inst.level

    -- Remember fill ratios so raising the maxima keeps the bars
    -- proportionally as full as before.
    local hunger_percent = inst.components.hunger:GetPercent()
    local health_percent = inst.components.health:GetPercent()
    local sanity_percent = inst.components.sanity:GetPercent()

    inst.components.hunger.max = math.ceil(150 + upgrades * 5)
    inst.components.sanity.max = math.ceil(200 + upgrades * 5)
    inst.components.health.maxhealth = math.ceil(150 + upgrades * 5)

    inst.components.talker:Say("Level Up! : ".. (inst.level))

    inst.components.hunger:SetPercent(hunger_percent)
    inst.components.health:SetPercent(health_percent)
    inst.components.sanity:SetPercent(sanity_percent)
end
-- Eater callback: eating meat has an 80% chance to grant a level and
-- pulse the HUD status widgets as feedback. (Renamed the misleading
-- local `summonchance1` -- this is a level-up chance, nothing is
-- summoned -- and removed dead commented-out code.)
local function onEat(inst, food)
    local level_up_chance = 0.8
    if math.random() < level_up_chance and food and food.components.edible.foodtype == "MEAT" then
        inst.level = inst.level + 1
        applyUpgrades(inst)
        -- Visual feedback on the three status widgets.
        inst.HUD.controls.status.heart:PulseGreen()
        inst.HUD.controls.status.stomach:PulseGreen()
        inst.HUD.controls.status.brain:PulseGreen()
        inst.HUD.controls.status.brain:ScaleTo(1.3,1,.7)
        inst.HUD.controls.status.heart:ScaleTo(1.3,1,.7)
        inst.HUD.controls.status.stomach:ScaleTo(1.3,1,.7)
    end
end
-- Killing anything tagged "monster" grants a level 90% of the time.
local function onKill(inst, data)
    local gained_level = math.random() < 0.9 and data.inst:HasTag("monster")
    if gained_level then
        inst.level = inst.level + 1
        applyUpgrades(inst)
    end
end
-- Adjusts combat, speed, light and appearance to the time of day.
-- Day resets everything to baseline; dusk and night grant increasing
-- damage/speed bonuses; a full-moon night also swaps in the "evil"
-- build and map icon. No branch runs while in a cave.
local function updatestats(inst)
    local clock = GetClock()
    local in_cave = GetWorld():IsCave()

    if clock:IsDay() and not in_cave then
        inst.components.combat.damagemultiplier = 1
        inst.components.locomotor.walkspeed = TUNING.WILSON_WALK_SPEED
        inst.components.locomotor.runspeed = TUNING.WILSON_RUN_SPEED
        inst.Light:Enable(false)
        inst.AnimState:SetBuild("esctemplate")
        inst.MiniMapEntity:SetIcon("esctemplate.tex")
    elseif clock:IsDusk() and not in_cave then
        inst.components.combat.damagemultiplier = 1.1
        inst.components.locomotor.walkspeed = TUNING.WILSON_WALK_SPEED * 1.1
        inst.components.locomotor.runspeed = TUNING.WILSON_RUN_SPEED * 1.1
    elseif clock:IsNight() and not in_cave then
        inst.components.combat.damagemultiplier = 1.2
        inst.components.locomotor.walkspeed = TUNING.WILSON_WALK_SPEED * 1.2
        inst.components.locomotor.runspeed = TUNING.WILSON_RUN_SPEED * 1.2
        inst.Light:Enable(true)
        if clock:GetMoonPhase() == "full" then
            inst.components.talker:Say("Full Moon")
            inst.AnimState:SetBuild("wharangW")
            inst.MiniMapEntity:SetIcon("wharang_evil.tex")
        end
    end
end
-- Post-init hook: MakePlayerCharacter calls this after the common
-- player setup has run; wires stats, light, level-up hooks and
-- day-phase listeners.
local fn = function(inst)
    -- choose which sounds this character will play
    inst.soundsname = "willow"

    inst.MiniMapEntity:SetIcon( "esctemplate.tex" )

    -- Level counter driving applyUpgrades(); starts at 0.
    inst.level = 0;

    -- Stats
    inst.components.hunger:SetMax(150)
    inst.components.sanity:SetMax(200)
    inst.components.health:SetMaxHealth(150)
    inst.components.hunger.hungerrate = TUNING.WILSON_HUNGER_RATE * 1

    -- Damage multiplier (optional)
    inst.components.combat.damagemultiplier = 1
    -- inst.components.walkspeed=4

    -- Character light, enabled at night by updatestats(); starts off.
    inst.entity:AddLight()
    inst.Light:SetRadius(9)
    inst.Light:SetFalloff(1)
    inst.Light:SetIntensity(.5)
    inst.Light:SetColour(128/255,128/255,255/255)
    inst.Light:Enable(false)

    -- Level up when eating meat (see onEat).
    inst.components.eater:SetOnEatFn(onEat)

    -- Debug/utility: pressing L announces the current level.
    TheInput:AddKeyUpHandler(KEY_L, function()
        inst.components.talker:Say("Level : ".. (inst.level))
    end)

    --KILL!
    -- inst:ListenForEvent("killed", onkill)
    -- Level up on monster kills; refresh stats at every day-phase change.
    inst:ListenForEvent( "entity_death", function(wrld, data) onKill(inst, data) end, GetWorld())
    inst:ListenForEvent( "dusktime", function() updatestats(inst) end , GetWorld())
    inst:ListenForEvent( "daytime", function() updatestats(inst) end , GetWorld())
    inst:ListenForEvent( "nighttime", function() updatestats(inst) end , GetWorld())
    updatestats(inst)

    inst.components.talker:Say("I'm liyitong")
end

-- Register the character prefab with its starting inventory.
return MakePlayerCharacter("esctemplate", prefabs, assets, fn, start_inv)
<file_sep>/Cocos2d-js/template-v3.13/README.md
# cocos2d-js-v3.13 template
cocos2d-js-v3.13 template<file_sep>/Cocos2d-js/hairpinrun/README.md
# Cocos2d-JS Project
## 发卡快跑
官方的demo稍加改变
## 使用方式
你需要将这些文件放到一个本地服务器或在线服务器并通过服务器访问才可以看到正确的结果。
## 帮助链接
- [在线API reference](http://www.cocos2d-x.org/reference/html5-js/V3.0/index.html)
- [下载API Reference](http://www.cocos2d-x.org/filedown/Cocos2d-JS-v3.0-API.zip)
- [文档目录](http://cocos2d-x.org/docs/manual/framework/html5/en)
- [Github仓库](https://github.com/cocos2d/cocos2d-js)
- [在线论坛](http://www.cocoachina.com/bbs/thread.php?fid=59)
=========================
# English Version
## usage
You need to host these files on a web server or local web server to see the result.
## Useful links
- [Online API reference](http://www.cocos2d-x.org/reference/html5-js/V3.0/index.html)
- [Downloadable API Reference](http://www.cocos2d-x.org/filedown/Cocos2d-JS-v3.0-API.zip)
- [Document root](http://cocos2d-x.org/docs/manual/framework/html5/en)
- [Github repository](https://github.com/cocos2d/cocos2d-js)
- [Online forum](http://discuss.cocos2d-x.org/category/javascript)<file_sep>/Cocos2d-js/hairpinrun/src/resource.js
// Asset manifest: logical name -> file path for every resource the game
// uses (images, sprite sheets, tile maps and audio).
var res = {
    hello_bg_png: "res/helloBG.png",
    start_n_png: "res/start_n.png",
    start_s_png: "res/start_s.png",
    play_bg_png: "res/PlayBG.png",
    runner_png: "res/running.png",
    runner_plist: "res/running.plist",
    map_png: "res/map.png",
    map00_tmx: "res/map00.tmx",
    map01_tmx: "res/map01.tmx",
    background_png: "res/background.png",
    background_plist: "res/background.plist",
    restart_n_png: "res/restart_n.png",
    restart_s_png: "res/restart_s.png",
    bgm_mp3: "res/bgm.mp3",
    jump_mp3: "res/jump.mp3",
    pickup_coin_mp3: "res/pickup_coin.mp3"
}

// Flat preload list (presumably consumed by the loader in main.js --
// not shown here).
var g_resources = [
    //image
    res.hello_bg_png,
    res.start_n_png,
    res.start_s_png,
    res.play_bg_png,
    res.runner_png,
    res.runner_plist,
    res.map_png,
    res.background_png,
    res.background_plist,
    res.restart_n_png,
    res.restart_s_png,
    //tmx
    res.map00_tmx,
    res.map01_tmx,
    //sound
    res.bgm_mp3,
    res.jump_mp3,
    res.pickup_coin_mp3
]
<file_sep>/Don't Starve Mod/esctemplate/scripts/prefabs/mihobell.lua
-- Inventory atlas and animation for the bell item.
local assets={
    Asset("ATLAS", "images/inventoryimages/mihobell.xml"),
    Asset("ANIM", "anim/mihobell.zip"),
}

-- Radius (world units) around the bell within which Miho may respawn.
local SPAWN_DIST = 30

-- Debug logging hook; a no-op in release (swap in print when debugging).
local trace = function() end
--------------------------------------------------------------
-- Forces the inventory widget for this item to refresh by removing the
-- item from its holder and immediately handing it back.
local function RebuildTile(inst)
    local item = inst.components.inventoryitem
    if not item:IsHeld() then
        return
    end
    local holder = item.owner
    item:RemoveFromOwner(true)
    if holder.components.container then
        holder.components.container:GiveItem(inst)
    elseif holder.components.inventory then
        holder.components.inventory:GiveItem(inst)
    end
end
--------------------------------------------------------------
-- Switches the bell into its "UP" state and makes it Miho's leader.
local function MorphUpBell(inst)
    inst.BellState = "UP"
    RebuildTile(inst)
    -- BUG FIX: `miho` was assigned without `local`, creating (and
    -- reading) a global that could hold a stale entity reference.
    local miho = TheSim:FindFirstEntityWithTag("miho")
    if miho and miho.components.follower.leader ~= inst then
        miho.components.follower:SetLeader(inst)
    end
end
-- Returns the bell to its normal ("NOM") state and refreshes its tile.
local function MorphNormalBell(inst)
    inst.BellState = "NOM"
    RebuildTile(inst)
end
--------------------------------------------------------------
-- Picks a walkable point roughly SPAWN_DIST away from `pt` in a random
-- direction. Returns nil when no walkable offset can be found.
local function GetSpawnPoint(pt)
    local direction = math.random() * 2 * PI
    local offset = FindWalkableOffset(pt, direction, SPAWN_DIST, 12, true)
    if offset ~= nil then
        return pt + offset
    end
end
-- Spawns a fresh Miho near the bell and faces her towards it.
-- Returns the new entity, or nil when no spawn point / prefab is available.
local function SpawnMiho(inst)
    trace("mihobell - SpawnMiho")
    local bell_pos = Vector3(inst.Transform:GetWorldPosition())
    trace(" near", bell_pos)

    local spawn_pt = GetSpawnPoint(bell_pos)
    if not spawn_pt then
        trace("mihobell - SpawnMiho: Couldn't find a suitable spawn point for miho")
        return
    end

    trace(" at", spawn_pt)
    local miho = SpawnPrefab("miho")
    if not miho then
        return
    end
    miho.Physics:Teleport(spawn_pt:Get())
    miho:FacePoint(bell_pos.x, bell_pos.y, bell_pos.z)
    return miho
end
-- Cancels any pending respawn task and clears the bookkeeping fields.
local function StopRespawn(inst)
    trace("mihobell - StopRespawn")
    local task = inst.respawntask
    if task == nil then
        return
    end
    task:Cancel()
    inst.respawntask = nil
    inst.respawntime = nil
end
-- Re-attaches the bell to an existing Miho: plays the idle animation,
-- watches for her death and claims leadership. Returns true on success,
-- nil when no Miho could be found.
local function RebindMiho(inst, miho)
    local fox = miho or TheSim:FindFirstEntityWithTag("miho")
    if fox == nil then
        return
    end
    inst.AnimState:PlayAnimation("idle", true)
    inst:ListenForEvent("death", function() inst:OnMihoDeath() end, fox)
    if fox.components.follower.leader ~= inst then
        fox.components.follower:SetLeader(inst)
    end
    return true
end
-- Ensures a Miho exists (reusing a live one, else spawning a new one)
-- and binds the bell to her. Clears any pending respawn timer first.
local function RespawnMiho(inst)
    trace("mihobell - Respawnmiho")
    StopRespawn(inst)
    local fox = TheSim:FindFirstEntityWithTag("miho") or SpawnMiho(inst)
    RebindMiho(inst, fox)
end
-- Schedules Miho's respawn `time` seconds from now (default: immediately)
-- and switches the bell to its "dead" animation while waiting.
local function StartRespawn(inst, time)
    StopRespawn(inst)
    -- NOTE: the original wrapped the body in `if respawntime then`, which
    -- is always true in Lua (numbers, including 0, are truthy) -- the
    -- guard was dead code and has been removed.
    local respawntime = time or 0
    inst.respawntask = inst:DoTaskInTime(respawntime, function() RespawnMiho(inst) end)
    inst.respawntime = GetTime() + respawntime
    inst.AnimState:PlayAnimation("dead", true)
end
-- Death callback bound in RebindMiho(); starts the respawn countdown,
-- reusing Chester's respawn delay tuning value.
local function OnMihoDeath(inst)
    StartRespawn(inst, TUNING.CHESTER_RESPAWN_TIME)
end
-- Deferred (1s) repair pass run after load / pickup: try to re-attach to
-- a live Miho; if none exists, show the "dead" bell and resume any saved
-- respawn countdown.
local function FixMiho(inst)
    inst.fixtask = nil
    if not RebindMiho(inst) then
        inst.AnimState:PlayAnimation("dead", true)
        -- NOTE(review): `inst.closedEye` is never assigned anywhere in this
        -- file, so this passes nil to ChangeImageName -- confirm it is set
        -- elsewhere or was meant to be a literal image name.
        inst.components.inventoryitem:ChangeImageName(inst.closedEye)
        if inst.components.inventoryitem.owner then
            -- Resume the countdown from where the save left off.
            local time_remaining = 0
            local time = GetTime()
            if inst.respawntime and inst.respawntime > time then
                time_remaining = inst.respawntime - time
            end
            StartRespawn(inst, time_remaining)
        end
    end
end
-- When the bell enters an inventory, schedule a single deferred
-- FixMiho pass (skipped if one is already pending).
local function OnPutInInventory(inst)
    if inst.fixtask ~= nil then
        return
    end
    inst.fixtask = inst:DoTaskInTime(1, function() FixMiho(inst) end)
end
-- Persists the bell state and, when a respawn is pending, how many
-- seconds of the countdown are left.
local function OnSave(inst, data)
    trace("mihobell - OnSave")
    data.BellState = inst.BellState
    local now = GetTime()
    local respawn_at = inst.respawntime
    if respawn_at and respawn_at > now then
        data.respawntimeremaining = respawn_at - now
    end
end
-- Restores saved state: re-enter the "UP" form if that is how the bell
-- was saved, and rebuild the absolute respawn deadline from the saved
-- remaining time.
local function OnLoad(inst, data)
    if data == nil then
        return
    end
    if data.BellState == "UP" then
        inst:MorphUpBell()
    end
    if data.respawntimeremaining then
        inst.respawntime = data.respawntimeremaining + GetTime()
    end
end
-- Inspectable status hook: report "WAITING" while a respawn is pending.
local function GetStatus(inst)
    -- Fixed copy-pasted label: the original traced "smallbird - GetStatus".
    trace("mihobell - GetStatus")
    if inst.respawntask then
        return "WAITING"
    end
end
-- Prefab constructor for the bell item.
local function fn(Sim)
    local inst = CreateEntity()
    inst.entity:AddTransform()
    inst.entity:AddAnimState()
    inst.entity:AddMiniMapEntity()
    inst.MiniMapEntity:SetIcon("mihobell.tex")
    inst.entity:AddSoundEmitter()

    -- Tags: located via FindFirstEntityWithTag("mihobell"); cannot be
    -- destroyed or planted.
    inst:AddTag("mihobell")
    inst:AddTag("irreplaceable")
    inst:AddTag("nonpotatable")

    MakeInventoryPhysics(inst)

    inst.entity:AddDynamicShadow()
    inst.DynamicShadow:SetSize( 1, .5 )

    inst.AnimState:SetBank("mihobell")
    inst.AnimState:SetBuild("mihobell")
    inst.AnimState:PlayAnimation("idle", true)

    inst:AddComponent("inventoryitem")
    inst.components.inventoryitem:SetOnPutInInventoryFn(OnPutInInventory)
    inst.components.inventoryitem.atlasname = "images/inventoryimages/mihobell.xml"

    -- The bell starts in its normal ("NOM") state.
    inst.BellState = "NOM"

    inst:AddComponent("inspectable")
    inst.components.inspectable.getstatus = GetStatus
    inst.components.inspectable:RecordViews()

    -- The bell acts as Miho's leader (see RebindMiho / MorphUpBell).
    inst:AddComponent("leader")

    inst.MorphNormalBell = MorphNormalBell
    inst.MorphUpBell = MorphUpBell
    inst.OnLoad = OnLoad
    inst.OnSave = OnSave
    inst.OnMihoDeath = OnMihoDeath

    -- Deferred bind/repair pass once the world has finished loading.
    inst.fixtask = inst:DoTaskInTime(1, function() FixMiho(inst) end)
    return inst
end
-- Display name and inspect lines for the bell (runtime strings).
STRINGS.NAMES.MIHOBELL = "Fox Bell"
STRINGS.CHARACTERS.GENERIC.DESCRIBE.MIHOBELL = {
    "Mysterious Bells. Where is Miho?",
    "Come on, Miho! Come on!",
    "Cute bell sounds.",
}
return Prefab( "common/inventory/mihobell", fn, assets)<file_sep>/Cocos2d-js/hairpinrun/src/playScene.js
/**
 * Main gameplay scene: owns the Chipmunk physics space, the scrolling
 * game layer (background + runner) and the fixed HUD, and routes
 * collision callbacks.
 */
var PlayScene = cc.Scene.extend({
    // NOTE(review): prototype-level array; update() immediately gives each
    // instance its own copy, so sharing is harmless in practice.
    space: null,
    shapesToRemove: [],

    ctor: function () {
        this._super();
    },

    onEnter: function () {
        this._super();
        cc.audioEngine.playMusic(res.bgm_mp3, true);
        this.initPhysics();

        // Background + game layers scroll together inside gameLayer;
        // the HUD (StatusLayer) stays fixed on the scene itself.
        this.gameLayer = new cc.Layer();
        this.gameLayer.addChild(new BackgroundLayer(this.space), 0, TagOfLayer.Background);
        this.gameLayer.addChild(new GameLayer(this.space), 0, TagOfLayer.Game);
        this.addChild(this.gameLayer);
        this.addChild(new StatusLayer(), 0, TagOfLayer.Status);

        this.scheduleUpdate();
    },

    initPhysics: function () {
        // Chipmunk space with downward gravity.
        this.space = new cp.Space();
        this.space.gravity = cp.v(0, -350);

        // Static ground segment stretching to MAX INT (4294967295) so the
        // runner never runs off the right-hand edge; thickness 0.
        var ground = new cp.SegmentShape(
            this.space.staticBody,
            cp.v(0, g_groundHeight),
            cp.v(4294967295, g_groundHeight),
            0);
        this.space.addStaticShape(ground);

        // Route runner/coin and runner/rock contacts to this scene.
        this.space.addCollisionHandler(SpriteTag.runner, SpriteTag.coin,
            this.collisionCoinBegin.bind(this), null, null, null);
        this.space.addCollisionHandler(SpriteTag.runner, SpriteTag.rock,
            this.collisionRockBegin.bind(this), null, null, null);
    },

    collisionCoinBegin: function (arbiter, space) {
        // Shape removal is deferred to update() (presumably because
        // mutating the space mid-step is unsafe -- confirm).
        var pair = arbiter.getShapes();
        this.shapesToRemove.push(pair[1]);
        cc.audioEngine.playEffect(res.pickup_coin_mp3);
        this.getChildByTag(TagOfLayer.Status).updateCoin(1);
    },

    collisionRockBegin: function (arbiter, space) {
        cc.log("==>game over");
        cc.director.pause();
        cc.audioEngine.stopMusic();
        this.addChild(new GameOverLayer());
    },

    update: function (dt) {
        this.space.step(dt);

        // Scroll the world so the camera follows the runner.
        var gLayer = this.gameLayer.getChildByTag(TagOfLayer.Game);
        this.gameLayer.setPosition(-gLayer.getEyeX(), 0);

        // Flush coin shapes collected during this frame's collisions.
        var background = this.gameLayer.getChildByTag(TagOfLayer.Background);
        this.shapesToRemove.forEach(function (shape) {
            background.removeObjectByShape(shape);
        });
        this.shapesToRemove = [];
    }
});
<file_sep>/Cocos2d-js/hairpinrun/src/menuScene.js
/**
 * Entry scene: builds and shows the title menu when entered.
 */
var MenuScene = cc.Scene.extend({
    ctor: function () {
        this._super();
    },
    onEnter: function () {
        this._super();
        var menuLayer = new MenuLayer();
        menuLayer.init();
        this.addChild(menuLayer);
    }
});
/**
 * Title-screen layer: full-screen background plus a single "start"
 * button that launches the gameplay scene.
 */
var MenuLayer = cc.Layer.extend({
    ctor: function () {
        this._super();
    },
    init: function () {
        this._super();
        var winSize = cc.director.getWinSize();
        var centerX = winSize.width / 2;
        var centerY = winSize.height / 2;

        // Background image.
        var background = new cc.Sprite(res.hello_bg_png);
        background.setPosition(centerX, centerY);
        this.addChild(background);

        // Start button (normal + pressed sprites).
        var startItem = new cc.MenuItemSprite(
            new cc.Sprite(res.start_n_png),
            new cc.Sprite(res.start_s_png),
            function () {
                cc.log("==>start game");
                cc.director.runScene(new PlayScene());
            }, this);
        var menu = new cc.Menu(startItem);
        menu.setPosition(centerX, centerY);
        this.addChild(menu);
    }
});
| 8b5b6032539d20706d8906e967969abffebc0bdc | [
"JavaScript",
"Markdown",
"Lua"
] | 20 | Lua | fakaka/Game | fd87aff00fd6572384ae5d2d4bd20fe13def3dad | 0a05e901d856f05c3d046359dac83088255cc484 |
refs/heads/master | <file_sep>from math import sqrt
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import LineString
from pathlib import Path
import sys
sys.path.insert(0, '../minigrid-optimiser')
from mgo import mgo
def load_clusters(clusters_file, grid_dist_connected=1000, minimum_pop=200):
    """
    Load the clusters file, reproject to an equal-area CRS and flag the
    clusters that count as grid-connected at the start of the model.
    (The original had an empty docstring.)

    Parameters
    ----------
    clusters_file : str or pathlib.Path
        Vector file of cluster polygons with 'grid_dist' and 'pop_sum' columns.
    grid_dist_connected : int, optional
        Clusters within this distance (m) of the grid start connected. Default 1000.
    minimum_pop : int, optional
        Clusters with population at or below this are dropped. Default 200.

    Returns
    -------
    clusters : geopandas.GeoDataFrame
        Filtered clusters sorted by descending population with a fresh
        integer index and a 'conn_start' column (1 = connected at start).
    """
    clusters = gpd.read_file(str(clusters_file))

    # Africa Albers Equal Area Conic (ESRI:102022): metres, so later
    # distance/area calculations are meaningful.
    epsg102022 = '+proj=aea +lat_1=20 +lat_2=-23 +lat_0=0 +lon_0=25 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
    clusters = clusters.to_crs(epsg102022)

    clusters['conn_start'] = 0
    clusters.loc[clusters['grid_dist'] <= grid_dist_connected, 'conn_start'] = 1

    clusters = clusters.loc[clusters['pop_sum'] > minimum_pop]

    # Sort so the biggest (and thus connected) city gets index 0.
    clusters = clusters.sort_values('pop_sum', ascending=False)
    clusters = clusters.reset_index().drop(columns=['index'])

    return clusters
def create_network(clusters):
    """
    Build the minimum spanning tree connecting all cluster centroids and
    return it as two parallel structures of plain lists (fast to mutate):

    Node layout:  [index, x, y, area_m2, pop_sum, conn_start, conn_end,
                   off_grid_cost, [connected arc indices]]
    Arc layout:   [index, xs, ys, xe, ye, node_index_start, node_index_end,
                   existing, length, enabled]

    Parameters
    ----------
    clusters : geopandas.GeoDataFrame
        Clusters as returned by load_clusters().

    Returns
    -------
    (network, nodes) : tuple of lists
    """
    clusters_points = clusters.copy()
    clusters_points.geometry = clusters_points['geometry'].centroid
    clusters_points['X'] = clusters_points.geometry.x
    clusters_points['Y'] = clusters_points.geometry.y

    df = pd.DataFrame(clusters_points)
    # .values instead of DataFrame.as_matrix(), which was deprecated and
    # removed in pandas 1.0 -- and matches the .values usage below.
    points = df[['X', 'Y']].values

    # The ML model returns T_x and T_y containing the start and end
    # points of each new arc created.
    T_x, T_y = mgo.get_spanning_tree(points)

    df['conn_end'] = df['conn_start']
    df['off_grid_cost'] = 0
    nodes = df[['X', 'Y', 'area_m2', 'pop_sum', 'conn_start', 'conn_end', 'off_grid_cost']].reset_index().values.astype(int).tolist()

    # add an empty list at position 8 for connected arc indices
    for node in nodes:
        node.append([])

    counter = 0
    network = []
    for xs, ys, xe, ye in zip(T_x[0], T_y[0], T_x[1], T_y[1]):
        xs = int(xs)
        ys = int(ys)
        xe = int(xe)
        ye = int(ye)
        length = int(sqrt((xe - xs)**2 + (ye - ys)**2))
        network.append([counter, xs, ys, xe, ye, None, None, 1, length, 1])
        counter += 1

    # Wire up node<->arc references starting from node 0.
    network, nodes = connect_network(network, nodes, 0)

    # for every node, add references to every arc that connects to it
    for arc in network:
        nodes[arc[5]][8].append(arc[0])
        nodes[arc[6]][8].append(arc[0])

    # arcs touching a not-yet-connected node don't exist yet
    # (existing = 0, enabled = 0); the remainder do
    for node in nodes:
        if node[5] == 0:
            connected_arcs = [network[arc_index] for arc_index in node[8]]
            for arc in connected_arcs:
                arc[7] = 0
                arc[9] = 0

    return network, nodes
def connect_network(network, nodes, index):
    """
    Recursively wire up the network: give each unassigned arc the indices
    of its two end nodes, walking outwards from the node at `index`.
    Each arc connects exactly two nodes; each node can touch 1+ arcs.

    Parameters
    ----------
    network : list
        Arcs (see create_network() for the element layout).
    nodes : list
        Nodes (same layout reference).
    index : int
        Node index to continue the search from.

    Returns
    -------
    (network, nodes) : the same lists, mutated in place.
    """
    cur_node = nodes[index]
    for arc in network:
        found = 0
        # `is None` rather than the original `== None` (PEP 8; also avoids
        # any surprises from a custom __eq__).
        if arc[5] is None and arc[6] is None:  # this arc has no connected nodes yet
            if arc[1] == cur_node[1] and arc[2] == cur_node[2]:  # xs/ys match this node
                found = 3  # point towards position 3 (xe) for the next node
            if arc[3] == cur_node[1] and arc[4] == cur_node[2]:  # xe/ye match this node
                found = 1  # point towards position 1 (xs) for the next node

        if found:
            arc[5] = cur_node[0]  # this node is the arc's starting point
            for node in nodes:
                if node[0] != cur_node[0]:  # look at the other end of the arc
                    if node[1] == arc[found] and node[2] == arc[found + 1]:
                        arc[6] = node[0]  # this node is the arc's ending point
                        network, nodes = connect_network(network, nodes, node[0])  # investigate downstream
                        break
    return network, nodes
def run_model(network, nodes, demand_per_person_kw_peak, mg_gen_cost_per_kw, mg_cost_per_m2,
              cost_wire_per_m, grid_cost_per_m2):
    """
    Decide which unconnected clusters should be grid-connected.

    First the off-grid (mini-grid) cost of every unconnected node is
    calculated. Then, expanding outwards from already connected nodes,
    the best candidate extension (highest population per metre of wire)
    is found; whenever extending the grid is cheaper than the off-grid
    alternative the nodes are marked connected. This repeats until no
    new connections are added.

    Parameters
    ----------
    network, nodes : lists as produced by create_network()
    demand_per_person_kw_peak : float
        Peak demand per person (kW).
    mg_gen_cost_per_kw : float
        Mini-grid generation cost per kW of peak demand.
    mg_cost_per_m2 : float
        Mini-grid distribution cost per m2 of cluster area.
    cost_wire_per_m : float
        Cost per metre of new grid wire.
    grid_cost_per_m2 : float
        Grid distribution cost per m2 of cluster area.

    Returns
    -------
    (network, nodes) : the same lists, mutated in place. Built arcs have
        enabled == 1; connected nodes have conn_end == 1.
    """
    # Off-grid cost for each unconnected settlement.
    for node in nodes:
        if node[5] == 0:
            node[7] = node[4] * demand_per_person_kw_peak * mg_gen_cost_per_kw + node[3] * mg_cost_per_m2

    def find_best(nodes, network, index, prev_arc, b_pop, b_length, b_nodes, b_arcs,
                  c_pop, c_length, c_nodes, c_arcs):
        """
        Recurse through the network, dragging current c_ values along.
        They aren't returned, so aborted side-branch explorations leave
        them untouched. The best b_ values are updated (lists in place)
        whenever a better configuration is found, so they remember the
        best solution including all side meanders.
        """
        if nodes[index][6] == 0:  # skip already connected nodes
            c_pop += nodes[index][4]
            c_length += network[prev_arc][8]
            c_nodes = c_nodes[:] + [index]
            c_arcs = c_arcs[:] + [prev_arc]

            if c_pop / c_length > b_pop / b_length:
                b_pop = c_pop
                b_length = c_length
                b_nodes[:] = c_nodes[:]
                b_arcs[:] = c_arcs[:]

            connected_arcs = [network[arc_index] for arc_index in nodes[index][8]]
            for arc in connected_arcs:
                if arc[9] == 0 and arc[0] != prev_arc:
                    goto = 6 if arc[5] == index else 5  # look at the other end of the arc
                    nodes, network, b_pop, b_length, b_nodes, b_arcs = find_best(
                        nodes, network, arc[goto], arc[0], b_pop, b_length, b_nodes, b_arcs,
                        c_pop, c_length, c_nodes, c_arcs)

        return nodes, network, b_pop, b_length, b_nodes, b_arcs

    while True:  # keep looping until no further connections are added
        to_be_connected = []

        for node in nodes:
            if node[6] == 1:  # only start searches from currently connected nodes
                connected_arcs = [network[arc_index] for arc_index in node[8]]
                for arc in connected_arcs:
                    if arc[9] == 0:
                        goto = 6 if arc[5] == node[0] else 5
                        # FIX: the original unpacked these as
                        # `b_length, b_pop`, inverting the meaning of both
                        # names (every "ratio" below was length/pop). The
                        # unpack now matches find_best's return order and
                        # the comparisons below are written for the true
                        # pop/length ratio, preserving the intended logic.
                        nodes, network, b_pop, b_length, b_nodes, b_arcs = find_best(
                            nodes, network, arc[goto], arc[0], 0, 1e-9, [], [], 0, 1e-9, [], [])

                        # mini-grid vs grid cost of the candidate set
                        best_nodes = [nodes[i] for i in b_nodes]
                        best_arcs = [network[i] for i in b_arcs]
                        mg_cost = sum(node[7] for node in best_nodes)
                        grid_cost = (cost_wire_per_m * sum(arc[8] for arc in best_arcs) +
                                     grid_cost_per_m2 * sum(node[3] for node in best_nodes))

                        if grid_cost < mg_cost:
                            # If this candidate overlaps one already queued,
                            # keep only the one with the better pop/length.
                            add = True
                            for i, item in enumerate(to_be_connected):
                                if set(b_nodes).intersection(item[1]):
                                    if b_pop / b_length > item[0]:
                                        del to_be_connected[i]
                                    else:
                                        add = False  # the existing one is better
                                    break
                            if add:
                                to_be_connected.append((b_pop / b_length, b_nodes, b_arcs))

        # mark all to_be_connected as actually connected
        if len(to_be_connected) >= 1:
            print(len(to_be_connected))
            for item in to_be_connected:
                for node_index in item[1]:
                    nodes[node_index][6] = 1
                for arc_index in item[2]:
                    network[arc_index][9] = 1
        else:
            break  # exit the loop once nothing is added

    return network, nodes
def spatialise(network, nodes, clusters):
    """
    Join the model results back onto spatial data: node results onto the
    original cluster polygons, and the arcs into a new line layer.
    Both outputs are reprojected to EPSG:4326.
    """
    # Node results -> extra columns on the cluster polygons.
    node_columns = ['index', 'X', 'Y', 'area_m2', 'pop_sum',
                    'conn_start', 'conn_end', 'og_cost', 'arcs']
    nodes_df = pd.DataFrame(data=nodes, columns=node_columns)[['index', 'conn_end', 'og_cost']]
    clusters_joined = clusters.merge(nodes_df, how='left', left_index=True, right_index=True)

    # Arc results -> a line GeoDataFrame in the same CRS as the clusters.
    arc_columns = ['index', 'xs', 'ys', 'xe', 'ye', 'node_start', 'node_end',
                   'existing', 'length', 'enabled']
    network_df = pd.DataFrame(data=network, columns=arc_columns)
    lines = [LineString([(arc[1], arc[2]), (arc[3], arc[4])]) for arc in network]
    network_gdf = gpd.GeoDataFrame(network_df, crs=clusters.crs, geometry=lines)

    return network_gdf.to_crs(epsg=4326), clusters_joined.to_crs(epsg=4326)
<file_sep>from electrificationplanner import clustering
import numpy as np
from affine import Affine
import rasterio
from pathlib import Path
# Fixture locations: all test inputs/outputs live under test_data/.
TEST_DATA = Path('test_data')
RASTER_FILE = TEST_DATA / 'raster.tif'        # population raster
BOUNDARY_FILE = TEST_DATA / 'boundary.gpkg'   # clipping boundary
BOUNDARY_LAYER = 'boundary'                   # layer name inside the GeoPackage
CLIPPED_RASTER = TEST_DATA / 'clipped.tif'    # pre-clipped raster fixture
GRID_FILE = TEST_DATA / 'grid.gpkg'           # existing grid lines
CLUSTERS_FILE = TEST_DATA / 'clusters.gpkg'   # output written by the test
def test_clip_raster():
    """clip_raster() returns an (ndarray, Affine, CRS) triple."""
    result = clustering.clip_raster(RASTER_FILE, BOUNDARY_FILE, BOUNDARY_LAYER)
    clipped, affine, crs = result
    assert isinstance(clipped, np.ndarray)
    assert isinstance(affine, Affine)
    assert isinstance(crs, rasterio.crs.CRS)
def test_clusters():
    """End-to-end run of the clustering pipeline against the fixtures."""
    # Use a context manager so the dataset is closed (the original left
    # the rasterio dataset open for the rest of the test run).
    with rasterio.open(CLIPPED_RASTER) as clipped_dataset:
        clipped = clipped_dataset.read()
        affine = clipped_dataset.transform
        crs = clipped_dataset.crs

    # raster -> raw per-pixel polygons
    clusters = clustering.create_clusters(clipped, affine, crs)
    assert all(clusters.columns == ['geometry', 'raster_val'])
    assert all(clusters['geometry'].type == 'Polygon')
    assert clusters['raster_val'].dtype == float

    # filter/merge, then add population and grid distance columns
    clusters = clustering.filter_merge_clusters(clusters)
    assert all(clusters.columns == ['area_m2', 'geometry'])

    clusters = clustering.cluster_pops(clusters, RASTER_FILE)
    assert all(clusters.columns == ['area_m2', 'geometry', 'pop_sum'])

    clusters = clustering.cluster_grid_distance(clusters, GRID_FILE, clipped[0].shape, affine)
    assert all(clusters.columns == ['area_m2', 'geometry', 'pop_sum', 'grid_dist'])

    clustering.save_clusters(clusters, CLUSTERS_FILE)
    assert Path(CLUSTERS_FILE).is_file()
<file_sep>test:
python3 -m pytest electrificationplanner
<file_sep>"""
clusters module for electrification-planner
Provides functions to read in a raster population dataset
and convert to discrete vector polygons, each with a set
population value. Additionally calculate each polygon's
distance from a provided grid infrastructure vector.
"""
import json
from pathlib import Path
import numpy as np
from scipy import ndimage
import geopandas as gpd
import rasterio
from rasterio.mask import mask
from rasterio.features import shapes, rasterize
from rasterstats import zonal_stats
def clip_raster(raster, boundary, boundary_layer='gadm36_UGA_0'):
    """
    Clip the raster to the given administrative boundary.

    Parameters
    ----------
    raster: string, pathlib.Path or rasterio.io.DataSetReader
        Location of or already opened raster.
    boundary: string, pathlib.Path or geopandas.GeoDataFrame
        The polygon by which to clip the raster.
    boundary_layer: string, optional
        For multi-layer files (like GeoPackage), specify the layer to be used.

    Returns
    -------
    tuple
        Three elements:
            clipped: numpy.ndarray
                Contents of clipped raster.
            affine: affine.Affine()
                Information for mapping pixel coordinates
                to a coordinate system.
            crs: dict
                Dict of the form {'init': 'epsg:4326'} defining the coordinate
                reference system of the raster.
    """
    # Accept a path or an already-open dataset; normalise to open dataset.
    if isinstance(raster, Path):
        raster = str(raster)
    if isinstance(raster, str):
        raster = rasterio.open(raster)

    crs = raster.crs

    # Accept a path or a GeoDataFrame; normalise to a GeoDataFrame.
    if isinstance(boundary, Path):
        boundary = str(boundary)
    if isinstance(boundary, str):
        if '.gpkg' in boundary:
            driver = 'GPKG'
        else:
            driver = None  # default to shapefile
            boundary_layer = ''  # because shapefiles have no layers
        # NOTE(review): passing layer=''/driver=None leans on fiona's
        # defaults -- confirm this behaves the same across fiona versions.
        boundary = gpd.read_file(boundary, layer=boundary_layer, driver=driver)

    # Reproject the boundary to the raster's CRS before masking.
    boundary = boundary.to_crs(crs=raster.crs)
    # NOTE: only the FIRST feature's geometry is used as the mask shape.
    coords = [json.loads(boundary.to_json())['features'][0]['geometry']]

    # mask/clip the raster using rasterio.mask
    clipped, affine = mask(dataset=raster, shapes=coords, crop=True)

    return clipped, affine, crs
def create_clusters(raster, affine, crs):
    """
    Create a polygon GeoDataFrame from the given raster.

    Parameters
    ----------
    raster: numpy.ndarray
        The raster data to use.
    affine: affine.Affine()
        Raster pixel mapping information.
    crs: dict
        Dict of the form {'init': 'epsg:4326'} defining the coordinate
        reference system to use.

    Returns
    -------
    clusters: geopandas.GeoDataFrame
        A GeoDataFrame with integer index and two columns:
        geometry contains the Shapely polygon representations
        raster_val contains the values from the raster
    """
    # One feature per contiguous region of equal raster value. (The
    # original wrapped this in enumerate() but never used the index.)
    geoms = [{'properties': {'raster_val': value}, 'geometry': geometry}
             for geometry, value in shapes(raster, mask=None, transform=affine)]

    clusters = gpd.GeoDataFrame.from_features(geoms)
    clusters.crs = crs

    return clusters
# TODO Could instead filter at the raster stage?
def filter_merge_clusters(clusters, max_block_size_multi=5, min_block_pop=50, buffer_amount=150):
    """
    The vectors created by create_clusters() are a single square for each
    raster pixel. This function:
    - Removes overly large clusters, caused by defects in the input raster.
    - Removes clusters with population below a certain threshold.
    - Buffers the remaining clusters and merges those that overlap.

    Parameters
    ----------
    clusters: geopandas.GeoDataFrame
        The unprocessed clusters created by create_clusters()
    max_block_size_multi: int, optional
        Remove clusters that are more than this many times average size. Default 5.
    min_block_pop: int, optional
        Remove clusters with below this population. Default 50.
    buffer_amount: int, optional
        Distance in metres by which to buffer the clusters before merging. Default 150.

    Returns
    -------
    clusters: geopandas.GeoDataFrame
        The processed clusters.
    """
    # remove blocks that are too big (basically artifacts)
    clusters['area_m2'] = clusters.geometry.area
    clusters = clusters[clusters['area_m2'] < clusters['area_m2'].mean() * max_block_size_multi]

    # remove blocks with too few people
    clusters = clusters[clusters['raster_val'] > min_block_pop]

    # buffer outwards so that nearby blocks will overlap
    clusters['geometry'] = clusters.geometry.buffer(buffer_amount)

    # dissolve the thousands of blocks into a single layer (with no attributes!)
    clusters['same'] = 1
    clusters = clusters.dissolve(by='same')

    # Convert the dissolved polygon back into singleparts: each contiguous
    # bubble becomes its own polygon that can store its own attributes.
    # (Requires GeoPandas >= 0.4.0 for explode(); the old manual
    # workaround and the unused `crs` local have been removed.)
    clusters = clusters.explode()
    clusters = clusters.reset_index()

    clusters = clusters.drop(columns=['same', 'level_1', 'raster_val'])  # raster_val is no longer meaningful

    # And then add the polygon's area back to its attributes
    clusters["area_m2"] = clusters['geometry'].area

    return clusters
def cluster_pops(clusters, raster, affine=None):
    """
    Restore the population totals lost by filter_merge_clusters() using
    rasterstats.zonal_stats().

    Parameters
    ----------
    clusters: geopandas.GeoDataFrame
        The processed clusters.
    raster: str, pathlib.Path or numpy.ndarray
        Either a path to the raster, or an already imported numpy.ndarray
        with the data.
    affine: affine.Affine(), optional
        Needed only when a numpy ndarray is passed for `raster`.

    Returns
    -------
    clusters: geopandas.GeoDataFrame
        The clusters with a new 'pop_sum' column.
    """
    if isinstance(raster, Path):
        raster = str(raster)

    if isinstance(raster, str):
        # Path form: let zonal_stats open the file itself.
        stats = zonal_stats(clusters, raster, stats='sum')
    else:
        # Array form: the affine is required; zeros are treated as nodata.
        stats = zonal_stats(clusters, raster, affine=affine, stats='sum', nodata=0)

    clusters['pop_sum'] = [row['sum'] for row in stats]
    return clusters
def cluster_grid_distance(clusters, grid, shape, affine):
    """
    Use a vector containing grid infrastructure to determine
    each cluster's distance from the grid.

    Parameters
    ----------
    clusters : geopandas.GeoDataFrame
        The processed clusters.
    grid : str, pathlib.Path or geopandas.GeoDataFrame
        Path to or already imported grid dataframe.
    shape : tuple
        Tuple of two integers representing the shape of the data
        for rasterizing grid. Should match the clipped raster.
    affine : affine.Affine()
        As above, should match the clipped raster.

    Returns
    -------
    clusters : geopandas.GeoDataFrame
        The clusters with a new 'grid_dist' column added.
    """
    # Accept a path (Path or str) as well as a ready-made GeoDataFrame.
    if isinstance(grid, Path):
        grid = str(grid)
    if isinstance(grid, str):
        grid = gpd.read_file(grid)

    # Reproject onto the clusters' CRS and drop degenerate geometries.
    grid = grid.to_crs(crs=clusters.crs)
    grid = grid.loc[grid['geometry'].length > 0]

    # Burn grid lines into a raster: 0 on the grid, 1 everywhere else, so
    # the Euclidean distance transform measures distance *to* the grid.
    grid_raster = rasterize(grid.geometry, out_shape=shape, fill=1,
                            default_value=0, all_touched=True, transform=affine)
    # Scale pixel distances into map units using the pixel width (affine[0]).
    dist_raster = ndimage.distance_transform_edt(grid_raster) * affine[0]

    # Minimum distance found within each cluster polygon.
    results = zonal_stats(vectors=clusters, raster=dist_raster, affine=affine,
                          stats='min', nodata=1000)
    clusters['grid_dist'] = [row['min'] for row in results]
    return clusters
def save_clusters(clusters, out_path):
    """
    Convert to EPSG:4326 and save to the specified file.

    Parameters
    ----------
    clusters : geopandas.GeoDataFrame
        The processed clusters.
    out_path : str or pathlib.Path
        Where to save the clusters file.
    """
    if isinstance(out_path, Path):
        out_path = str(out_path)
    # Only treat the file *extension* as GeoPackage.  The previous test
    # ('.gpkg' in out_path) also matched '.gpkg' appearing anywhere in the
    # path, e.g. in a directory name.
    if out_path.endswith('.gpkg'):
        driver = 'GPKG'
    else:
        driver = None  # let the writer infer the driver from the extension
    clusters = clusters.to_crs(epsg=4326)
    clusters.to_file(out_path, driver=driver)
<file_sep>**This is no longer maintained, active development is in [openelec](https://github.com/carderne/openelec)**
## electrification-planner
[](https://travis-ci.org/carderne/electrification-planner)
A tool for modelling the optimal pathways to improving electricity access.
Described in my blog post here: [Modelling the optimum way to achieve universal electrification](https://rdrn.me/modelling-universal-electrification/)
<file_sep>numpy>=1.14.2
pandas>=0.22.0
geopandas>=0.4.0
shapely>=1.6.4
astroML>=0.3
scipy>=1.0.0
rasterio>=1.0.7
rasterstats>=0.13.0
| 4543c6e8191d7010320d764567a6f54bd4164539 | [
"Markdown",
"Python",
"Makefile",
"Text"
] | 6 | Python | ollawone/electrification-planner | 9870d1ba87234e949c2b62724597b6dc25c631b0 | ee55683f312ec9b914ca209f9e4b1f16a0273530 |
refs/heads/master | <repo_name>roblum/comment_system<file_sep>/register_angular.php
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<link rel="shortcut icon" href="../../assets/ico/favicon.ico">
<title>Responsive Slider</title>
</head>
<body ng-app="myApp">
<div class="container" ng-controller="messagesController">
<form id='register' ng-submit="add()" method='post' accept-charset='UTF-8'>
<input name="name" ng-model="name"/>
<button type="submit">register</button>
</form>
</div>
<?php
// Handle the AJAX POST issued by the Angular controller below:
// insert the submitted user name into the `users` table.
if($_POST){
    $con = mysqli_connect('localhost', 'root', 'root', 'comment_system');
    // Check connection
    if (mysqli_connect_errno()) {
        echo "Failed to connect to MySQL: " . mysqli_connect_error();
    }
    // Use a prepared statement instead of concatenating $_POST["name"]
    // into the SQL string, which was vulnerable to SQL injection.
    $stmt = mysqli_prepare($con, 'INSERT INTO users (name) VALUES (?)');
    mysqli_stmt_bind_param($stmt, 's', $_POST['name']);
    mysqli_stmt_execute($stmt);
    mysqli_stmt_close($stmt);
    echo "success";
    mysqli_close($con);
}
?>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.2.19/angular.min.js"></script>
<!--<script src="main.js"></script>-->
<script>
var myApp = angular.module('myApp',[]);
myApp.controller('messagesController', function($scope, $http, $templateCache) {
    $scope.codeStatus = "";
    // Read the name field and POST it back to this same page.
    $scope.add = function() {
        var inputData = document.querySelector('#register input').value;
        // Encode the value so '&' or '=' in the input cannot corrupt the body.
        // (renamed from FormData, which shadowed the global FormData constructor)
        var formData = "name=" + encodeURIComponent(inputData);
        $http({
            method: "POST",
            url: "register_angular.php",
            data: formData,
            headers: {'Content-Type': 'application/x-www-form-urlencoded'},
            cache: $templateCache
        }).
        success(function(data) {
            // Angular 1.2's success() receives the response body directly as
            // its first argument, so use it as-is (not data.data).
            $scope.codeStatus = data;
            console.log('success ' + $scope.codeStatus)
        }).
        error(function(data) {
            $scope.codeStatus = data || "Request failed";
            console.log('fail ' + $scope.codeStatus)
        });
        return false;
    };
});
</script>
</body>
</html>
<file_sep>/README.md
comment_system
==============
Creating a comment system with Angular, PHP and MySQL.
ideas for upcoming projects:
search engine
populate top results
gmaps - hangout spots
embedded commenting
tagging users
user logins
horizontal sliding banner/images
cookies - user info
Search engine user interface - carousel for all search result(news, web,shopping,maps)
<file_sep>/register.php
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<link rel="shortcut icon" href="../../assets/ico/favicon.ico">
<title>Responsive Slider</title>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
</head>
<body ng-app="myApp">
<div class="container" ng-controller="messagesController">
<!-- "return false" stops the default submit, which would reload the page
     and abort the XMLHttpRequest fired by form_submit(). -->
<form id='comment-form' onsubmit="form_submit(); return false;" method='post' accept-charset='UTF-8'>
<input name="name" ng-model="name"/>
<textarea name="comment" ng-model="comment"></textarea>
<button type="submit">Submit</button>
</form>
<form id='get-user' method='get' accept-charset='UTF-8'>
<button type="submit">Submit</button>
</form>
</div>
<?php
if($_POST){
    $con = new mysqli('localhost', 'root', 'root', 'comment_system');
    // Check connection
    if (mysqli_connect_errno()) {
        echo "Failed to connect to MySQL: " . mysqli_connect_error();
    }
    // Prepared statements replace the previous string-built multi_query,
    // which was open to SQL injection via $_POST.
    $stmt = $con->prepare('INSERT INTO users (name) VALUES (?)');
    $stmt->bind_param('s', $_POST['name']);
    $stmt->execute();
    $stmt->close();
    $stmt = $con->prepare('INSERT INTO comments (date, comment) VALUES (NOW(), ?)');
    $stmt->bind_param('s', $_POST['comment']);
    $stmt->execute();
    $stmt->close();
    echo "success";
    mysqli_close($con);
}
if($_GET){
    error_log('get requested');
}
// var_dump() prints and returns NULL, so it cannot be passed to error_log();
// print_r(..., true) returns the dump as a string instead.
error_log(print_r($_GET, true));
?>
<script>
function form_submit(){
    var inputName = document.querySelector("#comment-form input[name=name]").value
        ,inputComment = document.querySelector("#comment-form textarea[name=comment]").value
        ,xhr;
    if (window.XMLHttpRequest) { // Mozilla, Safari, ...
        xhr = new XMLHttpRequest();
    } else if (window.ActiveXObject) { // IE 8 and older
        xhr = new ActiveXObject("Microsoft.XMLHTTP");
    }
    // Encode the fields so '&' or '=' in user input cannot corrupt the body.
    var data = "name=" + encodeURIComponent(inputName) + '&comment=' + encodeURIComponent(inputComment);
    xhr.open("POST", "register.php", true);
    xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
    xhr.send(data);
}
$(document).ready(function(){
    $('#get-user').submit(function(){
        $.get('register.php', function(responseText) {
            alert(responseText);
        });
        return false; // keep the page so the alert stays visible
    });
});
</script>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.2.19/angular.min.js"></script>
<script src="main.js"></script>
</body>
</html>
<file_sep>/index.php
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<link rel="shortcut icon" href="../../assets/ico/favicon.ico">
<title>Responsive Slider</title>
</head>
<body ng-app="myApp">
<div class="container" ng-controller="messagesController">
<input name="name" ng-model="name"/>
<input ng-model="comment"/>
<div>{{name}}</div>
</div>
<a href="/register.php"> register </a>
<?php
// Connect through the MAMP socket.  mysqli_connect() replaces the old
// mysql_connect() API used here before (deprecated in PHP 5.5, removed
// in PHP 7) and is consistent with the other pages of this app.  The
// socket path is passed as its own argument instead of being glued onto
// the (previously missing) host name.
$link = mysqli_connect(
    'localhost',
    'root',
    'root',
    '',
    3306,
    '/Applications/MAMP/tmp/mysql/mysql.sock'
);
if (!$link) {
    die('Could not connect: ' . mysqli_connect_error());
}
?>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.2.19/angular.min.js"></script>
<script src="main.js"></script>
</body>
</html>
| 8c002c1a202e8a2be44c67f9502867f8708e80b3 | [
"Markdown",
"PHP"
] | 4 | PHP | roblum/comment_system | 4157b253a39edebf963b32c179b0f9855da6028a | 1a56cecc04090373c0bc4002cb7d042ac49a899a |
refs/heads/master | <file_sep># -*- coding: utf-8 -*-
import os
import argparse
import regex as re
# import re # to use regular expressions
import time
# import numpy as np
import fnmatch
from lxml import etree, objectify
import datetime
import pandas as pd
import getpass
import requests
import io
import subprocess
import sys
#===============================================================================
# Function to time functions
#===============================================================================
def timeit(method):
    """Decorator that prints the wall-clock runtime of the wrapped callable.

    Parameters
    ----------
    method : callable
        The function to time.

    Returns
    -------
    callable
        A wrapper that calls `method`, prints its name and elapsed seconds,
        and returns `method`'s result unchanged.
    """
    from functools import wraps

    @wraps(method)  # preserve __name__/__doc__ of the wrapped function
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print('%r %2.2f sec' % (method.__name__, te - ts))
        return result
    return timed
#===============================================================================
# Class if needed
#===============================================================================
# Drives the WebLicht-as-a-Service (WaaS) REST API to annotate recipe texts
# and converts the returned TCF documents into VRT files.
class WebLichtWrapper(object):
"""Extract recipes from rezeptewikiorg XML dump and produces a TEI/DTABf file.
"""
@timeit
def __init__(self):
# Namespace URIs used by the XPath queries throughout this class.
self.tcf = 'http://www.dspin.de/data/textcorpus'
# WaaS endpoint that runs a processing chain on an uploaded document.
self.url = 'https://weblicht.sfs.uni-tuebingen.de/WaaS/api/1.0/chain/process'
self.xml = 'http://www.w3.org/XML/1998/namespace'
# Maps text_id -> index of a chunk that could not be processed.
self.log = {}
self.cli()
# Read the API key interactively so it never lands in shell history.
self.apikey = getpass.getpass('Enter your API key:')
# self.xmldir = os.path.join(self.outdir,'source','xml')
# self.metadir = os.path.join(self.outdir,'metadata')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
self.main() # function running in the background
def __str__(self):
# Summary shown when the instance is printed: number of successfully
# processed recipes, plus the error log if any chunk failed.
if self.success > 0:
message = [
"{} recipes processed!".format(str(self.success)),
"Guten Appetit!"
]
else:
message = [
"{} recipes processed!".format(str(self.success)),
"Ups! Maybe something went wrong!"
]
if len(self.log) > 0:
print('Error log:',self.log)
return " ".join(message)
# Function to get all files in a directory
# Recursively collect paths under `directory` whose file name matches the
# glob pattern `fileclue` (e.g. '*.xml').
def get_files(self, directory, fileclue):
matches = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, fileclue):
matches.append(os.path.join(root, filename))
return matches
def read_xml(self,infile):
"""Parse the XML file."""
# remove_blank_text lets lxml reserialize/pretty-print cleanly later.
parser = etree.XMLParser(remove_blank_text=True,encoding="utf-8")
with open(infile, encoding='utf-8',mode='r') as input:
return etree.parse(input, parser)
def deprettyfy(self,tree):
# Normalise serialization whitespace: strip indentation, put each tag on
# its own line and squeeze blank lines, then reparse into a new tree.
# tree to string
tree = etree.tostring(tree, encoding="utf-8", method="xml")
tree = tree.decode('utf-8')
tree = re.sub(r"(\n) +(<)", r"\1\2", tree)
tree = re.sub(r"> *<", r">\n<", tree)
tree = re.sub(r"\n\n+", r"\n", tree)
tree = etree.fromstring(tree)
tree = etree.ElementTree(tree)
return tree
def serialize(self,tree,outdir,outfile):
# Write the tree to <outdir>/<outfile>.vrt with an XML declaration.
outpath = os.path.join(outdir,outfile+'.vrt')
tree.write(outpath, xml_declaration=True, encoding='utf-8')
pass
# Send one text chunk to WaaS and return the resulting TCF tree, or False
# if the request failed (callers retry on False).
def weblichtfy(self,element):
input = element.text.encode('utf-8')
# Wrap the raw text into the TCF input template expected by WaaS.
intcf = self.read_xml('utils/tcf_template.xml')
text = intcf.xpath('//x:text',namespaces = {'x':self.tcf})[0]
text.text = element.text
# Multipart upload: the chain definition, the TCF document and the key.
multiple_files = {'chains': open(self.chain, 'rb'),
'content': ('input.xml', etree.tostring(intcf)),
# 'content': input,
'apikey': self.apikey}
with requests.Session() as s:
# Retry transient connection failures up to 5 times at transport level.
s.mount(self.url,requests.adapters.HTTPAdapter(max_retries=5))
r = s.post(self.url, files = multiple_files, timeout=(5.0, 10.00),allow_redirects = True)
if r.status_code == 200:
output = etree.ElementTree(etree.fromstring(r.content))
else:
# print(r.status_code, r.content, r.headers, r.request.headers)
print('Error {}: {}'.format(r.status_code, r.content.decode()))
output = False
# output = None
return output
# The four helpers below look up a single annotation layer in the TCF
# document for a given token id.
def get_word(self,tcf,token_id):
word = tcf.xpath('//x:token[@ID="{}"]'.format(token_id),namespaces = {'x':self.tcf})[0].text
return word
def get_lemma(self,tcf,token_id):
lemma = tcf.xpath('//x:lemma[@tokenIDs="{}"]'.format(token_id),namespaces = {'x':self.tcf})[0].text
return lemma
def get_pos(self,tcf,token_id):
pos = tcf.xpath('//x:tag[@tokenIDs="{}"]'.format(token_id),namespaces = {'x':self.tcf})[0].text
return pos
def get_norm(self,tcf,token_id):
# Orthographic normalisation is optional: fall back to the surface form
# when no <correction> element exists for this token.
norm = tcf.xpath('//x:correction[@tokenIDs="{}"]'.format(token_id),namespaces = {'x':self.tcf})
if len(norm) == 0:
output = self.get_word(tcf,token_id)
else:
output = norm[0].text
return output
# <orthography><correction operation="replace" tokenIDs="w1">Willst_du</correction>
# Build the VRT body: one <s> element per TCF sentence, whose text payload
# is one tab-separated "word POS lemma norm" line per token.
def transform(self,tcf,p):
sentences = tcf.xpath('//x:sentence', namespaces = {'x':self.tcf})
for sentence in sentences:
token_ids = sentence.attrib['tokenIDs'].split(' ')
new_sentence = etree.SubElement(p,'s')
tokens = []
for token_id in token_ids:
word = self.get_word(tcf,token_id)
pos = self.get_pos(tcf, token_id)
lemma = self.get_lemma(tcf, token_id)
norm = self.get_norm(tcf, token_id)
tokens.append([
word,
pos,
lemma,
norm
])
# Join into the columnar VRT format: tabs between fields, one token per line.
tokens = '\n'.join(['\t'.join(x) for x in tokens])
new_sentence.text = '\n'+tokens+'\n'
pass
def tcf2vrt(self,tcf):
"""Converts TCF ElementTree into VRT"""
# The wrapping element tag comes from the -e/--element CLI option.
p = etree.Element(self.element)
self.transform(tcf, p)
return p
# Strip namespace prefixes from all tags and drop xml:id attributes so the
# output is plain, prefix-free VRT/XML.
def remove_namespaces(self, tree):
for elem in tree.getiterator():
if not hasattr(elem.tag, 'find'): continue # (1)
i = elem.tag.find('}')
if i >= 0:
elem.tag = elem.tag[i+1:]
objectify.deannotate(tree, cleanup_namespaces=True)
etree.strip_attributes(tree, '{}id'.format('{'+self.xml+'}'))
pass
# Pipeline driver: for each input XML file, send every selected element to
# WaaS (with retries), collect the VRT output and serialize one .vrt file.
def main(self):
# get all input files
infiles = self.get_files(self.indir, '*.xml')
self.success = 0
for infile in infiles:
# parse file
text_id = os.path.splitext(os.path.basename(infile))[0]
inxml = self.read_xml(infile)
outxml = etree.Element('text',id=text_id)
# strip namespaces
self.remove_namespaces(inxml)
# find elements containing text to be processed
elements = inxml.xpath('//div//{}'.format(self.element))
for i, element in enumerate(elements):
tcf = self.weblichtfy(element)
# Retry a failed chunk up to 10 more times before giving up.
tries = 10
while tcf == False and tries > 0:
tcf = self.weblichtfy(element)
tries -= 1
if tcf == False:
print('Chunk {} in {} could not be processed!'.format(i, text_id))
self.log[text_id] = i
else:
vrt = self.tcf2vrt(tcf)
outxml.append(vrt)
outxml = self.deprettyfy(outxml)
self.serialize(outxml, self.outdir, text_id)
# Report the per-file error count alongside the file id.
nerrors = list(self.log.keys()).count(text_id)
if nerrors > 0:
print(text_id,nerrors)
else:
print(text_id)
self.success += 1
# os.remove(infile)
pass
# def cli(self):
# """CLI parses command-line arguments"""
# parser = argparse.ArgumentParser()
# parser.add_argument("-i","--input", help="input directory.")
# parser.add_argument("-m","--metadata", help = "metadata file.")
# parser.add_argument("-o","--output", help="output directory.")
# parser.add_argument("-t","--test", choices = ['contemporary','historical'], help = "run in test mode.")
# args = parser.parse_args()
# noneargs = [x for x in args.__dict__.values()].count(None)
# if noneargs == 3 and args.test != None:
# print("Running in test mode!")
# self.indir = 'test/{}/vrt'.format(args.test)
# self.outdir = 'test/{}/meta'.format(args.test)
# self.metadata = 'test/metadata/{}-metadata.csv'.format(args.test)
# elif noneargs > 1 and args.test == None:
# options = ["'-"+k[0]+"'" for k,v in args.__dict__.items() if v == None]
# options = ', '.join(options)
# exit_message = '\n'.join(["You forgot option(s): {}".format(options),
# "Provide option '-t [contemporary|historical]' to run in test mode: 'python3 {} -t contemporary'".format(os.path.basename(__file__)),
# "Get help with option '-h': 'python3 {} -h'".format(os.path.basename(__file__))]
# )
# sys.exit(exit_message)
# else:
# self.indir = args.input
# self.outdir = args.output
# self.metadata = args.metadata
# pass
def cli(self):
"""CLI parses command-line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument("-i","--input", help="input directory.")
parser.add_argument("-o","--output", help="output directory.")
parser.add_argument("-c","--chain", help = "chain file.")
parser.add_argument("-e","--element", default='p', help = "tag of the element to be processed by WebLicht.")
parser.add_argument("-t","--test", choices = ['contemporary','historical'], help = "run in test mode.")
args = parser.parse_args()
# Count how many options were left unset to decide between test mode,
# an incomplete invocation, and a fully specified run.
noneargs = [x for x in args.__dict__.values()].count(None)
if noneargs == 3 and args.test != None:
# Only -t given: use the bundled test fixtures.
print("Running in test mode!")
self.indir = 'test/{}/tei'.format(args.test)
self.outdir = 'test/{}/vrt'.format(args.test)
self.chain = 'utils/chain_{}.xml'.format(args.test)
elif noneargs > 1 and args.test == None:
# Some required options missing: report them and abort.
options = ["'-"+k[0]+"'" for k,v in args.__dict__.items() if v == None]
options = ', '.join(options)
exit_message = '\n'.join(["You forgot option(s): {}".format(options),
"Provide option '-t [contemporary|historical]' to run in test mode: 'python3 {} -t contemporary'".format(os.path.basename(__file__)),
"Get help with option '-h': 'python3 {} -h'".format(os.path.basename(__file__))]
)
sys.exit(exit_message)
else:
self.indir = args.input
self.outdir = args.output
self.chain = args.chain
self.element = args.element
pass
# def cli(self):
# """CLI parses command-line arguments"""
# parser = argparse.ArgumentParser()
# parser.add_argument("-i","--input", help="input directory.")
# parser.add_argument("-o","--output", help="output directory.")
# parser.add_argument("-c","--chain", required = True, help = "chain file.")
# parser.add_argument("-e","--element", default='p', help = "tag of the element to be processed by WebLicht.")
# args = parser.parse_args()
# noneargs = [x for x in args.__dict__.values()].count(None)
# if noneargs == 2:
# print("Running in test mode!")
# self.indir = 'test/contemporary/tei'
# self.outdir = 'test/contemporary/vrt'
# elif noneargs < 2 and noneargs > 0:
# options = ["'-"+k[0]+"'" for k,v in args.__dict__.items() if v == None]
# options = ', '.join(options)
# exit_message = '\n'.join(["You forgot option(s): {}".format(options),
# "Provide no option to run in test mode: 'python3 {}'".format(os.path.basename(__file__)),
# "Get help with option '-h': 'python3 {} -h'".format(os.path.basename(__file__))]
# )
# sys.exit(exit_message)
# else:
# self.indir = args.input
# self.outdir = args.output
# self.chain = args.chain
# self.element = args.element
# pass
print(WebLichtWrapper())<file_sep># -*- coding: utf-8 -*-
import os
import argparse
import regex as re
# import re # to use regular expressions
import time
import fnmatch
from lxml import etree, objectify
import datetime
import pandas as pd
import sys
import unidecode as ud
#===============================================================================
# Function to time functions
#===============================================================================
def timeit(method):
    """Decorator that prints the wall-clock runtime of the wrapped callable.

    Parameters
    ----------
    method : callable
        The function to time.

    Returns
    -------
    callable
        A wrapper that calls `method`, prints its name and elapsed seconds,
        and returns `method`'s result unchanged.
    """
    from functools import wraps

    @wraps(method)  # preserve __name__/__doc__ of the wrapped function
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print('%r %2.2f sec' % (method.__name__, te - ts))
        return result
    return timed
#===============================================================================
# Class if needed
#===============================================================================
class WikiExtractor(object):
"""Extract metadata and recipes from rezeptewikiorg XML dump.
Produces a CSV file and TEI files respectively.
"""
@timeit
def __init__(self):
# Namespace URIs used in XPath queries and output serialization.
self.ns = 'http://www.mediawiki.org/xml/export-0.6/'
self.xml = 'http://www.w3.org/XML/1998/namespace'
self.tei = 'http://www.tei-c.org/ns/1.0'
# TEI Lite skeleton that create_tei() fills in per recipe.
self.tei_template = 'utils/tei_lite_template.xml'
# text_id -> metadata record, filled by add_metadata().
self.metadata = {}
# Per-source attribution and license statements inserted into teiHeader.
self.licenses = {
'Wurm':{'resp':'<NAME>',
'license':'<p>Recipe transcribed by <ref target="mailto:<EMAIL>"><NAME></ref> is licensed under a <ref target="http://creativecommons.org/licenses/by/3.0/">Creative Commons Attribution 3.0 Unported License</ref>.</p>'},
'Knopf':{'resp':'<NAME>, <NAME> and <NAME>',
'url':'http://www.uni-giessen.de/gloning/tx/1800hakb.htm',
'license':'<p>Recipes of Knopf 1800 based on the transcriptions at <ref target="http://www.uni-giessen.de/gloning/tx/1800hakb.htm">http://www.uni-giessen.de/gloning/tx/1800hakb.htm</ref> by <NAME>, <NAME> and <NAME> are licensed under a <ref target="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License</ref>.</p>'},
'Franckfurt':{'resp':'<NAME>, <NAME>, <NAME> and <NAME>',
'url':'http://www.uni-giessen.de/gloning/tx/1789ffkb.htm',
'license':'<p>Recipes of Franckfurt 1789 based on the transcriptions at <ref target="http://www.uni-giessen.de/gloning/tx/1789ffkb.htm">http://www.uni-giessen.de/gloning/tx/1789ffkb.htm</ref> by <NAME>, <NAME>, <NAME> and <NAME> are licensed under a <ref target="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License</ref>.</p>'},
'Graz':{'resp':'<NAME>',
'url':'http://www.uni-giessen.de/gloning/tx/graz2.htm',
'license':'<p>Recipes of Grätz 1686 based on the transcriptions at <ref target="http://www.uni-giessen.de/gloning/tx/graz2.htm">http://www.uni-giessen.de/gloning/tx/graz2.htm</ref> by <NAME> are licensed under a <ref target="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License</ref>.</p>'}
} # info about the licenses applying to each source, ideally license statement for TEI template
self.cli()
# Create the output directories set up by cli() if they do not exist yet.
for odir in [self.xmldir,self.metadir]:
if not os.path.exists(odir):
os.makedirs(odir)
self.main() # function running in the background
def __str__(self):
# Summary printed for the instance: processed vs. total recipe counts.
if self.success > 0:
message = [
"{} recipes out of {} processed!".format(str(self.success),str(self.total)),
"Guten Appetit!"
]
else:
message = [
"{} recipes out of {} processed!".format(str(self.success),str(self.total)),
"Ups! Maybe something went wrong!"
]
return " ".join(message)
# Function to get all files in a directory
# Recursively collect paths under `directory` whose file name matches the
# glob pattern `fileclue` (e.g. '*.vrt').
def get_files(self, directory, fileclue):
matches = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, fileclue):
matches.append(os.path.join(root, filename))
return matches
def read_xml(self,infile):
"""Parse the XML file."""
# remove_blank_text lets lxml reserialize/pretty-print cleanly later.
parser = etree.XMLParser(remove_blank_text=True,encoding="utf-8")
with open(infile, encoding='utf-8',mode='r') as input:
return etree.parse(input, parser)
def deprettyfy(self,tree):
# Normalise serialization whitespace: strip indentation, put each tag on
# its own line and squeeze blank lines, then reparse into a new tree.
# tree to string
tree = etree.tostring(tree, encoding="utf-8", method="xml")
tree = tree.decode('utf-8')
tree = re.sub(r"(\n) +(<)", r"\1\2", tree)
tree = re.sub(r"> *<", r">\n<", tree)
tree = re.sub(r"\n\n+", r"\n", tree)
tree = etree.fromstring(tree)
tree = etree.ElementTree(tree)
return tree
def strip_nodes(self, tree, nodes):
    """Remove the given element tags from `tree` while keeping their content.

    For each tag name in `nodes`, an XSLT identity transform replaces
    matching elements with their children/text.

    Parameters:
        tree: lxml tree (or element) to clean.
        nodes: iterable of tag names to strip.

    Returns:
        The transformed tree with all listed tags stripped.
    """
    # Apply the transforms cumulatively: each pass runs on the result of
    # the previous one.  (The original reassigned `result = transform(tree)`
    # on every iteration, so only the *last* tag in `nodes` was stripped —
    # the commented-out `result = transform(result)` showed the intent.)
    result = tree
    for node in nodes:
        xslt_strip_nodes = etree.XML('''
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="node()|@*" >
<xsl:copy>
<xsl:apply-templates select="node()|@*" />
</xsl:copy>
</xsl:template>
<xsl:template match="//{}" >
<xsl:apply-templates/>
</xsl:template>
</xsl:stylesheet>
'''.format(node))
        transform = etree.XSLT(xslt_strip_nodes)
        result = transform(result)
    return result
def serialize(self,tree,infile):
# Write the tree to <self.outdir>/<infile>.vrt with an XML declaration.
# NOTE(review): the class sets self.xmldir/self.metadir in cli(), but this
# method reads self.outdir — confirm where outdir is assigned.
outpath = os.path.join(self.outdir,infile+'.vrt')
tree.write(outpath, xml_declaration=True, encoding='utf-8')
pass
def add_text_mixed(self,parent,s):
# Append string `s` as mixed content at the end of `parent`: onto the
# element text when there are no children, otherwise onto the tail of
# the last child.
if len(parent) == 0:
parent.text = (parent.text or "") + '\n' + s
else:
youngest = parent[-1]
youngest.tail = (youngest.tail or "") + '\n' + s
pass
# Drop all root attributes except 'id' after stripping nodes.
# FIXME(review): self.strip_nodes(tree) is called without its required
# `nodes` argument (TypeError if ever invoked), `fbasename` is unused, and
# deleting from root.attrib while iterating over it is unsafe — this method
# appears to be dead code; confirm before relying on it.
def clean_tree(self,tree,fbasename):
output = self.strip_nodes(tree)
root = output.getroot()
for attribute in root.attrib:
if attribute != 'id':
del root.attrib[attribute]
return output
# Flatten an annotated VRT element into a plain whitespace-separated string
# of word forms (first tab-separated field of each line).
def cleaninputxml(self,element):
# strip the subelements but keep text
simplified = self.strip_nodes(element,['seg'])
# strip lines
text = simplified.xpath('//{}'.format(element.tag))[0].text
text = text.strip()
# split by lines
text = text.split('\n')
# for each line keep only first field (word form)
text = [re.sub(r'^(.+?)\t.+',r'\1',x) for x in text]
# join separating with white spaces
text = ' '.join(text)
return text
def get_preparation(self,revision):
"""Get the preparation section of a particular revision.
It takes a revision element as input. It returns a string.
"""
# Title is optional; fall back to an empty string when absent.
title = revision.xpath('//title')
if len(title) > 0:
title = self.cleaninputxml(title[0])
else:
title = ""
preparation = revision.xpath('//body')[0]
preparation = self.cleaninputxml(preparation)
return title, preparation
def add_text_id(self,tei,text_id):
"""Add the id attribute to the text element of a TDABf."""
text = tei.xpath('//x:text', namespaces = {'x':self.tei})[0]
# The attribute is written as xml:id, hence the namespace prefix below.
text.set('{}id'.format('{'+self.xml+'}'),text_id)
pass
def add_divs(self,tei,title,preparation):
"""Add the div, head, p and lb to the body element of a TDABf."""
# get the body element
body = tei.xpath('//x:body',namespaces = {'x':self.tei})[0]
# append the first division type recipe
div1 = etree.SubElement(body,'div',attrib={'n':'1','type':'recipe'})
# add the first header as title
head1 = etree.SubElement(div1,'head')
head1.text = title
# check if there are subdivisions
# add the first paragraph
# Newlines are flattened to spaces before wrapping the text in <p>.
preparation = re.sub(r'\n',r' ',preparation)
# preparation = re.sub(r'\n',r'<lb/>',preparation)
p1 = etree.fromstring('<p>'+preparation+'</p>')
div1.append(p1)
# add the string as the content of the first paragraph
# p1.text = '\n'+preparation+'\n'
pass
def generate_text(self,tei,title,text_id,preparation):
"""Create the text element of a TEI file."""
# add text id
self.add_text_id(tei, text_id)
# add div, div n, div type
self.add_divs(tei, title, preparation)
pass
# Fill title, responsible party, source URL and license statement into the
# teiHeader of the TEI template.
# NOTE(review): the 'Wurm' entry in self.licenses has no 'url' key, so a
# source value of 'Wurm' takes the first branch and raises KeyError on
# self.licenses[source]['url'] — confirm which source values are expected.
def generate_teiheader(self,tei,title,source):
# add title
title_element = tei.xpath('//x:title', namespaces = {'x':self.tei})[0]
title_element.text = title
# add authors
resp_element = tei.xpath('//x:name', namespaces = {'x':self.tei})[0]
source_element = tei.xpath('//x:sourceDesc/x:p', namespaces = {'x':self.tei})[0]
license_element = tei.xpath('//x:availability/x:p', namespaces = {'x':self.tei})[0]
if source in self.licenses:
resp_element.text = self.licenses[source]['resp']
source_element.text = self.licenses[source]['url']
# Replace the placeholder license paragraph with the real statement.
license = license_element.getparent()
license.remove(license_element)
license_content = etree.fromstring(self.licenses[source]['license'])
license.append(license_content)
else:
# Unknown sources default to the Wurm attribution and license.
resp_element.text = self.licenses['Wurm']['resp']
license = license_element.getparent()
license.remove(license_element)
license_content = etree.fromstring(self.licenses['Wurm']['license'])
license.append(license_content)
pass
def create_tei(self,title,text_id,preparation,source):
"""Create a TEI lite file from a wiki recipe."""
# get the template
tei = self.read_xml(self.tei_template)
# teiHeader
self.generate_teiheader(tei,title,source)
# generate text
self.generate_text(tei,title,text_id,preparation)
# Round-trip through a string to insert line breaks between tags, then
# reparse and write the pretty-printed TEI file to self.xmldir.
teiasstring = etree.tostring(tei,encoding='utf-8').decode()
teiasstring = re.sub(r'><',r'>\n<',teiasstring)
parser = etree.XMLParser(remove_blank_text=True)
otei = etree.ElementTree(etree.XML(teiasstring,parser))
otei.write(os.path.join(self.xmldir,text_id+'.xml'),encoding='utf-8',pretty_print=True,xml_declaration=True)
return(otei)
def add_metadata(self,text_id,title,source,year):
"""Add metadata instances to a data structure."""
collection = 'historical'
# Decade = year rounded down to the nearest multiple of ten.
decade = str((int(year)//10)*10)
# Half-century bucket built from a 4-digit year string: e.g. '1686' ->
# '1650', '1712' -> '1700'.
def get_period(year):
p1 = year[:2]
p2 = year[2:]
if int(p2) < 50:
p2 = '00'
elif int(p2) >= 50:
p2 = '50'
return p1+p2
period = get_period(year)
self.metadata[text_id] = {
'title':title,
'year':year,
'decade':decade,
'period':period,
'source':source,
'collection':collection
}
pass
def extract_info(self,text_id):
# File ids follow the pattern '<Source>_<year>_<id>'; the returned id is
# ASCII-folded and lower-cased: '<source>_<id>'.
source, year, id = text_id.split('_')
text_id = ud.unidecode(source).lower()+'_'+id
return(text_id,source,year)
def create_metadata(self):
# Dump the collected metadata as a tab-separated CSV, one row per text.
outpath = os.path.join(self.metadir,'historical-metadata.csv')
df = pd.DataFrame(self.metadata).transpose()
df.to_csv(outpath, sep = '\t')
pass
# Pipeline driver: convert every input .vrt file into a TEI file, collect
# its metadata, and finally write the metadata CSV.
def main(self):
# open wikidump file
self.infiles = self.get_files(self.indir, '*.vrt')
self.total = len(self.infiles)
self.success = 0
for infile in self.infiles:
file_id = os.path.splitext(os.path.basename(infile))[0]
print(file_id)
inxml = self.read_xml(infile)
# clean the preparation and get the title
title, preparation = self.get_preparation(inxml)
# get info from file name: text_id, source, year, subid
text_id,source,year = self.extract_info(file_id)
# # for tei: title in tei header, author, availability (license), sourceDesc, div/head/p
tei = self.create_tei(title,text_id,preparation,source)
self.add_metadata(text_id,title,source,year)
self.success += 1
# save metadata
self.create_metadata()
pass
def cli(self):
"""CLI parses command-line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument("-i","--input", help="path to the input folder.")
parser.add_argument("-x","--xml", help="output directory for TEI/XML files.")
parser.add_argument("-m","--meta", help="output directory for the metadata file.")
args = parser.parse_args()
# All options missing -> test mode; some missing -> abort with a hint;
# all present -> use them as given.
noneargs = [x for x in args.__dict__.values()].count(None)
if noneargs == 3:
print("Running in test mode!")
self.indir ='test/historical/source'
self.xmldir ='test/historical/tei'
self.metadir ='test/metadata'
elif noneargs < 3 and noneargs > 0:
options = ["'-"+k[0]+"'" for k,v in args.__dict__.items() if v == None]
options = ', '.join(options)
exit_message = '\n'.join(["You forgot option(s): {}".format(options),
"Provide no option to run in test mode: 'python3 {}'".format(os.path.basename(__file__)),
"Get help with option '-h': 'python3 {} -h'".format(os.path.basename(__file__))]
)
sys.exit(exit_message)
else:
self.indir = args.input
self.xmldir = args.xml
self.metadir = args.meta
pass
print(WikiExtractor())<file_sep># Saarbrücken Cookbook Corpus: a recipe for a diachronic study *à la CLARIN-D*

This is the repository for the tutorial [Saarbrücken Cookbook Corpus: a recipe for a diachronic study *à la CLARIN-D*](http://chozelinek.github.io/sacoco).
This **tutorial** will show you step-by-step how to use the **CLARIN-D infrastructure** to **compile** a diachronic corpus of German cooking recipes (the [**Sa**arbrücken **Co**okbook **Co**rpus](http://hdl.handle.net/11858/00-246C-0000-001F-7C43-1)). Afterwards, you will learn how to **exploit** this resource to discover how the conative function has evolved in this register during the last centuries.
## Contents
- `data/`, SaCoCo's corpus data used for this showcase.
- `historical/`
- `source/`
- `index_files/`, folder for CSS, JS, images, figures...
- `img/`, images and other graphic material used in the tutorial.
- `results/`, csv files containing CQP output.
- `test/`, testing material used for the development of the tutorial and the scripts. It mimics the data folder structure. (Use this to check how the scripts work.)
- `contemporary/`
- `meta/`, VRT files with metadata as structural attributes
- `source/`, source XML excerpt
- `tei/`, TEI files
- `vrt/`, VRT files after WebLicht
- `historical/`
- `meta/`, VRT files with metadata as structural attributes
- `source/`, source XML excerpt
- `tei/`, TEI files
- `vrt/`, VRT files after WebLicht
- `metadata/`, folder containg to CSV files generated from raw input, and `.meta` file for CQPweb.
- `utils/`, files used by the scripts: WebLicht tool chains in XML format, XML templates, Relax NG schemas for validation...
- `README.md`, a file describing the contents of the repo.
- `index.html`, HTML version of the tutorial.
- `index.Rmd`, step-by-step guide on how to compile and exploit the SaCoCo corpus to to answer a research question. This is the source code used to generate the `.html` version.
- `cqpwebsetup.html`, HTML version of the tutorial explaining how to set up the SaCoCo corpus for CQPweb.
- `cqpwerbsetup.html`, the source code to generate the `.html` version.
- `SaCoCo.bib`, SaCoCo's bibliography in `.bib` format.
- `sacoco.cqp`, CQP script.
- `metadata4cqpweb.py`, a script to convert extracted metadata in CSV form into the format suitable for CQPweb.
- `texts2corpus.py`, a script to concatenate all texts in one singe XML file.
- `waaswrapper.py`, a script wrapping WebLicht as a Service to process big amounts of data.
- `wikiextractor.py`, a script to extract German recipes from a wiki dump.
- `xmlextractor.py`, a script to transform historical recipes into TEI Lite.
- `requirements.txt`, Python 3 dependencies.
- `_output.yaml`, configuration for markdown to html transformations
## How to contribute
If you find a bug, a spelling mistake, or you want to share different or better solutions, you're more than welcome to submit a pull request with changes to the tutorial materials.
The HTML file of the tutorials are generated using Rmarkdown. Accordingly, the best way to contribute to the tutorial itself is to update the `.Rmd` file, rather than the `.html` files.
<file_sep># -*- coding: utf-8 -*-
import sys
import os
import glob
import codecs # to handle properly unicode
import re # to use regular expressions
import argparse # to parse command-line arguments
import time
import fnmatch
import math
import pandas as pd
#===============================================================================
# Import XML module
#===============================================================================
from lxml import etree
#===============================================================================
# Following code block is only needed if lxml is not used as the parser
#===============================================================================
def timeit(method):
    """Decorator that prints the wall-clock duration of each call.

    The wrapped callable behaves exactly like ``method``; after every
    invocation a line of the form ``'<name>' <seconds> sec`` is printed.
    """
    def timed(*args, **kw):
        start = time.time()
        outcome = method(*args, **kw)
        elapsed = time.time() - start
        # Report only the callable's name and how long it took.
        print('%r %2.2f sec' % (method.__name__, elapsed))
        return outcome
    return timed
class MetadataForCqpWeb(object):
    """Merge metadata CSV files into a single tab-separated ``.meta`` file for CQPweb.

    The class is self-running: instantiating it parses the command line,
    reads all input CSV files with pandas, concatenates them, keeps only
    the requested columns (preceded by the text ID column), and writes a
    headerless tab-separated output file.  Printing the instance reports
    how many input files were processed.
    """
    @timeit
    def __init__(self):
        """Parse arguments, ensure the output directory exists, and run the merge."""
        self.cli()
        self.success = 0  # counts input files read without error
        # Bug fix: only create the directory when the output path actually
        # has a directory component.  os.path.split('sacoco.meta')[0] is ''
        # and os.makedirs('') raises FileNotFoundError, so writing to the
        # current working directory used to crash here.
        if self.outdir:
            os.makedirs(self.outdir, exist_ok=True)
        self.main()
    def __str__(self):
        """Return a human-readable one-line summary of the outcome."""
        if self.success > 0:
            message = [
                "{} files processed!".format(str(self.success)),
                "Guten Appetit!"
                ]
        else:
            message = [
                "{} files processed!".format(str(self.success)),
                "Ups! Maybe something went wrong!"
                ]
        return " ".join(message)
    def cli(self):
        """CLI parses command-line arguments.

        Sets ``self.infiles``, ``self.outfile``, ``self.columns`` and
        ``self.outdir``.  With no options at all the script falls back to
        the bundled test data under ``test/metadata``; with only some
        options it exits, listing the forgotten ones.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument("-i","--input", nargs='+', help="input files.")
        parser.add_argument("-o","--output", help="output file.")
        parser.add_argument("-c","--columns", nargs='+', help="columns to be extracted.")
        args = parser.parse_args()
        # Number of options the user left out (argparse defaults them to None).
        noneargs = [x for x in args.__dict__.values()].count(None)
        if noneargs == 3:
            # No options at all: run on the test fixtures.
            print("Running in test mode!")
            self.infiles = ['test/metadata/contemporary-metadata.csv','test/metadata/historical-metadata.csv']
            self.outfile = 'test/metadata/sacoco.meta'
            self.columns = ['year','decade','period','collection','source', 'title']
        elif noneargs > 0 and noneargs < 3:
            # Some but not all options given: tell the user what is missing.
            options = ["'-"+k[0]+"'" for k,v in args.__dict__.items() if v == None]
            options = ', '.join(options)
            exit_message = '\n'.join(["You forgot option(s): {}".format(options),
                "Provide no option to run in test mode: 'python3 {}'".format(os.path.basename(__file__)),
                "Get help with option '-h': 'python3 {} -h'".format(os.path.basename(__file__))]
                )
            sys.exit(exit_message)
        else:
            self.infiles = args.input
            self.outfile = args.output
            self.columns = args.columns
        # Directory part of the output path ('' when writing to the CWD).
        self.outdir = os.path.split(self.outfile)[0]
    def get_files(self, directory, fileclue):
        """Return paths of all files under ``directory`` whose names match ``fileclue``.

        ``fileclue`` is a shell-style glob (e.g. ``'*.csv'``); the walk is
        recursive.  Currently unused by ``main`` but kept as a public helper.
        """
        matches = []
        for root, dirnames, filenames in os.walk(directory):
            for filename in fnmatch.filter(filenames, fileclue):
                matches.append(os.path.join(root, filename))
        return matches
    def main(self):
        """Read, merge and filter the input CSV files, then write the ``.meta`` file."""
        # merge files in one data frame
        if len(self.infiles) == 1:
            # single input: just read it
            df = pd.read_csv(self.infiles[0], sep = '\t')
            self.success = 1
        elif len(self.infiles[1:]) >= 1:  # i.e. two or more input files
            # read the first, then concatenate the rest one by one
            df = pd.read_csv(self.infiles[0], sep = '\t')
            self.success = 1
            for file in self.infiles[1:]:
                newdf = pd.read_csv(file, sep = '\t')
                newdf = newdf.reset_index(drop=True)
                df = pd.concat([df, newdf], axis=0)
                self.success += 1
        # The unnamed first CSV column holds the text IDs; give it a name
        # so it can be selected alongside the requested columns.
        df = df.rename(columns = {'Unnamed: 0':'text_id'})
        df = df[['text_id']+self.columns]
        # CQPweb expects a headerless, tab-separated metadata file.
        df.to_csv(self.outfile, sep = '\t', index = False, header = False)
print(MetadataForCqpWeb())<file_sep>---
title: 'Saarbrücken Cookbook Corpus: a recipe for a diachronic study *à la CLARIN-D*'
author: "Universität des Saarlandes"
date: "29 January 2016"
output: html_document
bibliography: SaCoCo.bib
---
[](http://hdl.handle.net/11858/00-246C-0000-001F-7C43-1)
This tutorial will show you step-by-step how to use the CLARIN-D infrastructure to compile a diachronic corpus of German cooking recipes. Afterwards, you will learn how to exploit this resource to discover how the conative function has evolved in this register during the last centuries.
In order to reproduce this showcase successfully, you will need to satisfy the following requirements:
- a Mac, Linux or Windows operating system (tested on Mac OS X, Linux Ubuntu)
- [libxml2 and libxslt](<http://www.xmlsoft.org/downloads.html>)
- 7z
- Python 3, and some packages (`pip3 install -r requirements.txt`)
- lxml
- pandas
- regex
- requests
- R, and some packages
- reshape2
- dplyr
- ggplot2
- internet connection
You also need the materials. Go to our GitHub [repo](https://github.com/chozelinek/sacoco) and clone it.
Ready? Steady! Go!
# Corpus compilation
A corpus is a collection of texts in electronic format. We distinguish three main steps in the process of compiling an electronic corpus:
1. data acquisition and preprocessing
1. linguistic annotation with WebLicht
1. corpus encoding for CQPweb
We have two different sources of data:
- contemporary
- historical
The **historical** recipes were transcribed and digitised manually by <NAME>. We complemented this data set with some transcriptions done by Gloning et al.[^gloning]. In parallel, we obtained a set of **contemporary** recipes from a wiki site devoted to cooking recipes [kochwiki.org](http://www.kochwiki.org/wiki/Hauptseite). Luckily, a XML dump of this site is available at the [Internet Archive](https://archive.org/download/wiki-rezeptewikiorg).
[^gloning]: <http://www.staff.uni-giessen.de/gloning/kobu.htm>
Due to the different nature of our historical and contemporary datasets, the corpus compilation methodology, although following a similar outline, is slightly different.
## Data acquisition and preprocessing {.tabset .tabset-fade .tabset-pills}
Our goal at this stage is to obtain the data in digital form. And afterwards, preprocess the material to obtain a homogeneous minimalist TEI/XML format, that we can easily integrate in our pipeline, namely: WebLicht and CQP.
### Contemporary data
Download a wiki dump from <https://archive.org/download/wiki-rezeptewikiorg>. The file to be downloaded from the archive is 19.8M and gets huge when extracted (1.21G). That's the reason why we don't include it. The GitHub repository you downloaded includes a smaller test file, so you don't have to download the original file for testing, if you don't want to.
```bash
# download the dump to the data/contemporary/source folder
wget -P data/contemporary/source https://archive.org/download/wiki-rezeptewikiorg/rezeptewikiorg-20140325-history.xml.7z
# unzip the file
7z x -odata/contemporary/source data/contemporary/source/rezeptewikiorg-20140325-history.xml.7z rezeptewikiorg-20140325-history.xml
```
> TIP: if you are just testing, you can skip this step. You can find an excerpt of this file in the `test/contemporary/source/` folder.
The size of the extracted file can give you a slight idea of the daunting task of extracting information manually from this file. Thus, we use a python script instead (`wikiextractor.py`) to automatically structure the data and extract the following information:
- a minimal **TEI/XML** file for each recipe containing:
- title, and
- cooking instructions (only the section where the actual cooking procedure is described, no comments, no history of the dish...)
- a CSV file containing metadata for each page such as:
- authors
- ingredients
- tools
- methods
- cuisines
- URL
The input for this script is the huge file `rezeptewikiorg-20140325-history.xml`. It contains thousands of `page` nodes, their `revisions` and the actual `texts`. See an example page below.
```xml
<page>
<title>"Krömpele"-Suppe</title>
<ns>0</ns>
<id>46526</id>
<sha1>rhhwusxi5j205lgcktz71ncz5s12gwu</sha1>
<revision>
<id>262379</id>
<timestamp>2013-10-30T15:27:50Z</timestamp>
<contributor>
<username>CTHOE</username>
<id>927</id>
</contributor>
<comment>Neu angelegt</comment>
<text xml:space="preserve" bytes="1851">{{Rezept|
| Menge = 4 Personen
| Zeit = 30–40 Minuten
| Schwierigkeit = leicht
| Alkohol = nein
| Vegetarisch = nein
| Bild = Kein_Bild.png
|}}
== Zutaten ==
* 175 g [[Zutat:Mehl|Mehl]], gesiebt
* 2–3 [[Zutat:Ei|Eier]]
* 1 Pr. [[Zutat:Salz|Salz]]
* 500 ml [[Zutat:Fleischbrühe|Fleischbrühe]]
* 250 g [[Zutat:Schinkenspeck|Schinkenspeck]]
* frisch geriebener [[Zutat:Muskat|Muskat]]
* 2–3 EL [[Zutat:Schnittlauch|Schnittlauch]]
== Kochgeschirr ==
* 1 [[Zubereitung:Küchenbrett|Küchenbrett]]
* 1 [[Zubereitung:Topf|Topf]]
* 1 [[Zubereitung:Pfanne|Pfanne]]
== Zubereitung ==
* Schnittlauch in kleine Röllchen [[Zubereitung:schneiden|schneiden]]
* Gewürze, Mehl und Eier mit etwas Wasser zu einem dickflüssigen Teig verrühren
* Unter Umständen muss etwas Mehl oder Wasser dazugegeben werden, um die richtige Konsistenz des Teiges zu erreichen
* Den Teig zu großen ''Krömpele'' (Krümel) mit den Händen verreiben
* Etwa 1 l Wasser mit der Brühe [[Zubereitung:aufkochen|aufkochen]]
* Hierin die ''Krömpele'' leicht [[Zubereitung:köcheln|köchelnd]] in etwa 15 Minuten [[Zubereitung:garziehen|garziehen]] lassen
* Zwischenzeitlich den Speck fein [[Zubereitung:würfeln|würfeln]] und goldbraun [[Zubereitung:ausbraten|ausbraten]]
* Speckwürfel in die Suppe schütten, [[Zubereitung:abschmecken|abschmecken]] und mit reichlich Schnittlauchröllchen [[Zubereitung:garnieren|garnieren]] und [[Zubereitung:anrichten|anrichten]]
[[Kategorie:Thüringer Küche]]
[[Kategorie:Nocken]]
[[Kategorie:Vorspeisen]]
[[Kategorie:Suppen]]</text>
</revision>
</page>
```
The script does the following:
1. opens the input XML file
1. gets all `page` nodes
1. filters those recipes corresponding to German speaking regions only
1. for each of those recipes gets the last revision
1. extracts:
1. revision ID
1. page ID
1. year of last revision
1. cuisine
1. authors
1. ingredients
1. tools
1. methods
1. title
1. text with the instructions
1. title and text are saved as a TEI XML file (`data/contemporary/tei`)
1. metadata are saved in a CSV file (`data/metadata/contemporary-metadata.csv`)
To run the script you need to run the following commands from the terminal:
```bash
# run the script
python3 wikiextractor.py -i data/contemporary/source/rezeptewikiorg-20140325-history.xml -x data/contemporary/tei -m data/metadata
```
> TIP: for development/testing purposes, if you just run `python3 wikiextractor.py`, it will work on the testing dataset stored in the `test` folder.
An example of the resulting TEI files, `wiki_188908.xml`, is given below:
```xml
<?xml version='1.0' encoding='UTF-8'?>
<?xml-model href="http://www.tei-c.org/release/xml/tei/custom/schema/relaxng/tei_lite.rng" type="application/xml" schematypens="http://relaxng.org/ns/structure/1.0"?>
<TEI xmlns="http://www.tei-c.org/ns/1.0" xml:lang="de">
<teiHeader>
<fileDesc>
<titleStmt>
<title>Räucherfischmousse im Knusperröllchen auf Gurken-Rahmsalat</title>
<author><NAME></author>
<respStmt>
<resp/>
<name/>
</respStmt>
</titleStmt>
<publicationStmt>
<publisher>Universität des Saarlandes</publisher>
<pubPlace>Saarbrücken</pubPlace>
<availability status="free">
<p>Published under a <ref target="http://creativecommons.org/licenses/by-sa/3.0/">Creative Commons Attribution ShareAlike 3.0 License</ref>.</p>
</availability>
<date>2016</date>
</publicationStmt>
<sourceDesc>
<p>http://www.kochwiki.org/w/index.php?oldid=188908</p>
</sourceDesc>
</fileDesc>
</teiHeader>
<text xml:id="wiki-188908">
<body>
<div n="1" type="recipe">
<head>Räucherfischmousse im Knusperröllchen auf Gurken-Rahmsalat</head>
<div n="2" type="contents">
<head>Räucherfischmousse</head>
<p>Das Saiblingsfilet entgräten und in grobe Stücke schneiden. Den Fischfond in einem Topf aufkochen. Die Speisestärke in wenig Wasser glatt rühren, den Fond damit abbinden und auskühlen lassen. Dann die Flüssigkeit mit den Räucherfischstücken in den Mixaufsatz der Küchenmaschine füllen und pürieren "(Falls kein Mixaufsatz oder Küchenmaschine vorhanden einen Zauberstab verwenden)". Die Gelatine in kaltem Wasser einweichen. Einen Topf mit zwei EL Wasser erwärmen und die gut ausgedrückte Gelatine darin auflösen. Während dessen die Schlagsahne halb fest aufschlagen. Die Fischmasse in eine Schüssel füllen und mit der Gelatine sowie etwa der Hälfte des Schlagobers gut vermengen. Dann die restliche Schlagsahne locker unterheben. Das Räucherfischmousse mit Salz sowie Pfeffer abschmecken. Die fertige Fischfüllung mit Klarsichtfolie abdecken und für mindestens 1/2 Stunde im Kühlschrank kalt stellen.</p>
</div>
</div>
</body>
</text>
</TEI>
```
And this is just an example of a few instances of the metadata file:
| | source | year | title | authors | categories | ingredients | methods | tools
---|---|---|---|---|---|---|---|---
wiki-142256 | wiki | 2010 | <NAME> mit saurer Sahne | Vran01, Jozeil | Schweizer Küche | Sahne, Salz, Mehl, Butter | | Schüssel, Küchenwaage, Frischhaltefolie
wiki-150044 | wiki | 2010 | Punschglasur | Jozeil | Österreichische Küche | Eiweiß, Zucker, Orangensaft, Rum | Glasieren | Schüssel, Schneebesen
wiki-158731 | wiki | 2010 | Riebelesuppe | Vran01, Hombre, Jozeil, <NAME> | Schwäbische Küche | Weizenmehl, Brühwürfel, Ei, Salz, Pfeffer, Meersalz | Abschmecken | Schüssel, Topf, Küchenreibe
You can check from the command line if the TEI files are alright:
```bash
for i in data/contemporary/tei/*.xml; do xmllint --noout --relaxng utils/tei_lite.rng $i; done
```
> TIP: for development/testing purposes, just switch the input folder:
```bash
for i in test/contemporary/tei/*.xml; do xmllint --noout --relaxng utils/tei_lite.rng $i; done
```
### Historical data
As a starting point we take the materials in `data/historical/source`. Our goal is to generate a TEI Lite XML for each recipe, and extract the metadata.
The script `xmlextractor.py` will help us with the task of normalizing our data.
`xmlextractor.py`:
1. gets all XML files in the input folder
1. for each file
1. extracts metadata:
1. text ID
1. year
1. authors
1. source
1. title
1. text with the instructions
1. cleans the text from previous annotations
1. adds source and appropriate license to the text
1. title and text are saved as a TEI XML file (`data/historical/tei`)
1. metadata are saved in a CSV file (`data/metadata/historical-metadata.csv`)
To run the script you need to run the following commands from the terminal:
```bash
# run the script
python3 xmlextractor.py -i data/historical/source -x data/historical/tei -m data/metadata
```
> TIP: for development/testing purposes, if you just run `python3 xmlextractor.py`, it will work on the testing dataset stored in the `test` folder.
You can check from the command line if the TEI files are alright:
```bash
for i in data/historical/tei/*.xml; do xmllint --noout --relaxng utils/tei_lite.rng $i; done
```
> TIP: for development/testing purposes, just switch the input folder:
```bash
for i in test/historical/tei/*.xml; do xmllint --noout --relaxng utils/tei_lite.rng $i; done
```
## Data processing with WebLicht
In the previous section, we have seen how to *shape* our data. Once that we have a homogeneous format for both collections, we can start to process the texts with [**WebLicht**](http://weblicht.sfs.uni-tuebingen.de/weblichtwiki/index.php/Main_Page).
We have to process the two collections (historical and contemporary) separately in WebLicht as we need two slightly different pipelines. In both cases, we have to perform the following steps:
1. design a chain in WebLicht
1. authenticate
1. build a tool chain
1. process all recipes with this chain using WaaS (WebLicht as a Service)
1. get an API key for WaaS
1. use a python wrapper to interact with WebLicht
### Logging into WebLicht
We use the Shibboleth Authentication service to log in WebLicht. We will need an identity from a CLARIN identity provider. If your institution is not such a provider you can request an account from the CLARIN provider.
1. Visit the [WebLicht Wiki](http://weblicht.sfs.uni-tuebingen.de/weblichtwiki/index.php/Main_Page), scroll down to the bottom of the page and click on the blue button to `Start WebLicht`.
1. The Shibboleth Authentication service will load. Choose your identity provider (`clarin.eu website account` if you are using a CLARIN account).
1. You will be redirected to an institutional page where you have to provide your user and password.
1. If everything is OK, WebLicht's welcome page will be loaded.
This video prepared by our colleagues at Tübingen precisely illustrates the login process:
<video width="640" height="360" autobuffer controls preload="auto"><source src="http://weblicht.sfs.uni-tuebingen.de/weblichtwiki/extensions/HTML5video/videos/WebLichtLogin.mp4" type="video/mp4"/><source src="http://weblicht.sfs.uni-tuebingen.de/weblichtwiki/extensions/HTML5video/videos/WebLichtLogin.ogv" type="video/ogg"/><source src="http://weblicht.sfs.uni-tuebingen.de/weblichtwiki/extensions/HTML5video/videos/WebLichtLogin.webm" type="video/webm"/></video>
If you run into problems, read the [FAQ explaining how to log in to WebLicht](http://weblicht.sfs.uni-tuebingen.de/weblichtwiki/index.php/FAQ#Logging_In).
### Building the tool chain to process the data {.tabset .tabset-fade .tabset-pills}
#### Contemporary tool chain
1. Click on `New Chain`.
1. A window will pop-up.
1. There are 3 input modes: click on the rightmost button `Browse`.
1. Choose `utils/tcf_example.xml`.
1. Click on OK.
1. Choose the tools:
1. Berlin: Tokenizer and Sentence
1. Berlin: Part-of-Speech Tagger
1. Download the chain by clicking on `Download chain`
1. Save the XML file as `chain_contemporary.xml` in the folder `utils` of our repository.
For information on how to design a tool chain you can also watch the following video.
<video width="640" height="360" autobuffer controls preload="auto"><source src="http://weblicht.sfs.uni-tuebingen.de/weblichtwiki/extensions/HTML5video/videos/SimpleToolChain.mp4" type="video/mp4"/><source src="http://weblicht.sfs.uni-tuebingen.de/weblichtwiki/extensions/HTML5video/videos/SimpleToolChain.ogv" type="video/ogg"/><source src="http://weblicht.sfs.uni-tuebingen.de/weblichtwiki/extensions/HTML5video/videos/SimpleToolChain.webm" type="video/webm"/></video>
#### Historical tool chain
The process is exactly the same as we used before.
1. Click on `New Chain`.
1. A window will pop-up.
1. There are 3 input modes: click on the rightmost button `Browse`.
1. Choose `utils/tcf_example.xml`.
1. Click on OK.
1. Choose the tools:
1. Berlin: Tokenizer and Sentence
1. Berlin: CAB historical text
1. Download the chain by clicking on `Download chain`
1. Save the XML file as `chain_historical.xml` in the folder `scripts` of our repository.
### Using WebLicht as a service {.tabset .tabset-fade .tabset-pills}
We could now process our texts directly through the user-friendly WebLicht GUI. However, if you have thousands of recipes to be annotated, it is more efficient to use [**WaaS**](https://weblicht.sfs.uni-tuebingen.de/WaaS/) (WebLicht as a Service) to execute our WebLicht chains.
> WebLicht as a Service (WaaS) is a REST service that executes WebLicht chains. This allows you to run WebLicht chains from your UNIX shell, scripts, or programs.
It means that we can write a script to automatize our interaction with WebLicht!
We need at least two things:
1. a WebLicht chain
1. an API key (a kind of "password" to be passed to WaaS)
We already have our WebLicht chains. For the second, go to the [WaaS home page](https://weblicht.sfs.uni-tuebingen.de/WaaS), click on the rightmost menu item at the top of the page called `API Key`. You will be redirected to the already familiar authentication page, choose your institution (`clarin.eu website account` for us), provide your credentials and a new page will load. If you click on the button `Generate` a long string will appear where it reads `Your API key`. Copy the key in a safe place and treat it like it was a password.
Time to actually process our XML files with `WaaS`!
#### Contemporary recipes
We have created a python script to process the recipes with WaaS (`waaswrapper.py`).
The goal of the script is to process all TEI/XML files in a folder with WebLicht and save the results in VRT files for their encoding as a corpus for the Corpus Workbench (CWB).
The input is typically a folder with the TEI/XML files we created in previous sections. But in fact we could use any XML file.
The script does the following:
1. gets a list of all files to be transformed
1. finds all nodes containing text to be processed
1. sends a request to WaaS to process the text of a node with the provided chain
1. converts the WaaS response in TCF format to VRT
1. saves the VRT files in the target directory
To run the script you need to invoke the following commands from the terminal
```bash
python3 waaswrapper.py -i data/contemporary/tei -c utils/chain_contemporary.xml -o data/contemporary/vrt
```
Then, you will be prompted to provide your API key.
> TIP: for development/testing purposes, if you just run `python3 waaswrapper.py -t contemporary`, it will work on the testing dataset stored in the `test` folder.
You can get more information on the parameters this script takes by running:
```bash
python3 waaswrapper.py -h
```
The output is a VRT file (one token per line and positional attributes separated by tabs).
```xml
<?xml version='1.0' encoding='UTF-8'?>
<text id="wiki_244969">
<p>
<s>
Das ART d
Brot NN Brot
in APPR in
ca. ADV ca.
1 CARD 1
cm NN Cm
große ADJA groß
Würfel NN Würfel
schneiden VVFIN schneiden
. $. .
</s>
<s>
Die ART d
Sonnenblumenkerne NN Sonnenblumenkern
in APPR in
einer ART eine
Pfanne NN Pfanne
ohne APPR ohne
Öl NN Öl
anrösten VVINF anrösten
und KON und
fein ADJD fein
reiben VVINF reiben
. $. .
</s>
<s>
Mit APPR mit
Sonnenblumenkernen NN Sonnenblumenkern
, $, ,
Stachelbeeren NN Stachelbeere
sowie KON sowie
Minze NN Minze
garnieren VVINF garnieren
und KON und
heiß ADJD heiß
servieren VVINF servieren
. $. .
</s>
</p>
</text>
```
#### Historical recipes
The procedure is exactly the same, the only differences are: the location of the input files, and the chain to be used.
```bash
python3 waaswrapper.py -i data/historical/tei -c utils/chain_historical.xml -o data/historical/vrt
```
> TIP: for development/testing purposes, if you just run `python3 waaswrapper.py -t historical`, it will work on the testing dataset stored in the `test` folder.
## Corpus encoding for CQPweb
We are going to encode our corpus for the [IMS Open Corpus Workbench](http://cwb.sourceforge.net) (a tool initially developed at IMS Stuttgart). This tool will allow us to query the corpus, making the most of the annotation we have obtained with WebLicht.
The CWB expects XML files where two kind of attributes can be added:
- structural (equivalent to XML attributes, and they affect to regions of tokens)
- positional, to add multiple layers of information at token level
In the previous section we created the VRT files with the required positional information. Now, we will complete the annotation by adding structural attributes to the text element from the metadata we stored in a CSV file.
We will have to prepare our data for CQPweb through a series of very simple steps:
1. adding metadata to the VRT files
1. generate a metadata file for CQPweb
### Add metadata to the VRT files {.tabset .tabset-fade .tabset-pills}
To add the metadata as structural attributes, we need:
- VRT files
- metadata as CSV
- a script (`addmetadata.py`)
`addmetadata.py`:
1. obtains of a list of all files to be transformed
1. parses the metadata
1. finds all nodes where the metadata fields will be added as attributes
1. adds corresponding metadata to each node using the `text ID` as key
1. saves the VRT files in the target directory
The output should look like this:
```xml
<?xml version='1.0' encoding='UTF8'?>
<text id="wiki-200141" year="2011" period="2000" authors="NikiWiki|Hombre|Jozeil" decade="2010" title="Bärlauchnockerl" methods="hacken|Abschmecken|anrichten" ingredients="Muskatnuss|Pfeffer|Sauerrahm|Salz|Schmand|Bärlauch|Gelatine" collection="contemporary" cuisines="Oberösterreichische Küche" source="wiki" tools="Küchenreibe|Schlagkessel|Schüssel|Frischhaltefolie|Schneidebrett|Löffel|Messer|Zauberstab|Küchenmaschine">
<p>
<s>
Den ART d
Bärlauch NN Bärlauch
fein ADJD fein
hacken VVINF hacken
. $. .
</s>
</p>
</text>
```
#### Add metadata to the contemporary recipes
We use `addmetadata.py` Python script by running the following command:
```bash
python3 addmetadata.py -i data/contemporary/vrt -m data/metadata/contemporary-metadata.csv -o data/contemporary/meta
```
> TIP: for development/testing purposes, if you just run `python3 addmetadata.py -t contemporary`, it will work on the testing dataset stored in the test folder.
#### Add metadata to the historical recipes
We need to run the command also on the historical recipes indicating the corresponding metadata file, the location of the input files, and the path for the output.
```bash
python3 addmetadata.py -i data/historical/vrt -m data/metadata/historical-metadata.csv -o data/historical/meta
```
> TIP: for development/testing purposes, if you just run `python3 addmetadata.py -t historical`, it will work on the testing dataset stored in the test folder.
### Generate the metadata file for CQPweb
CQPweb helps us to calculate distributions across different subcorpora. Typically, these subcorpora are the result of splitting our corpus according to some variables contained in the metadata. To achieve this we only need to pass a metadata file once, containing the value of the fields we are interested in for each text.
We have already generated two metadata tables:
- contemporary
- historical
We will merge them and will extract only those fields whose distributions shall be displayed in CQPweb, namely:
- year
- decade
- period
- collection
Moreover, we will add the source and the title of the recipe as a free text field (they won't be used for the distributions).
To get this file we use the script `metadata4cqpweb.py`.
`metadata4cqpweb.py`:
- gets all input files
- for each file:
- extracts relevant columns
- concatenates info from all files
- saves the output as tab-separated plain text file
We use the following command:
```bash
python3 metadata4cqpweb.py -i data/metadata/contemporary-metadata.csv data/metadata/historical-metadata.csv -o data/metadata/sacoco.meta -c year decade period collection source title
```
> TIP: for development/testing purposes, if you just run `python3 metadata4cqpweb.py`, it will work on the testing dataset stored in the test folder.
## Set up a corpus in CQPweb
We have all materials needed to set up a corpus in CQPweb:
- the texts in VRT format
- a metadata file
Now, you need to have access to a CQPweb installation as administrator. There are different options to get CQPweb running, listed in decreasing order of difficulty:
- [install your own CQPweb](http://cwb.sourceforge.net/cqpweb.php#cqpweb):
- on your computer, only you have access to the corpus
- on a server, you can share it with other people
- PROS:
- you have maximum control
- you can share with other people if it is installed on a server
- CONS:
- difficult to install, you need expert knowledge to admin a LAMP stack (Apache, MySQL, PHP), check the [administrator's manual](http://cwb.sourceforge.net/files/CQPwebAdminManual.pdf)
- use [CQPwebInABox](http://cwb.sourceforge.net/cqpweb.php#inabox):
- PROS:
- no installation required, just run a Virtual Machine
- its usage is well documented
- CONS:
- you cannot share your corpus with others
- resource intensive, you will be running a Virtual Machine
- you will have to get familiar with *Lubuntu*
- use [our CQPweb installation](https://fedora.clarin-d.uni-saarland.de/cqpweb):
- PROS:
- you don't have to cope with this section
- you can share your corpus with others
- CONS:
- you have to give us the corpus and the metadata in the right format (but... wait! You have just learnt how to do it!)
- we work together to *clarinify* the resource (not too bad either, see the section on [*clarinifying*](#integration-of-the-resource-in-the-clarin-d-infrastructure) a corpus).
If you don't fulfill all these requirements and/or you don't have enough experience, do not worry. Just jump to the section on [*clarinifying*](#integration-of-the-resource-in-the-clarin-d-infrastructure) and leave the gory details for us.
Nevertheless, we document all the steps to get SaCoCo encoded and installed in CQPweb under a separate cover. Check [CQPweb setup tutorial](cqpwebsetup.html).
## Integration in the CLARIN-D infrastructure
We have created our resource. Now, we can *clarinify* it by:
- getting a [PID](http://www.clarin.eu/content/persistent-identifiers) (Persistent IDentifier) for the corpus
- providing the metadata in [CMDI](http://www.clarin.eu/node/3219) format
- depositing the data and the metadata in a [repository](http://www.clarin.eu/content/depositing-services)
- making it harvestable by the [VLO](http://www.clarin.eu/content/virtual-language-observatory)
- aggregating it to the [FCS](http://weblicht.sfs.uni-tuebingen.de/Aggregator/)
The Universität des Saarlandes as a CLARIN Centre B has the staff and the resources to help you *clarinify* your data. Check [how to deposit data in our repository](http://fedora.clarin-d.uni-saarland.de/depositors.en.html).
Afterwards, your data will be like SaCoCo:
- deposited in a DSA awarded [repository](http://fedora.clarin-d.uni-saarland.de/index.en.html)
- findable in the [VLO](https://vlo.clarin.eu/search?2&q=sacoco)
- searchable through [Federated Content Search](http://weblicht.sfs.uni-tuebingen.de/Aggregator/)
- citable thanks to its PID [hdl:11858/00-246C-0000-001F-7C43-1](http://hdl.handle.net/11858/00-246C-0000-001F-7C43-1)
# Corpus exploitation
Our diachronic corpus of cooking recipes in German is now ready to be used. We will proceed as follows:
1. We will pose our research question.
1. We will design the operationalisation of this research question.
1. We will actually extract the features with CQPweb.
1. We will visualize and analyse the data with CQPweb/R.
If you want to reproduce every step that we will show you below, you will need to become a user for our [CQPweb installation](https://fedora.clarin-d.uni-saarland.de/cqpweb).
Go to this [URL](https://fedora.clarin-d.uni-saarland.de/cqpweb/usr/?thisQ=create&uT=y) to create a new account. Follow the instructions, and you will have access to SaCoCo in a few minutes.
## Research question
> Has the realisation of the conative function evolved over time in the cooking recipe register?
Our hypothesis is:
> Contemporary cooking recipes use fewer linguistic means to address the reader directly than historical ones.
Wurm already discovered that historical texts showed differences in the way the author addressed the reader.
## Operationalisation
> In research design, [...] operationalization is a process of defining the measurement of a phenomenon that is not directly measurable, though its existence is indicated by other phenomena.
We know that German can use different means to convey the conative function. Among them we can trace pronominal and verbal cues:
- pronominal
- second person personal pronouns (direct)
- indefinite pronouns (indirect)
- verbal
- imperatives (direct)
- infinitives (indirect)
Of course, there are more features that could help us to describe this phenomenon better. Can you think of them? How would you operationalise them? Contributions to extend this tutorial are welcome!!!
The next step is to design how we can retrieve these features in a systematic and effective way making use of the linguistic annotation that we have added with WebLicht.
Basically, we will quantify how many instances of these features can be found per text. First, we need to find the instances, then, we will count them. And, finally, we will describe the results and check if the historical recipes significantly differ from their contemporary counterparts.
### Personal pronouns
Second person pronouns are a pronominal indicator of the writer's overt intentions to engage directly with the reader.
- *irreflexives Personalpronomen*
- *substituirendes Possessivpronomen*
- *attribuirendes Possessivpronomen*
- *reflexives Personalpronomen*
### Indefinite pronouns
Indefinite pronouns like *man*, *jemand*, etc. are a pronominal resource that writers can use to avoid addressing the reader directly, but still use active voice forms.
- *substituierendes Indefinitpronomen*
### Imperatives
The imperative is a verbal device that addresses the reader directly; it is an order.
- *Imperativ*
### Infinitives
The usage of infinitives is a strategy to convey verbal instructions without using the imperative in a more impersonal fashion.
- *Infinitiv*
## Feature extraction
OK, we know what we are looking for. Let's see how.
The CWB comes with a query language that enables the interrogation of large text collections using linguistic patterns to retrieve relevant information. We will use it to find the different features that we have discussed above.
Our next mission is to define the queries that will allow us to find the phenomena discussed above in our corpus.
### Queries
CQP is a corpus query language which resembles regular expressions, in the sense that one can define patterns aimed at capturing interesting information. The difference here is that we are not limited to write patterns only relying on word forms. We can combine any linguistic information like lemma and POS to construct more sophisticated patterns.
Here, we will only illustrate the queries used for personal pronouns. If you want to check all of them see file `sacoco.cqp`.
#### Personal pronouns
This macro is aimed at finding personal pronouns, second person. We will look for:
- personal pronouns, second person
- possessive pronouns, second person
- reflexive pronouns
##### Personal pronouns second person
- personal pronouns second person singular
- personal pronouns second person singular appended to a verbal form
- personal pronouns second person plural
This query aims at finding second person singular personal pronouns. We know that second person singular is a token whose lemma is `du`, and its PoS tag is `PPER`, or a token whose surface form can be `du`, `Du`, `tu`, `thu`, etc.
```bash
([lemma="du" & pos="PPER"] | [word="[d|t]h?u" %c])
```
You can check the results at <https://fedora.clarin-d.uni-saarland.de/cqpweb/sacoco/concordance.php?qname=f0jfreteeu&uT=y>
This one looks for tokens ending in du/tu/thu...
```bash
[word=".+[t|d]h?u" %c]
```
It returns 35 matches in 24 different texts <https://fedora.clarin-d.uni-saarland.de/cqpweb/sacoco/concordance.php?qname=f0jfsqg6x5&uT=y>
And, finally, this one just looks for tokens whose lemma is `ihr` and their PoS is `PPER`.
```bash
[lemma="ihr" & pos="PPER"]
```
This turns out to be a quite rare phenomenon, just 3 hits in the whole corpus <https://fedora.clarin-d.uni-saarland.de/cqpweb/sacoco/concordance.php?qname=f0jglv8w52&uT=y>.
In order to see the development of the different features, you have to repeat the steps for all features.
Below you can see how to explore a combination of the three queries above in CQPweb and how to visualize the results in CQPweb and/or R.
## Exploration and visualization {.tabset .tabset-fade .tabset-pills}
### CQPweb
Now, let's get on with it!
First, you will need access to our [CQPweb installation](https://fedora.clarin-d.uni-saarland.de/cqpweb/).
Choose the corpus: in our case [SaCoCo](https://fedora.clarin-d.uni-saarland.de/cqpweb/sacoco/)
Run a query, e.g. [combining the queries for personal pronouns second person](https://fedora.clarin-d.uni-saarland.de/cqpweb/sacoco/concordance.php?theData=%28%28[lemma%3D%22du%22+%26+pos%3D%22PPER%22]%29+|+[word%3D%22[d|t]h%3Fu%22+%25c]%29+|+%28[word%3D%22.%2B[t|d]h%3Fu%22+%25c]%29+|+%28[lemma%3D%22ihr%22+%26+pos%3D%22PPER%22]%29&qmode=cqp&pp=50&del=begin&t=&del=end&uT=y)
```bash
([lemma="du" & pos="PPER"] | [word="[d|t]h?u" %c])
|
[word=".+[t|d]h?u" %c]
|
[lemma="ihr" & pos="PPER"]
```
The headline above the concordance gives you information on your query, the number of hits, in how many different texts the hits occur, etc.
**You can now explore the results more closely:**
Click on one of the instances to [get more context.](https://fedora.clarin-d.uni-saarland.de/cqpweb/sacoco/context.php?batch=0&qname=f0k6o1vuih&uT=y)
Click on the text ID at the beginning of each concordance line to [get information about the text.](https://fedora.clarin-d.uni-saarland.de/cqpweb/sacoco/textmeta.php?text=wiki_270173&uT=y)
**Use the powerful post-processing of CQPweb**, which is available as a drop-down-menu in the upper right corner:
Choose "Frequency breakdown" and click "Go" to get a [frequency list of your query.](https://fedora.clarin-d.uni-saarland.de/cqpweb/sacoco/redirect.php?redirect=breakdown&pp=50&qname=f0k6o1vuih&uT=y)
or Choose "Distribution" and click "Go" to get a distribution of the query results across the subcorpora, in our case, the development over time,
either as ["distribution table"](https://fedora.clarin-d.uni-saarland.de/cqpweb/sacoco/redirect.php?redirect=distribution&pp=50&qname=f0k6o1vuih&uT=y)
or as ["bar chart"](https://fedora.clarin-d.uni-saarland.de/cqpweb/sacoco/redirect.php?classification=__all&showDistAs=graph&crosstabsClass=__none&redirect=refreshDistribution&qname=f0k6o1vuih&pp=50&uT=y).
### R
So, in the last section we have seen how to use CQPweb to test our queries, improve them, and save the results.
For reproducibility purposes, and to speed up the process, you can also interact with CQP from the command line.
If you managed to encode the corpus, you can extract all four features (2nd person personal pronouns, indefinite pronouns, imperatives, infinitives) with the `sacoco.cqp` script.
```bash
# create a directory to save the results
mkdir -p results/
# run the cqp script to get all the extractions
cqp -c < sacoco.cqp
```
In CQPweb we can see the result of a query at a time. But what if we want to get different representations?
Well, then you can use R for that. Let's describe very briefly our corpus and the results.
#### Corpus description
We will read the `results/meta.csv` which is a table where each row is a text, and the columns are from left to right: text ID, collection, period, decade, year, source.
```{r}
# import library to format output
library(knitr)
# read metadata
data = read.csv('results/meta.csv', sep = '\t', encoding = 'utf-8', header = F, strip.white = T)
# rename columns
names(data) = c('text_id','collection','period','decade','year','source')
# as factors
data$collection = as.factor(data$collection)
data$period = as.factor(data$period)
data$decade = as.factor(data$decade)
data$year = as.factor(data$year)
data$source = as.factor(data$source)
# print summary
summary(data)
```
Then we read the table for the number of tokens:
```{r, message=F}
# import library to manipulate tables
library(dplyr)
# we write a function to read CQP output
cqpReader = function(filename, feature, data){
# read file
df = read.csv(filename, sep = '\t', encoding = 'utf-8', header = F, strip.white = T)
# count the number of hits per text
df = group_by(df, V1) %>% summarise(feature = n())
# rename first column
names(df) = c('text_id',feature)
# merge with the original table
data = merge(data,df,'text_id',sort=T, all=T)
}
# read tokens
data = cqpReader('results/tokens.csv', 'tokens', data)
data[is.na(data)] = 0
```
Then, we get an overview of the size of our corpus by collection:
```{r}
texts_and_tokens_x_collection = group_by(data, collection) %>% summarise(texts = n(), tokens = sum(tokens))
kable(texts_and_tokens_x_collection, align = 'l')
```
By period:
```{r}
texts_and_tokens_x_period = group_by(data, period) %>% summarise(texts = n(), tokens = sum(tokens))
kable(texts_and_tokens_x_period, align = 'l')
```
By decade:
```{r}
texts_and_tokens_x_decade = group_by(data, decade) %>% summarise(texts = n(), tokens = sum(tokens))
kable(texts_and_tokens_x_decade, align = 'l')
```
<!-- We can show this with CQPweb -->
#### Results
We read the file for the personal pronouns:
```{r}
# read personal pronouns
data = cqpReader('results/pers2.csv', 'pers2', data)
# set NA cells to 0
data[is.na(data)] = 0
```
We can now visually check the differences between contemporary and historical recipes by grouping the results by period. You have probably seen that the samples are not equal in size. For this reason, we also calculated the relative frequency for each group:
```{r}
# load ggplot2 library to plot graphs
library(ggplot2)
# calculate the relative frequency for pers2
data.rel = group_by(data, period) %>% transform(pers2.rel = (pers2/tokens)*1000 )
# plot lines binding means
ggplot(data=data.rel, aes(x=period, y=pers2.rel, group=period)) +
stat_summary(fun.y=mean, geom="line", linetype = "dotted", aes(group = 1)) +
stat_summary(fun.y=mean, geom="point", size = 3, aes(shape = collection, colour = collection)) +
theme_bw() +
theme(legend.position="bottom")
```
We repeat the same procedure for indefinite pronouns:
```{r}
# read indefinite pronouns
data = cqpReader('results/pisp.csv', 'pisp', data)
data[is.na(data)] = 0
data.rel = group_by(data, period) %>% transform(pisp.rel = (pisp/tokens)*1000 )
ggplot(data=data.rel, aes(x=period, y=pisp.rel, group=period)) +
stat_summary(fun.y=mean, geom="line", linetype = "dotted", aes(group = 1)) +
stat_summary(fun.y=mean, geom="point", size = 3, aes(shape = collection, colour = collection)) +
theme_bw() +
theme(legend.position="bottom")
```
Imperatives:
```{r}
# read imperatives
data = cqpReader('results/vfimp.csv', 'vfimp', data)
data[is.na(data)] = 0
data.rel = group_by(data, period) %>% transform(vfimp.rel = (vfimp/tokens)*1000 )
ggplot(data=data.rel, aes(x=period, y=vfimp.rel, group=period)) +
stat_summary(fun.y=mean, geom="line", linetype = "dotted", aes(group = 1)) +
stat_summary(fun.y=mean, geom="point", size = 3, aes(shape = collection, colour = collection)) +
theme_bw() +
theme(legend.position="bottom")
```
And infinitives:
```{r}
# read infinitives
data = cqpReader('results/vfinf.csv', 'vfinf', data)
data[is.na(data)] = 0
data.rel = group_by(data, period) %>% transform(vfinf.rel = (vfinf/tokens)*1000 )
ggplot(data=data.rel, aes(x=period, y=vfinf.rel, group=period)) +
stat_summary(fun.y=mean, geom="line", linetype = "dotted", aes(group = 1)) +
stat_summary(fun.y=mean, geom="point", size = 3, aes(shape = collection, colour = collection)) +
theme_bw() +
theme(legend.position="bottom")
```
Let's put all this together to be able to better compare the evolution of these phenomena:
```{r}
```
<!-- # Bibliography -->
<file_sep># -*- coding: utf-8 -*-
import sys
import os
import glob
import codecs # to handle properly unicode
import re # to use regular expressions
import argparse # to parse command-line arguments
import time
import fnmatch
import math
import pandas as pd
#===============================================================================
# Import XML module
#===============================================================================
from lxml import etree
#===============================================================================
# Following code block is only needed if lxml is not used as the parser
#===============================================================================
def timeit(method):
    """Decorator that prints the wall-clock run time of ``method``.

    Args:
        method: the callable to wrap.

    Returns:
        A wrapper that calls ``method`` with the given arguments, prints the
        method name and the elapsed seconds, and returns the original result
        unchanged.
    """
    from functools import wraps

    @wraps(method)  # preserve the wrapped function's __name__/__doc__
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print('%r %2.2f sec' % (method.__name__, te - ts))
        return result
    return timed
class AddMetadata(object):
    """Merge per-text metadata from a TSV table into VRT (XML) corpus files.

    Walks the input directory for ``*.vrt`` files, looks up each text's ID
    (the file name without its extension) in a tab-separated metadata table,
    sets every metadata column as an attribute on the XML root element, and
    writes the pretty-printed result to the output directory.
    """
    @timeit
    def __init__(self):
        # Parse command-line arguments; sets self.indir, self.outdir and
        # self.metadata (or exits if required arguments are missing).
        self.cli()
        # Make sure the target directory exists before writing output.
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)
        # The whole pipeline runs on construction; results are summarised
        # by __str__ when the instance is printed at module level.
        self.add_metadata()
    def __str__(self):
        # Human-readable processing summary ("N recipes out of M processed!").
        if self.success > 0:
            message = [
                "{} recipes out of {} processed!".format(str(self.success),str(self.total)),
                "Guten Appetit!"
            ]
        else:
            message = [
                "{} recipes out of {} processed!".format(str(self.success),str(self.total)),
                "Ups! Maybe something went wrong!"
            ]
        return " ".join(message)
    def cli(self):
        """Parse command-line arguments, falling back to test mode with '-t'."""
        parser = argparse.ArgumentParser()
        parser.add_argument("-i","--input", help="input directory.")
        parser.add_argument("-m","--metadata", help = "metadata file.")
        parser.add_argument("-o","--output", help="output directory.")
        parser.add_argument("-t","--test", choices = ['contemporary','historical'], help = "run in test mode.")
        args = parser.parse_args()
        # Count how many of the four options the user did NOT provide.
        noneargs = [x for x in args.__dict__.values()].count(None)
        if noneargs == 3 and args.test != None:
            # Only '-t' was given: operate on the bundled test data.
            print("Running in test mode!")
            self.indir = 'test/{}/vrt'.format(args.test)
            self.outdir = 'test/{}/meta'.format(args.test)
            self.metadata = 'test/metadata/{}-metadata.csv'.format(args.test)
        elif noneargs > 1 and args.test == None:
            # Some of -i/-m/-o are missing: report which ones and exit.
            options = ["'-"+k[0]+"'" for k,v in args.__dict__.items() if v == None]
            options = ', '.join(options)
            exit_message = '\n'.join(["You forgot option(s): {}".format(options),
                "Provide option '-t [contemporary|historical]' to run in test mode: 'python3 {} -t contemporary'".format(os.path.basename(__file__)),
                "Get help with option '-h': 'python3 {} -h'".format(os.path.basename(__file__))]
                )
            sys.exit(exit_message)
        else:
            # All of -i/-m/-o were provided explicitly.
            self.indir = args.input
            self.outdir = args.output
            self.metadata = args.metadata
        pass
    def get_files(self, directory, fileclue):
        """Return paths of all files under ``directory`` matching ``fileclue``."""
        matches = []
        for root, dirnames, filenames in os.walk(directory):
            for filename in fnmatch.filter(filenames, fileclue):
                matches.append(os.path.join(root, filename))
        return matches
    def read_infile(self,infile):
        """Parse the XML file."""
        parser = etree.XMLParser(remove_blank_text=True)
        with codecs.open(infile, encoding='utf-8',mode='r+') as input:
            return etree.parse(input, parser)
    def add_metadata(self):
        """Attach metadata to every input VRT file and write the results."""
        input_files = self.get_files(self.indir, '*.vrt')
        self.total = len(input_files)
        self.success = 0
        # Read the TSV metadata table; its first (unnamed) column holds the
        # text IDs. Convert to {text_id: {column: value}} for fast lookup.
        df = pd.read_csv(self.metadata, sep = '\t')
        metadata = df.set_index('Unnamed: 0').T.to_dict('dict')
        for file in input_files:
            output_file = os.path.join(self.outdir,os.path.basename(file))
            tree = self.read_infile(file)
            root = tree.getroot()
            # The text ID is the file name without its extension.
            text_id = os.path.splitext(os.path.basename(file))[0]
            if text_id in metadata.keys():
                # Copy every metadata column onto the root element as an
                # XML attribute (values are stringified).
                for key in metadata[text_id].keys():
                    root.set(key,str(metadata[text_id][key]))
                vrt = etree.tostring(tree, encoding='unicode', method='xml') # convert the XML tree into a string to manipulate it
                # Put each tag and each run of text on its own line — the
                # one-item-per-line layout that VRT/CWB tooling expects.
                vrt = re.sub(r"><", r">\n<", vrt)
                vrt = re.sub(r">([^<\n])", r">\n\1", vrt)
                vrt = re.sub(r"([^\n])<", r"\1\n<", vrt)
                vrt = etree.ElementTree(etree.fromstring(vrt)) # parse the string as an element and convert the element in a tree
                vrt.write(output_file, encoding="utf8", xml_declaration=True, method="xml")
                self.success += 1
            else:
                # No metadata row for this file: skip it (not counted as success).
                print('Text ID "{}" is unknown!'.format(text_id))
print(AddMetadata())<file_sep># -*- coding: utf-8 -*-
import os
import re # to use regular expressions
import argparse # to parse command-line arguments
import time
import fnmatch
import pandas as pd
from lxml import etree
import sys
def timeit(method):
    """Decorator that reports how long each call to ``method`` takes.

    The wrapped callable behaves exactly like ``method``; as a side effect
    it prints the method name and the elapsed seconds after every call.
    """
    def timed(*args, **kw):
        start = time.time()
        outcome = method(*args, **kw)
        elapsed = time.time() - start
        print('%r %2.2f sec' % (method.__name__, elapsed))
        return outcome
    return timed
class Texts2Corpus(object):
    """Concatenate individual VRT text files into a single corpus file.

    Collects all ``*.vrt`` files from the input folders, appends each file's
    root node under one common wrapper element, strips stray whitespace from
    sentence contents, and writes the combined XML to the output file.
    """
    @timeit
    def __init__(self):
        # Parse command-line arguments; sets self.indirs, self.outfile and
        # self.outdir (or exits if an argument is missing).
        self.cli()
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)
        # The whole pipeline runs on construction; results are summarised
        # by __str__ when the instance is printed at module level.
        self.main()
    def __str__(self):
        # Human-readable processing summary ("N recipes out of M processed!").
        if self.success > 0:
            message = [
                "{} recipes out of {} processed!".format(str(self.success),str(self.total)),
                "Guten Appetit!"
            ]
        else:
            message = [
                "{} recipes out of {} processed!".format(str(self.success),str(self.total)),
                "Ups! Maybe something went wrong!"
            ]
        return " ".join(message)
    def cli(self):
        """Parse command-line arguments, falling back to test mode if none given."""
        parser = argparse.ArgumentParser()
        parser.add_argument("-i","--input", nargs='+', help="path to the input file.")
        parser.add_argument("-o", "--output", help="target file where to save the output.")
        args = parser.parse_args()
        # Count how many of the two options the user did NOT provide.
        noneargs = [x for x in args.__dict__.values()].count(None)
        if noneargs == 2:
            # No options at all: operate on the bundled test data.
            print("Running in test mode!")
            self.indirs = ['test/contemporary/meta','test/historical/meta']
            self.outfile = 'test/sacoco.vrt'
        elif noneargs < 2 and noneargs > 0:
            # Exactly one option missing: report which one and exit.
            options = ["'-"+k[0]+"'" for k,v in args.__dict__.items() if v == None]
            options = ', '.join(options)
            exit_message = '\n'.join(["You forgot option(s): {}".format(options),
                "Provide no option to run in test mode: 'python3 {}'".format(os.path.basename(__file__)),
                "Get help with option '-h': 'python3 {} -h'".format(os.path.basename(__file__))]
                )
            sys.exit(exit_message)
        else:
            # Both -i and -o were provided explicitly.
            self.indirs = args.input
            self.outfile = args.output
        # The output directory is derived from the output file path
        # in every branch above.
        self.outdir = os.path.split(self.outfile)[0]
        pass
    def get_files(self, directory, fileclue):
        """Return paths of all files under ``directory`` matching ``fileclue``."""
        matches = []
        for root, dirnames, filenames in os.walk(directory):
            for filename in fnmatch.filter(filenames, fileclue):
                matches.append(os.path.join(root, filename))
        return matches
    def read_infile(self,infile):
        """Parse the XML file."""
        parser = etree.XMLParser(remove_blank_text=True)
        with open(infile, encoding='utf-8',mode='r+') as input:
            return etree.parse(input, parser)
    # Earlier string-based implementation, kept commented out for reference.
    # def main(self):
    #     input_files = []
    #     for folder in self.indirs:
    #         input_files += self.get_files(folder, '*.vrt')
    #     self.total = len(input_files)
    #     self.success = 0
    #     corpus = []
    #     for file in input_files:
    #         tree = self.read_infile(file)
    #         text = tree.getroot()
    #         sentences = text.xpath('//s')
    #         for sentence in sentences:
    #             sentence.text = sentence.text.strip()
    #         corpus.append(etree.tostring(text, encoding='utf-8', method='xml').decode())
    #         self.success += 1
    #
    #     # corpus = etree.tostring(corpus, encoding='unicode', method='xml') # convert the XML tree into a string to manipulate it
    #     # corpus = re.sub(r"><", r">\n<", corpus)
    #     # corpus = re.sub(r">([^<\n])", r">\n\1", corpus)
    #     # corpus = re.sub(r"([^\n])<", r"\1\n<", corpus)
    #     # vrt = etree.ElementTree(etree.fromstring(corpus)) # parse the string as an element and convert the element in a tree
    #     # vrt.write(self.outfile, encoding="utf8", xml_declaration=True, method="xml")
    #     corpus = '\n'.join(corpus)
    #     with open(self.outfile, 'w', encoding='utf-8') as outfile:
    #         outfile.write(corpus)
    def main(self):
        """Build the combined corpus tree and write it to ``self.outfile``."""
        input_files = []
        # Gather VRT files from every input folder, in order.
        for folder in self.indirs:
            input_files += self.get_files(folder, '*.vrt')
        self.total = len(input_files)
        self.success = 0
        # Wrapper element that will hold every per-file root node.
        otree = etree.ElementTree(etree.Element('text'))
        corpus = otree.getroot()
        for file in input_files:
            tree = self.read_infile(file)
            text = tree.getroot()
            corpus.append(text)
            self.success += 1
        # Strip stray whitespace from all sentence text nodes.
        # NOTE(review): assumes every <s> element has non-None text
        # content — confirm against the input data.
        sentences = corpus.xpath('//s')
        for sentence in sentences:
            sentence.text = sentence.text.strip()
        # corpus = etree.tostring(corpus, encoding='unicode', method='xml') # convert the XML tree into a string to manipulate it
        # corpus = re.sub(r"><", r">\n<", corpus)
        # corpus = re.sub(r">([^<\n])", r">\n\1", corpus)
        # corpus = re.sub(r"([^\n])<", r"\1\n<", corpus)
        # vrt = etree.ElementTree(etree.fromstring(corpus)) # parse the string as an element and convert the element in a tree
        # vrt.write(self.outfile, encoding="utf8", xml_declaration=True, method="xml")
        corpus = etree.ElementTree(corpus)
        corpus.write(self.outfile, encoding="utf8", xml_declaration=True, method="xml")
print(Texts2Corpus())<file_sep>---
title: "Get your corpus in CQPweb: a tutorial"
author: "<NAME>"
date: "1 February 2016"
output:
html_document:
toc: false
toc_float: false
---
# Introduction {.tabset .tabset-fade .tabset-pills}
If you are reading this, you probably come from the [SaCoCo tutorial](index.html#set_up_a_corpus_in_cqpweb), you have access to a CQPweb installation as administrator, and you want to encode the corpus. We document below two approaches:
1. advanced
1. "easy"
The first approach is more involved, but allows for much more control and freedom. The second might be helpful, especially if you are a beginner and the annotation of your corpus is fairly basic.
## Advanced
Let's assume that you have:
- `cqp` installed in your computer
- administrator access to a CQPweb installation
- root access to the server where the CQPweb lives
### Encode the corpus for the CWB
The first thing we need to do is to encode the corpus. This process will create a number of files that will enable us to use the CQP language to query the corpus.
Once we have the texts in VRT format, encoding the corpus for the CWB is relatively easy.
Check that you have the corpus work bench installed in the computer, if not, download it and follow these [instructions](http://cwb.sourceforge.net/download.php). We compiled from source version 3.4.8.
Now, run the following commands:
```bash
# create the target folder for encoded data
mkdir -p data/cqp/data
# run the command
cwb-encode -c utf8 -d data/cqp/data -F data/contemporary/meta/ -F data/historical/meta -R data/cqp/sacoco -xsB -S text:0+id+collection+source+year+decade+period+title -S p:0 -S s:0 -P pos -P lemma -P norm
# generate the registry file
cwb-make -r data/cqp -V SACOCO
```
The `cwb-encode`'s parameters explained:
- `-c` to the declare the character encoding
- `-d` path to the target directory where the output will be stored
- `-F` path to the input directory where the VRT files are located
- `-R` path to the registry file
- `-xsB`
- `x` for XML compatibility mode (recognises default entities and
skips comments as well as an XML declaration)
- `s` to skip blank lines in the input
- `B` to strip white spaces from tokens
- `-S` to declare a structural attribute, example:
- `-S text:0+id+authors/`
- `text`, structural attribute to be declared
- `0` embedding levels
- `id` will be an attribute of `text` containing some value
- `-P` to declare positional attributes
Get extensive information on how to encode corpora for the CWB in the [encoding tutorial](http://cwb.sourceforge.net/files/CWB_Encoding_Tutorial.pdf).
> TIP: for development/testing purposes, just run the command below on the test files.
```bash
# create the target folder for encoded data
mkdir -p test/cqp/data
# run the command
cwb-encode -c utf8 -d test/cqp/data -F test/contemporary/meta/ -F test/historical/meta -R test/cqp/sacoco -xsB -S text:0+id+collection+source+year+decade+period+title -S p:0 -S s:0 -P pos -P lemma -P norm
# generate the registry file
cwb-make -r test/cqp -V SACOCO
```
### Upload the corpus to the server and set permissions
Once you have the data, you have to upload the files to the server where CQPweb is installed. In our case, this is the machine `fedora.clarin-d.uni-saarland.de`.
In our case, one needs to connect to the server as `root` user. There are different methods to upload the files:
- via the command line with tools like `scp` or `rsync` which use the `ssh` protocol
- via a FTP client like [Filezilla](https://filezilla-project.org)
Upload the local folder `data/cqp/sacoco/` to the remote folder (in the server) `/data2/cqpweb/indexed`, and the registry file `data/cqp/sacoco` to the folder `/data2/cqpweb/registry`.
Once all files are uploaded, you have to check the ownership of the folder/file:
- the owner should be `wwwrun`
- the group should be `www`
If not just run a couple of commands:
```bash
chown -R wwwrun:www /data2/cqpweb/indexed/sacoco
chown wwwrun:www /data2/cqpweb/registry/sacoco
```
Then, modify the registry file `/data2/cqpweb/registry/sacoco` to indicate the location of the corpus in the server `/data2/cqpweb/indexed/sacoco`.
### Log in as admin in CQPweb
1. Type the URL to your CQPweb installation (e.g. <https://fedora.clarin-d.uni-saarland.de/cqpweb/>)
1. log in with an administrator account, you are redirected to your user account
1. click on `Go to admin control panel` in the left-hand menu **Account actions**.
### Installing the corpus
We can now start installing the corpus:
1. click on `Install a new corpus` in the left menu **Corpora**
1. click on the link `Click here to install a corpus you have already indexed in CWB.` which you will find in the grey row at the top of the page.
1. Fill in the fields
1. Specify a MySQL name for this corpus: `sacoco`
1. Enter the full name of the corpus: `Saarbrücken Cookbook Corpus`
1. Specify the CWB name (lowercase format): `sacoco`
1. Click on the button `Install corpus with settings above` that you will find at the bottom of the page.
A new page will load:
1. click on `Design and insert a text-metadata table for the corpus`
A new page will load:
1. Choose `sacoco.meta` in section `Choose the file containing the metadata`
1. Fill in the field rows in `Describe the contents of the file you have selected`, providing for *Handle* and *Description*:
1. year
1. decade
1. period
1. collection
1. source
1. title
1. Mark `collection` as the primary category.
1. Set `title` as free text
1. Select `Yes please` in section `Do you want to automatically run frequency-list setup?`
1. Finally, click on the button `install metadata table using the settings above`
Now set up the annotation (positional attributes):
1. click on `Manage annotation`, you will find it in the left menu, in section `Admin Tools`.
1. complete the annotation metadata information at the bottom:
1. lemma: *Description:* lemma
1. click on `Go!`
1. pos: *Description:* pos; *Tagset name:* STTS; *External URL:* <http://www.ims.uni-stuttgart.de/forschung/ressourcen/lexika/TagSets/stts-table.html>
1. click on `Go!`
1. norm: *Description* norm; *External URL:* <http://www.deutschestextarchiv.de/doku/software#cab>
1. click on `Go!`
1. set `pos` as `Primary annotation` above
1. click on `Update annotation settings`.
Check corpus settings:
1. go to `Corpus settings` in `Admin tools`
1. in `General options`:
1. assign a category in field `The corpus is currently in the following category:` Historical corpora
1. click on the `Update` button
1. provide an external URL: <http://hdl.handle.net/11858/00-246C-0000-001F-7C43-1>
1. click on the `Update` button
We set the access to this corpus open for everybody:
1. go to `Admin Control Panel` in `Admin tools`
1. go to `Manage privileges` in `Users and privileges`
1. scroll to the bottom of the page, there
1. select `sacoco` from list `Generate default privileges for corpus...`
1. click on button `Generate default privileges for this corpus`.
1. go to `Manage group grants` in `Users and privileges`
1. scroll to the bottom, in section `Grant new privilege to group`
1. Select group `everybody`
1. Select a privilege `Normal access privilege for corpus [sacoco]`
1. click on `Grant privilege to group!`
Hurraaaaah! Corpus ready to be queried!
## "Easy"
Let's assume that you have administrator access to a CQPweb installation. We will guide you in the following lines through the process of setting up the corpus.
### Concatenate all texts in a single corpus VRT file
We need a single XML file containing all texts. `texts2corpus.py` helps us to ease the task.
`texts2corpus.py`:
- finds all `.vrt` files contained in the input folders
- gets the `<text>` nodes
- appends the `<text>` nodes to a parent element called `<corpus>`
- saves `<corpus>` as a single XML file
Its usage is pretty simple, just provide the path to the folders containing the `.vrt` files with metadata, and the path to the output folder:
```bash
python3 texts2corpus.py -i data/contemporary/meta data/historical/meta -o data/sacoco.vrt
```
> TIP: for development/testing purposes, if you just run `python3 texts2corpus.py`, it will work on the testing dataset stored in the test folder.
### Log in as admin
1. Type the URL to your CQPweb installation (e.g. <https://fedora.clarin-d.uni-saarland.de/cqpweb/>)
1. log in with an administrator account, you are redirected to your user account
1. click on `Go to admin control panel` in the left-hand menu **Account actions**.
### Upload files
We need to upload the corpus file (`sacoco.vrt`) and the metadata file (`sacoco.meta`).
For each file:
1. click on `Upload a file` in the left menu **Uploads**.
1. click on `Choose File`, a dialogue window will open, pick the file you want to upload.
1. click on `Upload File`.
### Installing the corpus
We can now start installing the corpus:
1. click on Install a new corpus in the left menu **Corpora**
1. in install new corpus section
1. provide a MySQL name for the corpus: `sacoco`
1. provide a name for the corpus: `sacoco`
1. provide the full name of the corpus: `Saarbrücken Cookbook Corpus`
1. in `Select files` section
1. select `sacoco.vrt`
1. in `S-attributes` section
1. check the option on the left `Use custom setup`
1. and then add in the boxes on the right:
1. `p:0`
1. in `P-attributes` section
1. check the option on the left `Use custom setup`
1. set the first row as `Primary`
1. add the following three positional attributes, each value is a field:
1. *Handle:* pos; *Description:* Part-Of-Speech; *Tagset:* STTS; *External URL:* <http://www.ims.uni-stuttgart.de/forschung/ressourcen/lexika/TagSets/stts-table.html>
1. *Handle:* lemma; *Description:* lemma; *Tagset:* leave it empty; *External URL:* leave it empty.
1. *Handle:* norm; *Description:* orthographic correction by CAB, *Tagset:* leave it empty; *External URL:* <http://www.deutschestextarchiv.de/doku/software#cab>
1. click on `Install corpus with settings above` at the bottom of the page.
A new page will load:
1. click on `Design and insert a text-metadata table for the corpus`
A new page will load:
1. Choose `sacoco.meta` in section `Choose the file containing the metadata`
1. Fill in the field rows in `Describe the contents of the file you have selected`, providing for *Handle* and *Description*:
1. year
1. decade
1. period
1. collection
1. source
1. Select `Yes please` in section `Do you want to automatically run frequency-list setup?`
1. Finally, click on the button `install metadata table using the settings above`
### Admin tools
- Corpus settings: probably nothing to do here; has been set during the installation process
- Manage access: to add user groups for your corpus (otherwise only the superuser can access the corpus!)
- Manage metadata: probably nothing to do here; has been set during the installation process
- Manage text categories: here you can add more "speaking" descriptions for your text categories
- Manage annotation: add descriptions / URLs of documentations for your positional attributes; specify primary/secondary/... annotations for the CQP Simple Query language; specifying annotations here makes them available for restrictions throughout CQPweb (e.g. for the collocation function)
- Manage privileges: Scroll to end of page and generate default privileges for the corpus; select than "Manage group grants", scroll to end of page and select a group and grant it privileges of that particular corpus (normally normal privileges are chosen)
<file_sep># -*- coding: utf-8 -*-
import os
import argparse
import regex as re
# import re # to use regular expressions
import time
import fnmatch
from lxml import etree, objectify
import datetime
import pandas as pd
import sys
#===============================================================================
# Function to time functions
#===============================================================================
def timeit(method):
    """Decorator that prints the wall-clock run time of ``method``.

    Args:
        method: the callable to wrap.

    Returns:
        A wrapper that calls ``method`` with the given arguments, prints the
        method name and the elapsed seconds, and returns the original result
        unchanged.
    """
    from functools import wraps

    @wraps(method)  # preserve the wrapped function's __name__/__doc__
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print('%r %2.2f sec' % (method.__name__, te - ts))
        return result
    return timed
#===============================================================================
# Class if needed
#===============================================================================
class WikiExtractor(object):
"""Extract metadata and recipes from rezeptewikiorg XML dump.
Produces a CSV file and TEI files respectively.
"""
@timeit
def __init__(self):
self.ns = 'http://www.mediawiki.org/xml/export-0.6/'
self.xml = 'http://www.w3.org/XML/1998/namespace'
self.tei = 'http://www.tei-c.org/ns/1.0'
self.tei_template = 'utils/tei_lite_template.xml'
self.metadata = {}
cuisines = [
"Deutsche Küche",
"Badische Küche",
"Bayerische Küche",
"Berliner Küche",
"<NAME>",
"DDR-Küche",
"Deutscher Käse",
"Hamburger Küche",
"Hessische Küche",
"Mecklenburger Küche",
"Moselländische Küche",
"Niedersächsische Küche",
"Pfälzer Küche",
"Rheinische Küche",
"Saarländische Küche",
"Sachsen-Anhalter Küche",
"Sächsische Küche",
"Schleswig-Holsteinische Küche",
"Schwäbische Küche",
"Thüringer Küche",
"Westfälische Küche",
"Maultaschen",
"Fränkische Küche",
"Currywurst",
"Grünkohl",
"Friesische Küche",
"Fryslâns Küche",
"Groninger Küche",
"Hamburger Küche",
"Quiche",
"Erzgebirgische Küche",
"Spätzle",
"Österreichische Küche",
"Burgenländische Küche",
"<NAME>",
"Niederösterreichische Küche",
"<NAME>",
"<NAME>",
"Weinviertler Küche",
"Oberösterreichische Küche",
"Innviertler Küche",
"<NAME>",
"Mühlviertler Küche",
"Salkammergut Küche",
"Österreichischer Käse",
"<NAME>",
"Salkammergut Küche",
"Steirische Küche",
"<NAME>",
"Ost- und Nordtiroler Küche",
"Südtiroler Küche",
"<NAME>",
"<NAME>",
"<NAME>",
"Appenzeller Küche",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Waadtländer Küche",
"<NAME>",
]
self.isgermancuisine = re.compile(r'\[\[Kategorie:({})\]\]'.format('|'.join(cuisines)))
self.cli()
# self.teidir = os.path.join(self.xmldir,'source','tei')
# self.xmldir = os.path.join(self.outdir,'source','xml')
# self.metadir = os.path.join(self.outdir,'metadata')
for odir in [self.xmldir,self.metadir]:
# for odir in [self.teidir,self.xmldir,self.metadir]:
if not os.path.exists(odir):
os.makedirs(odir)
self.main() # function running in the background
def __str__(self):
if self.success > 0:
message = [
"{} recipes out of {} processed!".format(str(self.success),str(self.total)),
"Guten Appetit!"
]
else:
message = [
"{} recipes out of {} processed!".format(str(self.success),str(self.total)),
"Ups! Maybe something went wrong!"
]
return " ".join(message)
# Function to get all files in a directory
def get_files(self, directory, fileclue):
matches = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, fileclue):
matches.append(os.path.join(root, filename))
return matches
def read_xml(self,infile):
"""Parse the XML file."""
parser = etree.XMLParser(remove_blank_text=True,encoding="utf-8")
with open(infile, encoding='utf-8',mode='r') as input:
return etree.parse(input, parser)
def deprettyfy(self,tree):
# tree to string
tree = etree.tostring(tree, encoding="utf-8", method="xml")
tree = tree.decode('utf-8')
tree = re.sub(r"(\n) +(<)", r"\1\2", tree)
tree = re.sub(r"> *<", r">\n<", tree)
tree = re.sub(r"\n\n+", r"\n", tree)
tree = etree.fromstring(tree)
tree = etree.ElementTree(tree)
return tree
def strip_nodes(self,tree):
for node in self.nodes:
xslt_strip_nodes = etree.XML('''
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="node()|@*" >
<xsl:copy>
<xsl:apply-templates select="node()|@*" />
</xsl:copy>
</xsl:template>
<xsl:template match="//{}" >
<xsl:apply-templates/>
</xsl:template>
</xsl:stylesheet>
'''.format(node))
transform = etree.XSLT(xslt_strip_nodes)
try:
result = transform(result)
except:
result = transform(tree)
return result
def serialize(self,tree,infile):
outpath = os.path.join(self.outdir,infile+'.vrt')
tree.write(outpath, xml_declaration=True, encoding='utf-8')
pass
def add_text_mixed(self,parent,s):
if len(parent) == 0:
parent.text = (parent.text or "") + '\n' + s
else:
youngest = parent[-1]
youngest.tail = (youngest.tail or "") + '\n' + s
pass
def get_all_nodes(self,tree):
nodes = tree.findall('.//*')
self.nodes = set(x.tag for x in nodes)
# self.nodes.add(tree.getroot().tag)
pass
def clean_tree(self,tree,fbasename):
output = self.strip_nodes(tree)
root = output.getroot()
for attribute in root.attrib:
if attribute != 'id':
del root.attrib[attribute]
return output
def get_newest_revision(self,page):
"""Get the newest revision for a wiki page.
It takes a page element as input. It returns a revision element.
get_newest_revision(page)
"""
date_format = "%Y-%m-%dT%H:%M:%SZ"
all_timestamps = page.xpath('.//x:timestamp', namespaces = {'x':self.ns})
newest_date = max([datetime.datetime.strptime(x.text, date_format) for x in all_timestamps]).strftime(date_format)
year = re.match(r'(\d{4})-', newest_date).group(1)
for timestamp in all_timestamps:
if timestamp.text == newest_date:
return(timestamp.getparent(),year)
def get_title(self,page):
"""Get the title of a particular revision.
It takes a page element as input. It returns a string.
"""
title = page.xpath('./x:title', namespaces = {'x':self.ns})[0].text
return(title)
def get_preparation(self,revision):
"""Get the preparation section of a particular revision.
It takes a revision element as input. It returns a string.
"""
text = revision.xpath('./x:text', namespaces = {'x':self.ns})[0].text
getpreparation = re.compile(r'== Zubereitung ==\n(.+?)\n== ', re.DOTALL)
preparation = getpreparation.search(text)
if preparation == None:
return(None)
else:
preparation = preparation.group(1)
return(preparation)
def clean_preparation(self, preparation):
"""Cleans preparation section.
It takes as input and returns a string.
"""
preparation = re.sub(r'\* ?',r'',preparation)
preparation = re.sub(r'\[+.+?:.+?\|(.+?)\]+',r'\1',preparation)
preparation = re.sub(r"''",r'"',preparation)
preparation = re.sub(r'\[+.+?\|(.+?)\]+',r'\1',preparation)
preparation = re.sub(r'\[+(.+?)\]+',r'\1',preparation)
preparation = re.sub(r'\[+.+?:.+?\|(.+)\}+', r'\1', preparation)
preparation = re.sub(r'\[+.+?:.+?\|',r'',preparation)
preparation = re.sub(r'\{+Zubereitung:.+?\|(.+?)\]+',r'\1',preparation)
preparation = re.sub(r'\(?Zubereitung:.+?\|(.+?)\]+',r'\1',preparation)
preparation = re.sub(r'(\{\{B\|1\|2)\]\]',r'\1}}',preparation)
preparation = re.sub(r' .+?\|(.+?)\]+',r'\1', preparation)
preparation = re.sub(r'\]+',r'',preparation)
preparation = re.sub(r'(\{+.+?)\)\)',r'\1}}',preparation)
preparation = re.sub(r'\{\{Grad\|(.+?)\}\}',r'\1 °C',preparation)
preparation = re.sub(r'\{\{G\|(.+?)\}\}',r'\1 °C',preparation)
preparation = re.sub(r'\{{(Unterhitze|Alkohol|Umluft|Umbruch|Oberhitze|Grill|OberUnterhitze)\}\}',r'',preparation)
preparation = re.sub(r'\{+B\|1\|3\}+',r'1/3', preparation)
preparation = re.sub(r'\{+B\|3\|8\}+',r'3/8', preparation)
preparation = re.sub(r'\{+B\|1\|8\}+',r'1/8',preparation)
preparation = re.sub(r'\{+B\|1\|2\}+',r'1/2',preparation)
preparation = re.sub(r'\{+B\|2\|3\}+',r'2/3',preparation)
preparation = re.sub(r'\{+B\|4\|5\}+',r'4/5',preparation)
preparation = re.sub(r'\{+B\|1\|4\}+',r'1/4',preparation)
preparation = re.sub(r'\{+B\|3\|4\}+',r'3/4',preparation)
preparation = re.sub(r'<sup>1</sup>/<sub>2</sub>',r'1/2',preparation)
preparation = re.sub(r'<sup>1</sup>∕<sub>4</sub>',r'1/4',preparation)
preparation = re.sub(r'<br.*?>',r'',preparation)
preparation = re.sub(r'<Center>.+?</Center>',r'',preparation,flags=re.DOTALL)
preparation = re.sub(r'<div align.+?>.+?</div>',r'',preparation,flags=re.DOTALL)
preparation = re.sub(r'<span style="color:#ff0000">(.+?)</span>',r'\1',preparation)
preparation = re.sub(r' ',r' ',preparation)
preparation = re.sub(r'^ +',r'',preparation,flags=re.MULTILINE)
preparation = re.sub(r'\n\n+',r'\n',preparation)
preparation = re.sub(r' +\n',r'\n',preparation)
preparation = re.sub(r'<!--.+?-->',r'',preparation)
preparation = re.sub(r'<!-- Verborgen, siehe Diskussionsseite',r'',preparation)
preparation = re.sub(r'<[g|G]allery.*?>.+?</[g|G]allery>',r'',preparation, flags = re.DOTALL)
preparation = re.sub(r'\p{Zs}',r' ',preparation)
preparation = re.sub(r'\n\n+',r'\n',preparation)
preparation = re.sub(r'&',r'&',preparation)
preparation = re.sub(r' +',r' ',preparation)
preparation = re.sub(r'==Bilder==',r'',preparation)
preparation = preparation.strip()
return(preparation)
def get_page_id(self, page):
page_id = page.xpath('./x:id', namespaces = {'x':self.ns})[0].text
return(page_id)
def get_revision_id(self, revision):
revision_id = revision.xpath('./x:id', namespaces = {'x':self.ns})[0].text
return(revision_id)
def get_ingredients(self, revision):
text = revision.xpath('./x:text', namespaces = {'x':self.ns})[0].text
getingredients = re.compile(r'== Zutaten ==\n(.+?)\n== ', re.DOTALL)
ingredients = getingredients.search(text)
if ingredients == None:
return('')
else:
ingredients = re.findall(r'\[\[Zutat:(.+?)\|.+?\]\]',ingredients.group(1))
ingredients = [re.sub(r'\[+.+?:',r'',x) for x in ingredients]
ingredients = [re.sub(r'(.+?)\]+',r'\1',x) for x in ingredients]
ingredients = set(ingredients)
return(ingredients)
def get_tools(self, revision):
text = revision.xpath('./x:text', namespaces = {'x':self.ns})[0].text
gettools = re.compile(r'== Kochgeschirr ==\n(.+?)\n== ', re.DOTALL)
tools = gettools.search(text)
if tools == None:
return('')
else:
tools = re.findall(r'\[\[Zubereitung:(.+?)\|.+?\]\]',tools.group(1))
tools = set(tools)
return(tools)
def get_methods(self, preparation):
getmethods = re.compile(r'\[\[Zubereitung:(.+?)\|.+?\]\]')
methods = getmethods.findall(preparation)
methods = set(methods)
return(methods)
def get_authors(self, page):
authors = page.xpath('.//x:contributor/x:username', namespaces = {'x':self.ns})
authors = [x.text for x in authors]
authors = set(authors)
return(authors)
def add_text_id(self,tei,revision_id):
"""Add the id attribute to the text element of a TDABf."""
text_id = 'wiki_'+revision_id
text = tei.xpath('//x:text', namespaces = {'x':self.tei})[0]
text.set('{}id'.format('{'+self.xml+'}'),text_id)
pass
def add_divs(self,tei,title,preparation):
"""Add the div, head, p and lb to the body element of a TDABf."""
# get the body element
body = tei.xpath('//x:body',namespaces = {'x':self.tei})[0]
# append the first division type recipe
div1 = etree.SubElement(body,'div',attrib={'n':'1','type':'recipe'})
# add the first header as title
head1 = etree.SubElement(div1,'head')
head1.text = title
# check if there are subdivisions
hassubdivisions = re.search(r'==+',preparation)
if hassubdivisions == None:
# add the first paragraph
preparation = re.sub(r'\n',r' ',preparation)
# preparation = re.sub(r'\n',r'<lb/>',preparation)
p1 = etree.fromstring('<p>'+preparation+'</p>')
div1.append(p1)
# add the string as the content of the first paragraph
# p1.text = '\n'+preparation+'\n'
else:
# process subdivisions
preparation = re.sub(r'==+ (.+?) ==+',r'<div n="2" type="contents"><head>\1</head><p>',preparation)
preparation = re.sub(r'([^>])\n(<div)',r'\1</p></div>\2',preparation)
preparation = preparation + '</p></div>'
# if preparation[0] != '<':
# preparation = re.sub('.+?</p></div>',r'',preparation)
preparation = '<text>' + preparation + '</text>'
preparation = re.sub(r'<text>[^<].+?<div',r'<text><div',preparation,flags=re.DOTALL)
preparation = re.sub(r'<p>\n',r'<p>',preparation)
preparation = re.sub(r'\n',r' ',preparation)
# preparation = re.sub(r'\n',r'<lb/>',preparation)
preparation = re.sub(r'</head><p><div',r'</head></div><div',preparation)
# preparation = re.sub(r'<lb/>',r' ',preparation)
outxml = etree.fromstring(preparation)
allelements = outxml.xpath('//div')
for element in allelements:
div1.append(element)
# paragraphs = tei.xpath('//x:p',namespaces = {'x':self.tei})
paragraphs = tei.xpath('//p')
for p in paragraphs:
if p.text == None:
pparent = p.getparent()
pparent.remove(p)
pass
def generate_text(self,tei,title,revision_id,preparation):
"""Create the text element of a TEI file."""
# add text id
self.add_text_id(tei, revision_id)
# add div, div n, div type
self.add_divs(tei, title, preparation)
pass
def generate_teiheader(self,tei,title,revision_id,authors):
# add title
title_element = tei.xpath('//x:title', namespaces = {'x':self.tei})[0]
title_element.text = title
# add authors
author_element = tei.xpath('//x:author', namespaces = {'x':self.tei})[0]
author_element.text = ', '.join(authors)
source = tei.xpath('//x:sourceDesc/x:p', namespaces = {'x':self.tei})[0]
url = 'http://www.kochwiki.org/w/index.php?oldid='+revision_id
source.text = url
pass
def create_tei(self,title,revision_id,authors,preparation):
"""Create a TEI lite file from a wiki recipe."""
# get the template
tei = self.read_xml(self.tei_template)
# teiHeader
self.generate_teiheader(tei,title,revision_id,authors)
# generate text
self.generate_text(tei,title,revision_id,preparation)
teiasstring = etree.tostring(tei,encoding='utf-8').decode()
teiasstring = re.sub(r'><',r'>\n<',teiasstring)
parser = etree.XMLParser(remove_blank_text=True)
otei = etree.ElementTree(etree.XML(teiasstring,parser))
obasename = 'wiki_'+revision_id
otei.write(os.path.join(self.xmldir,obasename+'.xml'),encoding='utf-8',pretty_print=True,xml_declaration=True)
return(otei)
def create_xml(self,tei,revision_id):
"""Create a simplified XML file only containing the text to be processed with WebLicht."""
for elem in tei.getiterator():
if not hasattr(elem.tag, 'find'): continue # (1)
i = elem.tag.find('}')
if i >= 0:
elem.tag = elem.tag[i+1:]
objectify.deannotate(tei, cleanup_namespaces=True)
etree.strip_attributes(tei, '{}id'.format('{'+self.xml+'}'))
content = tei.xpath('./text/body/div')[0]
text = etree.Element('text', id = 'wiki_'+revision_id)
text.append(content)
outpath = os.path.join(self.xmldir,'wiki_'+revision_id+'.xml')
tree = etree.ElementTree(text)
tree.write(outpath, encoding = 'utf-8', pretty_print=True, xml_declaration=True)
pass
def add_metadata(self,revision_id,title,authors,ingredients,tools,methods,year,categories):
"""Add metadata instances to a data structure."""
def formatasfeature(values):
if len(values) == 0:
output = "|"
else:
output = "|{}|".format('|'.join(values))
return output
authors = formatasfeature(authors)
ingredients = formatasfeature(ingredients)
tools = formatasfeature(tools)
methods = formatasfeature(methods)
categories = formatasfeature(categories)
source = 'wiki'
collection = 'contemporary'
decade = str((int(year)//10)*10)
def get_period(year):
p1 = year[:2]
p2 = year[2:]
if int(p2) < 50:
p2 = '00'
elif int(p2) >= 50:
p2 = '50'
return p1+p2
period = get_period(year)
self.metadata['wiki_'+revision_id] = {
'title':title,
'year':year,
'decade':decade,
'period':period,
'source':source,
'authors':authors,
'ingredients':ingredients,
'tools':tools,
'methods':methods,
'cuisines':categories,
'collection':collection
}
pass
def extract_info(self,page,revision,preparation):
title = self.get_title(page)
page_id = self.get_page_id(page)
revision_id = self.get_revision_id(revision)
authors = self.get_authors(page)
ingredients = self.get_ingredients(revision)
tools = self.get_tools(revision)
methods = self.get_methods(preparation)
preparation = self.clean_preparation(preparation)
return(page_id,revision_id,title,authors,ingredients,tools,methods,preparation)
def create_metadata(self):
outpath = os.path.join(self.metadir,'contemporary-metadata.csv')
df = pd.DataFrame(self.metadata).transpose()
df.to_csv(outpath, sep = '\t')
pass
def main(self):
# open wikidump file
inxml = self.read_xml(self.infile)
all_pages = [x.getparent() for x in inxml.xpath('//x:ns[text()="0"]', namespaces = {'x':self.ns})]
self.total = len(all_pages)
self.success = 0
for page in all_pages:
if self.isgermancuisine.search(etree.tostring(page, encoding='utf-8').decode()):
categories = set(self.isgermancuisine.findall(etree.tostring(page, encoding='utf-8').decode()))
revision, year = self.get_newest_revision(page)
preparation = self.get_preparation(revision)
if preparation == None:
continue
else:
page_id,revision_id,title,authors,ingredients,tools,methods,preparation = self.extract_info(page,revision,preparation)
tei = self.create_tei(title,revision_id,authors,preparation)
# xml = self.create_xml(tei,revision_id)
self.add_metadata(revision_id,title,authors,ingredients,tools,methods,year,categories)
self.success += 1
# save metadata
self.create_metadata()
pass
def cli(self):
"""CLI parses command-line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument("-i","--input", help="path to the input file.")
parser.add_argument("-x","--xml", help="output directory for TEI/XML files.")
parser.add_argument("-m","--meta", help="output directory for the metadata file.")
args = parser.parse_args()
noneargs = [x for x in args.__dict__.values()].count(None)
if noneargs == 3:
print("Running in test mode!")
self.infile ='test/contemporary/source/rezeptewikiorg-20140325-history.xml'
self.xmldir ='test/contemporary/tei'
self.metadir ='test/metadata'
elif noneargs < 3 and noneargs > 0:
options = ["'-"+k[0]+"'" for k,v in args.__dict__.items() if v == None]
options = ', '.join(options)
exit_message = '\n'.join(["You forgot option(s): {}".format(options),
"Provide no option to run in test mode: 'python3 {}'".format(os.path.basename(__file__)),
"Get help with option '-h': 'python3 {} -h'".format(os.path.basename(__file__))]
)
sys.exit(exit_message)
else:
self.infile = args.input
self.xmldir = args.xml
self.metadir = args.meta
pass
print(WikiExtractor())<file_sep>et-xmlfile==1.0.1
jdcal==1.2
lxml==3.5.0
numpy==1.10.4
openpyxl==2.3.3
pandas==0.17.1
python-dateutil==2.4.2
pytz==2015.7
regex==2016.1.10
requests==2.9.1
six==1.10.0
Unidecode==0.4.19
wheel==0.24.0
| 9a6081e4d59d5dd68f4257c23f3d469c68b74705 | [
"Markdown",
"Python",
"Text",
"RMarkdown"
] | 10 | Python | xindavidlee/sacoco | 47ef221491bfc8e2b8af6baa1e36f82405c3d7bd | 745dd784541dca70d9763cb081cc3871e97c2c5f |
refs/heads/master | <file_sep>package com.laaficionmanda.android.bo;
/**
* Clase para el manejo de
* la información del usuario.
* @author Esteban
*
*/
public class Usuario {

    /**
     * No-arg constructor.
     *
     * <p>Fix: the original declared {@code public void Usuario()}, which is a
     * regular method that merely shares the class name — not a constructor.
     * Callers using {@code new Usuario()} are unaffected.
     */
    public Usuario() {
    }

    /* Getters and setters */
    public String getId() {
        return id;
    }
    public void setId(String id) {
        this.id = id;
    }
    public String getEmail() {
        return email;
    }
    public void setEmail(String email) {
        this.email = email;
    }
    public String getAppID() {
        return appID;
    }
    public void setAppID(String appID) {
        this.appID = appID;
    }
    public String getUrlImage() {
        return urlImage;
    }
    public void setUrlImage(String urlImage) {
        this.urlImage = urlImage;
    }

    /* Class attributes */
    private String id;        // internal user id
    private String email;     // user e-mail address
    private String appID;     // application id (presumably the Facebook app id — TODO confirm)
    private String urlImage;  // URL of the user's avatar image
}
<file_sep>Prueba
======
Prueba para android<file_sep>package com.laaficionmanda.android.ui;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import android.app.Activity;
import android.content.Context;
import android.content.res.XmlResourceParser;
import android.os.Bundle;
import android.view.InflateException;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.view.Window;
import android.widget.ImageButton;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.ProgressBar;
import android.widget.TextView;
import com.laaficionmanda.android.R;
/**
* A class that implements the action bar pattern for pre-Honeycomb devices.
*/
public class ActionBarHelperBase extends ActionBarHelper {
private static final String MENU_RES_NAMESPACE = "http://schemas.android.com/apk/res/android";
private static final String MENU_ATTR_ID = "id";
//private static final String MENU_ATTR_SHOW_AS_ACTION = "showAsAction";
protected Set<Integer> mActionItemIds = new HashSet<Integer>();
protected ActionBarHelperBase(Activity activity) {
super(activity);
}
/** {@inheritDoc} */
@Override
public void onCreate(Bundle savedInstanceState) {
mActivity.requestWindowFeature(Window.FEATURE_CUSTOM_TITLE);
}
/** {@inheritDoc} */
@Override
public void onPostCreate(Bundle savedInstanceState) {
mActivity.getWindow().setFeatureInt(Window.FEATURE_CUSTOM_TITLE,
R.layout.actionbar);
setupActionBar();
SimpleMenu menu = new SimpleMenu(mActivity);
mActivity.onCreatePanelMenu(Window.FEATURE_OPTIONS_PANEL, menu);
mActivity.onPrepareOptionsMenu(menu);
for (int i = 0; i < menu.size(); i++) {
MenuItem item = menu.getItem(i);
if (mActionItemIds.contains(item.getItemId())) {
addActionItemFromMenuItem(item);
}
}
}
/**
* Sets up the ibility action bar with the given title.
*/
private void setupActionBar() {
final ViewGroup actionBar = getActionBar();
if (actionBar == null) {
return;
}
// Add Home button
SimpleMenu tempMenu = new SimpleMenu(mActivity);
// SimpleMenuItem homeItem = new SimpleMenuItem(tempMenu,
// android.R.id.home, 0, mActivity.getString(R.string.app_name));
SimpleMenuItem homeItem = new SimpleMenuItem(tempMenu,
R.id.actionbar_home, 0, mActivity.getString(R.string.app_name));
if (showBackButton) {
homeItem.setIcon(R.drawable.ic_home_back);
} else {
homeItem.setIcon(R.drawable.ic_home);
}
addActionItemFromMenuItem(homeItem);
if(mActivity.getTitle().equals(mActivity.getString(R.string.app_name))) {
ImageView titleImage = new ImageView(mActivity, null, R.attr.actionbarTitleStyle);
titleImage.setImageResource(R.drawable.ic_main_title);
actionBar.addView(titleImage);
}else {
LinearLayout.LayoutParams springLayoutParams = new LinearLayout.LayoutParams(0, ViewGroup.LayoutParams.FILL_PARENT);
springLayoutParams.weight = 1;
TextView titleText = new TextView(mActivity, null, R.attr.actionbarTitleStyle);
titleText.setLayoutParams(springLayoutParams);
titleText.setText(mActivity.getTitle());
actionBar.addView(titleText);
}
}
/** {@inheritDoc} */
@Override
public void setRefreshActionItemState(boolean refreshing) {
View refreshButton = mActivity
.findViewById(R.id.actionbar_item_refresh);
View refreshIndicator = mActivity
.findViewById(R.id.actionbar_item_refresh_progress);
if (refreshButton != null) {
refreshButton.setVisibility(refreshing ? View.GONE : View.VISIBLE);
}
if (refreshIndicator != null) {
refreshIndicator.setVisibility(refreshing ? View.VISIBLE
: View.GONE);
}
}
/**
* Action bar helper code to be run in
* {@link Activity#onCreateOptionsMenu(android.view.Menu)}.
*
* NOTE: This code will mark on-screen menu items as invisible.
*/
@Override
public boolean onCreateOptionsMenu(Menu menu) {
// Hides on-screen action items from the options menu.
for (Integer id : mActionItemIds) {
menu.findItem(id).setVisible(false);
}
return true;
}
/** {@inheritDoc} */
@Override
protected void onTitleChanged(CharSequence title, int color) {
TextView titleView = (TextView) mActivity
.findViewById(R.id.actionbar_title);
if (titleView != null) {
titleView.setText(title);
}
}
/**
* @see com.suigeneris.android.exigorep.ui.ActionBarHelper#setItemVisibility(int, int)
*/
@Override
public void setItemVisibility(int item, int visibility) {
View actionbarItem = mActivity.findViewById(item);
if (actionbarItem != null) {
actionbarItem.setVisibility(visibility);
}
}
/**
* Returns a {@link android.view.MenuInflater} that can read action bar
* metadata on pre-Honeycomb devices.
*/
@Override
public MenuInflater getMenuInflater(MenuInflater superMenuInflater) {
return new WrappedMenuInflater(mActivity, superMenuInflater);
}
/**
* Returns the {@link android.view.ViewGroup} for the action bar on phones
* (ibility action bar). Can return null, and will return null on
* Honeycomb.
*/
private ViewGroup getActionBar() {
return (ViewGroup) mActivity.findViewById(R.id.actionbar);
}
/**
* Adds an action button to the ibility action bar, using menu
* information from a {@link android.view.MenuItem}. If the menu item ID is
* <code>menu_refresh</code>, the menu item's state can be changed to show a
* loading spinner using
* {@link com.example.android.actionbar.ActionBarHelperBase#setRefreshActionItemState(boolean)}
* .
*/
private View addActionItemFromMenuItem(final MenuItem item) {
final int itemId = item.getItemId();
final ViewGroup actionBar = getActionBar();
if (actionBar == null) {
return null;
}
// Create the button
// TODO Descomentar cuando el project target sea superior a android-14
// ImageButton actionButton = new ImageButton(
// mActivity,
// null,
// itemId == android.R.id.home ? R.attr.actionbarItemHomeStyle
// : R.attr.actionbarItemStyle);
// actionButton
// .setLayoutParams(new ViewGroup.LayoutParams(
// (int) mActivity
// .getResources()
// .getDimension(
// itemId == android.R.id.home ? R.dimen.actionbar_button_home_width
// : R.dimen.actionbar_button_width),
// ViewGroup.LayoutParams.FILL_PARENT));
// Se utiliza un id propio ya que android.R.id.home no existe en el API8
ImageButton actionButton = new ImageButton(mActivity, null,
itemId == R.id.actionbar_home ? R.attr.actionbarItemHomeStyle
: R.attr.actionbarItemStyle);
actionButton
.setLayoutParams(new ViewGroup.LayoutParams(
(int) mActivity
.getResources()
.getDimension(
itemId == R.id.actionbar_home ? R.dimen.actionbar_button_home_width
: R.dimen.actionbar_button_width),
ViewGroup.LayoutParams.FILL_PARENT));
if (itemId == R.id.menu_refresh) {
actionButton.setId(R.id.actionbar_item_refresh);
} else {
actionButton.setId(itemId);
}
actionButton.setImageDrawable(item.getIcon());
actionButton.setScaleType(ImageView.ScaleType.CENTER);
actionButton.setContentDescription(item.getTitle());
actionButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
mActivity
.onMenuItemSelected(Window.FEATURE_OPTIONS_PANEL, item);
}
});
actionBar.addView(actionButton);
if (item.getItemId() == R.id.menu_refresh) {
// Refresh buttons should be stateful, and allow for indeterminate
// progress indicators,
// so add those.
ProgressBar indicator = new ProgressBar(mActivity, null,
R.attr.actionbarProgressIndicatorStyle);
final int buttonWidth = mActivity.getResources()
.getDimensionPixelSize(R.dimen.actionbar_button_width);
final int buttonHeight = mActivity.getResources()
.getDimensionPixelSize(R.dimen.actionbar_height);
final int progressIndicatorWidth = buttonWidth / 2;
LinearLayout.LayoutParams indicatorLayoutParams = new LinearLayout.LayoutParams(
progressIndicatorWidth, progressIndicatorWidth);
indicatorLayoutParams.setMargins(
(buttonWidth - progressIndicatorWidth) / 2,
(buttonHeight - progressIndicatorWidth) / 2,
(buttonWidth - progressIndicatorWidth) / 2, 0);
indicator.setLayoutParams(indicatorLayoutParams);
indicator.setVisibility(View.GONE);
indicator.setId(R.id.actionbar_item_refresh_progress);
actionBar.addView(indicator);
}
return actionButton;
}
/**
* A {@link android.view.MenuInflater} that reads action bar metadata.
*/
private class WrappedMenuInflater extends MenuInflater {
MenuInflater mInflater;
public WrappedMenuInflater(Context context, MenuInflater inflater) {
super(context);
mInflater = inflater;
}
@Override
public void inflate(int menuRes, Menu menu) {
loadActionBarMetadata(menuRes);
mInflater.inflate(menuRes, menu);
}
/**
* Loads action bar metadata from a menu resource, storing a list of
* menu item IDs that should be shown on-screen (i.e. those with
* showAsAction set to always or ifRoom).
*
* @param menuResId
*/
private void loadActionBarMetadata(int menuResId) {
XmlResourceParser parser = null;
try {
parser = mActivity.getResources().getXml(menuResId);
int eventType = parser.getEventType();
int itemId;
//int showAsAction;
boolean eof = false;
while (!eof) {
switch (eventType) {
case XmlPullParser.START_TAG:
if (!parser.getName().equals("item")) {
break;
}
itemId = parser.getAttributeResourceValue(
MENU_RES_NAMESPACE, MENU_ATTR_ID, 0);
if (itemId == 0) {
break;
}
// TODO Descomentar cuando el project target sea superior a android-14
// showAsAction =
// parser.getAttributeIntValue(MENU_RES_NAMESPACE,
// MENU_ATTR_SHOW_AS_ACTION, -1);
// if (showAsAction == MenuItem.SHOW_AS_ACTION_ALWAYS ||
// showAsAction == MenuItem.SHOW_AS_ACTION_IF_ROOM) {
// mActionItemIds.add(itemId);
// }
// Siempre agrega el item, no se utiliza el atribto showAsAction
mActionItemIds.add(itemId);
break;
case XmlPullParser.END_DOCUMENT:
eof = true;
break;
}
eventType = parser.next();
}
} catch (XmlPullParserException e) {
throw new InflateException("Error inflating menu XML", e);
} catch (IOException e) {
throw new InflateException("Error inflating menu XML", e);
} finally {
if (parser != null) {
parser.close();
}
}
}
}
}
<file_sep>package com.laaficionmanda.android.service;
/**
 * Contract for the background-synchronisation service layer.
 */
public interface SyncService {
    /**
     * Returns a greeting for {@code name}.
     * NOTE(review): looks like a placeholder/demo method — confirm intended
     * use before relying on it.
     */
    public String helloto(String name);
}
<file_sep>package com.laaficionmanda.android.fb;
import java.util.LinkedList;
/**
 * Static registry of Facebook session listeners. Auth and logout events are
 * broadcast to every registered listener.
 *
 * NOTE(review): the backing LinkedLists are not thread-safe, and a listener
 * that unregisters itself during dispatch would cause a
 * ConcurrentModificationException — confirm whether CopyOnWriteArrayList is
 * warranted here.
 */
public class SessionEvents {

    // Registered authentication listeners (static: shared app-wide).
    private static LinkedList<AuthListener> mAuthListeners = new LinkedList<AuthListener>();
    // Registered logout listeners (static: shared app-wide).
    private static LinkedList<LogoutListener> mLogoutListeners = new LinkedList<LogoutListener>();

    /**
     * Registers a listener to be notified when an authentication event
     * occurs.
     *
     * @param listener callback invoked when authentication happens
     */
    public static void addAuthListener(AuthListener listener) {
        mAuthListeners.add(listener);
    }

    /**
     * Unregisters an authentication listener.
     *
     * @param listener the callback to remove
     */
    public static void removeAuthListener(AuthListener listener) {
        mAuthListeners.remove(listener);
    }

    /**
     * Registers a listener to be notified when a logout event occurs.
     *
     * @param listener callback invoked when logout starts and finishes
     */
    public static void addLogoutListener(LogoutListener listener) {
        mLogoutListeners.add(listener);
    }

    /**
     * Unregisters a logout listener.
     *
     * @param listener the callback to remove
     */
    public static void removeLogoutListener(LogoutListener listener) {
        mLogoutListeners.remove(listener);
    }

    /** Notifies every registered auth listener of a successful login. */
    public static void onLoginSuccess() {
        for (AuthListener listener : mAuthListeners) {
            listener.onAuthSucceed();
        }
    }

    /** Notifies every registered auth listener of a failed login. */
    public static void onLoginError(String error) {
        for (AuthListener listener : mAuthListeners) {
            listener.onAuthFail(error);
        }
    }

    /** Notifies every registered logout listener that logout has begun. */
    public static void onLogoutBegin() {
        for (LogoutListener l : mLogoutListeners) {
            l.onLogoutBegin();
        }
    }

    /** Notifies every registered logout listener that logout has finished. */
    public static void onLogoutFinish() {
        for (LogoutListener l : mLogoutListeners) {
            l.onLogoutFinish();
        }
    }
}
import com.laaficionmanda.android.R;
import com.laaficionmanda.android.R.layout;
import com.laaficionmanda.android.R.menu;
import com.laaficionmanda.android.util.BackgroundTask;
import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;
import android.view.Menu;
import android.view.Window;
/**
 * Splash/launcher screen: shows a full-screen layout for about one second,
 * then forwards the user to {@link LoginActivity}.
 */
public class LauncherActivity extends BaseActivity {
    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // Full-screen splash: drop the window title before setContentView.
        getWindow().requestFeature(Window.FEATURE_NO_TITLE);
        setContentView(R.layout.activity_launcher);
        // NOTE(review): this anonymous BackgroundTask is constructed but never
        // visibly started or stored — presumably its constructor schedules
        // work()/done() itself; confirm against BackgroundTask's contract.
        new BackgroundTask() {
            @Override
            public void work() {
                try {
                    // Artificial splash delay.
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                }
            }
            @Override
            public void done() {
                startNextActivity();
            }
        };
    }
    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        getMenuInflater().inflate(R.menu.activity_launcher, menu);
        return true;
    }
    /** Moves on to the login screen and finishes the splash activity. */
    public void startNextActivity() {
        startActivity(new Intent(this, LoginActivity.class));
        finish();
    }
    @Override
    public void initComponents() {
        // TODO Auto-generated method stub
    }
}
<file_sep>package com.laaficionmanda.android.ui;
/**
*
* TeamSelectionActivity
* @author <NAME>
* @date 25/07/2012
*
*/
public class TeamSelectionActivity extends BaseActivity{
    // NOTE(review): stub screen — no UI wiring implemented yet.
    @Override
    public void initComponents() {
        // TODO Auto-generated method stub
    }
}
<file_sep>package com.laaficionmanda.android.ui;
/**
 * Team-schedule screen.
 *
 * Currently an empty skeleton: it only satisfies the BaseActivity
 * initComponents() contract and adds no behaviour of its own yet.
 *
 * @author <NAME>
 * @date 25/07/2012
 */
public class TeamScheduleActivity extends BaseActivity{
@Override
public void initComponents() {
// TODO Auto-generated method stub: wire up the schedule views here.
}
}
<file_sep>package com.laaficionmanda.android.service;
/**
 * Service abstraction for the login flow.
 *
 * Currently an empty marker interface — no operations are defined yet;
 * methods are expected to be added once the login feature is implemented.
 */
public interface LoginService {
}
| 1e303c5b330e2f1977ded4a2eb45af3534e645e8 | [
"Markdown",
"Java"
] | 9 | Java | Parkursoft/Prueba | dd2e17c4654988f4dc41b0c04d5324f80a4c0e8d | e011e8ccffd3f9d77aec0fb57f37d35899810c3f |
refs/heads/master | <file_sep>import axios from 'axios';
import { getLocalStorage } from '@troyexu/yl-fn';
/** @format */
/**
 * Maps the app's i18n locale identifiers to the short language codes the
 * backend expects (sent as the `lang` field on every request payload).
 * Frozen so the shared lookup table cannot be mutated at runtime.
 */
const obj = Object.freeze({
  "zh-hans": "cn",
  "zh-hant": "tw"
});
/** @format */
// Module state configured once by the host app via initFn().
let apiurl = '';
// vue-i18n instance used by the response interceptor to localise error
// messages; stays null until initFn() is called.
let i18n = null;

/**
 * Initialises this axios wrapper.
 *
 * Bug fix: the original took an `i18n` parameter that shadowed the
 * module-level `i18n` and never assigned it, so the response interceptor
 * always saw `i18n === null`. It also wrote `apiurl` after axios.create()
 * had already captured the (empty) baseURL, so the value never reached the
 * instance. Both are wired up properly here.
 *
 * @param {Object} i18nInstance vue-i18n instance (must expose tc()/t())
 * @param {string} baseUrl base URL applied to all requests via `service`
 */
const initFn = function (i18nInstance, baseUrl) {
  i18n = i18nInstance;
  apiurl = baseUrl;
  // axios.create() snapshots baseURL at creation time, so the shared
  // instance must be updated explicitly as well.
  service.defaults.baseURL = baseUrl;
};

// Shared axios instance; its baseURL is finalised by initFn().
const service = axios.create({
  baseURL: apiurl,
  timeout: 5000
}); // request interceptor
// Request interceptor: attach the bearer token from local storage to
// every outgoing request.
service.interceptors.request.use(config => {
  config.headers['Authorization'] = `Bearer ${getLocalStorage('token')}`;
  return config;
}, error => {
  // Bug fix: the rejection must be *returned* — the original called
  // Promise.reject(error) without returning it, which swallowed the
  // error and left the caller's promise chain unaware of the failure.
  return Promise.reject(error);
});
// Request interceptor: merge the current UI language (mapped through the
// locale table `obj`) and the device type into every request payload.
service.interceptors.request.use(config => {
  let addOption = {
    lang: obj[getLocalStorage('lang')],
    device: 'mobile'
  };
  config.data = Object.assign(config.data || {}, addOption);
  return config;
}, error => {
  // Bug fix: return the rejection so callers can observe the error
  // (the original dropped it by not returning Promise.reject).
  return Promise.reject(error);
}); // response interceptor
// Response interceptor: annotate every response body with a localised
// `codeMsg` derived from the API status code, and normalise transport
// errors into the same { code, codeMsg } shape before rejecting.
service.interceptors.response.use(async response => {
  const resData = response.data;
  const code = resData.code;
  if (code === 1012) {
    resData.codeMsg = resData.data[0];
  } else if (code === 2026) {
    resData.codeMsg = resData.message;
  } else if (code === 2009 || code === 2034) {
    // Both codes share the 2034 message plus the customer_dama figure.
    resData.codeMsg = i18n.tc('errorMsg.__2034') + parseInt(resData.data.customer_dama);
  } else if (code === 1010) {
    // Surface the server message and redirect to the login route.
    resData.codeMsg = resData.message;
    window.location = '/#/login';
  } else {
    resData.codeMsg = i18n.tc('errorMsg.__' + resData.code) || i18n.tc('errorMsg.__2009');
  }
  return response;
}, async error => {
  const errData = error.response;
  errData.code = error.response.status;
  errData.codeMsg = i18n.tc('errorCode.__' + errData.status) || i18n.tc('errorCode.__1000');
  return Promise.reject(errData);
});
export { initFn, service };
<file_sep>/** @format */
import Vue from "vue"
import VueI18n from "vue-i18n"
import messages from "@src/i18n"
Vue.use(VueI18n)
// Per-locale number formats handed to VueI18n (used via $n / i18n.n()).
const numberFormats = {
"zh-hans": {
// plain currency number
currency: {
style: "currency", // format style to use: currency formatting
currency: "CNY", // currency code used when formatting
useGrouping: true, // thousands (grouping) separators
currencyDisplay: "symbol", // show the leading currency symbol
minimumFractionDigits: 0 // number of decimal digits
},
// compact money format (original note: rendered with Chinese characters)
moneyCurrency: {
// style: "currency", // format style to use: currency formatting
// currency: "CNY", // currency code used when formatting
// currencyDisplay: "symbol", // leading currency symbol
minimumFractionDigits: 0, // number of decimal digits
notation: "compact"
}
},
"en-us": {
currency: {
style: "currency", // format style to use: currency formatting
currency: "USD", // currency code used when formatting
useGrouping: true, // thousands (grouping) separators
currencyDisplay: "symbol", // show the leading currency symbol
minimumFractionDigits: 0 // number of decimal digits
}
},
"zh-hant": {
currency: {
style: "currency", // format style to use: currency formatting
currency: "TWD", // currency code used when formatting
useGrouping: true, // thousands (grouping) separators
currencyDisplay: "symbol", // show the leading currency symbol
minimumFractionDigits: 0 // number of decimal digits
}
}
}
// Shared VueI18n instance; default locale is simplified Chinese.
const i18n = new VueI18n({
locale: "zh-hans",
fallbackLocale: "en-us", // fall back to English when a key is missing in the active locale
messages,
numberFormats
})
// Boot hook (Quasar-style, presumably — this file lives under src/boot):
// receives the app instance and attaches the shared i18n object to it.
export default ({ app }) => {
// Set i18n instance on app
app.i18n = i18n
}
export { i18n }
<file_sep>/** @format */
import Vue from "vue"
import { extend, ValidationProvider, ValidationObserver } from "vee-validate"
import { i18n } from "src/boot/i18n.js"
import { required, between, integer } from "vee-validate/dist/rules"
// "required": the field must be non-empty.
// NOTE(review): the message string is resolved once at module load, so it
// will not react to later locale switches — verify this is intended.
extend("required", {
...required,
message: i18n.tc("form.__required")
})
// "integer": the field must be an integer (same eager-message caveat).
extend("integer", {
...integer,
message: i18n.tc("form.__integer")
})
// "between": the numeric value must lie between the configured min and
// max. The message is built lazily, so it does follow the active locale.
extend("between", {
...between,
message: (field, args) => {
let min = args.min
let max = args.max
return i18n.t("form.__between", { max, min })
}
})
// "account": 4-20 characters, letters and digits only, and the value must
// contain at least one digit AND at least one letter (the look-aheads).
extend("account", {
validate(value) {
const regex = /^(?=.*\d)(?=.*[A-Za-z])[a-zA-Z\d]{4,20}$/
return regex.test(value)
},
// must be 4-20 characters composed of letters + digits
message: i18n.tc("form.__accountRegex")
})
// Only Chinese or English input allowed.
// NOTE(review): the regex is unanchored, so the rule passes as soon as
// ANY single character is Chinese (U+4E00-U+9FA5) or a Latin letter — it
// does not force EVERY character to be; confirm whether that is intended.
extend("cnEn", {
validate(value) {
let regex = /[\u4E00-\u9FA5A-Za-z]/g
return regex.test(value)
},
message: () => {
return i18n.tc("form.__cnEn")
}
})
// "min": the value must contain at least `length` characters.
extend("min", {
  // value: current input; length: configured threshold.
  validate: (value, { length }) => value.length >= length,
  params: ["length"], // exposed so the message can interpolate it
  message(field, args) {
    // field: name of the field under validation; args.length: threshold.
    const num = args.length
    return i18n.tc("form.__minLength", num, { num })
  }
})

// "max": the value must contain at most `length` characters.
extend("max", {
  validate: (value, { length }) => value.length <= length,
  params: ["length"],
  message(field, args) {
    const num = args.length
    return i18n.tc("form.__maxLength", num, { num })
  }
})
// "email": loose e-mail shape check — something@something.something with
// no spaces/@ in each part; intentionally permissive, not RFC-complete.
extend("email", {
validate(value) {
let regex = /[^@ \t\r\n]+@[^@ \t\r\n]+\.[^@ \t\r\n]+/
return regex.test(value)
},
message: i18n.tc("form.__emailTip")
})
// letters + digits
// "enNum": the value must start with a letter or digit, and the
// look-aheads reject all-digit and all-letter values (a mix is required).
// NOTE(review): only the first character is constrained by the character
// class; later characters are unchecked — verify this is intended.
extend("enNum", {
validate(value) {
let regex = /^(?![0-9]+$)(?![a-zA-Z]+$)[0-9A-Za-z]/
return regex.test(value)
},
message: i18n.tc("form.__enAddNum")
})
// whitespace
// "space": the value may not contain any whitespace characters
// (an empty string passes the * quantifier).
extend("space", {
validate(value) {
let regex = /^[^\s]*$/
return regex.test(value)
},
message: i18n.tc("form.__notEmptySpace")
})
// digits required
// "numeric": digits only (an empty string also passes the * quantifier).
extend("numeric", {
validate(value) {
let regex = /^[0-9]*$/
return regex.test(value)
},
message: i18n.tc("form.__numeric")
})
// the two values must differ
// "twiceDiff": the value must differ from the companion field's value,
// identified by the diffName/diffValue rule parameters.
extend("twiceDiff", {
validate(value, field) {
return value !== field.diffValue
},
params: ["diffName", "diffValue"],
message: (field, args) => {
let diffField = i18n.tc(`form.__${args.diffName}.word`)
let fieldName = i18n.tc(`form.__${field}.word`)
return fieldName + i18n.tc("form.__twiceDiff", diffField, { diffField })
}
})
// the two values must match
// "twiceSame": the value must equal the companion field's value,
// identified by the diffName/diffValue rule parameters.
extend("twiceSame", {
validate(value, field) {
// Use strict equality: the sibling rule "twiceDiff" already compares
// with !==, and loose == could treat differently-typed values as equal.
return value === field.diffValue
},
params: ["diffName", "diffValue"],
message: (field, args) => {
let diffField = i18n.tc(`form.__${args.diffName}.word`)
let fieldName = i18n.tc(`form.__${field}.word`)
return fieldName + i18n.tc("form.__twiceSame", diffField, { diffField })
}
})
Vue.component("ValidationProvider", ValidationProvider)
Vue.component("ValidationObserver", ValidationObserver)
<file_sep>import Vue from 'vue'
import Dev from './serve.vue'
import router from './router/index.js'
// Dev-server entry point: mounts the demo app defined in serve.vue.
Vue.config.productionTip = false
import { axios } from '../src/index'
// NOTE(review): debug leftover — this smoke-tests the library export on
// startup and should be removed before shipping.
console.log(111, axios(11, 23))
new Vue({
render: (h) => h(Dev),
router,
}).$mount('#app')
<file_sep>/** @format */
/**
 * Locale-code lookup table: maps the app's i18n locale identifiers to the
 * short language codes used elsewhere in the library ("cn" / "tw").
 * Frozen so the shared constant cannot be mutated by consumers.
 *
 * @format
 */
const obj = Object.freeze({
  "zh-hans": "cn",
  "zh-hant": "tw",
})

export default obj
<file_sep>'use strict';Object.defineProperty(exports,'__esModule',{value:true});var axios=require('axios'),ylFn=require('@troyexu/yl-fn');function _interopDefaultLegacy(e){return e&&typeof e==='object'&&'default'in e?e:{'default':e}}var axios__default=/*#__PURE__*/_interopDefaultLegacy(axios);function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) {
try {
var info = gen[key](arg);
var value = info.value;
} catch (error) {
reject(error);
return;
}
if (info.done) {
resolve(value);
} else {
Promise.resolve(value).then(_next, _throw);
}
}
// Babel runtime helper (generated build output — do not edit by hand):
// wraps a generator-returning function so it behaves like an async
// function, driving the generator through asyncGeneratorStep and settling
// the returned Promise with its final value or first thrown error.
function _asyncToGenerator(fn) {
return function () {
var self = this,
args = arguments;
return new Promise(function (resolve, reject) {
var gen = fn.apply(self, args);
function _next(value) {
asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value);
}
function _throw(err) {
asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err);
}
_next(undefined);
});
};
}/** @format */
var obj = {
"zh-hans": "cn",
"zh-hant": "tw"
};var apiurl = '';
var i18n = null;
var initFn = function initFn(i18n, baseUrl) {
apiurl = baseUrl;
};
var service = axios__default['default'].create({
baseURL: apiurl,
timeout: 5000
}); // request interceptor
service.interceptors.request.use(function (config) {
config.headers['Authorization'] = "Bearer ".concat(ylFn.getLocalStorage('token'));
return config;
}, function (error) {
// Do something with request error
Promise.reject(error);
});
service.interceptors.request.use(function (config) {
var addOption = {
lang: obj[ylFn.getLocalStorage('lang')],
device: 'mobile'
};
config.data = Object.assign(config.data || {}, addOption);
return config;
}, function (error) {
// Do something with request error
Promise.reject(error);
}); // response interceptor
service.interceptors.response.use( /*#__PURE__*/function () {
var _ref = _asyncToGenerator( /*#__PURE__*/regeneratorRuntime.mark(function _callee(response) {
var resData;
return regeneratorRuntime.wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
resData = response.data;
_context.t0 = resData.code;
_context.next = _context.t0 === 1012 ? 4 : _context.t0 === 2026 ? 6 : _context.t0 === 2009 ? 8 : _context.t0 === 2034 ? 8 : _context.t0 === 1010 ? 10 : 13;
break;
case 4:
resData.codeMsg = resData.data[0];
return _context.abrupt("break", 15);
case 6:
resData.codeMsg = resData.message;
return _context.abrupt("break", 15);
case 8:
resData.codeMsg = i18n.tc('errorMsg.__2034') + parseInt(resData.data.customer_dama);
return _context.abrupt("break", 15);
case 10:
resData.codeMsg = resData.message;
window.location = '/#/login';
return _context.abrupt("break", 15);
case 13:
resData.codeMsg = i18n.tc('errorMsg.__' + resData.code) || i18n.tc('errorMsg.__2009');
return _context.abrupt("break", 15);
case 15:
return _context.abrupt("return", response);
case 16:
case "end":
return _context.stop();
}
}
}, _callee);
}));
return function (_x) {
return _ref.apply(this, arguments);
};
}(), /*#__PURE__*/function () {
var _ref2 = _asyncToGenerator( /*#__PURE__*/regeneratorRuntime.mark(function _callee2(error) {
var errData;
return regeneratorRuntime.wrap(function _callee2$(_context2) {
while (1) {
switch (_context2.prev = _context2.next) {
case 0:
errData = error.response;
errData.code = error.response.status;
errData.codeMsg = i18n.tc('errorCode.__' + errData.status) || i18n.tc('errorCode.__1000');
return _context2.abrupt("return", Promise.reject(errData));
case 4:
case "end":
return _context2.stop();
}
}
}, _callee2);
}));
return function (_x2) {
return _ref2.apply(this, arguments);
};
}());exports.initFn=initFn;exports.service=service;<file_sep>Cli-build lib
test
ls
<file_sep>// import i18n from './i18n.js'
// import sweetalert from './sweetalert.js'
// import validate from './validate.js'
import axios from './axios.js'
// export { i18n, sweetalert, validate, axios }
export { axios }
| c6b2ed853957ec38c06af2b14b74100e269f3537 | [
"JavaScript",
"Markdown"
] | 8 | JavaScript | TroyeXu/troyemixin | 10a613bd8ef9be354672f26773c12cc046093ad1 | 3930b87141615a5cfc1f47116e6a9adce6280a86 |
refs/heads/master | <repo_name>anastasiossouris/PDES-pspp<file_sep>/concurrent/cache_aligned_allocator.hpp
#ifndef CACHE_ALIGNED_ALLOCATOR_HPP_
#define CACHE_ALIGNED_ALLOCATOR_HPP_
#include <tbb/cache_aligned_allocator.h>
namespace concurrent{
/**
* An allocator that allocates memory on cache line boundaries for the purpose of avoid false-sharing. It can be used as a direct
* replacement of std::allocator<T>.
*
* Currently, this is a wrapper for the cache_aligned_allocator provided by TBB, so refer to http://www.threadingbuildingblocks.org/docs/help/reference/memory_allocation/cache_aligned_allocator_cls.htm
* for more details.
*/
template<typename T>
using cache_aligned_allocator = tbb::cache_aligned_allocator<T>;
} // namespace concurrent
#endif /* CACHE_ALIGNED_ALLOCATOR_HPP_ */
<file_sep>/context.cpp
#include <cassert>
#include "context.hpp"
namespace pdes{
void context::spawn(task* simtask, context::time_type t, context::size_type prio){
assert(simtask != nullptr);
assert(t >= now());
using handle_type = event_pool_handle;
// we must set the timestamp and the priority of the task before we insert them in the fel
// so that they get insterted correctly
simtask->set_timestamp(t);
simtask->set_priority(prio);
handle_type pool_handle = fel.push(simtask);
simtask->set_context(this);
simtask->set_pool_handle(pool_handle);
}
void context::spawn_delayed(task* simtask, context::time_type delay, context::size_type prio){
assert(simtask != nullptr);
using handle_type = event_pool_handle;
// we must set the timestamp and the priority of the task before we insert them in the fel
// so that they get insterted correctly
simtask->set_timestamp(now() + delay);
simtask->set_priority(prio);
handle_type pool_handle = fel.push(simtask);
simtask->set_context(this);
simtask->set_pool_handle(pool_handle);
}
void context::spawn_sleeping(task* simtask){
assert(simtask != nullptr);
// it is enough to record ourselves as the context for the task
simtask->set_context(this);
}
void context::send_message(task* simtask, context::time_type t, context::size_type prio){
assert(simtask != nullptr);
// insert the task in the messages fel
std::unique_lock<std::mutex> lock{messages_lock};
using handle_type = event_pool_handle;
// we must set the timestamp and the priority of the task before we insert them in the messages fel
// so that they get insterted correctly
simtask->set_timestamp(t);
simtask->set_priority(prio);
handle_type pool_handle = messages_fel.push(simtask);
simtask->set_context(this);
simtask->set_pool_handle(pool_handle);
lock.unlock();
lock.release();
}
void context::run_top(){
assert(top_ready());
task* simtask = fel.top();
assert(simtask != nullptr);
assert(simtask->get_timestamp() >= now());
current_time = simtask->get_timestamp();
simtask->run();
}
void context::execute_until(time_type endtime){
// drain messages
drain_messages();
// now execute from the local fel
while (is_top_task_le_threshold(endtime)){
run_top();
}
}
void context::drain_messages(){
std::lock_guard<std::mutex> lk{messages_lock};
// merge the messages into the main fel
fel.merge(messages_fel);
}
} // namespace pdes
<file_sep>/concurrent/affinity.hpp
#ifndef __AFFINITY_HPP_IS_INCLUDED__
#define __AFFINITY_HPP_IS_INCLUDED__ 1
#include <stdexcept>
#define _GNU_SOURCE
#include <unistd.h>
#include <pthread.h>
namespace concurrent{
struct affinity{
/**
* Set's the affinity of the current thread to the passed core.
*
* \param core The core to which to set the affinity of the current thread
* \throw runtime_error If the affinity cannot be set
*/
void operator()(int core) const{
(*this)(core, pthread_self());
}
/**
* Set's the affinity of the thread with the given id to the passed core.
*
* \param core The core to which to set the affinity of the current thread
* \param id The identifier of the thread.
* \throw runtime_error If the affinity cannot be set
*/
void operator()(int core, pthread_t id){
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(core, &cpuset);
if (pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset)){
throw std::runtime_error("call to pthread_setaffinity_np() failed");
}
}
};
} // namespace concurrent
#endif
<file_sep>/engine.hpp
#ifndef CONTEXT_HPP_
#define CONTEXT_HPP_
#include <cassert>
#include <algorithm>
#include <vector>
#include <memory>
#include <iterator>
#include <functional>
#include <exception>
#include <stdexcept>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <atomic>
#include "concurrent/simple_barrier.hpp"
#include "concurrent/tournament_tree.hpp"
#include "concurrent/termination_detection_barrier.hpp"
#include "concurrent/cache_aligned_allocator.hpp"
#include "concurrent/affinity.hpp"
#include "boost/intrusive/list.hpp"
#include "context.hpp"
#include "traits.hpp"
#include "parameters.hpp"
#include "util/xorshift.hpp"
namespace pdes{
class engine{
public:
using time_type = traits::time_type;
using size_type = traits::size_type;
private:
using barrier_type = concurrent::simple_barrier<size_type>;
using termination_detection_type = concurrent::termination_detection_barrier<size_type>;
using tournament_tree_type = concurrent::tournament_tree<time_type>;
public:
/**
* Class engine has only one instance and following the Singleton Pattern this method provides access to that single instance.
* Note that this method is not thread-safe.
*
* \return Pointer to the singleton instance of class engine.
* \throw bad_alloc If the singleton instance cannot be allocated the first time.
*/
static engine* get_instance(){
if (!_instance){
_instance = new engine{};
}
return _instance;
}
// non-copyable and non-movable
engine(const engine&) = delete;
engine& operator=(const engine&) = delete;
engine(engine&&) = delete;
engine& operator=(engine&&) = delete;
void init(parameters& params){
// First we must initialize the data for the worker threads
lookahead = params.get_lookahead();
endtime = params.get_endtime();
num_threads = params.get_num_threads();
std::vector<size_type> affs = params.get_thread_aff();
NO_STEAL_REQUEST = num_threads;
phases_meet_point.reset(new concurrent::sense_barrier<size_type>{num_threads + 1});
tournament_tree.reset(new concurrent::tournament_tree<time_type>{num_threads});
td.reset(new concurrent::termination_detection_barrier<size_type>{num_threads});
// Initialize the steal-free list for each worker thread
steal_free_list.resize(num_threads);
for (auto& vec_ctx_l : steal_free_list){
vec_ctx_l.resize(num_threads);
}
// Initialize the deceased, steal_request and steal_response arrays
std::vector<std::atomic<bool>, concurrent::cache_aligned_allocator<std::atomic<bool> > > __deceased(num_threads);
std::vector<std::atomic<size_type>, concurrent::cache_aligned_allocator<std::atomic<size_type> > > __steal_request(num_threads);
std::vector<std::atomic<context_list_type*>, concurrent::cache_aligned_allocator<std::atomic<context_list_type*> > > __steal_response(num_threads);
using std::swap;
assert(deceased.size() == 0);
assert(steal_request.size() == 0);
assert(steal_response.size() == 0);
swap(deceased, __deceased);
swap(steal_request, __steal_request);
swap(steal_response, __steal_response);
for (auto& x : deceased){
x.store(false);
}
for (auto& x : steal_request){
x.store(NO_STEAL_REQUEST);
}
for (auto& x : steal_response){
x.store(&NO_STEAL_RESPONSE); // this is no needed but...
}
// initialize the exceptions
exceptions.resize(num_threads);
// we initialize start_simulation flag also here
start_simulation.store(false, std::memory_order_seq_cst);
// initialize the phases data. these must be done while holding the lock for memory visibility effects
// also note that since all threads will acquire the init_phase_lock they will see the changes we make above
{
std::unique_lock<std::mutex> lk{init_phase_lock};
init_phase_done = false;
}
{
std::unique_lock<std::mutex> lk{context_distribution_phase_lock};
contexts_distributed_done = false;
}
{
std::unique_lock<std::mutex> lk{cleanup_phase_lock};
cleanup_phase_begin = false;
}
// create the threads
concurrent::affinity aff_setter;
threads.reserve(num_threads);
for (size_type i = 0; i < num_threads; ++i){
size_type id = i;
size_type core = aff[i];
std::thread t = std::thread(&thread_pool::worker_thread,
this,
id
);
threads.push_back(std::move(t));
// set the affinity for this created thread
try{
aff_setter(core, t.native_handle());
}
catch(...){
// tell alive threads to terminate and then wait for them
{
std::unique_lock<std::mutex> lk{init_phase_lock};
init_phase_done = true;
init_phase_ok = false;
}
init_phase_done_condition.notify_all();
for (size_type j = 0; j < i; ++j){
threads[j].join();
}
throw ;
}
}
// inform the threads that initialization went fine
{
std::unique_lock<std::mutex> lk{init_phase_lock};
init_phase_done = true;
init_phase_ok = true;
}
init_phase_done_condition.notify_all();
}
/**
* Distribute the simulation contexts. The engine expects pointers to the contexts
* so the iterators must be iterators to pointers to contexts.
*
* This operation can be called only once before the start() method.
*
* The container from which the iterators are passed here can and should be freed by the client code
* after this method returns.
*/
template<class InputIt>
void distribute_contexts(InputIt begin_context, InputIt end_context){
using iterator_type = InputIt;
// how many contexts do we have?
size_type ncontexts = std::distance(begin_context, end_context);
// roughly how many contexts each worker thread will get
size_type range = ncontexts/num_threads;
// distribute the contexts to the threads
for (size_type i = 0; i < num_threads; ++i){
// find the range of contexts for the i-th worker thread
iterator_type first = begin_context;
std::advance(first, i*range);
iterator_type last = end_context;
if (i != num_threads - 1){
last = begin_context;
std::advance(last, (i+1)*range);
}
// give the contexts to the i-th worker thread
for (iterator_type context = first; context < last; ++context){
local_contexts_all[i].push_front(*context);
}
}
// give the signal that the contexts have been distributed
{
// also note that this lock ensures that the worker threads will see correctly the contexts at start
std::unique_lock<std::mutex> lk{context_distribution_phase_lock};
contexts_distributed_done = true;
}
context_distribution_phase_start_condition.notify_all();
// wait for the threads to collect their contexts
phases_meet_point->await(num_threads);
// we no longer need the local_contexts_all vector so we clean it up
for (size_type i = 0; i < num_threads; ++i){
local_contexts_all[i].clear();
}
local_contexts_all.clear();
}
void start(){
start_simulation.store(true, std::memory_order_seq_cst);
}
std::vector<std::exception_ptr> await(){
// wait for the worker threads to finish the simulation
phases_meet_point->await(num_threads); // we get the last id
return exceptions;
}
void cleanup(){
{
std::unique_lock<std::mutex> lk{cleanup_phase_lock};
cleanup_phase_begin = true;
}
cleanup_phase_start_condition.notify_all();
// wait for every worker thread to finish their cleanup
phases_meet_point->await(num_threads);
// now wait for the threads instances to finish
std::for_each(threads.begin(), threads.end(), std::mem_fn(&std::thread::join));
// we can now deallocate the internal data
phases_meet_point.reset();
tournament_tree.reset();
td.reset();
deceased.clear();
steal_request.clear();
steal_response.clear();
for (size_type i = 0; i < num_threads; ++i){
for (size_type j = 0; j < num_threads; ++j){
streal_free_list[i][j].clear();
}
steal_free_list[i].clear();
}
steal_free_list.clear();
exceptions.clear();
}
private:
//! Private constructor because engine is a singleton class
engine(){}
// the singleton instance of class engine
static engine* _instance;
time_type lookahead; //! Simulation's lookahead parameter
time_type endtime; //! Simulation's endtime parameter
size_type num_threads; //! Number of worker threads
std::vector<std::thread> threads; //! Worker threads
/**
* During the initialization the thread calling init() creates the worker threads and sets the affinities for each one.
* Because something may go wrong, like a thread could not be created or the affinity could not be set for one of the threads,
* we could have a situation where some threads have been created and some not and the main thread needs to exit. In that case we need to
* notify the alive threads to stop. To do that we follow the following simple protocol:
* Each thread that starts waits until the initialization phase ends using a condition variable init_phase_done_condition.
* The main thread executing init() in case of success (all threads have been started and the affinities have been set successfully)
* sets to the variable init_phase_ok the value true and then signals the init_phase_done_condition condition variable. If something goes
* wrong then it sets false to init_phase_ok. In this way, after the alive threads get notified from the init_phase_done_condition they check
* the value of init_phase_ok to know whether they can proceed or not. To guard the thread from spurious returns from the init_phase_done_condition
* condition variable await() method we use a init_phase_done that is initialized to false and is set to true before the
* condition is signaled by the main thread running init(). Since each condition variable must be accompanied by a lock we use a
* mutex variable called init_phase_lock.
*/
bool init_phase_ok;
bool init_phase_done;
std::mutex init_phase_lock;
std::condition_variable init_phase_done_condition;
/**
* After the init phase has been successfully completed, the worker threads wait for the contexts to be distributed and to collect them.
* We use condition variables for this as well. context_distribution_phase_start_condition is a condition variable that signals when
* then worker threads must collect the contexts. To guard from spurious wakeups we use a boolean flag contexts_distributed_done that
* is set to true by the thread running init() before signaling the condition variable, and we use a context_distribution_phase_lock
* as a lock for the condition variable.
*/
bool contexts_distributed_done;
std::mutex context_distribution_phase_lock;
std::condition_variable context_distribution_phase_start_condition;
/**
* After the contexts have been distributed the worker threads must wait for the start() signal. We use a atomic flag for this to avoid
* the waiting period introduced by condition variables because we want all worker threads to start at about the same time.
*/
std::atomic<bool> start_simulation;
/**
* After the simulation has ended the worker threads must wait for the cleanup() signal. We use again condition variables here.
* cleanup_phase_start_condition is the condition variable. cleanup_phase_begin is a flag to guard from spurious wakeups and
* cleanup_phase_lock is the lock associated with the condition variable.
*/
bool cleanup_phase_begin;
std::mutex cleanup_phase_lock;
std::condition_variable cleanup_phase_start_condition;
/**
* After the threads have collected the contexts they must notify the main thread (that called distributed_contexts()). We use a barrier
* for this. That is, after the main thread has signaled the contexts_distribution_phase_start_condition, it then waits on the barrier.
* When the worker threads collect their contexts they meet at the barrier. We call this barrier phases_meet_point.
*
* We use again this barrier when the main thread needs to wait for the simulation to end in the await() method. After each thread determines
* simulation's termination it meets at phases_meet_point barrier and then proceeds on to the cleanup phase. It is also used when the cleanup
* phase is over.
*/
std::unique_ptr<barrier_type> phases_meet_point;
/**
* This is the tournament tree for the implementation of a global min-reduction operation.
* It is used in compute_threshold() function by the worker threads in order to compute the threshold
* for the next round after they have computed their local thresholds.
*/
std::unique_ptr<tournament_tree_type> tournament_tree;
/**
* For the worker threads to know when a round is terminated we use a termination detection mechanism.
*
* When a thread has work to do it says so by calling set_active() on the termination detection object.
* When a thread doesn't have work to do it says so by calling set_inactive() on the termination detection object.
* A method td.terminate() returns true if no threads has any more work to do and thus the work-stealing computation
* for the round terminates.
*/
std::unique_ptr<termination_detection_type> td;
/**
* This part regards how do the worker threads compute the threshold for the next round.
* Consider we are at round i. Each worker during the i-th round keeps track of two variables: context_threshold and event_threshold
*
* How is context_threshold computed:
* Say that at round i the threshold is threshold(i). Worker thread WTk will execute all some contexts conservatively up to time
* threshold(i). For each such context let next_event be the first event with timestamp later than theshold(i) (this is where the worker
* thread stops executing this context). context_threshold holds the minimum of the timestamps of those next_event events for all contexts.
*
* How is event_threshold computed:
* Say that a worker thread WTk executes a context and that context generates an event for another context using the delayed() methods.
* event_threshold is the minimum enactment time of the messages generated by contexts executed by WTk.
*
* At the end of round i, WTk computes its threshold as min{context_threshold, event_threshold} + lookahead and uses this value
* for the global min reduction operation.
*
* Since these variables are local to each thread, we declare them thread_local.
*/
thread_local static time_type context_threshold;
thread_local static time_type event_threshold;
/**
* Each worker thread has local a set of simulation contexts to work with named local_contexts.
* Since for the work-stealing implementation we will need to perform delete and splice operations we do not use a vector
* but a list. Each simulation context that a worker thread executes, is placed in a second list denoted committed_contexts.
* To preserve some memory we do not use a list storing pointers to the simulation contexts, but instead we use a intrusive list.
* The intrusive list has added locality benefits as well.
*/
using context_list_type = boost::intrusive::make_list<context, boost::intrusive::constant_time_size<false>,
boost::intrusive::base_hook<context_list_base_hook> >::type;
using context_list_iterator_type = typename context_list_type::iterator;
/**
* Since each local_contexts and committed_contexts are local to each worker thread we use thread_local
* storage for them.
*/
thread_local static context_list_type local_contexts;
thread_local static context_list_type committed_contexts;
/**
* This is used to distribute the contexts. At initialization we place the contexts for worker thread id in local_contexts_all[id].
* Then, worker thread id will drain them into its local contexts set.
*/
std::vector<context_list_type> local_contexts_all;
/**
* Work-Stealing Implementation details:
*
* We use the receiver-initiated algorithm as described in 'Scheduling Parallel Programs by Work-Stealing
* with Private Deques'. For this we use for each worker thread Wti the following registers:
* + deceased: A SWMR boolean register indicating whether Wti is deceased or
* not.
* + steal_request: An atomic register that can be accessed using the
* compare&swap() primitive. It is used by the thief worker threads when they
* choose Wti as their victim. In particular, when steal_request i is NO_STEAL_REQUEST then no
* worker thread has made a steal request to Wti. If another worker thread Wtj
* wants to make a steal request to Wti then it will try to atomically write its
* identity j to steal_request i if it is still NO_STEAL_REQUEST using the compare&swap() primitive.
* + steal_response: A MWSR that can be read only by Wti and written by any
* other worker thread. Its purpose is for the victim worker thread to pass the simulation contexts
* to Wti when Wti has successfully made a steal request to the
* victim.Before making a steal request, Wti makes steal_response i
* NO_STEAL_RESPONSE and then makes a steal request to Wtj (until successful). Then it local
* spins to steal_response i until Wtj passes the response (which could either a null pointer or a pointer to some contexts).
*
* Since those must be visible to all worker threads they are not thread local and we use plain vectors
* to store them.
*
* Also, note that in order to avoid false-sharing we use a cache-alignment for these variables. This is critical since they are
* read frequently by the thief/victim threads.
*/
std::vector<std::atomic<bool>, concurrent::cache_aligned_allocator<std::atomic<bool> > > deceased;
std::vector<std::atomic<size_type>, concurrent::cache_aligned_allocator<std::atomic<size_type> > > steal_request;
std::vector<std::atomic<context_list_type*>, concurrent::cache_aligned_allocator<std::atomic<context_list_type*> > > steal_response;
// we pass a list so as *not* to restrict
// the number of contexts that can be stolen
// e.g. for the steal-half approach we will pass
// a list with many contexts.
// Note that it is important here to have a pointer
// to a context list to have a 'guarantee' that
// the variable will be lock-free
/**
* A sentinel node that is used by worker threads as a steal_response holder until they get a response.
*/
context_list_type NO_STEAL_RESPONSE;
/**
* A value to distinguish whether there is a steal-request by some worker thread in some steal_request[] atomic variable.
* Since worker threads have identifiers in the range [0,num_threads) then we can use num_threads as an 'invalid' thread id.
 * Note that -1 is not *conceptually* right since size_type most likely is an unsigned integer type.
*/
size_type NO_STEAL_REQUEST;
/**
* When a thread worker steals from another it gets a context list from the victim's steal_response variable.
* In this case to avoid the need for the victim to allocate a new context list to pass to the thief (which
* later the thief would have to deallocate) we use the following malloc-free mechanism:
*
* Each worker thread has a pre-allocated context list for each other worker-thread. When a thief i steals
* from victim j, victim j will pass to thief i the context list from the pre-allocated pool. Also, to completely
* avoid mallocs, the victim will use a splice operation from its local context list to the pre-allocated context list
* to send to the thief.
*
* In more detail, steal_free_list[victim][thief] is the list used by the victim to pass contexts to the thief worker thread.
*
 * We place those on cache-line boundaries as well, because we do not want steal-handoffs from 2 threads (thief/victim) to get in the way
 * of other unrelated threads due to false-sharing.
*/
std::vector<std::vector<context_list_type, concurrent::cache_aligned_allocator<context_list_type> >,
concurrent::cache_aligned_allocator<std::vector<context_list_type> > > steal_free_list;
/**
* To steal each thread must use a random number generator. Each one is local to a thread and thus declared thread local.
*/
thread_local static std::random_device rd;
thread_local static util::xorshift gen;
thread_local static std::uniform_int_distribution<> dis;
/**
* This is used by the worker threads to notify us of exceptions.
*/
std::vector<std::exception_ptr> exceptions;
/**
* This is the task run by each worker thread in the simulation.
*/
void worker_thread(
size_type id /** the id of the 'this' worker thread */
){
// First initialize the random number generators
using result_type = std::random_device::result_type;
result_type random_seed{};
while (!(random_seed = rd())){} // zero seeds do not work well with xorshift
gen.seed(random_seed);
// Victims will be drawn uniformly from [0, num_threads-1]; choose_victim() rejects our own id.
using param_type = std::uniform_int_distribution<>::param_type;
// NOTE(review): num_threads-1 is a size_type; brace-initializing the int-based param_type may
// trigger a narrowing diagnostic on conforming compilers -- confirm build settings.
param_type range{0, num_threads-1};
dis.param(range);
// Wait for the initialization phase to end and determine whether to proceed to the contexts distribution phase or not
bool proceed{false};
{
std::unique_lock<std::mutex> lk{init_phase_lock};
while (!init_phase_done){
init_phase_done_condition.wait(lk);
}
proceed = init_phase_ok;
}
// If initialization failed (init_phase_ok was false), bail out without touching any contexts.
if (!proceed){ return; }
// Wait for the contexts distribution phase to begin
{
std::unique_lock<std::mutex> lk{context_distribution_phase_lock};
while (!contexts_distributed_done){
context_distribution_phase_start_condition.wait(lk);
}
}
// We can now get our starting contexts. Since at each round we swap the committed contexts with the local contexts
// and start with the local contexts, we initially place our contexts in the committed_contexts list.
committed_contexts.splice(committed_contexts.begin(), local_contexts_all[id]);
// Meet at the barrier to notify that context distribution phase is over
phases_meet_point->await(id);
// Wait for the signal to start the simulation
while (!start_simulation.load(std::memory_order_seq_cst)){}
// Execute the simulation
// initially we are bounded by the lookahead so we let event_threshold be endtime
// and context_threshold be zero.
context_threshold = 0;
event_threshold = endtime;
run_simulation(id);
// Notify the main thread that the simulation has been terminated
phases_meet_point->await(id);
// Wait for the cleanup phase to begin
{
std::unique_lock<std::mutex> lk{cleanup_phase_lock};
while (!cleanup_phase_begin){
cleanup_phase_start_condition.wait(lk);
}
}
// Perform local cleanup here
local_cleanup();
// Notify the main thread that the cleanup phase is over
phases_meet_point->await(id);
}
/**
* This is the top level function that implements the simulation for a worker thread with identifier id.
*/
void run_simulation(
size_type id /** worker thread's identifier */
){
// Tracks whether the previous round completed without an exception. A raised exception is
// broadcast to the other worker threads by competing with threshold 0 in the tournament tree:
// 0 can never be a legitimate threshold because the (nonzero) lookahead is always added in.
bool no_exception_so_far = true;
for (;;){
const time_type threshold = no_exception_so_far ? compute_threshold(id) : 0;
// A zero global threshold signals an exception somewhere; reaching endtime means the simulation is over.
if (threshold == 0 || threshold >= endtime){
break;
}
// context_threshold and event_threshold are folded with std::min during the round, so start
// them at the largest meaningful value (endtime) instead of carrying over last round's results.
context_threshold = endtime;
event_threshold = endtime;
// Execute this round's contexts under the work-stealing scheduler.
no_exception_so_far = run_work_stealing_scheduler(id, threshold);
}
}
/**
* Each worker thread calls run_work_stealing_scheduler() after they have computed the threshold time,
* to execute all the contexts.
*
* The idea behind the algorithm is that the worker thread begins by executing the contexts from its own local_contexts set
* and responds to steal-requests in the meantime. When its local_contexts set gets empty it becomes a thief and tries to steal
* some contexts from a victim worker thread. When it finally receives some contexts as a response from a victim worker thread,
* it adds them to its local_contexts set and starts over again.
*
* In the meantime, it uses the termination detection object td to know if the work-stealing computation for the round has ended
* or not.
*
* This method returns true if everything worked fine and false if an exception was raised.
*/
bool run_work_stealing_scheduler(
size_type id, /** worker thread's id */
time_type threshold /** the threshold for the current round */
){
// We start with the contexts we executed in the previous round (which are stored in committed_contexts).
// This splice operation is O(1) and does not use memory operations.
local_contexts.splice(local_contexts.begin(), committed_contexts);
// Notify all that we are alive and we have possibly contexts to give (so that others can
// make us steal requests)
report_alive(id);
td->set_active(id);
// Start with the first context from our local_contexts set
context_list_iterator_type ctx = local_contexts.begin();
// While the work-stealing computation for this round is not over
while (true){
// While local_contexts set is not empty
while (ctx != local_contexts.end()){
// add the context to execute in the committed set
// Note that the context must be spliced at the committed_contexts before we call handle_steal_request
// because handle_steal_request will check the same local_contexts set and the first context that we are about to
// execute next must be removed from there first.
//
// Note also that we add on the beginning of the committed_contexts list and not at the end, because we want to benefit
// from cache locality. Notice that the contexts we execute last here in round R will be placed at the beginning of
// committed_contexts, and thus in round R+1 they will be the first to execute from local_contexts (after we splice
// committed_contexts to local_contexts). Thus, there is a high chance that the data for these tasks reside in our cache.
//
// This splice operation is O(1) and does not use memory operations.
committed_contexts.splice(committed_contexts.begin(), local_contexts, local_contexts.begin());
// Check if we have a pending steal request from a thief
// XXX: add a period handling mechanism here
handle_steal_request(id);
// execute the context by our own
context* current_context = *committed_contexts.begin();
try{
current_context->execute_until(threshold);
}
catch(...){
// this is the only place where an exception may be thrown. we must gracefully shut down here
handle_exception(id, std::current_exception());
return false;
}
// keep track of the time of the event exceeding the threshold to update context_threshold
// (if the context has no ready task left, it does not constrain the next threshold, hence endtime)
context_threshold = std::min(context_threshold, current_context->top_ready() ? current_context->top_task_timestamp() : endtime);
// retrieve next context
ctx = local_contexts.begin();
}
assert(local_contexts.empty());
// In this point, the worker thread exhausted its local_contexts set and now it becomes a thief.
// Before however it tries to steal from some victim thread, it must:
// (1) Report deceased so that other worker threads do not make futile steal attempts to this thread
// (2) Notify the termination detection mechanism (this enables threads to know when to stop)
// (3) Block steal requests. Even though we report deceased this is also needed because later then this thread
// tries to steal it will not have to continuously check for steal requests that can happen because those threads
// read deceased[id] before report_deceased(id) is called and thus assume that worker thread id is still alive.
report_deceased(id);
td->set_inactive(id);
block_steal_requests(id);
// Attempt steals
while (true){
if (attempt_steal(id)){ break; }
// Check if it is time to end the current round
if (td->terminate()){
// This is needed because at the next round other threads must be able to make steal requests to us
unblock_steal_requests(id);
return true;
}
}
// Since we got contexts now we can give them back if requested and thus re-enable steal attempts
report_alive(id);
unblock_steal_requests(id);
// Retrieve next context
ctx = local_contexts.begin();
assert(!local_contexts.empty());
}
} // the outer loop exits only via the return statements above
/**
* This function is called by a thief worker thread to make a steal-attempt to some victim worker thread.
*
* If the steal-attempt is successful then true is returned; otherwise, false is returned.
*/
bool attempt_steal(
size_type thief /** thief's identifier */
){
// Choose a victim to make a steal-attempt
size_type victim = choose_victim(thief);
assert(0 <= victim && victim < num_threads && victim != thief);
// Make the steal attempt only if the thread is still alive. This is good for performance (we may get rid of some
// unsuccessful compare&swap() operation because the deceased victim thread has called block_steal_requests()) but it
// is also essential for the termination of the run_work_stealing_scheduler() operation at each simulation round. That is
// because then the thief worker thread will not call td->set_active(thief) and the termination detection object will eventually
// report termination (consider the scenario that all threads synchronously call set_active() see that no one is alive then
// some of the threads call set_inactive() but on the td they do not detect termination because some threads are still active and thus
// they try to steal again; here we must prevent them from stealing again).
if (!deceased[victim].load(std::memory_order_seq_cst)){
// Before we make a steal attempt cancel out any response from a previous steal attempt
steal_response[thief].store(&NO_STEAL_RESPONSE, std::memory_order_seq_cst);
// Before we make a steal-attempt we declare active. Suppose we weren't, and we did after we had a successful steal request.
// Consider the following scenario:
// (1) All are inactive except from the victim worker thread we choose
// (2) The victim worker threads responds and later reports inactive.
// (3) Now td will report termination and all other threads will proceed to the threshold computation
// (4) We now wake up get the response from the victim and we proceed on our own.
//
// This is not bad for correctness and not necessary for the steal-one approach (because we stole only one task) but it is
// necessary for the steal-half approach since we stole many tasks and can benefit from other worker threads being alive
// and taking some of them from us.
td->set_active(thief);
size_type expected = NO_STEAL_REQUEST;
// Attempt a steal to the victim worker thread; the CAS succeeds only if no other thief got there first.
if (steal_request[victim].compare_exchange_strong(expected, thief, std::memory_order_seq_cst, std::memory_order_seq_cst)){
// Successful steal attempt so we wait for the victim to respond
bool success = wait_steal_response(thief, victim);
if (success){
// Splice the stolen contexts into our own set; this empties the victim's hand-off list
// so it can be reused by transfer() in a later round.
context_list_type* stolen_list = steal_response[thief].load(std::memory_order_seq_cst);
local_contexts.splice(local_contexts.begin(), *stolen_list);
return true;
}
}
// A failed steal attempt so we are not active anymore
td->set_inactive(thief);
}
return false;
}
/**
* This function is called by a thief worker thread to wait for a response to its successful steal-request to the
* victim worker thread.
*
* Returns true if we got a response and false otherwise. Note that false is returned in the case we get nullptr as response.
*/
bool wait_steal_response(
size_type thief, /** thief's identifier */
size_type victim /** victim's identifier */
){
context_list_type* response = nullptr;
// Local-spin until the victim publishes something other than the sentinel.
while ((response = steal_response[thief].load(std::memory_order_seq_cst)) == &NO_STEAL_RESPONSE){
// Check if the victim has deceased in the meantime
if (deceased[victim].load(std::memory_order_seq_cst)){
// We have two cases to consider here:
// (1) If the victim reports deceased and then calls unblock_steal_requests(), it might be the case
// that we read deceased[victim] before it called report_deceased() and thus then attempt a steal, but made
// the steal after the victim has called unblock_steal_requests() and later terminated. In this case, we must
// reset steal_request[victim] for the next round. In this case an atomic write would suffice.
// (2) We read deceased[victim] false and make a successful steal attempt. Then the victim thread reports
// deceased and calls block_steal_requests(). We read deceased[id] true but now the victim thread will change
// steal_request using a compare&swap operation and we cannot do an atomic write here. Consider the following scenario:
// (a) The victim thread in the block_steal_requests() makes a failed compare&swap() operation because
// we currently have an active steal request. It responds to us with null.
// (b) The victim thread writes steal_request[victim] = victim so as to block the steal_requests.
// (c) The thief here writes steal_request[victim] = NO_STEAL_REQUEST. Thus it cancels the block_steal_requests()
// operation done by the victim.
// What we truly want here is to cancel our request only if we are still the active requester so we use a compare&swap().
steal_request[victim].compare_exchange_strong(thief, NO_STEAL_REQUEST,std::memory_order_seq_cst, std::memory_order_seq_cst);
return false;
}
}
// nullptr here means the victim had nothing to give (see transfer()/block_steal_requests()).
return response != nullptr;
}
/**
* A worker thread calls report_alive() to declare that it has work to do.
*/
void report_alive(
size_type id /** worker thread's identifier */
){
// Thief threads read deceased[id] before making a steal attempt (see attempt_steal()), so this
// seq_cst store re-enables this thread as a potential victim.
deceased[id].store(false, std::memory_order_seq_cst);
}
/**
* A worker thread calls report_deceased() to declare that it hasn't work to do.
*/
void report_deceased(
size_type id /** worker thread's identifier */
){
// Marking ourselves deceased steers thieves away (attempt_steal() checks this flag) and lets a
// thief spinning in wait_steal_response() give up on us.
deceased[id].store(true, std::memory_order_seq_cst);
}
/**
* A worker thread calls block_steal_requests() after it exhausts its local_contexts set and before
* attempting a steal, to inform others that it does not have any contexts to give and block futile steal requests.
*/
void block_steal_requests(
size_type id /** worker thread's identifier */
){
size_type expected = NO_STEAL_REQUEST;
/**
 * To block steal requests we store our own id in steal_request[id] because in this way
 * every steal attempt will fail in the compare&swap() operation since steal_request[id] != NO_STEAL_REQUEST.
 *
 * We must do a compare&swap() however, because it may be the case that someone has already made
 * us a steal-request. In that case, we respond with null before installing the block.
 */
if (!steal_request[id].compare_exchange_strong(expected, id, std::memory_order_seq_cst, std::memory_order_seq_cst)){
// On failure, compare_exchange_strong() has loaded the current value of steal_request[id]
// (the pending thief's identifier) into 'expected' -- no extra load is needed.
// BUGFIX: this previously read the misspelled identifier 'epxected', which does not compile.
size_type thief = expected;
steal_response[thief].store(nullptr, std::memory_order_seq_cst);
steal_request[id].store(id, std::memory_order_seq_cst);
}
}
/**
* A worker thread calls unblock_steal_requests() to re-enable steal attempts to itself.
*/
void unblock_steal_requests(
size_type id /** worker thread's identifier */
){
// Restoring the sentinel lets the thieves' compare&swap() in attempt_steal() succeed again.
steal_request[id].store(NO_STEAL_REQUEST, std::memory_order_seq_cst);
}
/**
* A worker thread calls this method to check if there is a pending steal-request to itself and responds accordingly.
*/
void handle_steal_request(
size_type id /** worker thread's identifier */
){
// A value other than the sentinel in steal_request[id] is the identifier of a waiting thief.
const size_type requester = steal_request[id].load(std::memory_order_seq_cst);
if (requester != NO_STEAL_REQUEST){
// Hand over some of our contexts (or nullptr if we have none) ...
transfer(id, requester);
// ... and accept new steal requests again.
unblock_steal_requests(id);
}
}
/**
* This method is called by a victim worker thread to respond to a steal-request by a thief worker thread.
*/
void transfer(
size_type victim, /** victim's identifier */
size_type thief /** thief's identifier */
){
// Nothing to give: answer with nullptr so wait_steal_response() reports a failed steal.
if (local_contexts.empty()){
steal_response[thief].store(nullptr, std::memory_order_seq_cst);
return ;
}
context_list_type* steal_list = &steal_free_list[victim][thief];
// The hand-off list is empty because the thief spliced its previous contents into its own
// local_contexts after the last successful steal (see attempt_steal()).
assert(steal_list->empty());
/**
 * Regardless of the steal policy used (steal-one or steal-half) we do not pass the contexts from the beginning of the list
 * but the contexts from the end. The rationale is that the contexts at the beginning of the local_contexts list with some
 * chance will reside in our cache and we want to execute them.
 */
#if defined(USE_STEAL_ONE_APPROACH)
// steal-one approach
context_list_iterator_type last = local_contexts.end();
--last;
steal_list->splice(steal_list->begin(), local_contexts, last);
#elif defined(USE_STEAL_HALF_APPROACH)
// steal-half approach
context_list_iterator_type first = local_contexts.begin();
context_list_iterator_type last = local_contexts.end();
// --last; XXX:: boost documentation http://www.boost.org/doc/libs/1_55_0/doc/html/boost/intrusive/list.html#idp33604904-bb
// says that in the splice operation below both first and last must point to elements contained in the list. That is, as if the
// range to be spliced is [first,last]. However, [first,last) is what implemented.
size_type half = local_contexts.size()/2;
std::advance(first, half);
steal_list->splice(steal_list->begin(), local_contexts, first, last);
#else
#error "steal approach for the transfer() function not defined"
#endif
// Publishing the list pointer is what releases the spinning thief in wait_steal_response().
steal_response[thief].store(steal_list, std::memory_order_seq_cst);
}
/**
* A thief worker thread calls choose_victim() to choose a victim worker thread.
*
* This implementation chooses randomly among the threads except from the thief.
*/
size_type choose_victim(
size_type thief /** worker thread thief seeks for a victim */
) const{
// Draw uniformly over all worker thread identifiers, rejecting our own until another comes up.
size_type candidate = dis(gen);
while (candidate == thief){
candidate = dis(gen);
}
return candidate;
}
/**
* This function is called by the worker threads to compute the threshold for the next round.
*/
time_type compute_threshold(
size_type id /** worker-thread's id */
){
// Our local bound: the earliest pending work we know about (context or in-flight message),
// pushed forward by the lookahead.
const time_type local_threshold = std::min(context_threshold, event_threshold) + lookahead;
// Compete in the tournament tree to agree on the global minimum across all worker threads.
// The winner's callback resets the termination detector so it is fresh for the next round.
termination_detection_type* detector = td.get();
return tournament_tree->compete(id, local_threshold,
[detector](){
detector->reset();
}
);
}
/**
* This method is called as a callback whenever the send_message() method is called on the context class.
*
* Consider a worker thread WTk that has executed a context. One of the tasks in this context wants to send a message and thus calls the send_message
* method. The send_message() method will call on_send_message() which will be executed by worker thread WTk. So WTk now can obtain the timestamp of the
* message and use it to update its event_threshold. This timestamp is passed here as parameter.
*/
void on_send_message(time_type timestamp){
// Track the earliest outgoing message timestamp; it bounds next round's threshold (see compute_threshold()).
// NOTE(review): assumes event_threshold is only written by the worker thread executing this context -- confirm.
event_threshold = std::min(event_threshold, timestamp);
}
/**
* This method is called by a worker thread when an exception is caught.
*/
void handle_exception(size_type id, std::exception_ptr eptr){
assert(eptr != nullptr);
// what we can do here that doesn't disrupt the protocol is to stop ourselves and then wait for everybody else to finish the current
// round. We then leave the notification of the exception that occurred to the tournament tree phase
// (run_simulation() competes with threshold 0 on the next round when this happens).
// make the steps as if we are out of local contexts and any steal attempt fails
report_deceased(id);
td->set_inactive(id);
block_steal_requests(id);
// store the exception we got for the main thread to know about
// we rely on the phases barrier for memory visibility
exceptions[id] = eptr;
}
};
} // namespace pdes
#endif /* CONTEXT_HPP_ */
<file_sep>/parameters.hpp
#ifndef PARAMETERS_HPP_
#define PARAMETERS_HPP_
#include <stdexcept>
#include <vector>
#include "traits.hpp"
namespace pdes{
/** \brief Defines simulation parameters
*
* class parameters can be used to specify the simulation parameters to be used by the simulation engine.
*
* The client has to specify the following parameters:
* - The simulation's lookahead (lookahead parameter)
* - When the simulation ends (endtime parameter)
* - Number of worker threads to use (num_threads parameter)
* - Core affinity for each worker thread.
* - This parameter has a form of a vector where in place i, where i ranges from 0 to num_threads - 1, the core affinity
* of thread i is stored.
*/
class parameters{
public:
using time_type = traits::time_type;
using size_type = traits::size_type;
parameters() = default;
parameters(const parameters&) = default;
parameters(parameters&&) = default;
parameters& operator=(const parameters&) = default;
parameters& operator=(parameters&&) = default;
// forward-declarations of helper setters
class thread_aff_setter;
class num_threads_setter;
class endtime_setter;
class lookahead_setter;
//! A helper object to set the thread affinities for a simulation parameters object
class thread_aff_setter{
public:
friend class num_threads_setter;
/**
* Sets the thread affinities.
*
* \param a Affinity vector for each thread.
* \throw invalid_argument If the size of the vector a doesn't match the number of worker threads.
*/
void set_thread_aff(std::vector<size_type> a){
params->set_thread_aff(a);
}
private:
parameters* params{nullptr};
/**
* Constructs a new thread_aff_setter object in order to set the thread affinities parameter for p.
*
* \param p The parameters object for which to set the thread affinities parameter
*/
thread_aff_setter(parameters* p) : params{p} {}
thread_aff_setter(const thread_aff_setter&) = default;
thread_aff_setter& operator=(const thread_aff_setter&) = default;
thread_aff_setter(thread_aff_setter&&) = default;
thread_aff_setter& operator=(thread_aff_setter&&) = default;
};
//! A helper object to set the number of threads for a simulation parameters object
class num_threads_setter{
public:
/**
* Sets the number of worker threads to use for the simulation.
*
* \param n Number of worker threads.
* \return A thread_aff_setter object to set the thread affinities parameter for the simulation
* \throw invalid_argument If an invalid number for worker threads has been specified.
*/
thread_aff_setter set_num_threads(size_type n){
params->set_num_threads(n);
return thread_aff_setter{params};
}
private:
parameters* params{nullptr};
friend class endtime_setter;
/**
* Constructs a new num_threads_setter object in order to set the number of threads parameter for p.
*
* \param p The parameters object for which to set the number of threads parameter
*/
num_threads_setter(parameters* p) : params{p} {}
num_threads_setter(const num_threads_setter&) = default;
num_threads_setter& operator=(const num_threads_setter&) = default;
num_threads_setter(num_threads_setter&&) = default;
num_threads_setter& operator=(num_threads_setter&&) = default;
};
//! A helper object to set the endtime for a simulation parameters object
class endtime_setter{
public:
/**
* Sets the endtime parameter for the simulation.
*
* \param e Simulation's endtime
* \return A num_threads_setter object to set the number of worker threads parameter for the simulation.
*/
num_threads_setter set_endtime(time_type e){
params->set_endtime(e);
return num_threads_setter{params};
}
private:
parameters* params{nullptr};
friend class lookahead_setter;
/**
* Constructs a new endtime_setter object in order to set the endtime parameter for p.
*
* \param p The parameters object for which to set the endtime parameter
*/
endtime_setter(parameters* p) : params{p} {}
endtime_setter(const endtime_setter&) = default;
endtime_setter& operator=(const endtime_setter&) = default;
endtime_setter(endtime_setter&&) = default;
endtime_setter& operator=(endtime_setter&&) = default;
};
//! A helper object to set the lookahead for a simulation parameters object
class lookahead_setter{
public:
/**
* Sets the lookahead parameter for the simulation parameter object for which this lookahead_setter was obtained.
*
* \param l Simulation's lookahead
* \return A endtime_setter object to set the endtime simulation parameter.
*/
endtime_setter set_lookahead(time_type l){
params->set_lookahead(l);
return endtime_setter{params};
}
private:
parameters* params{nullptr};
friend class parameters;
/**
* Constructs a new lookahead_setter object in order to set the lookahead parameter for p.
*
* \param p The parameters object for which to set the lookahead parameter
*/
lookahead_setter(parameters* p) : params{p} {}
lookahead_setter(const lookahead_setter&) = default;
lookahead_setter& operator=(const lookahead_setter&) = default;
lookahead_setter(lookahead_setter&&) = default;
lookahead_setter& operator=(lookahead_setter&&) = default;
};
/**
* Obtain a lookahead_setter object.
*
* \return A lookahead_setter object to set the lookahead parameter for this parameters object.
*/
lookahead_setter get_lookahead_setter(){
return lookahead_setter{this};
}
// Getters
/**
* Returns the lookahead parameter for the simulation.
*
* \return Simulation's lookahead
*/
time_type get_lookahead() const{ return lookahead; }
/**
* Returns the endtime parameter for the simulation.
*
* \return Simulation's endtime
*/
time_type get_endtime() const{ return endtime; }
/**
* Returns the number of worker threads parameter for the simulation.
*
* \return Number of worker threads for the simulation.
*/
size_type get_num_threads() const{ return num_threads; }
/**
* Returns the affinity for each worker thread.
*
* \return Each thread's affinity.
*/
std::vector<size_type> get_thread_aff() const{ return thread_aff; }
private:
time_type lookahead; //! The lookahead for the simulation
time_type endtime; //! The time to end the simulation
size_type num_threads; //! Number of worker threads to use in the engine
std::vector<size_type> thread_aff; //! Core affinity per thread
// Setters
/**
* Sets the lookahead parameter for the simulation.
*
* \param l Simulation's lookahead
* \return Reference to the this parameters object.
*/
parameters& set_lookahead(time_type l){ return lookahead = l, *this; }
/**
* Sets the endtime parameter for the simulation.
*
* \param e Simulation's endtime
* \return Reference to the this parameters object.
*/
parameters& set_endtime(time_type e){ return endtime = e, *this; }
/**
* Sets the number of worker threads to use for the simulation.
*
* \param n Number of worker threads.
* \return Reference to the this parameters object.
* \throw invalid_argument If an invalid number for worker threads has been specified.
*/
parameters& set_num_threads(size_type n){ return check_num_threads(n), num_threads = n, *this; }
/**
* Sets the thread affinities.
*
* \param a Affinity vector for each thread.
* \return Reference to the this parameter object.
* \throw invalid_argument If the size of the vector a doesn't match the number of worker threads.
*/
parameters& set_thread_aff(std::vector<size_type> a){ return check_thread_aff(a), thread_aff = a, *this; }
// Helper functions to check the validity of the parameters
void check_num_threads(size_type n) const{
if (n <=0){
throw std::invalid_argument("invalid number of worker threads specified in simulation parameters");
}
}
void check_thread_aff(std::vector<size_type>& a) const{
if (a.size() != num_threads){
throw std::invalid_argument("thread affinities vector size does not match number of worker threads");
}
}
};
/**
* This helper function constructs a parameters object to be used by the engine where the thread affinities policy is specified by
* assigning each worker thread to each core in a cyclic manner. For this reason the number of available hardware contexts must be
* specified as num_cores.
*
* \return A parameters object for the parameters requested.
*/
inline parameters default_parameters(traits::time_type lookahead, //! Simulation's lookahead parameter
traits::time_type endtime, //! Simulation's endtime parameter
traits::size_type num_threads, //! Number of worker threads to use for the simulation
traits::size_type num_cores //! Number of hardware contexts available
){
// Assign worker threads to the available hardware contexts in a round-robin fashion.
std::vector<traits::size_type> affinities;
affinities.reserve(num_threads);
for (traits::size_type thread_id = 0; thread_id < num_threads; ++thread_id){
affinities.push_back(thread_id % num_cores);
}
// Drive the fluent setter chain to populate all four parameters.
parameters params;
params.get_lookahead_setter().set_lookahead(lookahead).set_endtime(endtime).set_num_threads(num_threads).set_thread_aff(affinities);
return params;
}
} // namespace pdes
#endif /* PARAMETERS_HPP_ */
<file_sep>/README.md
# PDES-pspp
<file_sep>/detail/priority_compare_generator.hpp
#ifndef PRIORITY_COMPARE_GENERATOR_HPP_
#define PRIORITY_COMPARE_GENERATOR_HPP_
#include <cassert>
#include <functional>
namespace pdes{
namespace detail{
/** \brief Priority comparison between tasks.
*
* Simulation tasks that are derived from base class task are executed in timestamp order. priority_compare provides the function
* to compare tasks according to their timestamp order, so that tasks can be added in ordered containers. To resolve any ties among the
* timestamps of two tasks, priority_compare uses lexicographical ordering on the timestamp and local priority of each task.
*
* That is, task A precedes task B if and only if:
 * timestamp(A) < timestamp(B) || (timestamp(A) == timestamp(B) && priority(A) < priority(B))
*/
template<class Task>
struct priority_compare : public std::binary_function<Task*, Task*, bool>{
/**
 * Lexicographic comparison of two tasks: first by timestamp, then by local priority to break ties.
 *
 * \param lhs Pointer to the first task (must not be null)
 * \param rhs Pointer to the second task (must not be null)
 * \return True if lhs strictly precedes rhs and false otherwise.
 */
bool operator()(const Task* lhs, const Task* rhs) const{
assert(lhs != nullptr);
assert(rhs != nullptr);
const auto lhs_time = lhs->get_timestamp();
const auto rhs_time = rhs->get_timestamp();
if (lhs_time != rhs_time){
return lhs_time < rhs_time;
}
// Equal timestamps: the task with the smaller local priority value precedes the other.
return lhs->get_priority() < rhs->get_priority();
}
};
//! Metafunction that yields the comparator type used to order tasks of type Task (see event_pool_generator).
template<class Task>
struct priority_compare_generator{
using type = priority_compare<Task>;
};
} // namespace detail
} // namespace pdes
#endif /* PRIORITY_COMPARE_GENERATOR_HPP_ */
<file_sep>/detail/event_pool_generator.hpp
#ifndef EVENT_POOL_GENERATOR_HPP_
#define EVENT_POOL_GENERATOR_HPP_
#include <functional>
#include <boost/heap/fibonacci_heap.hpp>
#include "priority_compare_generator.hpp"
namespace pdes{
namespace detail{
/**
 * Generates the types to use for the event pool structures and the handles for those structures.
 *
 * \tparam Task The type of the task
 */
template<class Task>
struct event_pool_generator{
//! Heap structure type used for the event-pool structures. The task comparator is
//! negated because boost::heap containers order by a "less" comparator with the
//! maximum on top; negating the precedes-relation puts the next task to execute on top.
//! BUG FIX: `typename` is required before the dependent qualified name
//! priority_compare_generator<Task>::type; without it this does not compile.
//! NOTE(review): std::binary_negate yields !(a<b), i.e. a>=b, which is not a strict
//! ordering when two tasks tie on (timestamp, priority); Boost.Heap formally requires
//! a strict weak order — confirm ties are acceptable here. Also, std::binary_negate is
//! removed in C++17; consider std::not_fn when the toolchain allows.
using event_pool = boost::heap::fibonacci_heap<Task*,
boost::heap::compare<std::binary_negate<typename priority_compare_generator<Task>::type> > >;
//! Type used for indexing into the event_pool type.
using event_pool_handle = typename event_pool::handle_type;
};
} // namespace detail
} // namespace pdes
#endif /* EVENT_POOL_GENERATOR_HPP_ */
<file_sep>/traits.hpp
#ifndef TRAITS_HPP_
#define TRAITS_HPP_
#include <cstddef>
#include <cstdint>
namespace pdes{
/** \brief Useful types for simulation time, priorities etc.
 *
 * Central definition point for the integral types used throughout the simulation engine,
 * so that they can be changed in a single place if the need ever arises.
 */
struct traits{
//! Type for simulation time. The current implementation uses a 64-bit unsigned integer type, whose range of values is enough
//! for all simulations. Even though time_type could in theory be changed in the future, an integer type will still be used, as opposed
//! to a double or float for example. The problem with floating-point numbers is that due to rounding errors they end up being slightly
//! imprecise and, consequently, floating-point numbers that ought to be equal often differ in equality tests. This behavior could result
//! in erroneous client code or would require from the client to make complex equality tests for the simulation time adding complexity and
//! overhead to the simulation.
//! Note: because this type is unsigned, simulated time cannot be negative and wrap-around is silent.
using time_type = std::uint_fast64_t;
//! Type to represent sizes, priorities, etc. Unsigned; lower numeric values denote higher scheduling precedence
//! (see priority_compare in detail/priority_compare_generator.hpp).
using size_type = std::uint_fast64_t;
};
} // namespace pdes
#endif /* TRAITS_HPP_ */
<file_sep>/task.hpp
#ifndef TASK_HPP_
#define TASK_HPP_
#include <utility>
#include "event_pool.hpp"
#include "traits.hpp"
namespace pdes{
// forward declaration of class context
class context;
/** \brief Base class for simulation tasks.
*
* Implementation notes: The implementation has been adapted from the original PSPP code.
*/
class task{
public:
using time_type = traits::time_type; //!< Type for the timestamp of the task
using size_type = traits::size_type; //!< Type for the local priority of the task
/**
* Constructs a new task
*
* \param t The timestamp of the constructed task (defaults to simulated time zero)
* \param prio The local priority of the task (defaults to the lowest priority value)
*/
task(time_type t = time_type{}, size_type prio = size_type{}) :timestamp{t}, priority{prio}, ctx{nullptr} {}
virtual ~task(){}
// non-copyable: a task is uniquely tied to its slot (pool_handle) in a context's event-pool
task(const task&) = delete;
task& operator=(const task&) = delete;
// movable
//! Move constructor. The moved-from task is detached from its context (its ctx becomes nullptr).
task(task&& other) : timestamp(other.timestamp), priority(other.priority), ctx(other.ctx), pool_handle(std::move(other.pool_handle)){
other.ctx = nullptr;
}
//! Move assignment operator. Safe against self-assignment; the moved-from task is detached from its context.
task& operator=(task&& other){
if (this != &other){
timestamp = other.timestamp;
priority = other.priority;
ctx = other.ctx;
pool_handle = std::move(other.pool_handle);
other.ctx = nullptr;
}
return *this;
}
/**
* This method must be overriden by concrete simulation tasks. This method is called each time the task executes.
*/
virtual void run() = 0;
/**
* Assigns prio as the local priority of this task.
*
* \param prio The local priority of this task.
*/
void set_priority(size_type prio){ priority = prio; }
/**
* Returns the local priority of this task.
*
* \return The local priority of this task.
*/
size_type get_priority() const{ return priority; }
/**
* Returns the timestamp of this task; that is, the simulated time at which this task will execute.
*
* \return When this task will execute in simulated time.
*/
time_type get_timestamp() const{ return timestamp; }
/**
* Make the task BUSY for a duration t >= 0.
*
* This method should only be called while this task executes (in the run() method).
*
* \param t Duration during which this task must be kept busy.
*/
void wait_for(time_type t);
/**
* Put the task to sleeping mode.
*
* This method should only be called while this task executes (in the run() method).
*/
void sleep();
/**
* Initialize the task to the INIT state.
*
* This method should only be called while this task executes (in the run() method).
*/
void stop();
/**
* Wakes a sleeping task. If the task is in SLEEPING mode then it is awaked and executed at time t with the
* current local priority of this task.
*
* \param t Simulated time when this task is awaken.
* \throw bad_alloc If there is no enough memory to add the task
*/
void wakeup(time_type t);
/**
* Wakes a sleeping task. If the task is in SLEEPING mode then it is awaked and executed at time t with prio as
* its priority.
*
* \param t Simulated time when this task is awaken
* \param prio The priority of this task
* \throw bad_alloc If there is no enough memory to add the task
*/
void wakeup(time_type t, size_type prio);
private:
friend class context;
time_type timestamp; //!< Simulated time when this task is to be executed
size_type priority; //!< Local priority of this task.
context* ctx; //!< The context where this task is spawned; nullptr when detached (INIT state or moved-from).
event_pool_handle pool_handle; //!< Position of this task in the event-pool
/**
* Assigns the simulated time when this task must execute.
*
* \param t Simulated time when this task must execute.
*/
void set_timestamp(time_type t){ timestamp = t; }
/**
* Assigns the pointer to the context where this task has been spawned.
*
* NOTE(review): assert() is used here but <cassert> is not included directly by this
* header — it is presumably pulled in transitively via event_pool.hpp; confirm.
*
* \param p Pointer to the context where this task is spawned.
*/
void set_context(context* p){
assert(p != nullptr);
ctx = p;
}
/**
* Assigns the pool handle for this task (its place in the event-pool).
*
* \param ph Handle for this task in the event pool.
*/
void set_pool_handle(event_pool_handle ph){
pool_handle = ph;
}
/**
* Returns a pointer to the event-pool structure where this task is spawned.
*
* \return Pointer to the event-pool structure where this task is spawned.
*/
context* get_context() const{ return ctx; }
};
} // namespace pdes
#endif /* TASK_HPP_ */
<file_sep>/task.cpp
#include <cassert>
#include "task.hpp"
#include "context.hpp"
namespace pdes{
/**
 * Keep this task busy for t units of simulated time by pushing its timestamp forward
 * and letting its context's event-pool re-order it.
 *
 * Precondition: called from within run(), i.e. the task is attached to a context.
 */
void task::wait_for(task::time_type t){
// Note: time_type is unsigned, so the original `assert(t >= 0)` was a tautology
// (and triggers -Wtype-limits); the meaningful precondition is the attached context.
assert(ctx != nullptr);
timestamp += t;
// this method is executed only when the task is running so we can safely manipulate the local fel of the context
// associated with this task
ctx->fel.increase(pool_handle);
}
/**
 * Put this task into SLEEPING mode: it is removed from its context's future event
 * list but stays attached to the context (ctx is kept) so that wakeup() can re-insert it.
 *
 * Precondition: called from within run(), i.e. this task is the one currently executing.
 */
void task::sleep(){
assert(ctx != nullptr);
// and we also must remove us from the fel. we can do a pop() here because we know
// that we are running and thus we are at the start of the fel
ctx->fel.pop();
}
/**
 * Return this task to the INIT state: it is removed from its context's future event
 * list and detached from the context (ctx becomes nullptr), unlike sleep() which keeps
 * the context so the task can later be woken up.
 *
 * Precondition: called from within run(), i.e. this task is the one currently executing.
 */
void task::stop(){
assert(ctx != nullptr);
// we must remove us from the fel. we can do a pop() here because we know
// that we are running and thus we are at the start of the fel.
ctx->fel.pop();
// re-initialize here
ctx = nullptr;
}
/**
 * Wake this sleeping task at simulated time t, keeping its current local priority.
 * Delegates to the two-argument overload.
 *
 * BUG FIX: the definition was missing the `task::` qualifier, which made it a free
 * function that could not access the member `priority` (a compile error).
 */
void task::wakeup(task::time_type t){
wakeup(t, priority);
}
/**
 * Wake this sleeping task: schedule it at simulated time t with local priority prio
 * by re-inserting it into its context's future event list.
 *
 * BUG FIX: the definition was missing the `task::` qualifier, which made it a free
 * function that could not access the members `ctx`, `timestamp`, `priority` and
 * `pool_handle`, and left the unqualified `size_type` unresolved (a compile error).
 */
void task::wakeup(task::time_type t, task::size_type prio){
assert(ctx != nullptr);
// following the usage guidelines we know that we are executing at the local fel
// and we can safely add the task at the local fel here
timestamp = t;
priority = prio;
pool_handle = ctx->fel.push(this);
}
} // namespace pdes
<file_sep>/concurrent/termination_detection_barrier.hpp
/** \file termination_detection_barrier.hpp
*
* This file contains the termination_detection_barrier class that implements a Termination Detection Barrier as
* introduced in section 17.6 of 'The Art of Multiprocessor Programming' by <NAME> and <NAME>.
*
* author: <NAME> (<EMAIL>)
*/
#ifndef __TERMINATION_DETECTION_BARRIER_HPP_IS_INCLUDED__
#define __TERMINATION_DETECTION_BARRIER_HPP_IS_INCLUDED__ 1
#include <cstddef>
#include <atomic>
namespace concurrent{
/**
* A termination detection barrier enables a set of n threads to detect when a multi-threaded computation as a whole has
* terminated.
*
* Each thread can be either 'active', meaning that is has work to do, or 'inactive', in which case it has none. Once all threads
* have become inactive, then no thread will ever become active again and the multi-threaded computation is terminated.
*
* A thread declares 'active' using the set_active() function and 'inactive' using the set_inactive() function. The terminate() function
* returns true if all threads have declared inactive.
*
* It is assumed that each participating thread has a unique identifier in the range [0,n), where n is provided in the constructor call.
*
* Do not use this class to enforce a happens-before relationship among the participating threads (this is the default behavior).
* Otherwise, to use sequentially consistent semantics define NO_MEMORY_ORDER_OPTIMIZATION before including this header file.
*
* Reference: The Art of Multiprocessor Programming section 17.6 'Termination Detecting Barriers'
*/
template<class SizeType = std::size_t>
class termination_detection_barrier{
public:
	using size_type = SizeType;
	/**
	 * Implementation Notes:
	 *
	 * A single atomic counter tracks the number of *inactive* threads. It starts at n
	 * (all threads inactive). An inactive->active transition decrements it; an
	 * active->inactive transition increments it. The computation has terminated exactly
	 * when the counter equals n again.
	 *
	 * Since this object is not meant to establish happens-before relationships among
	 * the participating threads, all atomic operations may be relaxed; defining
	 * NO_MEMORY_ORDER_OPTIMIZATION switches every operation to sequential consistency.
	 */
	/**
	 * Construct a termination_detection_barrier for a multi-threaded computation with n threads.
	 *
	 * Note: the argument n is not validated; it must be >= 1.
	 *
	 * CAUTION: client code must ensure that all participating threads observe the
	 * effects of this construction before using the object.
	 *
	 * \param n The number of threads expected to use this object.
	 */
	termination_detection_barrier(size_type n) : size{n}, count{n}{}
	// Neither copyable nor movable: the participating threads are tied to this instance.
	termination_detection_barrier(const termination_detection_barrier&) = delete;
	termination_detection_barrier& operator=(const termination_detection_barrier&) = delete;
	termination_detection_barrier(termination_detection_barrier&&) = delete;
	termination_detection_barrier& operator=(termination_detection_barrier&&) = delete;
	/**
	 * A thread with identifier id calls set_active(id) before it starts looking for work.
	 */
	void set_active(size_type id){
		(void)id; // the identifier is part of the documented protocol but unused here
		set(true);
	}
	/**
	 * A thread with identifier id calls set_inactive(id) when it is definitively out of work.
	 */
	void set_inactive(size_type id){
		(void)id; // the identifier is part of the documented protocol but unused here
		set(false);
	}
	/**
	 * Returns true when all threads are inactive.
	 */
	bool terminate() const{
		// #inactive threads == #participating threads <=> the computation is over
		return count.load(barrier_order()) == size;
	}
	/**
	 * Resets the state of this termination_detection_barrier (as if it was constructed again).
	 */
	void reset(){
		count.store(size, barrier_order());
	}
private:
	size_type size;                 /** number of threads involved in the computation */
	std::atomic<size_type> count;   /** number of inactive threads */
	/**
	 * Memory order applied to every atomic operation of this object: relaxed by
	 * default, sequentially consistent when NO_MEMORY_ORDER_OPTIMIZATION is defined.
	 */
	static constexpr std::memory_order barrier_order(){
#ifdef NO_MEMORY_ORDER_OPTIMIZATION
		return std::memory_order_seq_cst;
#else
		return std::memory_order_relaxed;
#endif
	}
	/**
	 * Helper shared by set_active() and set_inactive(): adjust the inactive-thread count.
	 */
	void set(bool active){
		if (active){
			count.fetch_sub(1, barrier_order()); // one fewer inactive thread
		}
		else{
			count.fetch_add(1, barrier_order()); // one more inactive thread
		}
	}
};
} // namespace concurrent
#endif
<file_sep>/context.hpp
#ifndef CONTEXT_HPP_
#define CONTEXT_HPP_
#include <cassert>
#include <mutex>
#include <boost/intrusive/list_hook.hpp>
#include "event_pool.hpp"
#include "task.hpp"
#include "traits.hpp"
namespace pdes{
//! Tag used for the base hook in the intrusive list for contexts in the engine.
struct context_list_base_hook_tag;
//! Base hook for the intrusive list keeping contexts in the engine.
using context_list_base_hook = boost::intrusive::list_base_hook<boost::intrusive::tag<context_list_base_hook_tag>,
boost::intrusive::link_mode<boost::intrusive::normal_link> >;
/** \brief A simulation context that keeps simulation tasks.
*
* Each context conceptually represents a Logical Process (LP) that has an event-queue with tasks to execute in timestamp order
* and which can receive timestamped event messages from other Logical Processes (that is, contexts).
*/
class context : public context_list_base_hook{
public:
using time_type = traits::time_type; //!< Type for simulated time (see traits).
using size_type = traits::size_type; //!< Type for task priorities (see traits).
/**
* Construct a new context with current time _now. The default value of _now is is time_type{}.
*
* \param _now The simulated time from which to start this context.
*/
context(time_type _now = time_type{}) : current_time{_now} {}
~context(){
// we must deallocate user simulation tasks. remember that the policy for user tasks is that we use new/delete for them.
// NOTE(review): only the main fel is cleaned up here; tasks still sitting in
// messages_fel (sent but never drained) are not deleted — confirm that the engine
// guarantees drain_messages() has run before a context is destroyed.
using iterator = event_pool::iterator;
for (iterator it = fel.begin(); it != fel.end(); ++it){
delete *it;
}
}
// non-copyable and non-movable: a context owns its tasks and is referenced by them (task::ctx)
context(const context&) = delete;
context& operator=(const context&) = delete;
context(context&&) = delete;
context& operator=(context&&) = delete;
/**
* Spawn the simulation task pointed to by simtask to be executed at simulated time t. The priority of the task spawned is
* given by parameter prio.
*
* Tasks must be spawned in a context in increasing timestamp order. That is, if the current simulated time of this context is t, then
* a task that is spawned must be such that its timestamp is greater than or equal to t. This method uses runtime assertions to test
* this condition, as well as to test whether simtask is a null pointer or not, and does not throw exceptions.
*
* This method should be called only when the contexts are created at the start of the simulation and the initial tasks are spawned to
* them before the engine is started. After that, it can only be called by the engine itself. User tasks should not explicitly call spawn() on any task
* while the simulation is executing (due to race conditions that can and will occur). Instead, call method send_message() on the target context
* to achieve the same result.
*
* \param simtask Pointer to the simulation task
* \param t Simulated time when the task must be executed
* \param prio The local priority of the task spawned.
* \throw bad_alloc If memory allocation cannot happen in order to record the task
*/
void spawn(task* simtask, time_type t, size_type prio);
/**
* Spawn the simulation task pointed to by simtask to be executed at time now() + delay. The priority of the task spawned is given by
* parameter prio.
*
* The same usage guidelines apply as method spawn().
*
* \param simtask Pointer to the simulation task
* \param delay Delay from the current time after which to execute the simulation task.
* \param prio The local priority of the task spawned.
* \throw bad_alloc If memory allocation cannot happen in order to record the task
*/
void spawn_delayed(task* simtask, time_type delay, size_type prio);
/**
* Spawn the simulation task pointed to by simtask in sleeping state.
*
* The same usage guidelines apply as method spawn().
*
* \param simtask Pointer to the simulation task
*/
void spawn_sleeping(task* simtask);
/**
* This method is used by any context to send a message to this context. The message is represented by the simulation task pointed to by
* simtask which will be executed at time t. The priority of the message is given by parameter prio.
*
* To avoid race conditions and extra synchronization this method does not check that the timestamp of the message is greater than or
* equal to now(). However this condition will be checked when the message will be executed.
*
* \param simtask Pointer to the simulation task.
* \param t The simulated time when the task is to be executed.
* \param prio The local priority of the task.
* \throw bad_alloc If memory allocation cannot happen in order to record the message
*/
void send_message(task* simtask, time_type t, size_type prio);
/**
* Returns the current simulation time of this context. This returned time has one of the following meanings:
* 1. The start simulated time of this context as specified in the constructor call.
* 2. The timestamp of the currently running simulation task.
* 3. The timestamp of the simulation task that was lastly executed.
*
* That is, this method does not return the timestamp of the task at the top of this context's event-pool.
*
* This method is intended to be used by the engine. The only guaranteed safe place to call this method in client code is from within
* the run() method of the task executing at the top of this context.
*
* \return The current simulation time of this context.
*/
time_type now() const{ return current_time; }
private:
friend class task;
friend class engine;
time_type current_time; //!< Current simulation time
event_pool fel; //!< Future Event List structure storing the tasks of this context
event_pool messages_fel; //!< Future Event List structure storing the messages send to this context during a round
std::mutex messages_lock; //!< Mutex to guard access to messages_fel
/**
* Executes the simulation task at the top of the future event list.
*/
void run_top();
/**
* Returns whether there is a task at the top of the main fel or not.
*
* \return True if the main fel is not empty; otherwise, false is returned.
*/
bool top_ready() const{ return !fel.empty(); }
/**
* This method checks if the task at the top of the context event-pool structure has a timestamp less than or equal to
* endtime. If yes then true is returned. Otherwise, false is returned (which is the value returned also if no tasks exist
* in the main fel).
*
* \param endtime Maximum simulated time to check for
* \return True if the condition holds; otherwise false is returned.
*/
bool is_top_task_le_threshold(time_type endtime) const{ return top_ready() && top_task_timestamp() <= endtime; }
/**
* Returns the timestamp of the task on the top of the context. Note that this function assumes that the context is not empty.
*
* \return The timestamp of the task on the top of the context.
*/
time_type top_task_timestamp() const{ return fel.top()->get_timestamp(); }
/**
* Execute simulation tasks from the local future event list of this context until either there are no more
* tasks to execute or endtime has been reached in simulated time.
*
* This method is intended to be executed only by the engine (thus it is private).
*
* \param endtime Maximum simulated time to reach.
*/
void execute_until(time_type endtime);
/**
* This method drains the messages in the messages_fel into the main fel.
*/
void drain_messages();
};
} // namespace pdes
#endif /* CONTEXT_HPP_ */
<file_sep>/concurrent/simple_barrier.hpp
#ifndef SIMPLE_BARRIER_HPP_
#define SIMPLE_BARRIER_HPP_
#include <cstddef>
#include <stdexcept>
#include <mutex>
#include <condition_variable>
namespace concurrent{
/** \brief A simple cyclic (reusable) thread barrier built on a mutex and a condition variable.
 *
 * n threads call await(); each blocks until all n have arrived, after which the barrier
 * automatically resets for the next round. A generation counter distinguishes rounds and
 * guards against spurious wakeups.
 */
template<class SizeType = std::size_t>
class simple_barrier{
public:
	using size_type = SizeType;
	/**
	 * Construct a barrier for n participating threads.
	 *
	 * \param n Number of threads that must call await() before any of them is released.
	 * \throw std::invalid_argument If n == 0.
	 */
	simple_barrier(size_type n) : expected{n}, arrived{0}, generation{0} {
		if (n == 0){
			throw std::invalid_argument("invalid argument to simple_barrier constructor");
		}
	}
	// non-copyable
	simple_barrier(const simple_barrier&) = delete;
	simple_barrier& operator=(const simple_barrier&) = delete;
	// non-movable. BUG FIX: these were declared "= default", but std::mutex and
	// std::condition_variable are not movable, so the defaulted operations were
	// implicitly deleted anyway; declaring them deleted documents the reality.
	simple_barrier(simple_barrier&&) = delete;
	simple_barrier& operator=(simple_barrier&&) = delete;
	/**
	 * Block the calling thread until `expected` threads have called await() in the
	 * current round. The last thread to arrive releases the others and starts a new round.
	 */
	void await(){
		std::unique_lock<std::mutex> lk{lock};
		size_type current_generation = generation;
		++arrived;
		if (arrived == expected){
			// last thread to arrive: advance to the next round and release the waiters
			++generation;
			arrived = 0;
			// BUG FIX: the original also called lk.release() after unlock(), abandoning
			// the lock object for no reason; unlocking before notify_all() is sufficient
			// (and avoids waking threads only to block on the mutex).
			lk.unlock();
			cond.notify_all();
		}
		else{
			// wait until the generation changes; re-checking the predicate protects
			// against spurious wakeups
			while (current_generation == generation){
				// BUG FIX: the original called cond.wait(lock) with the raw std::mutex;
				// std::condition_variable::wait requires the std::unique_lock.
				cond.wait(lk);
			}
		}
	}
private:
	size_type expected;   // number of threads per round
	size_type arrived;    // threads arrived so far in the current round
	size_type generation; // round counter; incremented when a round completes
	std::mutex lock;
	std::condition_variable cond;
};
} // namespace concurrent
#endif /* SIMPLE_BARRIER_HPP_ */
<file_sep>/concurrent/tournament_tree.hpp
/** \file tournament_tree.hpp
* \brief Contains the tournament_tree class.
*
* @author <NAME>
* @email <EMAIL>
*/
#ifndef __TOURNAMENT_TREE_HPP_IS_INCLUDED__
/* BUG FIX: the guard macro was misspelled (__TOURNAMEN_TREE_HPP_IS_INCLUDED__), so it
 * never matched the #ifndef above and the header was not protected against double
 * inclusion. NOTE(review): identifiers containing double underscores are reserved for
 * the implementation; consider renaming the guard (e.g. TOURNAMENT_TREE_HPP_INCLUDED). */
#define __TOURNAMENT_TREE_HPP_IS_INCLUDED__ 1
#include<cassert>
#include<functional>
#include<memory>
#include<type_traits>
#include<stdexcept>
#include<atomic>
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE (64)
#endif
namespace concurrent{
namespace internal{
/**
 * Tests whether a particular type is suitable for the implementation of the tournament_tree,
 * that is, whether it has nothrow copy/move constructors and assignment operators.
 *
 * We make this test because the tournament_tree implementation for the time being does not handle exceptions
 * (which would complicate the implementation) and thus we require that operations on the Key type be non-throw.
 *
 * Logic: the copy constructor and copy assignment must exist and be noexcept. The move
 * constructor and move assignment are optional, but *if* they exist they must be noexcept
 * too — hence the std::conditional: when the move operation is absent the corresponding
 * conjunct collapses to std::true_type and does not veto the type.
 */
template<typename T>
struct is_key_compliant : public std::integral_constant<
bool,
std::is_nothrow_copy_constructible<T>::value // copy constructor is required
&& std::is_nothrow_copy_assignable<T>::value // copy assignment operator is required
&& std::conditional<std::is_move_constructible<T>::value,
std::is_nothrow_move_constructible<T>,
std::true_type>::type::value // move constructor not required, but must be noexcept if present
&& std::conditional<std::is_move_assignable<T>::value,
std::is_nothrow_move_assignable<T>,
std::true_type>::type::value // move assignment operator not required, but must be noexcept if present
>{
};
} // namespace internal
/** \class tournament_tree
* \brief Implementation of a tournament tree.
*
* tournament_tree implements a tournament tree for n threads. It is assumed that each thread has a unique index in the range [0,n).
* A tournament tree can be used to select among the values from the n threads of type Key, the one value which is the winner according to
* a comparison function of type Compare. By default, std::less<Key> is used as Compare and thus the minimum over all values is chosen at the end.
*
* A user supplied key comparison function Compare must have the following prototype:
* bool operator()(const Key& lhs, const Key& rhs) const;
* , and return true if lhs is the winner and false otherwise.
*/
template<class Key, class Compare = std::less<Key> >
class tournament_tree{
public:
// enable only for compliant types
static_assert(internal::is_key_compliant<Key>::value,
"tournament_tree requires nothrow constructors and assignment operators");
using size_type = unsigned int; // To represent the identifier of a participant as well as levels
using key_type = Key;
using key_compare = Compare;
// Explicit prohibit copy and move operations. The rationale is that a tournament tree is created
// for a set of threads that are "tied" to it.
tournament_tree(const tournament_tree&) = delete;
tournament_tree& operator=(const tournament_tree&) = delete;
tournament_tree(tournament_tree&&) = delete;
tournament_tree& operator=(tournament_tree&&) = delete;
/**
* Imlementation details:
*
* The implementation is based on the presentation of tournament trees in section 2.1.5 'Mutex for n Processes :
* A Tournament-Based Algorithm' from the book 'Concurrent Programming: Algorithms, Principles, and Foundations'
* by <NAME>. The algorithm has been adapted so that it works for any number of threads and not just with n being a power of 2.
*
* Also, this is a memory-order optimized version utilizing relaxed and acquire-release memory orderings.
*/
private:
/** \brief A match object determines the result of a match between two participants.
*
* A tournament tree consists of a number of match nodes, where at each match node two participants meet and compete.
* The participants are assumed to have indices 0 and 1. To avoid ties we use lexicographical ordering on the value of the items
* and the indices.
*/
class match{
private:
/**
* Comparison based on the lexicographical order of (a,i) and (b,j)
*/
struct tie_compare{
bool operator()(const key_type& a, size_type i, const key_type& b, size_type j) const{
return kcmp(a,b) || (!kcmp(b,a) && i < j);
}
// Note on optimization applied on the comparison function:
//
// Normally the lexicographic order implies the following comparison:
// a < b || (a == b && i < j)
//
// Due to the short-circuit evaluation rules of C++ if a < b then the (a == b && i < j)
// part will not be evaluated. So, if the part (a == b && i < j) is evaluated we know
// that a >= b. To establish that a == b we need only test whether b >= a. And thus we use
// !kcmp(b,a). Notice that if we had instead tested for equality between a and b (which we would
// have to do in case we didn't had short-circuit evaluation) because the comparison function kcmp
// is based on equivalence we would have to do the following test : !kcmp(a,b) && !kcmp(b,a).
// We get rid of the duplicate test !kcmp(a,b).
key_compare kcmp;
};
public:
enum class result{ winner, loser };
match(){
// initialize a match object
#if defined(NO_MEMORY_ORDER_OPTIMIZATION)
arrived[0].store(false, std::memory_order_seq_cst);
arrived[1].store(false, std::memory_order_seq_cst);
done.store(false, std::memory_order_seq_cst);
#else
arrived[0].store(false, std::memory_order_relaxed);
arrived[1].store(false, std::memory_order_relaxed);
done.store(false, std::memory_order_relaxed);
#endif
}
match(const match&) = delete;
match& operator=(const match&) = delete;
match(match&&) = delete;
match& operator=(match&&) = delete;
/**
* Notes on the algorithm used:
* A match node is an object where two participants with indices 0 and 1 meet and compete with each other.
* This functionality is provided with 3 functions.
* 1) First the participants arrive at the compete() function and determine which is the winner and which is the loser.
* 2) The winner proceeds to other match nodes and when it has the winning value it must return to this match node and notify the loser
* of the winning value by calling the set_winning_value() function.
* 3) The loser spins using the wait() function until the winner arrives and notifies it of the winning value.
*
* We use the following variables:
* - item[2] is an array of non-atomic variables used by the participants to deposit their values during the compete() call.
* - arrived[2] is an array of atomic boolean registers used by the participants in the compete() call to know when the other has arrived.
* - winning_value is a non-atomic variable used by the winner to deposit the winning value in the set_winning_value() function.
* - done is an atomic boolean register used to synchronize the winner and the loser. The loser spins on the done variable until
* the winner comes and sets it true. The the loser knows that the winner has stored the winning value.
*
* Steps for operation compete(i,value):
* 1. Wait for the previous round to end: spin until done is false
* 2. Deposit our value: item[i] = value
* 3. Notify that we have arrived: arrived[i] = true
* 4. Wait for the other party to arrive: wait until arrived[1-i] is true
* 5. Get item[i] and item[1-i] and determine which is the winner and which the loser.
*
* Steps for operation set_winning_value(i,value):
* 1. winning_value = value
* 2. Notify loser: done = true
*
* Steps for operation wait(i):
* 1. Wait for winner: wait until done is true
* 2. Cleanup for next round: arrived[i] = false and arrived[j] = false
* 3. Enable next round: done = false.
*
* Notice why the participants at the start of a round need to wait for done to become false. Assume that step 1 of function compete()
* did not exists and consider the following scenario.
* Participants i and j arrive at the compete call. i is the winner and j the loser. However j gets delayed.
* i returns with the winning value writes arrived[i]=false and sets done to true. Then i comes again to compete.
* Without the done variable, that is without knowing whether the loser at the previous round got the value or not,
* i will see that arrived[j] is true (erroneously) and will proceed on its own.
*
* Another think to notice is that the loser is responsible for cleaning up for both him and the winner.
* This is because of the following scenario:
* The winner comes and sets arrived[winner] = true
* The loser comes and sets arrived[loser] = true *but* doesn't see the winner
* The winner proceeds and learns the winning value and comes back to call set_winning_value().
* If the winner were responsible for reseting arrived[winner] then it could happen that
* the loser doesn't see that arrived[winner] is true in the compete() function.
*
* Notes on the memory order optimization:
*
* The following must be established:
* + At the start of a given round when two participants use the compete() function both
* see arrived[0] = arrived[1] = false. This enables them to start a new round waiting for each other.
* + When the participants arrive and meet at the compete() function both must see the value deposited by the other.
* + The loser must see the winning value deposited by the winner when it sees that done is true.
*
* 1) Ensuring that a participant sees the value deposited by its opponent in the compete() function
* Consider participant i that arrives and deposits its value using a store operation on item[i].
* It announces its presence by a store operation to arrived[i] with a value true.
* (a) The store operation on item[i] happens-before the store operation to arrived[i] with value true.
* Participant j=1-i arrives and spins on the arrived[i] variable until it reads true. Consider the load operation
* by j on the arrived[i] atomic variable that returned the value true. At this point, we must establish a synchronizes-with
* relationship between the store operation to arrived[i] with the value true by i and that particular load operation. We do this
* by using a release memory order on the store operation to arrived[i] by i and a acquire memory order on the load operation to arrived[i] by j.
* (b) The store operation on arrived[i] with value true by i synchronizes-with the load operation on arrived[i] by j that returns the value true.
*
* It follows from the fact that a synchronizes-with relationship introduces a happens-before relationship between the two threads, that
* (c) The store operation on arrived[i] with value true by i happens-before the load operation on arrived[i] by j that returns the value true.
*
* By (a),(c) and the transitivity of the happens-before relationship we conclude that:
* (d) The store operation to item[i] by i happens-before the store operation to arrived[i] with value true by i which happens-before
* the load operation on arrived[i] by j that returns the value true which happens-before the load operation on item[i] by j.
* Thus,
* (e) The store operation of item[i] by i happens-before the load operation on item[i] by j
* , and we conclude that j loads the correct value from item[i].
*
* Using the same reasoning with i and j reversed by can show that i also loads the correct value from item[j].
*
* 2) Ensuring that the loser sees the winning value.
* Assume that participant i is a loser and participant j=1-i is a winner. Participant i spins on the done atomic variable until the winner
* arrives and writes true to done.
* Consider the actions of the winner j:
* (a) The store operation on the winning_value happens-before the store operation on the done variable with value true.
* Consider the load operation by i on the done atomic variable that returned the value true. At this point, we must establish a synchronizes-with
* relationship between the store operation to done with the value true by j and that particular load operation. We do this by using
* a release memory order on the store operation to done by j and a acquire memory order on done by i.
* (b) The store operation on done with value true by j synchronizes-with the load operation on done by i that returns the value true.
*
* It follows from the fact that a synchronizes-with relationship introduces a happens-before relationship between two threads, that:
* (c) The store operation on done with value true by j happens-before the load operation on done by i that returns the value true.
*
* By (a),(c) and the transitivity of the happens-before relationship we conclude that:
* (d) The store operation on winning_value by j happens-before the store operation to done with value true by j which happens
* before the load operation of done with value true by i which happens before the load operation of winning_value by i.
* Thus,
* (e) The store operation to winning_value by j happens-before the load operation of winning_value by i.
* , and we conclude that i loads the correct value from winning_value.
*
* 3) Ensuring that at the start of each round both arrived[0] and arrived[1] are false.
* Consider the two participants i and j=1-i. When they call compete() they first spin on the done variable until it is false.
* Initially, done is false and also arrived[0] and arrived[1] are initialized to false and thus we are correct.
* Assume that at round k the claim holds.
* Consider the load operations of both i and j on the done variable that returns the value false at round k+1. The value false was deposited
* by the store operation by the loser on round k on the wait() function.
* Consider the actions of the loser thread in the wait() function:
* (a) The store to arrived[loser] with value false happens-before the store to arrived[winner] with value false which happens-before the store to done with value false by the loser.
*
* At this point, we must establish a synchronizes-with relationship between participants i and j on round k+1 and that particular store
* operation. We do this by using a release memory order on the store operation to done by the loser and a acquire memory order on the
* load operation of done at the start of the compete() function.
* (b) The store operation to done with value false by the loser of the previous round synchronizes-with the load operations on done
* that return that value false by both participants on the beginning of the current round.
*
* If follows from the fact that a synchronizes-with relationship introduces a happens-before relationship between threads, that:
* (c) The store operation to done with value false by the loser of the previous round happens-before the load operations on done
* that return that value false by both participants on the beginning of the current round.
*
* By (a), (b), (c) and the transitivity of the happens-before relationship we conclude that:
* (e) The store operation to arrived[loser] with value false by the loser of the previous round happens-before
* the store operation to arrived[winner] with value false by the loser of the previous round which happens-before the
* store operation to done with value false by the loser of the previous round which happens-before the load operations
* on done that read this value false by both participants on the start of the current value.
* Thus, the participants at the start of the current round start with both arrived[0] and arrived[1] being false.
*
* By the principle of mathematical induction, the claim holds for all rounds k.
*/
/**
 * Two participants with ids 0 and 1 can call compete() on a match node. This operation
 * returns to the participants the result of the match. If a participant is a winner then
 * it is its responsibility to return later and call set_winning_value() on this match node
 * so that to inform the loser. On the contrary, if a participant is a loser then it must await
 * on this match node for the winner to come back (via wait()).
 *
 * @param i The participant slot (0 or 1) of the caller at this match node.
 * @param key The value with which the caller competes.
 * @return result::winner or result::loser, as decided by the tie_compare functor cmp.
 *
 * Blocking behavior: spins until the previous round is fully over (done == false) and then
 * until the opponent has arrived at this node.
 */
result compete(size_type i, const key_type& key){
assert((i == 0 || i == 1) && "match expects 0 or 1 as participant index");
#if defined(NO_MEMORY_ORDER_OPTIMIZATION)
// wait for the previous round to end; this means that the loser from the previous round
// must get the winning value
while (done.load(std::memory_order_seq_cst)){}
size_type j = 1- i; // our opponent
item[i] = key;
arrived[i].store(true, std::memory_order_seq_cst); // we have arrived
// wait for the opponent to arrive
while (!arrived[j].load(std::memory_order_seq_cst)){}
// compete and determine whether we are a winner or a loser
return cmp(item[i], i, item[j], j) ? result::winner : result::loser;
#else
// wait for the previous round to end; this means that the loser from the previous round
// must get the winning value (acquire pairs with the loser's release store in wait())
while (done.load(std::memory_order_acquire)){}
size_type j = 1- i; // our opponent
item[i] = key;
// release: publishes our item[i] write to the opponent's acquire load on arrived[i] below
arrived[i].store(true, std::memory_order_release); // we have arrived
// wait for the opponent to arrive (acquire pairs with the opponent's release store above,
// so item[j] is visible to us once arrived[j] reads true)
while (!arrived[j].load(std::memory_order_acquire)){}
// compete and determine whether we are a winner or a loser
return cmp(item[i], i, item[j], j) ? result::winner : result::loser ;
#endif
}
/**
 * Combiner-aware variant of compete(): behaves exactly like compete(i, key) but, when the
 * caller wins, key is additionally overwritten with combiner(item[i], item[j]) -- the caller's
 * value combined with the opponent's value.
 *
 * (Per the original note: this is used mostly for debugging purposes and is not a
 * functionality that will be kept.)
 *
 * @param i The participant slot (0 or 1) of the caller at this match node.
 * @param key In: the value to compete with. Out (winners only): the combined value.
 * @param combiner Binary function object combiner(winner_value, loser_value).
 * @return result::winner or result::loser, as decided by the tie_compare functor cmp.
 */
template<class Combiner>
result compete(size_type i, key_type& key, Combiner combiner){
assert((i == 0 || i == 1) && "match expects 0 or 1 as participant index");
#if defined(NO_MEMORY_ORDER_OPTIMIZATION)
// wait for the previous round to end; this means that the loser from the previous round
// must get the winning value
while (done.load(std::memory_order_seq_cst)){}
size_type j = 1- i; // our opponent
item[i] = key;
arrived[i].store(true, std::memory_order_seq_cst); // we have arrived
// wait for the opponent to arrive
while (!arrived[j].load(std::memory_order_seq_cst)){}
// compete and determine whether we are a winner or a loser
result res = cmp(item[i], i, item[j], j) ? result::winner : result::loser ;
if (res == result::winner){
key = combiner(item[i], item[j]);
}
return res;
#else
// wait for the previous round to end; this means that the loser from the previous round
// must get the winning value (acquire pairs with the loser's release store in wait())
while (done.load(std::memory_order_acquire)){}
size_type j = 1- i; // our opponent
item[i] = key;
// release: publishes our item[i] write to the opponent's acquire load on arrived[i] below
arrived[i].store(true, std::memory_order_release); // we have arrived
// wait for the opponent to arrive (acquire makes item[j] visible once arrived[j] is true)
while (!arrived[j].load(std::memory_order_acquire)){}
// compete and determine whether we are a winner or a loser
result res = cmp(item[i], i, item[j], j) ? result::winner : result::loser ;
if (res == result::winner){
key = combiner(item[i], item[j]);
}
return res;
#endif
}
// Called by a winner to deposit a winning value.
//
// @param i The participant slot (0 or 1) of the winner (only checked by the assert).
// @param value The value that won this round; the loser will retrieve it from wait().
void set_winning_value(size_type i, const key_type& value){
assert((i == 0 || i == 1) && "match expects 0 or 1 as participant index");
#if defined(NO_MEMORY_ORDER_OPTIMIZATION)
winning_value = value;
// notify loser that we are done
done.store(true, std::memory_order_seq_cst);
#else
winning_value = value;
// notify loser that we are done; the release store publishes winning_value to the
// loser's acquire load on done inside wait()
done.store(true, std::memory_order_release);
#endif
}
// Called by a loser to wait for the winner to deposit the winning value.
//
// Besides fetching the winning value, the loser is also responsible for resetting this
// match node (arrived[0], arrived[1] and done back to false) so the next round may start.
//
// @param i The participant slot (0 or 1) of the loser.
// @return The winning value deposited by the winner via set_winning_value().
key_type wait(size_type i){
assert((i == 0 || i == 1) && "match expects 0 or 1 as participant index");
size_type j = 1-i; // our opponent
#if defined(NO_MEMORY_ORDER_OPTIMIZATION)
// wait for winner to come
while (!done.load(std::memory_order_seq_cst)){}
key_type win = winning_value;
// reset our participant status for that match node for the next round
// note that in the next round there may be another thread in our place
arrived[i].store(false, std::memory_order_seq_cst);
arrived[j].store(false, std::memory_order_seq_cst);
// enable the next round (this round is over!)
done.store(false, std::memory_order_seq_cst);
return win;
#else
// wait for winner to come; acquire pairs with the winner's release store in
// set_winning_value(), making winning_value visible to us
while (!done.load(std::memory_order_acquire)){}
key_type win = winning_value;
// reset our participant status for that match node for the next round
// note that in the next round there may be another thread in our place
// relaxed suffices here: the release store on done below orders these writes before
// any next-round participant that observes done == false (see proof sketch above)
arrived[i].store(false, std::memory_order_relaxed);
arrived[j].store(false, std::memory_order_relaxed);
// enable the next round (this round is over!)
done.store(false, std::memory_order_release);
return win;
#endif
}
private:
/**
 * False-Sharing Effects:
 *
 * We do not want interference from other match nodes so we could only place each match node at a cache-line and say that we do not care
 * about cache-coherent traffic for one match node. However, one can devise situations where by placing each individual data on its own cache-line
 * we can save some cache-misses and invalidations.
 */
alignas(CACHE_LINE_SIZE) key_type item[2]; // used by the participants to deposit their values (indexed by participant slot)
alignas(CACHE_LINE_SIZE) key_type winning_value; // the winning value for a given round (written by the winner, read by the loser)
alignas(CACHE_LINE_SIZE) std::atomic<bool> arrived[2]; // used to signal which participants have arrived at this match node
alignas(CACHE_LINE_SIZE) std::atomic<bool> done; // used to indicate the winning value has been placed by the winner
tie_compare cmp; // used to produce the result of the match
};
using match_result = typename match::result; // shorthand for the outcome type of a single match
public:
/**
 * Default combiner: keeps the winner's value unchanged and ignores the loser's value.
 * Provided for use with the combiner-aware compete() overload.
 */
struct default_combiner{
key_type operator()(const key_type& winner, const key_type& loser) const{
return winner;
}
};
/**
 * Create a tournament tree able to host _num_participants competing threads.
 *
 * @param _num_participants Total number of participants; must be at least two.
 * @throws std::invalid_argument when fewer than two participants are requested.
 */
tournament_tree(size_type _num_participants) : num_participants{_num_participants}{
if (num_participants < 2){
throw std::invalid_argument("invalid number of participants in tournament_tree constructor");
}
// A tournament over n participants needs n-1 internal match nodes, but node ids are
// counted from 1 here, so n nodes are allocated and slot 0 is simply left unused.
match_tree.reset(new match[num_participants]);
}
/**
 * Returns the number of participants allowed to access this tournament tree,
 * i.e. the value passed to the constructor.
 */
size_type participants() const{
return num_participants;
}
/**
 * A participant with id in the range [0,participants()), competes in this tournament tree with item as its value.
 * The function returns the result of the competition.
 *
 * The call proceeds in three phases:
 *   1. Competition: climb from our leaf toward the root, competing at every node until we lose or win the root.
 *   2. Computation: the overall winner publishes its value; losers block until notified.
 *   3. Distribution: walk back down our path, releasing the losers waiting at the nodes we won.
 *
 * @param id The identifier of the participant.
 * @param item The value with which the participant competes.
 * @return The result of the competition (the value that won at the root).
 */
key_type compete(size_type id, const key_type& item){
// Check that we have been given a valid id
assert(valid_id(id) && "invalid id in tournament_tree compete");
// In this implementation we use counting from 1 but we accept ids in the range [0,num_participants)
++id;
// Remember our initial participant id cause it will be needed in the distribution phase.
size_type initial_id = id + num_participants - 1;
key_type compete_result;
size_type node_id = initial_id;
size_type p_id;
size_type level = 1;
match_result result;
// Competition Phase
compete(p_id, node_id, level, result, item);
// Computation Phase
compute(result, node_id, p_id, level, compete_result, item);
// Distribution Phase
distribute(initial_id, compete_result, level);
return compete_result;
}
/**
 * A participant with id in the range [0,participants()), competes in this tournament tree with item as its value.
 * The function returns the result of the competition.
 *
 * Identical to compete(id, item) except that the overall winner invokes exit_callback
 * after the winner has been determined but before the other participants are notified.
 *
 * @param id The identifier of the participant.
 * @param item The value with which the participant competes.
 * @param exit_callback A function object to call when the winner has been determined but before all threads have been notified.
 * @return The result of the competition (the value that won at the root).
 */
template<class ExitCallback>
key_type compete(size_type id, const key_type& item, const ExitCallback& exit_callback){
// Check that we have been given a valid id
assert(valid_id(id) && "invalid id in tournament_tree compete");
// In this implementation we use counting from 1 but we accept ids in the range [0,num_participants)
++id;
// Remember our initial participant id cause it will be needed in the distribution phase.
size_type initial_id = id + num_participants - 1;
key_type compete_result;
size_type node_id = initial_id;
size_type p_id;
size_type level = 1;
match_result result;
// Competition Phase
compete(p_id, node_id, level, result, item);
// Computation Phase (the callback-aware overload of compute())
compute(result, node_id, p_id, level, compete_result, item, exit_callback);
// Distribution Phase
distribute(initial_id, compete_result, level);
return compete_result;
}
/**
 * A participant with id in the range [0,participants()) competes in this tournament tree with item as its value.
 * In addition to the other compete() overloads, every match the participant wins combines its value with the
 * loser's value through the supplied combiner (see match::compete(i, key, combiner)), so item may be updated
 * in place on the way up.
 *
 * @param id The identifier of the participant.
 * @param item The value with which the participant competes; updated with combined values after each won match.
 * @param exit_callback A function object to call when the winner has been determined but before all threads have been notified.
 * @param combiner Binary function object combiner(winner_value, loser_value) producing the winner's new value.
 * @return The result of the competition (the value that won at the root).
 */
template<class ExitCallback, class Combiner>
key_type compete(size_type id, key_type& item, const ExitCallback& exit_callback, Combiner combiner){
// Check that we have been given a valid id
assert(valid_id(id) && "invalid id in tournament_tree compete");
// In this implementation we use counting from 1 but we accept ids in the range [0,num_participants)
++id;
// Remember our initial participant id cause it will be needed in the distribution phase.
size_type initial_id = id + num_participants - 1;
key_type compete_result;
size_type node_id = initial_id;
size_type p_id;
size_type level = 1;
match_result result;
// Competition Phase. This loop cannot be shared with the private compete() helper because it
// must call the combiner-aware overload of match::compete(), which may update item per win.
while (true){
p_id = node_id&1; // = node_id%2 - our participant id (0 or 1) to compete at the node
// bithack reference: http://graphics.stanford.edu/~seander/bithacks.html#ModulusDivisionEasy
node_id >>= 1; //node_id = node_id/2 the node at which we will compete
result = match_tree[node_id].compete(p_id, item, combiner);
if (result == match_result::loser || is_root(node_id)){
// We stop the competition phase either when we lose or when we reach the root node
break;
}
++level;
}
// Computation Phase: identical to the other overloads, so reuse the helper instead of
// duplicating its body here.
compute(result, node_id, p_id, level, compete_result, item, exit_callback);
// Distribution Phase: likewise identical, reuse the helper.
distribute(initial_id, compete_result, level);
return compete_result;
}
private:
size_type num_participants; // total number of participants this tree was built for
std::unique_ptr<match[]> match_tree{nullptr}; // the tournament tree (nodes indexed from 1; slot 0 unused)
/**
 * Returns the node identifier for the participant with the given id when it reaches level k.
 *
 * @param id The participant's initial (leaf) node identifier.
 * @param k The level of interest (k = 1 is the first level of internal match nodes).
 * @return The identifier of the node the participant visits at level k.
 */
size_type node_id_at_level(size_type id, size_type k) const{
/**
 * Notice that for each level when a participant competes at a node it calculates the node's id
 * using the formula node_id/2 where node_id is the previous node_id the participant had. Thus, for each
 * level we keep dividing by 2 and since our initial node id (before level=1) is id, when we are at level k
 * we have divided our initial id by 2 k times total, thus the formula is id/(2^k).
 *
 * Also, since the formula includes a division by 2 we can use a right logical shift with k. Since size_type is
 * unsigned this is valid.
 */
return (id >> k);
}
/**
 * Returns true if the given id is within the range of allowable participant identifiers and false if not.
 *
 * @param id The participant identifier to validate; valid ids lie in [0, num_participants).
 */
bool valid_id(size_type id) const{
// size_type is unsigned (see node_id_at_level()), so the former `0 <= id` test was a
// tautology that compilers flag with -Wtautological-compare; only the upper bound matters.
return id < num_participants;
}
/**
 * Tells whether a node identifier denotes the root of the tournament tree.
 * Nodes are numbered starting from 1, so the root is always node 1.
 */
bool is_root(size_type id) const{
constexpr size_type root_node_id = 1;
return id == root_node_id;
}
/**
 * Competition phase. The participants traverse the tree from their associated leaf node up to the root and
 * compete with the other participants that arrive at the nodes.
 *
 * All parameters except item are in/out and carry the traversal state to the later phases:
 * on return, node_id is the node where the caller stopped (where it lost, or the root it won),
 * p_id is the caller's participant slot (0 or 1) at that node, level is that node's level, and
 * result tells whether the caller won or lost there.
 */
void compete(size_type& p_id, size_type& node_id, size_type& level, match_result& result, const key_type& item){
while (true){
p_id = node_id&1; // = node_id%2 - our participant id (0 or 1) to compete at the node
// bithack reference: http://graphics.stanford.edu/~seander/bithacks.html#ModulusDivisionEasy
node_id >>= 1; //node_id = node_id/2 the node at which we will compete
result = match_tree[node_id].compete(p_id, item);
if (result == match_result::loser || is_root(node_id)){
// We stop the competition phase either when we loose or when we reach the root node
break;
}
++level;
}
}
/**
 * Computation phase. The overall winner publishes its value at the node where it stopped
 * (the root); every loser blocks on its final match node until the winner's value arrives.
 */
void compute(match_result result, size_type node_id, size_type p_id, size_type& level, key_type& compete_result,
const key_type& item){
if (result == match_result::winner){
// The overall winner naturally won with its own value; deposit it for the loser.
compete_result = item;
match_tree[node_id].set_winning_value(p_id, compete_result);
} else {
// A loser blocks on its final match node until the winner deposits the result.
compete_result = match_tree[node_id].wait(p_id);
}
// This level is finished; the distribution phase resumes one level below.
--level;
}
/**
 * Computation phase (callback variant). Behaves like compute() above, except that the overall
 * winner invokes exit_callback first -- i.e. after the winner is known but before any other
 * participant has been notified of the result.
 */
template<class ExitCallback>
void compute(match_result result, size_type node_id, size_type p_id, size_type& level, key_type& compete_result,
const key_type& item, const ExitCallback& exit_callback){
if (result == match_result::winner){
// Let the winner run its callback before anyone else learns the outcome.
exit_callback();
// The overall winner naturally won with its own value; deposit it for the loser.
compete_result = item;
match_tree[node_id].set_winning_value(p_id, compete_result);
} else {
// A loser blocks on its final match node until the winner deposits the result.
compete_result = match_tree[node_id].wait(p_id);
}
// This level is finished; the distribution phase resumes one level below.
--level;
}
/**
 * Distribution phase for the competition. The participant retraces, from top to bottom, the
 * path it climbed during the competition phase, depositing the winning value at every node
 * below the one where it stopped; this releases the losers waiting at those nodes.
 *
 * @param initial_id The participant's leaf node identifier remembered before the climb.
 * @param compete_result The value that won the tournament.
 * @param level The level just below the node where the participant stopped.
 */
void distribute(size_type initial_id, const key_type& compete_result, size_type level){
// Walk the levels in reverse order, down to the first level of internal nodes.
for (; level >= 1; --level){
// The node visited at this level is recomputable from the leaf id; see node_id_at_level().
const size_type node_id = node_id_at_level(initial_id, level);
// Our participant slot at that node is the parity of the node id we held one level closer
// to the leaf -- exactly how p_id (node_id % 2) was computed on the way up.
// bithack reference: http://graphics.stanford.edu/~seander/bithacks.html#ModulusDivisionEasy
const size_type p_id = node_id_at_level(initial_id, level - 1) & 1;
match_tree[node_id].set_winning_value(p_id, compete_result);
}
}
};
} // namespace concurrent
#endif
<file_sep>/thread.hpp
/*
* thread.hpp
*
* Created on: Jan 5, 2014
* Author: <NAME> (original code)
*/
#ifndef THREAD_HPP_
#define THREAD_HPP_
#include "task.hpp"
/*! \defgroup SIMT Stackless simulation threads and the SIMT macros
*
* To define a stackless simulation thread, you should subclass \c SimThread and define
* a \c run() function.
* Inside the definition of the run() function of a SimThread's subclass,
* you should use the following macros:
* \li \c #SIMT_BEGIN: mark the start of the thread's body.
* \li \c #SIMT_END: mark the end of the thread's body.
* \li \c #SIMT_SLEEP: put the thread to sleep
* \li \c #SIMT_STOP: finish execution of the thread, this is the same as reaching
* the \c #SIMT_END line.
* \li \c #SIMT_BUSY(t): pause execution until simulation time advances by \c t steps
* (to \c now()+t).
* \li \c #SIMT_SUB: define a subroutine inside \c run().
 * \li \c #SIMT_CALL: call a subroutine defined inside \c run().
* \li \c #SIMT_RETURN: return from a subroutine call.
*
 * These macros are instrumental in the coding of SimThread run functions.
* They are modelled after protothreads [Dunkels et al, SenSys2006]
* (see http://www.sics.se/~adam/pt/). You should
* probably read this paper, but we also provide a brief explanation here,
* through a (rather artificial) example:
*
* \code
* struct MyThread : SimThread
* {
* unsigned int x,y; // initially 0, always x<=y
*
* void incY(unsigned int a) { y+=a; wakeup(); }
*
* void run() {
* SIMT_BEGIN;
* while(1) {
* if(x>y)
* SIMT_STOP;
* while(x<y) {
* SIMT_BUSY(100);
* x++;
* cout << "increment x\n";
* }
* SIMT_SLEEP;
* }
* SIMT_END;
* }
* };
* \endcode
*
* The above code defines a SimThread subclass. When instances of MyThread are
* spawned, the run() method is invoked. We see that #SIMT_BEGIN and #SIMT_END
* define a block containing the body of the whole function. Inside this block,
* the macros #SIMT_STOP, #SIMT_BUSY and #SIMT_SLEEP are used to control execution.
* Whenever one of these macros is reached, execution is paused and
* the simulation scheduler is called to resume some other SimTask.
* When this SimThread is resumed, execution seems to resume at the point of the
* macro's "return". However,
* appearances can be deceiving. To better
* appreciate what is going on, we show the code the compiler sees after
* expansion of these macros by the preprocessor:
*
* \code
* struct MyThread : SimThread
* {
* unsigned int x,y; // initially 0, always x<=y
*
* void incY(unsigned int a) { y+=a; wakeup(); }
*
* void run() {
* switch(__lc) { case 0:;
* while(1) {
* if(x>y)
* { this->stop(); return; }
* while(x<y) {
* { this->wait_for(100);
* this->__lc = __LINE__; return; case __LINE__: ;}
* x++;
* cout << x << endl;
* }
* { this->sleep();
* this->__lc = __LINE__; return; case __LINE__: ;}
* }
* this->stop(); };
* }
* };
* \endcode
*
 * Are you confused? Well, the SimThread class has an integer attribute __lc,
* (meaning location). This attribute is used by the \c switch statement
* every time \c run() is called, to move execution inside the body of the
* function, at the appropriate \c case labels. The fact that this implies
* jumping inside the body of loops (or other constructs!) is not a
* problem for the C++ compiler. This technique is sometimes called Duff's device.
*
* In this manner we can get the impression of a context switch, without actually
* maintaining a real context or a separate stack for this thread.
* Of course, this technique has several
* limitations, compared to real threads:
 * \li Any local variables will not retain their value between successive calls. Instead,
* you should define attributes of your thread class to hold such variables that you
* wish to maintain. Note however, that they should be initialized after
* \c SIMT_BEGIN, or else every call to run() will change their value.
* \li You cannot suspend a thread (using #SIMT_BUSY, #SIMT_SLEEP or #SIMT_STOP) from
* a nested function call (called by run()), and return to that other function.
*
* On the other hand, SimThreads have two huge advantages:
* -# they don't consume any memory for a stack, and
* -# "context switching" is extremely fast
*
* Thus, a simulation can have tens of millions of SimThreads running, without exhausting memory.
* Contrast this to \em any stack based thread implementation,
* where a stack has to be at least 4 kbytes (that is, if you want to live dangerously,
* more like 64 kbytes to be on the safe side). This is at least 100 times more
* memory per stack/thread!
*
* Subroutines can be used to encapsulate common functionality inside a \c run() method.
* They are discussed in the tutorial, in section \ref simt_subs.
*
* N.B.: One might think that the limitations of SimThreads are too severe, and only the
* most trivial logic can be implemented with them, but, in my experience, even for
* relatively complex task logic, SimThreads are quite adequate and can express the task
* logic lucidly and succinctly.
*
*/
/*@{*/
/**
 * \brief The type used to store the execution point of a SimThread.
 *
 * This is the type of the \c __lc attribute and of the subroutine-stack entries;
 * it holds \c __LINE__ values, so \c unsigned \c short limits resumption points
 * to line numbers below 65536.
 *
 * Normally, this macro should not appear in user code.
 */
#define SIMT_STATE_TYPE unsigned short
/**
 * \brief Begin body of a thread.
 *
 * Mark the beginning of a SimThread's run() function.
 * Expands to the opening of the dispatch \c switch on \c __lc (Duff's device),
 * preceded by the \c _reenter_switch label that \c #SIMT_RETURN jumps back to.
 * This macro should appear on a line of its own.
 */
#define SIMT_BEGIN \
goto _reenter_switch; _reenter_switch: ; \
switch(this->__lc) { case 0:
/**
 * \brief End body of a thread.
 *
 * Mark the end of a SimThread's run() function: stops the thread when execution
 * falls off the end of the body, and closes the dispatch \c switch opened by
 * \c #SIMT_BEGIN.
 * This macro should appear on a line of its own.
 */
#define SIMT_END this->stop(); }
/**
 * \brief Yield execution of this thread.
 *
 * This macro will set the point of resumption where it appears and will
 * stop execution of the current thread. Notice that, under normal execution,
 * unless the wakeup time of this thread has been changed, the
 * function will execute again immediately; thus this macro will not
 * seem to have an effect.
 *
 * This macro should probably not appear verbatim in user code, unless you know what
 * you are doing. It could however be used to define additional macros, besides the SIMT_*
 * family of macros.
 */
#define SIMT_YIELD { this->__lc=__LINE__; return; case __LINE__:; }
/**
 * \brief Thread is put to sleep, then yields (resumption point is set here).
 */
#define SIMT_SLEEP { this->sleep(); SIMT_YIELD }
/**
 * \brief Thread waits for \c t timesteps (in simulation time), then yields.
 */
#define SIMT_BUSY(t) { this->wait_for(t); SIMT_YIELD }
/**
 * \brief Thread is stopped; run() returns immediately and is not resumed.
 */
#define SIMT_STOP { this->stop(); return; }
/**
 * \brief Declare a local subroutine in a SimThread.
 *
 * This macro declares the following block of code as
 * a local subroutine inside the
 * \c run() method of a SimThread.
 * The \c if(false) guard means the labelled block is skipped during normal
 * sequential execution and only entered through the \c goto emitted by
 * \c #SIMT_CALL.
 *
 * For example:
 * \code
 * void run() {
 *    SIMT_BEGIN;
 *    SIMT_SUB(print_message) {
 *        cout << "Hello world\n" << endl;
 *        SIMT_RETURN;
 *    }
 *
 *    while(true) {
 *        SIMT_CALL(print_message);
 *        SIMT_SLEEP;
 *    }
 *    SIMT_END;
 * }
 * \endcode
 */
#define SIMT_SUB(subroutine_name) if(false) subroutine_name:
/**
 * \brief Call a local subroutine in a SimThread.
 *
 * Pushes the return location on \c _substack and jumps to the subroutine label.
 */
#define SIMT_CALL(subroutine_name)\
{ _substack.push(__LINE__); goto subroutine_name; case __LINE__:; }
/**
 * \brief Return from a local subroutine in a SimThread.
 *
 * Pops the caller's location into \c __lc and re-enters the dispatch switch
 * through the \c _reenter_switch label emitted by \c #SIMT_BEGIN.
 */
#define SIMT_RETURN { __lc=_substack.pop(); goto _reenter_switch; }
/**
 * \brief Declare a bounded stack for local subroutines in a SimThread.
 *
 * This macro instantiates a space-efficient local subroutine stack,
 * when the maximum call depth of local subroutines in a SimThread
 * is known to be \c n (or less). If the maximum call depth is unknown,
 * or greater than around 20, \c SIMT_UNBOUNDED_STACK should be used.
 *
 * Either this macro, or \c SIMT_UNBOUNDED_STACK, but not both,
 * must appear inside the
 * body of every SimThread subclass which employs local subroutines in
 * its \c run() method.
 *
 */
#define SIMT_BOUNDED_STACK(n) SIMT_bounded_stack<n> _substack;
/**
 * \brief Declare an unbounded stack for local subroutines in a SimThread.
 *
 * This macro instantiates a local subroutine stack.
 *
 * When the maximum call depth of local subroutines in a SimThread
 * is known to be less than around 20, SIMT_BOUNDED_STACK should be used.
 *
 * Either this macro, or \c SIMT_BOUNDED_STACK, but not both,
 * must appear inside the
 * body of every SimThread subclass which employs local subroutines in
 * its \c run() method.
 *
 */
#define SIMT_UNBOUNDED_STACK SIMT_unbounded_stack _substack;
/**
 * \brief A stack type instantiated by SIMT_BOUNDED_STACK.
 *
 * A fixed-capacity LIFO of resumption points. No bounds checking is performed:
 * the caller guarantees at most \c n pushes are outstanding at any time.
 */
template <size_t n>
struct SIMT_bounded_stack {
SIMT_STATE_TYPE* top;     // points at the slot the next push will fill
SIMT_STATE_TYPE vec[n];   // fixed-size storage for the return locations
inline SIMT_bounded_stack() : top(vec) { }
inline void push(SIMT_STATE_TYPE addr) {
*top = addr;
++top;
}
inline SIMT_STATE_TYPE pop() {
--top;
return *top;
}
};
/**
 * \brief A stack type instantiated by SIMT_UNBOUNDED_STACK.
 *
 * A growable LIFO of resumption points, backed by std::vector.
 */
struct SIMT_unbounded_stack {
std::vector<SIMT_STATE_TYPE> vec; // grows on demand with the subroutine call depth
inline void push(SIMT_STATE_TYPE addr) {
vec.push_back(addr);
}
inline SIMT_STATE_TYPE pop() {
SIMT_STATE_TYPE addr = vec.back();
vec.pop_back();
return addr;
}
};
namespace pdes{
/**
 * \brief %Stackless simulation threads.
 *
 * This subclass of task
 * is an abstract base class for stackless simulation threads.
 * See \ref SIMT for documentation on how to use this class.
 *
 * There are no additional methods or attributes beyond those
 * inherited by \c SimTask.
 *
 */
struct thread : public task{
protected:
// Resumption point for the SIMT macros: 0 initially (top of run()), then the __LINE__
// recorded by the last SIMT_YIELD-based macro that suspended run(). The SIMT_* macros
// access it literally as this->__lc, so this (reserved-looking) double-underscore name
// cannot be renamed here without changing the macros as well.
SIMT_STATE_TYPE __lc;
public:
/**
 * \brief Constructor. Execution starts at location 0, i.e. the top of run().
 */
thread() : __lc(0) { }
};
} // namespace pdes
#endif /* THREAD_HPP_ */
<file_sep>/event_pool.hpp
#ifndef EVENT_POOL_HPP_
#define EVENT_POOL_HPP_
#include "detail/event_pool_generator.hpp"
namespace pdes{
// forward declaration of task: the aliases below only need the type name, not its definition
class task;
// Instantiate the generic event-pool machinery for pdes::task and expose convenient aliases.
using event_pool_generator = detail::event_pool_generator<task>;
using event_pool = typename event_pool_generator::event_pool;
using event_pool_handle = typename event_pool_generator::event_pool_handle;
} // namespace pdes
#endif /* EVENT_POOL_HPP_ */
<file_sep>/engine.cpp
#include "engine.hpp"
namespace pdes{
// the singleton instance for class engine
// NOTE(review): defined null here; presumably set by an accessor in engine.hpp -- verify there.
engine* engine::_instance = nullptr;
// Define the thread-local variables here.
// Each worker thread keeps its own copy of this scheduling/randomness state.
thread_local engine::time_type engine::context_threshold;
thread_local engine::time_type engine::event_threshold;
thread_local engine::context_list_type engine::local_contexts;
thread_local engine::context_list_type engine::committed_contexts;
thread_local std::random_device engine::rd;
thread_local xorshift engine::gen;
thread_local std::uniform_int_distribution<> engine::dis;
} // namespace pdes
| c54b2004cd34d2c8540f71487b151b3db73a57ae | [
"Markdown",
"C++"
] | 18 | C++ | anastasiossouris/PDES-pspp | ac7ab348811edcd7506fde31a8277d26f2a93622 | 3a3da28bab40658d381a9a4247a2e4086783eb64 |
refs/heads/master | <file_sep>Sparse Checkout
===============
1. Clone the whole repo
2. cd into the repo
3. run "git config core.sparsecheckout true"
4. run "echo <directory_you_want> >> .git/info/sparse-checkout" for each folder you want
5. run "git read-tree -m -u HEAD"
scripting_projects
==================
### [dyndns_iptables/](./dyndns_iptables/dyndns_hosts_instructions.txt)
The Linux firewall (IPTables) allows connections by IP address. IP addresses change for home users and those traveling around. Dynect and others have services called Dynamic DNS that get updated by a client on the user's machine or router. This script pulls those DNS entries, checks them against what is currently in the firewall, and then updates the firewall if necessary.
### [route53_automation/](./route53_automation/awscli_on_pfsense.md)
Mostly just automating updating the IP address in a DNS record on Route53 (Amazon AWS DNS service). I use it as a way to have my router update the DNS record I use to find my router when I'm too cheap to pay for a static IP or if it's not offered (as in the case of a residential ISP). Often paired with the IPtables scripts above to allow a network/host to trust an IP that is dynamic (like for VPN or phones at a remote worker's house).
### aws_cli_mfa_tool.sh
Automates using MFA credentials over the AWS CLI.
<file_sep>#!/bin/bash
# == AWS ASSUME ROLE CLI TOOL ==
# Usage:
# To run this you need---
# Linux or Windows with WSL:
# `sudo apt-get update`
# `sudo apt-get install jq python-pip`
# `pip install awscli`
# `aws configure` (just pick all the defaults, except for your keys)
# Mac/OSX:
# #Setup Homebrew
# `brew update`
# `brew install python3 jq`
# `pip3 install --upgrade pip setuptools wheel awscli`
# `aws configure` (just pick all the defaults, except for your keys)
# In a script that needs to use this tool just add:
# `SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"` < if you put it in the same folder to find the folder
# `source ${SCRIPTDIR}/aws_cli_mfa_tool.sh` < call this script from wherever it is
#        `aws_cli_assume_role` < activate the function from this tool if you need to run it again later (happens automatically the first time).
# Process:
# 1. It checks if the parameter passed is a role ARN or a friendly name
# 2. If it was a friendly name, the ARN is looked up in the role_defs file.
# 3. It attempts to get role temp creds using the supplied ARN from #1 or looked up in #2.
# 4. It parses and uses the creds returned.
#== Configurable Vars ==
# Specify your role's friendly name (looked up in ROLE_DEFS_FILE) or a full role ARN
ROLE=${1:-'~~~~'}
# How long the MFA session will be good for in seconds (how long you can use the cached creds)
# NOTE(review): not referenced by aws_cli_assume_role below -- confirm whether it is still needed
ROLE_DURATION=43200
# Where it will read role friendly names (one 'friendly_name|arn' entry per line)
ROLE_DEFS_FILE="${HOME}/.aws/role_defs"
# Ensure jq, pip and the AWS CLI are installed, installing them on Ubuntu where possible.
# (Function name spelling is kept as-is for compatibility with existing callers.)
check_dependancies () {
  # `command -v` is the portable way to test for a command: on many systems `which`
  # prints nothing (rather than "not found") for a missing command, so the previous
  # substring check never triggered.
  if ! command -v jq >/dev/null 2>&1 || ! command -v pip >/dev/null 2>&1; then
    if grep -q "Ubuntu" /etc/issue 2>/dev/null; then
      echo "Installing 'jq' and 'python-pip'..."
      sudo apt-get update
      sudo apt-get install jq python-pip
    else
      echo "The packages jq and python-pip are required for this script."
      # Exit non-zero: this is a failure, and callers/scripts should see it as one.
      exit 1
    fi
  fi
  if ! command -v aws >/dev/null 2>&1; then
    echo "Installing the pip package AWS CLI ('awscli')..."
    echo "You might need to run 'pip install --upgrade pip'"
    pip install awscli
    echo ""
    echo "Time to configure awscli. Please choose the defaults for everything (except your keys)"
    aws configure
  fi
}
# Make sure the requirements are installed as soon as the script is sourced/executed
check_dependancies
# Resolve a role ARN (directly, or via the friendly-name file), assume it with STS,
# and export the temporary credentials into the current shell environment.
aws_cli_assume_role () {
  if [[ "${ROLE}" = *"arn:aws:iam"* ]]; then
    ROLE_ARN=${ROLE}
  else
    echo "Specified value is not an ARN, checking 'friendly name' list..."
    # Entries in the defs file look like 'friendly_name|arn'; the '|' is literal in grep BRE.
    FRIENDLY_REGEX="^${ROLE}|"
    ROLE_ARN=$(grep "${FRIENDLY_REGEX}" "${ROLE_DEFS_FILE}" | cut -d'|' -f2)
  fi
  if [[ "${ROLE_ARN}" == '' ]]; then
    echo "ERROR: no role for friendly name found. Please add to '${ROLE_DEFS_FILE}' as 'friendly_name|arn'"
    # This script is documented to be sourced, so 'exit' here would kill the caller's
    # shell -- and the previous 'exit 0' reported success on failure. Return instead.
    return 1
  else
    echo "Role ARN to assume: '${ROLE_ARN}'"
  fi
  UNXTM=$(date +%s)
  ROLE_CREDS=$(aws sts assume-role --role-arn "${ROLE_ARN}" --role-session-name "cli-${UNXTM}")
  # echo "ROLE_CREDS='${ROLE_CREDS}'"
  TEMP_AWS_ACCESS_KEY_ID=$(echo "${ROLE_CREDS}" | jq -r '.Credentials.AccessKeyId')
  TEMP_AWS_SECRET_ACCESS_KEY=$(echo "${ROLE_CREDS}" | jq -r '.Credentials.SecretAccessKey')
  TEMP_AWS_SESSION_TOKEN=$(echo "${ROLE_CREDS}" | jq -r '.Credentials.SessionToken')
  TEMP_AWS_SESSION_EXP=$(echo "${ROLE_CREDS}" | jq -r '.Credentials.Expiration')
  echo "TEMP_AWS_ACCESS_KEY_ID='${TEMP_AWS_ACCESS_KEY_ID}'"
  # Secret values are intentionally not echoed.
  echo "TEMP_AWS_SESSION_EXP='${TEMP_AWS_SESSION_EXP}'"
  # Clear any stale credentials before exporting the fresh ones.
  unset AWS_ACCESS_KEY_ID
  unset AWS_SECRET_ACCESS_KEY
  unset AWS_SESSION_TOKEN
  export AWS_ACCESS_KEY_ID=${TEMP_AWS_ACCESS_KEY_ID}
  export AWS_SECRET_ACCESS_KEY=${TEMP_AWS_SECRET_ACCESS_KEY}
  export AWS_SESSION_TOKEN=${TEMP_AWS_SESSION_TOKEN}
}
aws_cli_assume_role
<file_sep># Installing AWSCLI on Pfsense
1. Install packages _cron_ and _sudo_ in the web UI package manager
- _sudo_ will allow you to run as root so you can install things
- _cron_ will let you schedule running scripts regularly
1. Use the sudo package to allow yourself to sudo
1. Add an ssh key to your pfsense account
1. `ssh username@routerip` to get into the pfsense shell
1. `sudo su -` to switch to root
1. `python -m ensurepip` installs pip python package manager
1. `pip install --upgrade pip` upgrades pip to current version
1. `pip install awscli` installs the awscli
1. `aws --version` to ensure it installed properly
1. `aws configure` to add credentials. Be careful with the default zone thing, it doesn't check it and if you put in an invalid one the cli won't work. Usually you can leave it blank.
- __NOTE:__ for security reasons, I would make an IAM account that only has access to read and write records for Route53 in the zone you want. It's safer that way in case someone gains access to your router.
- here is a decent IAM policy that restricts the user to just reading/writing recordsets in Route53. It will probably be OK for home stuff... but in a business I would definitely put the ARN of the specific hosted zone in "Resource" (like `arn:aws:route53:::hostedzone/[HOSTED ZONE ID]`) and maybe even look into locking it down to specific records using conditions.
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets"
],
"Resource": "*"
}
]
}
```
## General PfSense stuff
PfSense uses /bin/sh as the default shell for root and something else for other accounts. It doesn't have bash by default and some bash scripts will fail on these cut-down default shells. Also, the only text editor it comes with is vi, which I hate and gives errors when I try to install vim, so I usually install nano.
- to install bash run `pkg install bash` as root
- to install nano run `pkg install nano` as root
<file_sep>#!/usr/bin/python
import os
import csv
import socket
# User-changeable Vars
iptables_file = '/etc/iptables.up.rules'  # iptables config file rewritten in place
update_ip_tables_file = False  # set True to also rewrite the iptables rule on IP change
# Application-controlled Vars
csv_file = './dyndns_hosts.csv'  # cached hostname -> IP mapping
lock_file = './dyndns_hosts.lock'  # presence of this file prevents concurrent runs
something_changed = False  # set when any host's IP changed this run
host_col = 0  # CSV column index of the hostname
ip_col = 1  # CSV column index of the IP address
class bcolors:
    """ANSI terminal colour escape codes used for log output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets the terminal colour to the default
def readHostList():
    """Return the hostname -> IP address mapping stored in the CSV file.

    Reads the module-level ``csv_file`` path; columns are selected by the
    module-level ``host_col`` / ``ip_col`` indexes.
    """
    host_list = {}
    # 'with' closes the file handle promptly instead of leaking it until GC.
    with open(csv_file, 'rb') as f:
        reader = csv.reader(f, delimiter=',', quotechar='"')
        for row in reader:
            host_list[row[host_col]] = row[ip_col]
    return host_list
def getTextOutput(cmd):
    """Run cmd in a shell and return its combined stdout/stderr as text,
    with a single trailing newline stripped."""
    # Bug fix: the original opened the pipe twice, leaking the first handle,
    # and never closed the second one.
    pipe = os.popen(cmd + ' 2>&1', 'r')
    text = pipe.read()
    pipe.close()
    if text[-1:] == '\n':
        text = text[:-1]
    return text
def saveHostList():
    """Write the module-level host_list dictionary back to the CSV file."""
    # 'with' ensures the file is flushed and closed even if a write fails.
    with open(csv_file, 'wb') as f:
        writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for hostname in host_list:
            writer.writerow([hostname, host_list[hostname]])
def updateIPtablesEntry(hostname,ip_address):
"""update single entry in IPTables config file."""
# make sure the address is valid
if addressIsValid(ip_address):
#print "updating iptables file entry for " + hostname + " to " + ip_address #DEBUG
pass
else:
print bcolors.FAIL + "IP address (" + ip_address + ") is invalid. Cannot update iptables" + bcolors.ENDC
return False
# find out where in the file the comment for the rule is located
comment_line = getTextOutput("grep -n '# " + hostname + "' " + iptables_file + " | cut -d':' -f1")
# check to make sure the comment was found
if comment_line == '':
print bcolors.WARNING + "Unable to locate entry for " + hostname + " in the file " + iptables_file + bcolors.ENDC
print bcolors.WARNING + "If there is a rule already it needs to have a comment line above it so I can find it." + bcolors.ENDC
print bcolors.WARNING + "The comment line should look like this: '# " + hostname + "'" + bcolors.ENDC
print bcolors.WARNING + "If you are using webmin then it would be just '" + hostname + "'" + bcolors.ENDC
return False
else:
#print "comment is located at line #" + comment_line #DEBUG
pass
# the rule line should be right below the comment
rule_line = int(float(comment_line)) + 1
#print "rule is located at line #" + str(rule_line) #DEBUG
# replace the rule line in the file with a new one
rule = "-A INPUT -s "+ ip_address + " -j ACCEPT"
os.system("sed -i '" + str(rule_line) + "s/.*/" + rule + "/' " + iptables_file)
# let the script know to reload iptables
something_changed = True
def addressIsValid(ip_address):
    """Return True if ip_address parses as an IPv4 address, else False."""
    try:
        # Bug fix: the original validated the global 'cur_dyndns' instead of
        # the argument that was passed in.
        socket.inet_aton(ip_address)
        # address is valid
        return True
    except socket.error:
        # address invalid
        return False
def reloadIptables():
    # Re-apply the full iptables config file so the updated rules take effect.
    os.system("iptables-restore < " + iptables_file)
# make sure this is not already running (simple lock-file mutex)
if os.path.exists(lock_file):
    print bcolors.FAIL + "This utility seems to be already running." + bcolors.ENDC
    print bcolors.FAIL + "If this is not the case delete the file "+ lock_file + " and try running the utility again." + bcolors.ENDC
    exit()
else:
    os.system("touch " + lock_file)
# Read in the hosts
host_list = readHostList()
for hostname in host_list:
    # pull the addresses: fresh DNS answer vs. the one cached in the CSV
    cur_dyndns = getTextOutput("host " + hostname + " | cut -d ' ' -f4")
    last_dyndns = host_list[hostname]
    #print "The IP address on file for " + hostname + " is " + last_dyndns #DEBUG
    # ensure the new one is valid
    if addressIsValid(cur_dyndns):
        #print "The current IP address for " + hostname + " is " + cur_dyndns #DEBUG
        pass
    else:
        # DNS lookup failed or returned garbage; keep the cached address
        print bcolors.WARNING + "Unable to resolve " + hostname + ". Keeping old IP address." + bcolors.ENDC
        cur_dyndns = last_dyndns
    # check to see if they match
    if cur_dyndns != last_dyndns:
        print "Changing address for " + hostname + " from " + last_dyndns + " to " + cur_dyndns
        host_list[hostname] = cur_dyndns
        if update_ip_tables_file:
            updateIPtablesEntry(hostname,host_list[hostname])
        something_changed = True
    else:
        #print "there was no change in address for " + hostname + ". It will remain " + host_list[hostname] + "." #DEBUG
        pass
if something_changed:
    # persist the new mapping and apply the firewall changes
    saveHostList()
    print bcolors.OKGREEN + "Reloading iptables to update" + bcolors.ENDC
    reloadIptables()
# unlock the utility
os.remove(lock_file)
# set color to default
print bcolors.ENDC + " "
<file_sep>#!/bin/bash
# == AWS MFA REQUIRED CLI TOOL ==
# Setup:
# AWS IAM group exists called "RealPerson" which allows you to set your password, setup MFA, and access home directory on S3.
# It also has a policy attached that denies all requests (other than MFA setup requests) without MFA present (see policy "RequireMFA+ManageOwnMFA" and/or https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_users-self-manage-mfa-and-creds.html ).
# Usage:
# To run this you need---
# Linux or Windows with WSL:
# `sudo apt-get update`
# `sudo apt-get install jq python-pip`
# `pip install awscli`
# `aws configure` (just pick all the defaults, except for your keys)
# Mac/OSX:
# #Setup Homebrew
# `brew update`
# `brew install python3 jq`
# `pip3 install --upgrade pip setuptools wheel awscli`
# `aws configure` (just pick all the defaults, except for your keys)
# In a script that needs to use this tool just add:
# `SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"` < if you put it in the same folder to find the folder
# `source ${SCRIPTDIR}/aws_cli_mfa_tool.sh` < call this script from wherever it is
# `aws_cli_mfa_auth` < activate the function from this tool if you need to run it again later (happens automatically the first time).
# Process:
# 1. It checks a cache file for creds at `${MFA_CREDS_CACHE}` and sources it if available applying those creds.
# 2. It tries a test query of `aws s3 ls --output=json 2>&1` (aka "litmus test") because all RealPerson members are allowed to do the ListBuckets command when using MFA.
# 3. In the results of that, it looks for "ListBuckets" which should only come up if you were rejected (bad/expired cache creds or not MFA'd).
# 4. If you failed the test query it tries to pull your MFA info, if you have none, it'll tell you to set up MFA.
# 5. If it finds an MFA device, it asks you for an OTP (the code from the MFA).
# 6. It attempts to get MFA temp creds using the supplied OTP and the MFA serial number it looked up in #4.
# 7. It parses, uses, and caches the creds in `${MFA_CREDS_CACHE}` along with the expiration of the creds as a comment.
#== Configurable Vars ==
# Specify your MFA SN if it isn't browseable
MFA_SN=${1:-''}
# Where it will store cached credentials
MFA_CREDS_CACHE="${HOME}/.aws/mfa_creds"
# How long the MFA session will be good for in seconds (how long you can use the cached creds)
MFA_DURATION=129600
# Ensure jq, pip and the AWS CLI are installed. Attempts installation on
# Ubuntu; otherwise tells the user what is missing and aborts.
check_dependancies () {
    # Bug fix: on bash, 'which' prints nothing (it never prints "not found")
    # for a missing command, so the old substring test could never match.
    # Test the exit status of 'command -v' instead.
    if ! command -v jq >/dev/null 2>&1 || ! command -v pip >/dev/null 2>&1; then
        if [[ "$(cat /etc/issue)" = *"Ubuntu"* ]]; then
            echo "Installing 'jq' and 'python-pip'..."
            sudo apt-get update
            sudo apt-get install jq python-pip
        else
            echo "The packages jq and python-pip are required for this script."
            # Exit non-zero so callers can detect the missing dependency.
            exit 1
        fi
    fi
    if ! command -v aws >/dev/null 2>&1; then
        echo "Installing the pip package AWS CLI ('awscli')..."
        echo "You might need to run 'pip install --upgrade pip'"
        pip install awscli
        echo ""
        echo "Time to configure awscli. Please choose the defaults for everything (except your keys)"
        aws configure
    fi
}
# Make sure the requirements are installed
check_dependancies
# Resolve the MFA device serial number for the current IAM user.
# Uses ${MFA_SN} when supplied; otherwise asks IAM for the first MFA device.
# Sets AWSMFASN (device serial/ARN) and AWSUSER (user name parsed from it).
aws_cli_get_mfa_sn () {
    if [[ "${MFA_SN}" != '' ]]; then
        AWSMFASN="${MFA_SN}"
    else
        AWSMFA=$(aws iam list-mfa-devices)
        if [[ "${AWSMFA}" != *"SerialNumber"* ]]; then
            echo ""
            echo "No MFA device found on account. MFA is required. Log into the console, go to IAM > Users, then in your account go to 'Security Credentials' and edit 'Assigned MFA Device'."
            exit 0
        fi
        AWSMFASN=$(echo ${AWSMFA} | jq -r '.MFADevices[0].SerialNumber')
    fi
    AWSUSER=$(echo "${AWSMFASN}" | cut -f2 -d'/')
}
# Authenticate the current shell with MFA-backed temporary credentials:
# try cached creds first, and only prompt for a one-time code if they fail.
aws_cli_mfa_auth () {
    # Clear out any temp creds currently in use
    unset AWS_ACCESS_KEY_ID
    unset AWS_SECRET_ACCESS_KEY
    unset AWS_SESSION_TOKEN
    # Check for and load the cached creds
    if [ -f ${MFA_CREDS_CACHE} ]; then
        echo ""
        echo "Sourcing cached credentials..."
        source ${MFA_CREDS_CACHE}
    fi
    # Perform the litmus test: "ListBuckets" only appears in the error text of
    # a rejected request, so its presence means we still need to MFA.
    AWSTEST=`aws s3 ls --output=json 2>&1`
    if [[ "${AWSTEST}" == *"ListBuckets"* ]]; then
        unset AWS_ACCESS_KEY_ID
        unset AWS_SECRET_ACCESS_KEY
        unset AWS_SESSION_TOKEN
        aws_cli_get_mfa_sn
        echo ""
        echo "MFA SN = ${AWSMFASN}"
        echo ""
        echo "Enter MFA token (usually 6 numbers from a token or something like Google Authenticator)"
        echo ">"
        read MFAOTP
        AWSMFATKNRES=`aws sts get-session-token --serial-number ${AWSMFASN} --token-code ${MFAOTP} --output=json --duration-seconds ${MFA_DURATION}`
        echo "AWSMFATKNRES='${AWSMFATKNRES}'"
        export AWS_ACCESS_KEY_ID=`echo $AWSMFATKNRES | jq -r '.Credentials.AccessKeyId'`
        export AWS_SECRET_ACCESS_KEY=`echo $AWSMFATKNRES | jq -r '.Credentials.SecretAccessKey'`
        export AWS_SESSION_TOKEN=`echo $AWSMFATKNRES | jq -r '.Credentials.SessionToken'`
        AWS_SESSION_EXP=`echo $AWSMFATKNRES | jq -r '.Credentials.Expiration'`
        # Cache the temp creds (plus expiry as a comment) so later shells can reuse them.
        echo '# MFA Creds cached by script' > ${MFA_CREDS_CACHE}
        cat <<EOL >> ${MFA_CREDS_CACHE}
export AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
export AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}
# Expires: ${AWS_SESSION_EXP}
EOL
        source ${MFA_CREDS_CACHE}
    fi
}
aws_cli_mfa_auth
<file_sep>#!/bin/bash
# ### For pfsense ###
# See awscli instructions - scripting_projects/route53_automation/awscli_on_pfsense.md
# Make sure you install bash as the built-in shell doesn't support a lot of standard bash things.
# Use the bash path /usr/local/bin/bash instead of /bin/bash as that is the install directory in PfSense.
# ### general info ###
# This assumes you already have the AWSCLI and python 2.7.
# This also assumes you already have your domain/zone setup in Route53 and have created
# an "A" recordset for the domain name we will be updating.
# Log files write to and look for the currently executing directory, so you will need to
# be in the same directory each time if you want it to work. In cron you do it like this:
# `cd /root && /root/aws_ddns_update.sh`
# alternatively you can just change the $DIR variable to point to a non-relative path.
# Hosted Zone ID e.g. BJBK35SKMM9OE
ZONEID="enter zone id here"
# The CNAME you want to update e.g. hello.example.com
RECORDSET="enter cname here"
# More advanced options below
# The Time-To-Live of this recordset
TTL=300
# Change this if you want
COMMENT="Auto updating @ `date`"
# Change to AAAA if using an IPv6 address
TYPE="A"
# Get the external IP address from OpenDNS (more reliable than other providers)
IP=`dig +short myip.opendns.com @resolver1.opendns.com`
# Return 0 (success) when $1 is a syntactically valid dotted-quad IPv4
# address whose four octets are each in the range 0-255, non-zero otherwise.
function valid_ip()
{
    local candidate=$1
    local result=1
    if [[ $candidate =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
        # Split into octets on '.' and range-check each one.
        local parts
        IFS='.' read -r -a parts <<< "$candidate"
        result=0
        for part in "${parts[@]}"; do
            if [[ ! $part -le 255 ]]; then
                result=1
            fi
        done
    fi
    return $result
}
# Print the current value of the given Route53 recordset on stdout.
# Usage: VALUE=$(get_current_record_value ZONEID RECORDSET)
function get_current_record_value()
{
    ZONEID=$1
    RECORDSET=$2
    REC_VALUE=`aws route53 list-resource-record-sets --hosted-zone-id ${ZONEID} --query "ResourceRecordSets[?Name == '${RECORDSET}.']" | python -c "import sys, json; print json.load(sys.stdin)[0]['ResourceRecords'][0]['Value']"`
    # Bug fix: bash 'return' only accepts a numeric exit status, so returning
    # the record value failed. Echo it so callers can capture it with $(...).
    echo "${REC_VALUE}"
}
# Get current dir (resolves the directory this script lives in)
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
REC_ID="${ZONEID}_${RECORDSET}"
LOGFILE="$DIR/update-route53.log"
IPFILE="$DIR/update-route53_${REC_ID}.ip"
# Refuse to continue if OpenDNS returned something that is not an IPv4 address.
if ! valid_ip $IP; then
    echo "`date`_${REC_ID}> Invalid IP address: $IP" >> "$LOGFILE"
    exit 1
fi
# Check if the IP has changed
if [ ! -f "$IPFILE" ]
then
    touch "$IPFILE"
fi
# Usually this just checks the local file the script writes to see if the IP changed,
# but if there is a possibility that it got changed in AWS by someone else you might
# want to try uncommenting CURRENT_REC_VALUE and checking against that instead.
#CURRENT_REC_VALUE=`get_current_record_value ${ZONEID} ${RECORDSET}`
if grep -Fxq "$IP" "$IPFILE"; then
    # code if found
    echo "`date`_${REC_ID}> IP is still $IP. Exiting" >> "$LOGFILE"
    exit 0
else
    echo "`date`_${REC_ID}> IP has changed to $IP" >> "$LOGFILE"
    # Fill a temp file with valid JSON
    TMPFILE=$(mktemp /tmp/temporary-file.XXXXXXXX)
    cat > ${TMPFILE} << EOF
{
  "Comment":"$COMMENT",
  "Changes":[
    {
      "Action":"UPSERT",
      "ResourceRecordSet":{
        "ResourceRecords":[
          {
            "Value":"$IP"
          }
        ],
        "Name":"$RECORDSET",
        "Type":"$TYPE",
        "TTL":$TTL
      }
    }
  ]
}
EOF
    # Update the Hosted Zone record
    aws route53 change-resource-record-sets \
        --hosted-zone-id $ZONEID \
        --change-batch file://"$TMPFILE" >> "$LOGFILE"
    echo "" >> "$LOGFILE"
    # Clean up
    rm $TMPFILE
fi
# All Done - cache the IP address for next time
echo "$IP" > "$IPFILE"
| b2410948575dc0b1322f31cf8a78618a58d0012b | [
"Markdown",
"Python",
"Shell"
] | 6 | Markdown | johnsmclay/scripting_projects | fa2f424ea2b7913a1600f2279860facf464c07e9 | 78ad30b666e790a01553cec1435353a51e5b07fe |
refs/heads/master | <file_sep># Generated by Django 2.0.7 on 2018-08-19 14:24
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the *created_at* timestamp fields back to *create_at* on the
    Comment, Image and Like models, and orders Image newest-first."""

    dependencies = [
        ('images', '0009_auto_20180819_1941'),
    ]

    operations = [
        # Default queryset ordering for Image: newest first.
        migrations.AlterModelOptions(
            name='image',
            options={'ordering': ['-create_at']},
        ),
        migrations.RenameField(
            model_name='comment',
            old_name='created_at',
            new_name='create_at',
        ),
        migrations.RenameField(
            model_name='image',
            old_name='created_at',
            new_name='create_at',
        ),
        migrations.RenameField(
            model_name='like',
            old_name='created_at',
            new_name='create_at',
        ),
    ]
<file_sep># Generated by Django 2.0.7 on 2018-08-19 10:16
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the *create_at* timestamp fields to *created_at* on the
    Image and Like models."""

    dependencies = [
        ('images', '0005_image_tags'),
    ]

    operations = [
        migrations.RenameField(
            model_name='image',
            old_name='create_at',
            new_name='created_at',
        ),
        migrations.RenameField(
            model_name='like',
            old_name='create_at',
            new_name='created_at',
        ),
    ]
<file_sep># Mystagram
clone the instagram!
## stack
- django
- postgres
- react
- aws(EB)
## virtual environment
가상환경 접속
```python
pipenv shell
```
## run server
```bash
python manage.py runserver
```
## migrate
장고의 ORM 코드를 변경했을 경우, 변경 사항을 DB 에 알리고 적용시켜야 한다.
이를 위해 아래의 명령어를 입력하면 된다
```bash
python manage.py makemigrations
python manage.py migrate
```
### migate 과 makemigrations 의 차이점과 추가 명령어
- makemigrations: 장고에서 제공하는 모델의 변경사항을 감지하고 기록함 (마이그레이션 파일 생성)
- migrate: 파일과 설정값을 읽어서 변경사항을 DB 에 저장(마이그레이션 적용)
- showmigrations `<app-name>` : 마이그레이션 적용 현황
- sqlmigrate `<app-name>` `<migration-name>`: 지정 마이그레이션의 SQL 내역
즉, makemigrations 는 마이그레이션 파일(초안)을 생성하는 것이며, 해당 마이그레이션 파일을 DB 에 반영하기 위해서는 migrate 명령어가 필요하다.
## Create a super user
어드민 페이지를 사용하기 전에 superuser 를 등록해야한다.
아래의 커맨드를 통해 유저를 생성할 수 있다.
```bash
python manage.py createsuperuser
```
질문을 모두 입력한 후, 유저 생성이 완료되었으면 서버를 실행하자.
이후 브라우저에서 localhost:8000/admin 으로 접속하여
본인이 등록한 user 의 이름으로 로그인한다.
## creating the user modal
models 를 통해서 장고는 variable 을 db 로 변환시킨다.
model 에서 필드는 어떻게 추가 되는 것일까.
데이터베이스의 데이터 형식은 django.db 의 models 에서 제공된다.
필드의 종류에는 CharField, URLField, TextField 등이 있다.
따라서 아래와 같이 필드를 추가할 수 있다.
```python
from django.db import models
name = models.CharField(_("Name of User"), blank=True, max_length=255)
website= models.URLField(null=True)
bio = models.TextField(null=True)
phone = models.CharField(max_length=140, null=True)
gender = models.CharField(max_length=80, choices=GENDER_CHOICES, null=True)
```
## time stamp
타임스탬프는 쉽게 말해서 날짜이다.
타임스탬프를 사용하면 아래와같은 사항들을 추적할 수 있다.
1. 언제 모델이 생성되었는지
2. 언제 모델이 업데이트 되었는지
아래의 코드를 보자
```python
class TimeStampedModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract=True
```
우리는 2 개의 필드를 만들었는데, created_at 은 모델이 생성되었을때 입력되고,
updated_at 은 모델이 업데이트 될떄마다 자동으로 입력된다.
또한 내부에 Meta 클래스를 생성하고, abstract 를 true 로 선언함으로써 이는 데이터베이스에 영항을 미치지 않는 추상 모델이된다.
이로써 TimestampedModel 은 다른 모델들을 위한 base 로 사용된다.
## creating the image model
위에서 만든 TimeStampedModel 를 상속받는 2 개의 클래스를 만든다.
2 개의 클래스는 각각 이미지와 댓글을 저장하는 모델이 된다.
```python
class Image(TimeStampedModel):
file = models.ImageField()
location = models.CharField(max_length=140)
caption = models.TextField()
class Comment(TimeStampedModel):
message = models.TextField()
```
## Explaining Model Relationships
### one to many/ many to one
대응 관계(relation)는 1 대 N 또는 N 대 1 로 정의 되어 질 수 있다.
ex) 한개의 사진에 여러개의 댓글을 다는 경우
한명의 owner 가 여러개의 글을 가지고 있는 경우
아래의 고양이 예제를 살펴보자
```python
from django.db import models
from . import Owner
class Cat(models.Model):
name = models.CharField(max_length=30)
breed = models.CharField(max_length=20)
grumpy = models.BooleanField(default =False)
owner = models.ForeignKey(Owner,null=True)
jon = Owner.objects.create(
    name="Jon",
    last_name="Doe",
    age=78
)
bunns = Cat.objects.get(id=2)
bunns.owner = jon
jon.save()
```
여기서 bunns 는 고양이이며, jon 을 생성 후 주인으로 등록한다.
이렇게 외래키를 사용하여 데이터베이스의 데이터 간의 관계를 만들 수 있다.
### getting related objects
장고는 자동으로 set 이라고 불리는 클래스의 속성을 만든다.
외래키를 가지고 있다면 외래키는 자동으로 주인 객체를 바라보게되며, 주인 모델은 새로운 속성을 갖게 된다. 이름은 cat_set(modelName_set)이된다.
그러나 실제로 cat_set 이라는 속성이 생성되지는 않는다.
코드는 아래와 같다.
```python
jon = Owner.objects.get(pk=1)
jon_cats = jon.cat_set.all()
```
### many to many relationship
예를 들면 많은 유저가 다른 많은 유저를 팔로우 할 수 있다.
이를 N 대 M 관계라고 한다.
장고로 프로그래밍할때 다음과 같이 나타내어질 수 있다.
```python
class Owner(models.Model):
name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
age = models.IntegerField()
following = models.ManyToManyField('self')
followers = models.ManyToManyField('self')
jon = Owner.objects.get(pk=1)
pedro = Owner.objects.get(pk=2)
jisu = Owner.objects.get(pk=3)
jon.followers.add(jisu, pedro)
```
ManyToManyField 와 add 를 통해서 many To many 관계 작성이 가능하다.
## Registering the Models in the admin
어드민 페이지에 우리가 생성한 모델들을 추가하기 위해 아래의 코드를 admin.py 에 추가한다.
```python
from django.contrib import admin
from . import models
# Register your models here.
@admin.register(models.Image)
class ImageAdmin(admin.ModelAdmin):
pass
@admin.register(models.Like)
class LikeAdmin(admin.ModelAdmin):
pass
@admin.register(models.Comment)
class CommentAdmin(admin.ModelAdmin):
pass
```
이 후, 다시 어드민 페이지에 접속하면 생성한 모델들이 추가 되어있다.
## Customizing the Django Admin
어드민 리스트에 출력되는 내용들을 추가할 때 아래의 코드와 같이 작성한다.
```python
@admin.register(models.Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = (
'message',
'creator',
'image',
'created_at',
'updated_at'
)
```
만약 특정한 속성값을 클릭했을때 편집으로 넘기기위해서는 아래와 같이 작성한다
```python
@admin.register(models.Image)
class ImageAdmin(admin.ModelAdmin):
list_display_links= (
'location',
)
```
다음으로, 특정 속성으로 탐색하는 기능을 추가하고 싶으면, 아래의 코드를 작성한다.
이때, 자동으로 서치바가 상단에 생성된다.
```python
@admin.register(models.Image)
class ImageAdmin(admin.ModelAdmin):
search_fields = (
'location',
)
```
마지막으로 우측에 특정 속성에 대한 필터를 추가하기위해서는 아래의 코드를 작성한다.
```python
@admin.register(models.Image)
class ImageAdmin(admin.ModelAdmin):
list_filter = (
'location',
'creator'
)
```
## Rest API 의 규칙
- 동사를 사용하지 않는다(동사는 CRUD 에서 발생)
- 명사를 사용한다
```
// bad
${BASE_URL}/getAllDogs
// good
GET -> ${BASE_URL}/dogs
POST -> ${BASE_URL}/dogs
PUT -> ${BASE_URL}/dogs
DELETE -> ${BASE_URL}/dogs
/dogs
GET -> /dogs/kung
POST -> /dogs/kung (error - 이미 생성되었으므로)
PUT -> /dogs/kung (kung이 있는 경우에는 사용할 수 있음)
DELETE -> /dogs/kung (kung이 있는 경우에는 사용할 수 있음)
// 변형
GET -> dogs/search?color=brown
GET /owners/nicolas/dogs -> List of all the dogs that nicolas has.
POST /owners/nicolas/dogs -> Create a dog for Nicolas
PUT /owners/nicolas/dogs -> Update all of Nicolas' dogs
DELETE /owners/nicolas/dogs -> delete
GET -> /dogs/search?color=brown
GET -> /owners/nicolas/dogs/search?color=
versioning
/v1/dogs/search?color=brown
/v2/dogs/search?color=brown
```
## django rest framework
- 장고 rest api 를 만들기 위한 프레임워크
- class, function, 파이썬 패키지등이 api 를 쉽게 만들어줌
아래와 같은 키워드로 설치한다
```bash
pipenv shell
pipenv install djangorestframework
```
config/settings/bash.py 에서 서드파티 앱 리스트를 갱신해준다
```python
THIRD_PARTY_APPS = [
'...',
'rest_framework',
'...',
]
```
## 시리얼라이저(serializer)
- api 는 json 과 일을 한다. 프런트에서 json 을 요구한다는 것이다.
- 장고는 json 과 일을 하지 않는다. json 은 자바스크립트 기반이므로 파이썬 기반인 장고와는 다르게 생겼다.
- 따라서 장고 rest framework 가 갖고 있는 시리얼라이저는 json->파이썬, 파이썬->json 으로 변환하는 역할을 한다.
어플리케이션 내에 serializers.py 라는 파일을 생성한다.
파일 이름은 장고에 영향을 미치므로 주의하자
serializers 에서는 모델들을 가져와서 meta class 에서 model 과 필드를 설정해준다.
```py
# serializers.py
from rest_framework import serializers
from . import models
class ImageSerializer(serializers.ModelSerializer):
class Meta:
model = models.Image
fields = '__all__'
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = models.Comment
fields = '__all__'
class LikeSerializer(serializers.ModelSerializer):
class Meta:
model = models.Like
fields = '__all__'
```
## View
예제로 우리 DB 의 모든 이미지를 볼 수 있는 View 를 만들어 보자
우선 view 위에 다음과 같은 import 문이 있는데,
이는 템플릿을 사용하기 위한 import 문이므로 지우도록하자.
```py
from django.shortcuts import render
```
그리고 다음과 같은 코드를 추가한다.
```py
from rest_framework.views import APIView
from rest_framework.response import Response
from . import models, serializers
class ListAllImages(APIView):
def get(self, request, format=None):
all_images = models.Image.objects.all()
serializer = serializers.ImageSerializer(all_images, many=True)
return Response(data=serializer.data)
```
우선 설치한 rest_framework 를 사용한다.
get 를 선언하는데 request 와 format 을 인자로 받는다.
format 은 xml 이나 json 이 들어갈수있으며, 디폴트 설정은 None 이다.
앞서 만든 serializer 를 사용하여 python 오브젝트에서 json 으로 변환해준다.
다만 인자가 1 개일 경우 serializer 는 단수로 인식하므로 many 인자를 True 로 지정한다.
마지막으로 Response 에 담아 리턴하는데, serializers 를 거친 데이터는 serializers.data 에 저장되므로, 이를 인자로 전달한다.
## URL
이제 view 를 만들었으니 URL 에 연결해보도록하자
```py
# config/urls.py
urlpatterns = [
path("images/", include("mystagram.images.urls"), namespace='images'),
]
# images/urls.py
app_name = "images"
urlpatterns = [
path("all/", view=views.ListAllImages.as_view(), name="all_images"),
]
```
| 8c5d19108886d69d098f504427e0964a6601bdda | [
"Markdown",
"Python"
] | 3 | Python | devnunu/mystagram | 2eebb8bd9b332197f0a2d5ac4920e85b27d7cfaa | 301bbdfdd3a17a6fc8290ac0534868a5e266d4dc |
refs/heads/master | <file_sep><?php
// Disable file attachment pages, redirect to parent post.
wp_redirect(get_permalink($post->post_parent));
// Bug fix: wp_redirect() does not halt execution itself; the WordPress docs
// require an explicit exit immediately after it.
exit;
<file_sep> <footer class="site-footer">
        <small>© <?php echo date('Y'); ?> <?php bloginfo('name'); ?></small>
    </footer>
    <?php wp_footer(); ?>
    <!-- Inline SVG sprite: icons elsewhere reference these symbols via <use xlink:href="#id"> -->
    <span class="hidden"><?php include "dist/svg-symbols.svg"; ?></span>
    <!-- <?php echo get_num_queries(); ?> queries. <?php timer_stop(1); ?> seconds. -->
</body>
</html>
<file_sep><?php get_header(); ?>
<?php while (have_posts()) : the_post(); ?>
    <article <?php post_class() ?> id="post-<?php the_ID(); ?>">
        <h1><?php the_title(); ?></h1>
        <?php
        // Output the featured image manually so we can attach responsive
        // srcset/sizes attributes plus the stored alt text and image title.
        if (has_post_thumbnail()) :
            $image_id = get_post_thumbnail_id($post->ID);
            $img_src = wp_get_attachment_image_url($image_id, 'large');
            $img_srcset = wp_get_attachment_image_srcset($image_id, 'large');
            $img_sizes = wp_get_attachment_image_sizes($image_id, 'large');
            $img_alt = get_post_meta($image_id, '_wp_attachment_image_alt', true);
            $img_title = get_the_title($image_id);
            echo '<img src="'. esc_url($img_src) .'" srcset="'. esc_attr($img_srcset) .'" sizes="'. esc_attr($img_sizes) .'" alt="'. esc_attr($img_alt) .'" title="'. esc_attr($img_title) .'">';
        endif;
        ?>
        <?php the_content(); ?>
    </article>
<?php endwhile; ?>
<?php get_footer(); ?>
<file_sep># Origin Theme
A starter theme, specifically for one-off bespoke themes.
More specifically, this is _my_ starter theme. It is how I start all my personal and client projects. It might not be your cup of tea, and that's just fine 🙂 The Fork button is close by.
It is geared towards one-off themes where customisation options are something the client doesn't need or, isn't something they should even have access to. It also removes comments, as i'm yet to build a client site with them.
## Features
* Disabled customiser
* Disabled comments
* Disabled admin bar (on the front-end)
* Simplified Editor buttons
* Changes 'Posts' to 'News'
* Press CPT with custom taxonomy and relates templates
* Gulp for running tasks
* Scss
* Autoprefixer
* Merge JS files into two `.js` files & minify (`vendor.js` & `app.js`)
* Merge SVG files into one to use as symbols
* LiveReload
* Create zip file suitable for uploading/sharing
## Gulp Tasks
Task | Description
--- | ---
`gulp` | Run watch tasks and re-build files as they change
`gulp build` | Build files with no watchers
`gulp release` | Bundle all required files together and create zip file in this themes directory
## Recommended Plugins
I prefer to keep plugins to a minimum, but there are always exceptions.
* [Advanced Custom Fields](https://www.advancedcustomfields.com/) – Because 99.99% of sites I build need some form of custom fields, and no way in hell am I writing those manually.
* [Post Type Archive Link](https://wordpress.org/plugins/post-type-archive-links/) – Allows you to add _real_ CPT archive links to menus, without needing to add custom links. No extra crap is added.
* [Simple Page Ordering](https://en-gb.wordpress.org/plugins/simple-page-ordering/) – Adds the ability to drag & drop reorder pages from the usual Posts admin pages, no extra UI is added
## #protips
* Add the class `debug_mq` to `body_class()` to show the current breakpoint
* If your local domain ends with `.dev`, that'll work automatically
<file_sep><?php get_header(); ?>
<ul class="press-type-filter">
    <?php
    // Build one filter button for every presstype term (including empty terms).
    $filters = get_terms('presstype', 'orderby=count&hide_empty=0');
    foreach ($filters as $filter) :
        echo '<li><a href="'. get_term_link($filter->term_id) .'" data-filter="'. $filter->slug .'" class="button">'. $filter->name .'</a></li>';
    endforeach;
    ?>
</ul>
<div class="press-items">
    <?php
    // Main archive loop: render one card partial per press item.
    while (have_posts()) : the_post();
        get_template_part('partials/content', 'press');
    endwhile;
    ?>
</div>
<?php the_posts_pagination(); ?>
<?php get_footer(); ?>
<file_sep><?php
/*****
Show PHP errors & warnings
*****/
// error_reporting(E_ALL);
// ini_set('display_errors', '1');
/**
 * Enqueue the theme's compiled stylesheet and scripts.
 * jQuery is declared as a dependency so it loads first; scripts load in the
 * footer (final argument true).
 */
function origin_add_styles_and_scripts() {
    wp_enqueue_style('app', get_template_directory_uri() . '/dist/app.min.css', array(), '1.2', 'all');
    // Consistency fix: pass the version as a string like the style above does
    // (a bare float 1.2 would be coerced to "1.2" anyway — be explicit).
    wp_enqueue_script('vendor', get_template_directory_uri() . '/dist/vendor.min.js', array('jquery'), '1.2', true);
    wp_enqueue_script('app', get_template_directory_uri() . '/dist/app.min.js', array('jquery'), '1.2', true);
}
add_action('wp_enqueue_scripts', 'origin_add_styles_and_scripts');
/*****
Require CPT's and taxonomies
*****/
// require "cpt/your_cpt.php";
/*****
Add support for thumbnails and menus
*****/
add_theme_support('post-thumbnails');
add_theme_support('menus');
/*****
Add custom image sizes
*****/
add_image_size('larger', 1400, 1400);
add_image_size('huge', 2000, 2000);
add_image_size('massive', 2600, 2600);
/****
Echo or return the SVG icon with correct viewBox
*****/
/**
 * Build the inline <svg> markup for a named icon with its correct viewBox.
 * Unknown icons fall back to an empty "0 0 0 0" viewBox.
 */
function return_svg($icon) {
    // Icon name => viewBox lookup (was a switch statement).
    $view_boxes = array(
        "fb" => "0 0 15 32",
        "tw" => "0 0 31 27",
    );
    $view = isset($view_boxes[$icon]) ? $view_boxes[$icon] : "0 0 0 0";
    return '<svg data-icon="' . $icon . '" viewBox="' . $view . '"><use xlink:href="#' . $icon . '"></use></svg>';
}
// Echo the SVG markup for an icon (thin output wrapper around return_svg()).
function svg($icon) {
    echo return_svg($icon);
}
// And now include a bunch of stuff that hides admin panels, and some useful functions
require "functions/origin.php";
<file_sep><?php
// Add the debug_mq helper class (shows the active breakpoint) when the site
// is being viewed on a .dev local development domain.
$body_classes = array();
// Bug fix: strpos() returns false when the needle is absent and 0 when it is
// at position 0, so compare strictly against false instead of using > -1.
if (strpos($_SERVER['HTTP_HOST'], '.dev') !== false) :
    $body_classes[] = 'debug_mq';
endif;
?>
<!DOCTYPE html>
<html <?php language_attributes(); ?>>
<head>
    <meta name="description" content="<?php bloginfo("description"); ?>" />
    <meta charset="<?php bloginfo('charset'); ?>" />
    <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
    <link rel="pingback" href="<?php bloginfo('pingback_url'); ?>" />
    <link rel="shortcut icon" href="<?php bloginfo('template_url'); ?>/favicon.ico" />
    <title><?php wp_title('«', true, 'right'); ?> <?php bloginfo('name'); ?></title>
    <?php wp_head(); ?>
</head>
<body <?php body_class($body_classes); ?>>
    <header class="site-header">
        <a class="logo" href="<?php bloginfo('url'); ?>/"><?php bloginfo('name'); ?></a>
        <!-- Primary navigation: managed under Appearance > Menus ('Header' menu) -->
        <nav class="site-header-nav">
            <?php
            wp_nav_menu(array(
                'sort_column' => 'menu_order',
                'menu' => 'Header',
                'container' => '',
                'items_wrap' => '<ul>%3$s</ul>'
            ));
            ?>
        </nav>
    </header>
<file_sep><?php
/*****
Hide Admin Bar in WP >= 3.1
*****/
add_filter('show_admin_bar', '__return_false');
/*****
Disable Theme Updates
*****/
remove_action('load-update-core.php', 'wp_update_themes');
// Bug fix: create_function() was deprecated in PHP 7.2 and removed in PHP 8.0;
// use a closure to short-circuit the theme update transient instead.
add_filter('pre_site_transient_update_themes', function ($a) { return null; });
/*****
Hide all dashboard stuff
*****/
// Hide most of the dashboard meta boxes
function mpp_remove_dashboard_meta() {
remove_meta_box('dashboard_incoming_links', 'dashboard', 'normal');
remove_meta_box('dashboard_plugins', 'dashboard', 'normal');
remove_meta_box('dashboard_primary', 'dashboard', 'side');
remove_meta_box('dashboard_secondary', 'dashboard', 'normal');
remove_meta_box('dashboard_quick_press', 'dashboard', 'side');
remove_meta_box('dashboard_recent_drafts', 'dashboard', 'side');
remove_meta_box('dashboard_recent_comments', 'dashboard', 'normal');
remove_meta_box('dashboard_right_now', 'dashboard', 'normal');
remove_meta_box('dashboard_activity', 'dashboard', 'normal');//since 3.8
}
add_action('admin_init', 'mpp_remove_dashboard_meta');
// Hide 'Welcome' panel, and 'Screen Options' & 'Help' tabs
// Hide the 'Welcome' panel by dropping every callback hooked to welcome_panel.
add_action('wp_dashboard_setup', 'remove_welcome_panel');
function remove_welcome_panel() {
    global $wp_filter;
    unset($wp_filter['welcome_panel']);
}
// Remove the contextual 'Help' tab from every admin screen.
add_action('admin_head', 'mytheme_remove_help_tabs');
function mytheme_remove_help_tabs() {
    $screen = get_current_screen();
    $screen->remove_help_tabs();
}
// Add our own meta box
// Replace the removed dashboard widgets with a single custom welcome box.
add_action('wp_dashboard_setup', 'register_my_dashboard_widget');
function register_my_dashboard_widget() {
    wp_add_dashboard_widget('my_dashboard_widget', 'Welcome', 'dashboard_widget_display');
}
// Renders the body of the custom dashboard widget above.
function dashboard_widget_display() {
    echo 'Use the menu on the left to add content and update everything on this website.';
}
/*****
Remove generator meta tag from head
*****/
// Stops WordPress printing its version number into the page source.
remove_action('wp_head', 'wp_generator');
/*****
Rename 'Post' to 'News'
*****/
// Relabel the admin sidebar entries for the built-in 'post' type.
// NOTE: indexes 5/10/16 are the positions WordPress core uses for the Posts
// menu and its submenu items; this relabelling breaks if core reorders them.
// (The original ended with a dead `echo '';` that emitted nothing; removed.)
function origin_change_post_label() {
	global $menu;
	global $submenu;
	$menu[5][0] = 'News';
	$submenu['edit.php'][5][0] = 'News';
	$submenu['edit.php'][10][0] = 'Add News';
	$submenu['edit.php'][16][0] = 'News Tags';
}
// Relabel the 'post' post type object itself so 'News' shows everywhere in the UI.
function origin_change_post_object() {
	global $wp_post_types;
	// The labels object is shared, so writing its properties updates the post type.
	$labels = $wp_post_types['post']->labels;
	$news_labels = array(
		'name'               => 'News',
		'singular_name'      => 'News',
		'add_new'            => 'Add News',
		'add_new_item'       => 'Add News',
		'edit_item'          => 'Edit News',
		'new_item'           => 'News',
		'view_item'          => 'View News',
		'search_items'       => 'Search News',
		'not_found'          => 'No News found',
		'not_found_in_trash' => 'No News found in Trash',
		'all_items'          => 'All News',
		'menu_name'          => 'News',
		'name_admin_bar'     => 'News',
	);
	foreach ($news_labels as $key => $text) {
		$labels->$key = $text;
	}
}
add_action('admin_menu', 'origin_change_post_label');
add_action('init', 'origin_change_post_object');
/*****
Remove customizer from menu
http://stackoverflow.com/a/26873392
*****/
// Collects every URL variant the Customizer submenu item has used across
// WP versions, then removes them all from the Appearance menu.
function origin_remove_customize_menu() {
$customize_url_arr = array();
$customize_url_arr[] = 'customize.php'; // 3.x
// 4.0/4.1 append a 'return' query arg pointing back to the current admin page.
$customize_url = add_query_arg( 'return', urlencode( wp_unslash( $_SERVER['REQUEST_URI'] ) ), 'customize.php' );
$customize_url_arr[] = $customize_url; // 4.0 & 4.1
if (current_theme_supports('custom-header') && current_user_can('customize')) {
$customize_url_arr[] = add_query_arg( 'autofocus[control]', 'header_image', $customize_url ); // 4.1
$customize_url_arr[] = 'custom-header'; // 4.0
}
if (current_theme_supports('custom-background') && current_user_can('customize')) {
$customize_url_arr[] = add_query_arg( 'autofocus[control]', 'background_image', $customize_url ); // 4.1
$customize_url_arr[] = 'custom-background'; // 4.0
}
foreach ($customize_url_arr as $customize_url) {
remove_submenu_page( 'themes.php', $customize_url );
}
}
// Priority 999: run after WordPress has registered the submenu items.
add_action( 'admin_menu', 'origin_remove_customize_menu', 999 );
/*****
Remove comments
http://wordpress.stackexchange.com/a/17936
*****/
// Removes from admin menu
// Hides the 'Comments' item from the admin sidebar.
function origin_remove_comments_admin_link() {
remove_menu_page('edit-comments.php');
}
add_action('admin_menu', 'origin_remove_comments_admin_link');
// Removes comment support from posts and pages.
function origin_remove_comment_post_types() {
	foreach (array('post', 'page') as $post_type) {
		remove_post_type_support($post_type, 'comments');
	}
}
// Priority 100: run after the post types have registered their supports.
add_action('init', 'origin_remove_comment_post_types', 100);
// Removes the comments shortcut from the admin toolbar.
function origin_remove_comments_admin_bar_link() {
	global $wp_admin_bar;
	// remove_node() is the canonical WP_Admin_Bar API since WP 3.4
	// (remove_menu() is just an alias that forwards to it).
	$wp_admin_bar->remove_node('comments');
}
add_action('wp_before_admin_bar_render', 'origin_remove_comments_admin_bar_link');
/*****
Disable emoji
http://wordpress.stackexchange.com/a/185578
*****/
// Unhooks every emoji-related script/style/filter WordPress adds by default.
// Note: remove_action() only works when the priority matches the one used at
// registration time (hence the explicit 7 below).
function disable_wp_emojicons() {
// all actions related to emojis
remove_action('admin_print_styles', 'print_emoji_styles');
remove_action('wp_head', 'print_emoji_detection_script', 7);
remove_action('admin_print_scripts', 'print_emoji_detection_script');
remove_action('wp_print_styles', 'print_emoji_styles');
remove_filter('wp_mail', 'wp_staticize_emoji_for_email');
remove_filter('the_content_feed', 'wp_staticize_emoji');
remove_filter('comment_text_rss', 'wp_staticize_emoji');
// filter to remove TinyMCE emojis
add_filter('tiny_mce_plugins', 'disable_emojicons_tinymce');
}
add_action('init', 'disable_wp_emojicons');
// TinyMCE plugin filter: drops the 'wpemoji' plugin from the plugin list
// (returns an empty list when TinyMCE hands us something unexpected).
function disable_emojicons_tinymce($plugins) {
	return is_array($plugins) ? array_diff($plugins, array('wpemoji')) : array();
}
/*****
Stop the 'Posts' archive being an active menu item within a CPT single item
https://wordpress.org/support/topic/blog-tab-gets-highlighted-in-nav-menu-for-custom-post-types#post-2711621
*****/
// Strips 'current_page_parent' from the posts-page menu item whenever we are
// not in regular blog context, so CPT singles don't highlight the blog tab.
function origin_disable_posts_archive_nav_highlight_on_cpt_single($classes,$item,$args) {
	if (!is_singular('post') && !is_category() && !is_tag()) :
		$blog_page_id = intval(get_option('page_for_posts'));
		if ($blog_page_id != 0) :
			if ($item->object_id == $blog_page_id) :
				// BUG FIX: array_search() returns false when the class is absent,
				// and unset($classes[false]) would wrongly remove $classes[0];
				// guard the lookup before unsetting.
				$index = array_search('current_page_parent', $classes);
				if ($index !== false) :
					unset($classes[$index]);
				endif;
			endif;
		endif;
	endif;
	return $classes;
}
add_filter('nav_menu_css_class','origin_disable_posts_archive_nav_highlight_on_cpt_single',10,3);
/*****
Remove <p> tags surrounding images
https://css-tricks.com/snippets/wordpress/remove-paragraph-tags-from-around-images/#comment-150114
---
Might need this http://wordpress.stackexchange.com/a/174585
*****/
// Unwraps paragraphs whose only content is an image (optionally link-wrapped),
// keeping just the image/link markup itself.
function filter_ptags_on_images($content){
	$pattern = '/<p>\\s*?(<a .*?><img.*?><\\/a>|<img.*?>)?\\s*<\\/p>/s';
	return preg_replace($pattern, '\1', $content);
}
add_filter('the_content', 'filter_ptags_on_images');
/*****
Pluralise string
---
Returns a different string based on the int given to it
---
echo origin_plural($number_of_people, 'person', 'people');
*****/
// Strict comparison: only the integer 1 selects the singular form.
function origin_plural( $amount, $singular = '', $plural = 's' ) {
	return ($amount === 1) ? $singular : $plural;
}
/*****
Remove some editor buttons – anything not whitelisted below (notably the
justify/align buttons) disappears from the editor toolbar.
*****/
function origin_tinymce_editor_buttons($buttons) {
	// Replace the default toolbar wholesale with this fixed whitelist.
	$allowed = array(
		// block style selector
		"formatselect", "separator",
		// inline formatting
		"bold", "italic", "underline", "strikethrough",
		// lists and links
		"bullist", "numlist", "link", "unlink",
		// media and structure
		"image", "blockquote", "outdent", "indent",
		// utilities
		"undo", "redo", "removeformat", "code"
	);
	return $allowed;
}
add_filter("mce_buttons", "origin_tinymce_editor_buttons", 99);
/*****
Remove some block styles
*****/
// Restrict the TinyMCE format dropdown to the entries listed below
// (h1 is not offered via the editor).
function origin_tinymce_block_formats($arr){
	$formats = 'Paragraph=p;Heading 2=h2;Heading 3=h3;Heading 4=h4;Heading 5=h5;Heading 6=h6;Pre=pre;Code=code';
	$arr['block_formats'] = $formats;
	return $arr;
}
add_filter('tiny_mce_before_init', 'origin_tinymce_block_formats');
/*****
Utility function to essentially replace `print_r`, but more useful and *always* shown, even if no data is supplied
*****/
// Debug dump helper. Prints $var inside a loudly-styled <pre>. Booleans are
// var_dump'ed (print_r renders true as "1" and false as nothing); other falsy
// values fall through to an explicit "no data" notice.
function pre($var, $maxheight = false) {
	$maxheightcss = ($maxheight) ? 'max-height: 300px;' : '';
	// Random outline colour makes successive dumps easy to tell apart.
	echo '<pre style="background: #fcffb1; color: #000; font-weight: normal; text-align: left; margin: 0; box-sizing: border-box; padding: 10px 15px; font-size: 14px; line-height: 18px; width: 100%; overflow: auto; outline: 4px solid rgb('. rand(0, 150) .','. rand(0, 150) .','. rand(0, 150) .');'. $maxheightcss .'">';
	if (is_bool($var)) :
		// BUG FIX: checked first. The original tested truthiness before
		// is_bool(), so `true` went through print_r as "1" and the bool
		// branch could only ever be reached by `false`.
		var_dump($var);
	elseif ($var) :
		print_r($var);
	else :
		echo "\n\n\t--- <b>No data received by pre() function</b> ---\n\n";
	endif;
	echo '</pre>';
}
| 97841f7518910f991947d2c3346d65b83da779e9 | [
"Markdown",
"PHP"
] | 8 | PHP | CrossStroke/origin | b89c64af8ba8843eb41c8e399750943d18a58b12 | 82da8c400d7765b302f10400adc6f444a0a59bee |
refs/heads/dlperezmartinez | <repo_name>dlperezmartinez/PerezMartinezDaniel<file_sep>/README.md
# PerezMartinezDaniel
This is for an assessment activity.
<file_sep>/src/calculadora/calculadoraPolaca.java
package calculadora;
public class calculadoraPolaca {
// Fields (comment translated from Spanish "VARIABLES")
private String commando; // the postfix (reverse Polish) expression to evaluate
private NodoPila arriba; // top node of the linked-list operand stack (NodoPila is defined elsewhere)
// Pushes a value onto the operand stack: the new node becomes the top.
public void pushPila(double nuevo_dato) {
    arriba = new NodoPila(nuevo_dato, arriba);
}
// Pops the top value off the operand stack and returns it
// (throws NullPointerException when the stack is empty, as before).
public double establecerValorPila( ) {
    NodoPila antiguo = arriba;
    arriba = antiguo.abajo;
    return antiguo.dato;
}
// Creates a calculator for the given postfix expression string.
public calculadoraPolaca(String commando) {
arriba = null; // explicit: evaluation starts with an empty stack
this.commando = commando;
}
// Evaluates the stored postfix (reverse Polish) expression and returns its value.
// Supported operators: + - * / ^ (power) % (modulo); operands parse as doubles.
// Throws IllegalArgumentException on unknown characters or when more than one
// value is left on the stack at the end (malformed expression).
public double resultado( ) {
    for (int i = 0; i < commando.length( ); i++) {
        char c = commando.charAt(i);
        if (Character.isDigit(c)) {
            // Collect the whole numeric literal (digits and a decimal point).
            // BUG FIX: the explicit i < length() guard prevents the
            // StringIndexOutOfBoundsException the original threw whenever the
            // expression ended with a digit (charAt read past the end).
            StringBuilder literal = new StringBuilder();
            while (i < commando.length( )
                    && (Character.isDigit(commando.charAt(i)) || commando.charAt(i) == '.')) {
                literal.append(commando.charAt(i));
                i++;
            }
            // BUG FIX: step back so the outer loop's i++ lands on the character
            // that terminated the number; the original skipped it, so an
            // operator directly after a number (e.g. "2 3+") was silently lost.
            i--;
            pushPila(Double.parseDouble(literal.toString()));
        } else if (c == '+') {
            double valor2 = establecerValorPila( );
            double valor1 = establecerValorPila( );
            pushPila(valor1 + valor2);
        } else if (c == '-') {
            double valor2 = establecerValorPila( );
            double valor1 = establecerValorPila( );
            pushPila(valor1 - valor2);
        } else if (c == '*') {
            double valor2 = establecerValorPila( );
            double valor1 = establecerValorPila( );
            pushPila(valor1 * valor2);
        } else if (c == '/') {
            double valor2 = establecerValorPila( );
            double valor1 = establecerValorPila( );
            pushPila(valor1 / valor2);
        } else if (c == '^') {
            double valor2 = establecerValorPila( );
            double valor1 = establecerValorPila( );
            pushPila(Math.pow(valor1, valor2));
        } else if (c == '%') {
            double valor2 = establecerValorPila( );
            double valor1 = establecerValorPila( );
            pushPila(valor1 % valor2);
        } else if (c != ' ') {
            throw new IllegalArgumentException( );
        }
    }
    double val = establecerValorPila( );
    if (arriba != null) {
        // Leftover operands mean the expression was malformed.
        throw new IllegalArgumentException( );
    }
    return val;
}
} | 8035730f068c78ac4eba8a3a0f046d365c6a79d8 | [
"Markdown",
"Java"
] | 2 | Markdown | dlperezmartinez/PerezMartinezDaniel | 4096917274052466c8b45c147ea9122fdef2557a | 4f7bbdd91cd0d0889118a042a4482145dd63bdde |
refs/heads/master | <file_sep>package com.devakt.modal;
import java.util.Date;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.experimental.FieldDefaults;
/**
 * API-facing representation of a meeting room. Lombok generates the
 * accessors, builder and both constructors; @FieldDefaults makes the
 * fields private.
 */
@Data
@Builder
@FieldDefaults(level = AccessLevel.PRIVATE)
@AllArgsConstructor
@NoArgsConstructor
public class RoomView {
Integer id;
String description;
String floor;
String department;
String name;
int nbPersons; // room capacity
}
<file_sep>package com.devakt.exception;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.ResponseStatus;
/**
 * Thrown when a user cannot be authenticated; mapped to HTTP 401 Unauthorized.
 */
@ResponseStatus(value = HttpStatus.UNAUTHORIZED)
public class LoginException extends RuntimeException {
	private static final long serialVersionUID = 1L;
	public LoginException() {
		// The message is fixed, so the original String.format() call
		// (which had no placeholders or arguments) was dropped.
		super("Impossible de se connecter, merci de vérifier vos paramètres");
	}
}
<file_sep>package com.devakt.service;
import com.devakt.entity.Booking;
import com.devakt.entity.Room;
import com.devakt.entity.User;
import com.devakt.exception.BookException;
import com.devakt.exception.LoginException;
import com.devakt.exception.RoomNotFoundException;
import com.devakt.mapper.BookingMapper;
import com.devakt.mapper.RoomMapper;
import com.devakt.modal.BookingView;
import com.devakt.modal.IntervalView;
import com.devakt.modal.RoomView;
import com.devakt.repository.BookingRepository;
import com.devakt.repository.RoomRepository;
import static com.devakt.utils.DateUtils.dateToInterval;
import static com.devakt.utils.DateUtils.intervalToDate;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import com.devakt.repository.UserRepository;
import static com.devakt.utils.ParamUtils.INTERVAL_START;
import static com.devakt.utils.ParamUtils.INTERVAL_END;
import com.devakt.utils.IntegerUtils;
import com.devakt.utils.StringUtils;
import lombok.AllArgsConstructor;
import org.bson.types.ObjectId;
import org.springframework.stereotype.Service;
/**
 * Application service for rooms, availability lookups and bookings.
 * Time slots are represented as integer interval indexes between
 * ParamUtils.INTERVAL_START and INTERVAL_END; DateUtils converts between
 * indexes and their time representation.
 */
@Service
@AllArgsConstructor
public class BookingService {
// Repositories injected through Lombok's @AllArgsConstructor.
private final RoomRepository roomRepository;
private final BookingRepository bookingRepository;
private final UserRepository userRepository;
/** Returns every room, mapped to its API view. */
public List<RoomView> getRooms() {
return roomRepository
.findAll()
.stream()
.map(RoomMapper.INSTANCE::roomToRoomView)
.collect(Collectors.toList());
}
/** Returns one room by id or throws RoomNotFoundException. */
public RoomView getRoom(int roomId) {
return roomRepository.findById(roomId)
.map(RoomMapper.INSTANCE::roomToRoomView)
.orElseThrow(RoomNotFoundException::new);
}
/** Active (non-cancelled) bookings for a given room. */
public List<BookingView> getRoomBookings(int roomId) {
return bookingRepository
.findByRoomIdAndActive(roomId, true)
.stream()
.map(BookingMapper.INSTANCE::bookingToBookingView)
.collect(Collectors.toList());
}
/**
 * All bookings (active and cancelled) of the user with this email.
 * Throws LoginException when the email is unknown.
 */
public List<BookingView> getBookings(String email) {
User user = userRepository.findByEmail(email).orElseThrow(LoginException::new);
return bookingRepository
.findByUserId(user.getId())
.stream()
// Defensive re-check that each booking's user really carries this email.
.filter(b -> StringUtils.equals(b.getUser().getEmail(), email))
.map(BookingMapper.INSTANCE::bookingToBookingView)
.collect(Collectors.toList());
}
/**
 * Rooms that can hold nbPersons and are free on bookingDate for every slot
 * in [fromTime, toTime). fromTime/toTime are interval indexes passed as
 * strings; out-of-range requests yield an empty list.
 * NOTE(review): Integer.parseInt throws on non-numeric input — confirm the
 * controller validates these parameters before calling this method.
 */
public List<RoomView> getAvailableRooms(String bookingDate, int nbPersons, String fromTime, String toTime) {
int intervalFrom = Integer.parseInt(fromTime);
int intervalTo = Integer.parseInt(toTime);
if(intervalFrom < INTERVAL_START || intervalTo > INTERVAL_END) {
return Collections.emptyList();
}
// Active bookings of that day, grouped by room id.
Map<Integer, List<Booking>> bookingsMap =
bookingRepository.findByBookingDateAndActive(bookingDate, true)
.stream()
.collect(Collectors.groupingBy(b -> b.getRoom().getId(), Collectors.toList()));
// Half-open range: the slot starting at intervalTo itself is not requested.
List<Integer> userInterval = IntStream.range(intervalFrom, intervalTo).boxed().collect(Collectors.toList());
List<RoomView> rooms = new ArrayList<>();
for(Room room : roomRepository.findAll()) {
List<Booking> bookings = bookingsMap.get(room.getId());
if (verifyRoomAvailability(room, nbPersons, bookings, userInterval)) {
rooms.add(RoomMapper.INSTANCE.roomToRoomView(room));
}
}
return rooms;
}
/** The full list of one-slot intervals with their converted from/to times. */
public List<IntervalView> getIntervals () {
return IntStream.rangeClosed(INTERVAL_START, INTERVAL_END)
.boxed()
.map(i -> IntervalView.builder()
.fromTime(intervalToDate(i))
.fromTimeInt(i)
.toTime(intervalToDate(i+1))
.toTimeInt(i+1)
.build())
.collect(Collectors.toList());
}
/**
 * Persists a booking after re-checking availability. Throws LoginException
 * (unknown user), RoomNotFoundException (unknown room) or BookException
 * (slot no longer free).
 */
public void book(BookingView bookingView) {
User user = userRepository.findByEmail(bookingView.getEmail())
.orElseThrow(LoginException::new);
Room room = roomRepository.findById(bookingView.getRoomId())
.orElseThrow(RoomNotFoundException::new);
if(!verifyRoomAvailability(room.getId(), bookingView)) {
throw new BookException();
}
Booking booking = BookingMapper.INSTANCE.bookingViewToBooking(bookingView);
booking.setUser(user);
booking.setRoom(room);
booking.setActive(true);
bookingRepository.save(booking);
}
/** Intervals still free for a room on a given date. */
public List<IntervalView> getAvailableIntervals(int roomId, String bookingDate) {
// Expand every active booking into the set of slot indexes it occupies.
List<Integer> reservedInterval = bookingRepository
.findByBookingDateAndRoomIdAndActive(bookingDate, roomId, true)
.stream()
.flatMap(b -> IntStream.range(dateToInterval(b.getFromTime()), dateToInterval(b.getToTime())).boxed())
.collect(Collectors.toList());
List<Integer> allIntervals = IntStream.rangeClosed(INTERVAL_START, INTERVAL_END).boxed().collect(Collectors.toList());
return allIntervals.stream()
.filter(i -> !reservedInterval.contains(i))
.map(i -> IntervalView.builder().bookingDate(bookingDate).fromTime(intervalToDate(i)).toTime(intervalToDate(i+1)).build())
.collect(Collectors.toList());
}
/** Soft-cancels a booking: keeps the document but flags it inactive. */
public void removeBooking(ObjectId bookingId) {
Booking booking = bookingRepository.findById(bookingId).orElseThrow(RoomNotFoundException::new);
booking.setRemoveDate(new Date());
booking.setActive(false);
bookingRepository.save(booking);
}
/** Re-validates availability for a concrete booking request against current data. */
private boolean verifyRoomAvailability(Integer roomId, BookingView bookingView) {
List<Booking> bookings =
bookingRepository.findByBookingDateAndActive(bookingView.getBookingDate(), true)
.stream()
.filter(b -> IntegerUtils.equals(b.getRoom().getId(), roomId))
.collect(Collectors.toList());
List<Integer> userInterval = IntStream.range(bookingView.getFromTime(), bookingView.getToTime())
.boxed().collect(Collectors.toList());
Room room = roomRepository.findById(roomId).orElseThrow(RoomNotFoundException::new);
return verifyRoomAvailability(room, bookingView.getNbPersons(), bookings, userInterval);
}
/**
 * A room fits when it is large enough and none of the requested slot indexes
 * collide with an existing booking. bookings may be null (no bookings that
 * day for this room).
 */
private boolean verifyRoomAvailability (Room room, int nbPersons, List<Booking> bookings, List<Integer> userInterval) {
if(room.getNbPersons() < nbPersons) {
return false;
}
if(bookings == null) {
return true;
}
List<Integer> bookedIntervals = bookings.stream()
.flatMap(b -> IntStream.range(dateToInterval(b.getFromTime()), dateToInterval(b.getToTime())).boxed())
.collect(Collectors.toList());
return userInterval.stream().noneMatch(bookedIntervals::contains);
}
}
<file_sep>package com.devakt.entity;
import java.io.Serializable;
import java.util.Date;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.experimental.FieldDefaults;
import org.bson.types.ObjectId;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.DBRef;
import org.springframework.data.mongodb.core.mapping.Document;
/**
 * MongoDB document for a single room reservation. Lombok supplies
 * accessors, builder and both constructors.
 */
@Data
@Builder
@FieldDefaults(level = AccessLevel.PRIVATE)
@AllArgsConstructor
@NoArgsConstructor
@Document
public class Booking implements Serializable {
@Id
ObjectId id;
@DBRef
User user; // who made the booking
@DBRef
Room room; // which room is booked
String description;
String bookingDate; // NOTE(review): date/time fields are stored as strings — format assumed consistent across callers; confirm
String fromTime;
String toTime;
int nbPersons;
Date insertDate;
boolean active; // set to false when the booking is cancelled (see BookingService.removeBooking)
Date removeDate; // set at cancellation time
}
<file_sep>package com.devakt.utils;
public class IntegerUtils {
	private IntegerUtils() {
		// Utility class: no instances.
	}
	/**
	 * Null-safe value equality for boxed Integers.
	 *
	 * @return true when both are null, or both non-null with the same int value
	 */
	public static boolean equals(Integer integer1, Integer integer2) {
		// Objects.equals delegates to Integer#equals, which compares the
		// underlying int values exactly like the original hand-rolled checks.
		return java.util.Objects.equals(integer1, integer2);
	}
}
<file_sep>package com.devakt.controller;
import com.devakt.service.InitDatabaseService;
import lombok.AllArgsConstructor;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.CrossOrigin;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
/**
 * Administrative endpoint that (re)seeds the database with demo data.
 */
@RestController
@CrossOrigin(origins = {"http://localhost:3000","http://devakt.ddns.net","http://devakt.ddns.net:3000"}, maxAge = 3600)
@AllArgsConstructor
public class InitController {
	private final InitDatabaseService initDatabaseService;
	/**
	 * Triggers data initialisation and replies 204 No Content.
	 * NOTE(review): this endpoint is unauthenticated — anyone who can reach
	 * it can reinitialise the data; confirm whether it should be protected.
	 */
	@GetMapping(value = "/init")
	@ResponseBody
	public ResponseEntity<Void> init() {
		initDatabaseService.initData();
		// Raw ResponseEntity parameterised with Void: identical response,
		// no raw-type warning.
		return ResponseEntity.noContent().build();
	}
}
<file_sep>package com.devakt.exception;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.ResponseStatus;
/**
 * Thrown when a booking collides with an existing one; mapped to HTTP 409 Conflict.
 */
@ResponseStatus(value = HttpStatus.CONFLICT)
public class BookException extends RuntimeException {
	private static final long serialVersionUID = 1L;
	public BookException() {
		// The message is fixed, so the original String.format() call
		// (which had no placeholders or arguments) was dropped.
		super("Une erreur est survenue lors de la réservation, la salle n'est plus disponible à l'horaire choisi");
	}
}
<file_sep>package com.devakt.utils;
public class StringUtils {
	private StringUtils() {
		// Utility class: no instances.
	}
	/**
	 * Null-safe equality for strings.
	 *
	 * @return true when both are null, or both non-null and equal
	 */
	public static boolean equals(String str1, String str2) {
		// Same contract as the original explicit null checks + String#equals.
		return java.util.Objects.equals(str1, str2);
	}
}
| 8ca8ac8c958789c2c3fd7cc7de31809c0e70cb77 | [
"Java"
] | 8 | Java | lordsm11/meeting-room-booking | 08b325f6dbf7371dd1cbef9a88d93dd6d4bf18ec | 0b02a1ec57ba2949b2b8b59dc0a4ef2529347114 |
refs/heads/master | <repo_name>anarmawala/HUHY-Backend<file_sep>/routes/index.js
var express = require('express');
var Input = require('../models/input.js')
var router = express.Router();
// POST /questionnaire — persists a submitted questionnaire.
// Identity fields are split off the body; whatever remains is stored as the answers.
router.post("/questionnaire", function(req, res, next) {
  const personName = req.body.name;
  const zip = req.body.zip;
  const email = req.body.email;
  delete req.body.name;
  delete req.body.zip;
  delete req.body.email;

  var newInput = new Input({
    date: Date(), // NOTE(review): Date() yields a string; `new Date()` may be intended — confirm against the Input schema
    user: {
      name: personName,
      zip: zip,
      email: email
    },
    answers: req.body
  });

  // BUG FIX: respond only after the document is persisted. The original sent
  // "Success" immediately and threw inside the save callback on error, which
  // crashes the process instead of returning an error response. Errors are
  // now forwarded to Express's error handler via next(err).
  newInput.save(function(err) {
    if (err) {
      return next(err);
    }
    res.send("Success");
  });
});
// GET /kiosk — unimplemented: zip is read but not yet used, and the
// response is an empty placeholder object.
router.get('/kiosk', function(req, res, next) {
var zip = req.query.zip;
res.json({}); //PLACEHOLDER
});
module.exports = router;
| b32235d4bce0dec23806736d9188d597a56a7828 | [
"JavaScript"
] | 1 | JavaScript | anarmawala/HUHY-Backend | a93abdce3912719006799e8639c47c8cafe915cb | 9db93408f2e7438639c0d7e754fc34ab8aa44517 |
refs/heads/main | <repo_name>jan-janssen/conda-forge-contribution<file_sep>/.ci_support/readmetemplate.md
# conda-forge-contribution
[](https://github.com/jan-janssen/conda-forge-contribution/actions)
This repository allows you to quickly generate a list of all your [conda-forge](https://conda-forge.org) contributions.
For example the contributions of [jan-janssen](https://github.com/jan-janssen) are available at [https://jan-janssen.github.io/conda-forge-contribution/](https://jan-janssen.github.io/conda-forge-contribution/).
To generate your own contribution-list, simply fork this repository and set the environment variable `GH_TOKEN` as a [github action secret](https://docs.github.com/en/actions/reference/encrypted-secrets#creating-encrypted-secrets-for-a-repository):
```
GH_TOKEN = <your Github token which enables access to public_repo and read:org>
```
For the token the following permissions are required:

After creating the environment variable `GH_TOKEN` trigger a new build on the master branch.
Designed by [colorlib](https://colorlib.com/wp/template/responsive-table-v2/).
<file_sep>/README.md
# conda-forge-contribution
[](https://github.com/jan-janssen/conda-forge-contribution/actions)
This repository allows you to quickly generate a list of all your [conda-forge](https://conda-forge.org) contributions.
For example the contributions of [jan-janssen](https://github.com/jan-janssen) are available at [https://jan-janssen.github.io/conda-forge-contribution/](https://jan-janssen.github.io/conda-forge-contribution/).
To generate your own contribution-list, simply fork this repository and set the environment variable `GH_TOKEN` as a [github action secret](https://docs.github.com/en/actions/reference/encrypted-secrets#creating-encrypted-secrets-for-a-repository):
```
GH_TOKEN = <your Github token which enables access to public_repo and read:org>
```
For the token the following permissions are required:

After creating the environment variable `GH_TOKEN` trigger a new build on the master branch.
Designed by [colorlib](https://colorlib.com/wp/template/responsive-table-v2/).
# List of Packages
| Package Name | Downloads |
|:-------------|:---------:|
| [pint](https://anaconda.org/conda-forge/pint) | [](https://anaconda.org/conda-forge/pint) |
| [h5py](https://anaconda.org/conda-forge/h5py) | [](https://anaconda.org/conda-forge/h5py) |
| [pystache](https://anaconda.org/conda-forge/pystache) | [](https://anaconda.org/conda-forge/pystache) |
| [pyzmq](https://anaconda.org/conda-forge/pyzmq) | [](https://anaconda.org/conda-forge/pyzmq) |
| [pyparsing](https://anaconda.org/conda-forge/pyparsing) | [](https://anaconda.org/conda-forge/pyparsing) |
| [phonenumbers](https://anaconda.org/conda-forge/phonenumbers) | [](https://anaconda.org/conda-forge/phonenumbers) |
| [mutagen](https://anaconda.org/conda-forge/mutagen) | [](https://anaconda.org/conda-forge/mutagen) |
| [pymongo](https://anaconda.org/conda-forge/pymongo) | [](https://anaconda.org/conda-forge/pymongo) |
| [oauthlib](https://anaconda.org/conda-forge/oauthlib) | [](https://anaconda.org/conda-forge/oauthlib) |
| [pydruid](https://anaconda.org/conda-forge/pydruid) | [](https://anaconda.org/conda-forge/pydruid) |
| [flask-oauthlib](https://anaconda.org/conda-forge/flask-oauthlib) | [](https://anaconda.org/conda-forge/flask-oauthlib) |
| [pytrie](https://anaconda.org/conda-forge/pytrie) | [](https://anaconda.org/conda-forge/pytrie) |
| [voluptuous](https://anaconda.org/conda-forge/voluptuous) | [](https://anaconda.org/conda-forge/voluptuous) |
| [girder-client](https://anaconda.org/conda-forge/girder-client) | [](https://anaconda.org/conda-forge/girder-client) |
| [girder](https://anaconda.org/conda-forge/girder) | [](https://anaconda.org/conda-forge/girder) |
| [scandir](https://anaconda.org/conda-forge/scandir) | [](https://anaconda.org/conda-forge/scandir) |
| [f90wrap](https://anaconda.org/conda-forge/f90wrap) | [](https://anaconda.org/conda-forge/f90wrap) |
| [speechrecognition](https://anaconda.org/conda-forge/speechrecognition) | [](https://anaconda.org/conda-forge/speechrecognition) |
| [qds-sdk](https://anaconda.org/conda-forge/qds-sdk) | [](https://anaconda.org/conda-forge/qds-sdk) |
| [cement](https://anaconda.org/conda-forge/cement) | [](https://anaconda.org/conda-forge/cement) |
| [tldextract](https://anaconda.org/conda-forge/tldextract) | [](https://anaconda.org/conda-forge/tldextract) |
| [wtf-peewee](https://anaconda.org/conda-forge/wtf-peewee) | [](https://anaconda.org/conda-forge/wtf-peewee) |
| [cloudant](https://anaconda.org/conda-forge/cloudant) | [](https://anaconda.org/conda-forge/cloudant) |
| [libhwloc](https://anaconda.org/conda-forge/libhwloc) | [](https://anaconda.org/conda-forge/libhwloc) |
| [imapclient](https://anaconda.org/conda-forge/imapclient) | [](https://anaconda.org/conda-forge/imapclient) |
| [flask-seasurf](https://anaconda.org/conda-forge/flask-seasurf) | [](https://anaconda.org/conda-forge/flask-seasurf) |
| [flask-restful](https://anaconda.org/conda-forge/flask-restful) | [](https://anaconda.org/conda-forge/flask-restful) |
| [flask-user](https://anaconda.org/conda-forge/flask-user) | [](https://anaconda.org/conda-forge/flask-user) |
| [creoleparser](https://anaconda.org/conda-forge/creoleparser) | [](https://anaconda.org/conda-forge/creoleparser) |
| [zodburi](https://anaconda.org/conda-forge/zodburi) | [](https://anaconda.org/conda-forge/zodburi) |
| [sseclient](https://anaconda.org/conda-forge/sseclient) | [](https://anaconda.org/conda-forge/sseclient) |
| [flask-moment](https://anaconda.org/conda-forge/flask-moment) | [](https://anaconda.org/conda-forge/flask-moment) |
| [flask-cors](https://anaconda.org/conda-forge/flask-cors) | [](https://anaconda.org/conda-forge/flask-cors) |
| [zodbpickle](https://anaconda.org/conda-forge/zodbpickle) | [](https://anaconda.org/conda-forge/zodbpickle) |
| [dominate](https://anaconda.org/conda-forge/dominate) | [](https://anaconda.org/conda-forge/dominate) |
| [flask-debugtoolbar](https://anaconda.org/conda-forge/flask-debugtoolbar) | [](https://anaconda.org/conda-forge/flask-debugtoolbar) |
| [msg-extractor](https://anaconda.org/conda-forge/msg-extractor) | [](https://anaconda.org/conda-forge/msg-extractor) |
| [flask-flatpages](https://anaconda.org/conda-forge/flask-flatpages) | [](https://anaconda.org/conda-forge/flask-flatpages) |
| [textract](https://anaconda.org/conda-forge/textract) | [](https://anaconda.org/conda-forge/textract) |
| [census](https://anaconda.org/conda-forge/census) | [](https://anaconda.org/conda-forge/census) |
| [zconfig](https://anaconda.org/conda-forge/zconfig) | [](https://anaconda.org/conda-forge/zconfig) |
| [flask-migrate](https://anaconda.org/conda-forge/flask-migrate) | [](https://anaconda.org/conda-forge/flask-migrate) |
| [fenics](https://anaconda.org/conda-forge/fenics) | [](https://anaconda.org/conda-forge/fenics) |
| [petsc](https://anaconda.org/conda-forge/petsc) | [](https://anaconda.org/conda-forge/petsc) |
| [phonopy](https://anaconda.org/conda-forge/phonopy) | [](https://anaconda.org/conda-forge/phonopy) |
| [monty](https://anaconda.org/conda-forge/monty) | [](https://anaconda.org/conda-forge/monty) |
| [mshr](https://anaconda.org/conda-forge/mshr) | [](https://anaconda.org/conda-forge/mshr) |
| [leather](https://anaconda.org/conda-forge/leather) | [](https://anaconda.org/conda-forge/leather) |
| [agate](https://anaconda.org/conda-forge/agate) | [](https://anaconda.org/conda-forge/agate) |
| [agate-dbf](https://anaconda.org/conda-forge/agate-dbf) | [](https://anaconda.org/conda-forge/agate-dbf) |
| [agate-sql](https://anaconda.org/conda-forge/agate-sql) | [](https://anaconda.org/conda-forge/agate-sql) |
| [csvkit](https://anaconda.org/conda-forge/csvkit) | [](https://anaconda.org/conda-forge/csvkit) |
| [kafkacat](https://anaconda.org/conda-forge/kafkacat) | [](https://anaconda.org/conda-forge/kafkacat) |
| [rope](https://anaconda.org/conda-forge/rope) | [](https://anaconda.org/conda-forge/rope) |
| [rednose](https://anaconda.org/conda-forge/rednose) | [](https://anaconda.org/conda-forge/rednose) |
| [waybackpack](https://anaconda.org/conda-forge/waybackpack) | [](https://anaconda.org/conda-forge/waybackpack) |
| [markovify](https://anaconda.org/conda-forge/markovify) | [](https://anaconda.org/conda-forge/markovify) |
| [jieba](https://anaconda.org/conda-forge/jieba) | [](https://anaconda.org/conda-forge/jieba) |
| [jaraco.logging](https://anaconda.org/conda-forge/jaraco.logging) | [](https://anaconda.org/conda-forge/jaraco.logging) |
| [jaraco.stream](https://anaconda.org/conda-forge/jaraco.stream) | [](https://anaconda.org/conda-forge/jaraco.stream) |
| [records](https://anaconda.org/conda-forge/records) | [](https://anaconda.org/conda-forge/records) |
| [lesscpy](https://anaconda.org/conda-forge/lesscpy) | [](https://anaconda.org/conda-forge/lesscpy) |
| [twython](https://anaconda.org/conda-forge/twython) | [](https://anaconda.org/conda-forge/twython) |
| [transaction](https://anaconda.org/conda-forge/transaction) | [](https://anaconda.org/conda-forge/transaction) |
| [zodb](https://anaconda.org/conda-forge/zodb) | [](https://anaconda.org/conda-forge/zodb) |
| [zeo](https://anaconda.org/conda-forge/zeo) | [](https://anaconda.org/conda-forge/zeo) |
| [google-auth](https://anaconda.org/conda-forge/google-auth) | [](https://anaconda.org/conda-forge/google-auth) |
| [google-auth-oauthlib](https://anaconda.org/conda-forge/google-auth-oauthlib) | [](https://anaconda.org/conda-forge/google-auth-oauthlib) |
| [dicttoxml](https://anaconda.org/conda-forge/dicttoxml) | [](https://anaconda.org/conda-forge/dicttoxml) |
| [moto](https://anaconda.org/conda-forge/moto) | [](https://anaconda.org/conda-forge/moto) |
| [pyperclip](https://anaconda.org/conda-forge/pyperclip) | [](https://anaconda.org/conda-forge/pyperclip) |
| [furl](https://anaconda.org/conda-forge/furl) | [](https://anaconda.org/conda-forge/furl) |
| [pyside2](https://anaconda.org/conda-forge/pyside2) | [](https://anaconda.org/conda-forge/pyside2) |
| [infinity](https://anaconda.org/conda-forge/infinity) | [](https://anaconda.org/conda-forge/infinity) |
| [intervals](https://anaconda.org/conda-forge/intervals) | [](https://anaconda.org/conda-forge/intervals) |
| [dnspython](https://anaconda.org/conda-forge/dnspython) | [](https://anaconda.org/conda-forge/dnspython) |
| [langdetect](https://anaconda.org/conda-forge/langdetect) | [](https://anaconda.org/conda-forge/langdetect) |
| [dotmap](https://anaconda.org/conda-forge/dotmap) | [](https://anaconda.org/conda-forge/dotmap) |
| [plac](https://anaconda.org/conda-forge/plac) | [](https://anaconda.org/conda-forge/plac) |
| [aenum](https://anaconda.org/conda-forge/aenum) | [](https://anaconda.org/conda-forge/aenum) |
| [scrypt](https://anaconda.org/conda-forge/scrypt) | [](https://anaconda.org/conda-forge/scrypt) |
| [siesta](https://anaconda.org/conda-forge/siesta) | [](https://anaconda.org/conda-forge/siesta) |
| [oletools](https://anaconda.org/conda-forge/oletools) | [](https://anaconda.org/conda-forge/oletools) |
| [h5io](https://anaconda.org/conda-forge/h5io) | [](https://anaconda.org/conda-forge/h5io) |
| [python-box](https://anaconda.org/conda-forge/python-box) | [](https://anaconda.org/conda-forge/python-box) |
| [pyiron](https://anaconda.org/conda-forge/pyiron) | [](https://anaconda.org/conda-forge/pyiron) |
| [pyiron_atomistics](https://anaconda.org/conda-forge/pyiron_atomistics) | [](https://anaconda.org/conda-forge/pyiron_atomistics) |
| [pyiron_base](https://anaconda.org/conda-forge/pyiron_base) | [](https://anaconda.org/conda-forge/pyiron_base) |
| [pyiron_dft](https://anaconda.org/conda-forge/pyiron_dft) | [](https://anaconda.org/conda-forge/pyiron_dft) |
| [pyiron_example_job](https://anaconda.org/conda-forge/pyiron_example_job) | [](https://anaconda.org/conda-forge/pyiron_example_job) |
| [pyiron_lammps](https://anaconda.org/conda-forge/pyiron_lammps) | [](https://anaconda.org/conda-forge/pyiron_lammps) |
| [pyiron_vasp](https://anaconda.org/conda-forge/pyiron_vasp) | [](https://anaconda.org/conda-forge/pyiron_vasp) |
| [lammps](https://anaconda.org/conda-forge/lammps) | [](https://anaconda.org/conda-forge/lammps) |
| [libsigsegv](https://anaconda.org/conda-forge/libsigsegv) | [](https://anaconda.org/conda-forge/libsigsegv) |
| [ovito](https://anaconda.org/conda-forge/ovito) | [](https://anaconda.org/conda-forge/ovito) |
| [python-crontab](https://anaconda.org/conda-forge/python-crontab) | [](https://anaconda.org/conda-forge/python-crontab) |
| [simpletraj](https://anaconda.org/conda-forge/simpletraj) | [](https://anaconda.org/conda-forge/simpletraj) |
| [atomsk](https://anaconda.org/conda-forge/atomsk) | [](https://anaconda.org/conda-forge/atomsk) |
| [astral](https://anaconda.org/conda-forge/astral) | [](https://anaconda.org/conda-forge/astral) |
| [python-flint](https://anaconda.org/conda-forge/python-flint) | [](https://anaconda.org/conda-forge/python-flint) |
| [bcrypt](https://anaconda.org/conda-forge/bcrypt) | [](https://anaconda.org/conda-forge/bcrypt) |
| [pysqa](https://anaconda.org/conda-forge/pysqa) | [](https://anaconda.org/conda-forge/pysqa) |
| [codacy-coverage](https://anaconda.org/conda-forge/codacy-coverage) | [](https://anaconda.org/conda-forge/codacy-coverage) |
| [openbabel](https://anaconda.org/conda-forge/openbabel) | [](https://anaconda.org/conda-forge/openbabel) |
| [pyotp](https://anaconda.org/conda-forge/pyotp) | [](https://anaconda.org/conda-forge/pyotp) |
| [libxc](https://anaconda.org/conda-forge/libxc) | [](https://anaconda.org/conda-forge/libxc) |
| [dtool_s3](https://anaconda.org/conda-forge/dtool_s3) | [](https://anaconda.org/conda-forge/dtool_s3) |
| [dtool_http](https://anaconda.org/conda-forge/dtool_http) | [](https://anaconda.org/conda-forge/dtool_http) |
| [dtool_cli](https://anaconda.org/conda-forge/dtool_cli) | [](https://anaconda.org/conda-forge/dtool_cli) |
| [dtool_create](https://anaconda.org/conda-forge/dtool_create) | [](https://anaconda.org/conda-forge/dtool_create) |
| [dtoolcore](https://anaconda.org/conda-forge/dtoolcore) | [](https://anaconda.org/conda-forge/dtoolcore) |
| [dtool_info](https://anaconda.org/conda-forge/dtool_info) | [](https://anaconda.org/conda-forge/dtool_info) |
| [dtool_symlink](https://anaconda.org/conda-forge/dtool_symlink) | [](https://anaconda.org/conda-forge/dtool_symlink) |
| [dtool_config](https://anaconda.org/conda-forge/dtool_config) | [](https://anaconda.org/conda-forge/dtool_config) |
| [dtool](https://anaconda.org/conda-forge/dtool) | [](https://anaconda.org/conda-forge/dtool) |
| [gpaw](https://anaconda.org/conda-forge/gpaw) | [](https://anaconda.org/conda-forge/gpaw) |
| [catlearn](https://anaconda.org/conda-forge/catlearn) | [](https://anaconda.org/conda-forge/catlearn) |
| [pylops](https://anaconda.org/conda-forge/pylops) | [](https://anaconda.org/conda-forge/pylops) |
| [scikit-fmm](https://anaconda.org/conda-forge/scikit-fmm) | [](https://anaconda.org/conda-forge/scikit-fmm) |
| [cfn-lint](https://anaconda.org/conda-forge/cfn-lint) | [](https://anaconda.org/conda-forge/cfn-lint) |
| [libssh](https://anaconda.org/conda-forge/libssh) | [](https://anaconda.org/conda-forge/libssh) |
| [cabextract](https://anaconda.org/conda-forge/cabextract) | [](https://anaconda.org/conda-forge/cabextract) |
| [mscorefonts](https://anaconda.org/conda-forge/mscorefonts) | [](https://anaconda.org/conda-forge/mscorefonts) |
| [cm-unicode](https://anaconda.org/conda-forge/cm-unicode) | [](https://anaconda.org/conda-forge/cm-unicode) |
| [randspg](https://anaconda.org/conda-forge/randspg) | [](https://anaconda.org/conda-forge/randspg) |
| [seekpath](https://anaconda.org/conda-forge/seekpath) | [](https://anaconda.org/conda-forge/seekpath) |
| [icet](https://anaconda.org/conda-forge/icet) | [](https://anaconda.org/conda-forge/icet) |
| [mendeleev](https://anaconda.org/conda-forge/mendeleev) | [](https://anaconda.org/conda-forge/mendeleev) |
| [custodian](https://anaconda.org/conda-forge/custodian) | [](https://anaconda.org/conda-forge/custodian) |
| [molmod](https://anaconda.org/conda-forge/molmod) | [](https://anaconda.org/conda-forge/molmod) |
| [pymatgen](https://anaconda.org/conda-forge/pymatgen) | [](https://anaconda.org/conda-forge/pymatgen) |
| [yaff](https://anaconda.org/conda-forge/yaff) | [](https://anaconda.org/conda-forge/yaff) |
| [sqsgenerator](https://anaconda.org/conda-forge/sqsgenerator) | [](https://anaconda.org/conda-forge/sqsgenerator) |
| [sharedarray](https://anaconda.org/conda-forge/sharedarray) | [](https://anaconda.org/conda-forge/sharedarray) |
| [scisweeper](https://anaconda.org/conda-forge/scisweeper) | [](https://anaconda.org/conda-forge/scisweeper) |
| [pyfileindex](https://anaconda.org/conda-forge/pyfileindex) | [](https://anaconda.org/conda-forge/pyfileindex) |
| [flask-paginate](https://anaconda.org/conda-forge/flask-paginate) | [](https://anaconda.org/conda-forge/flask-paginate) |
| [pymatgen-diffusion](https://anaconda.org/conda-forge/pymatgen-diffusion) | [](https://anaconda.org/conda-forge/pymatgen-diffusion) |
| [smoqe](https://anaconda.org/conda-forge/smoqe) | [](https://anaconda.org/conda-forge/smoqe) |
| [pymatgen-db](https://anaconda.org/conda-forge/pymatgen-db) | [](https://anaconda.org/conda-forge/pymatgen-db) |
| [fireworks](https://anaconda.org/conda-forge/fireworks) | [](https://anaconda.org/conda-forge/fireworks) |
| [atomate](https://anaconda.org/conda-forge/atomate) | [](https://anaconda.org/conda-forge/atomate) |
| [pyhull](https://anaconda.org/conda-forge/pyhull) | [](https://anaconda.org/conda-forge/pyhull) |
| [sphinxdft](https://anaconda.org/conda-forge/sphinxdft) | [](https://anaconda.org/conda-forge/sphinxdft) |
| [dscribe](https://anaconda.org/conda-forge/dscribe) | [](https://anaconda.org/conda-forge/dscribe) |
| [soaplite](https://anaconda.org/conda-forge/soaplite) | [](https://anaconda.org/conda-forge/soaplite) |
| [schnetpack](https://anaconda.org/conda-forge/schnetpack) | [](https://anaconda.org/conda-forge/schnetpack) |
| [libxsmm](https://anaconda.org/conda-forge/libxsmm) | [](https://anaconda.org/conda-forge/libxsmm) |
| [fypp](https://anaconda.org/conda-forge/fypp) | [](https://anaconda.org/conda-forge/fypp) |
| [cp2k](https://anaconda.org/conda-forge/cp2k) | [](https://anaconda.org/conda-forge/cp2k) |
| [pycp2k](https://anaconda.org/conda-forge/pycp2k) | [](https://anaconda.org/conda-forge/pycp2k) |
| [mapbox-vector-tile](https://anaconda.org/conda-forge/mapbox-vector-tile) | [](https://anaconda.org/conda-forge/mapbox-vector-tile) |
| [tamkin](https://anaconda.org/conda-forge/tamkin) | [](https://anaconda.org/conda-forge/tamkin) |
| [phonolammps](https://anaconda.org/conda-forge/phonolammps) | [](https://anaconda.org/conda-forge/phonolammps) |
| [dynaphopy](https://anaconda.org/conda-forge/dynaphopy) | [](https://anaconda.org/conda-forge/dynaphopy) |
| [owlready2](https://anaconda.org/conda-forge/owlready2) | [](https://anaconda.org/conda-forge/owlready2) |
| [flask-smorest](https://anaconda.org/conda-forge/flask-smorest) | [](https://anaconda.org/conda-forge/flask-smorest) |
| [atomicrex](https://anaconda.org/conda-forge/atomicrex) | [](https://anaconda.org/conda-forge/atomicrex) |
| [mdf_forge](https://anaconda.org/conda-forge/mdf_forge) | [](https://anaconda.org/conda-forge/mdf_forge) |
| [mdf_toolbox](https://anaconda.org/conda-forge/mdf_toolbox) | [](https://anaconda.org/conda-forge/mdf_toolbox) |
| [matminer](https://anaconda.org/conda-forge/matminer) | [](https://anaconda.org/conda-forge/matminer) |
| [aflow](https://anaconda.org/conda-forge/aflow) | [](https://anaconda.org/conda-forge/aflow) |
| [globus-nexus-client](https://anaconda.org/conda-forge/globus-nexus-client) | [](https://anaconda.org/conda-forge/globus-nexus-client) |
| [kivy-garden](https://anaconda.org/conda-forge/kivy-garden) | [](https://anaconda.org/conda-forge/kivy-garden) |
| [clease](https://anaconda.org/conda-forge/clease) | [](https://anaconda.org/conda-forge/clease) |
| [pyscal](https://anaconda.org/conda-forge/pyscal) | [](https://anaconda.org/conda-forge/pyscal) |
| [fair-research-login](https://anaconda.org/conda-forge/fair-research-login) | [](https://anaconda.org/conda-forge/fair-research-login) |
| [edn_format](https://anaconda.org/conda-forge/edn_format) | [](https://anaconda.org/conda-forge/edn_format) |
| [pyrfc3339](https://anaconda.org/conda-forge/pyrfc3339) | [](https://anaconda.org/conda-forge/pyrfc3339) |
| [kim-api](https://anaconda.org/conda-forge/kim-api) | [](https://anaconda.org/conda-forge/kim-api) |
| [proteus](https://anaconda.org/conda-forge/proteus) | [](https://anaconda.org/conda-forge/proteus) |
| [kimpy](https://anaconda.org/conda-forge/kimpy) | [](https://anaconda.org/conda-forge/kimpy) |
| [openkimmodels](https://anaconda.org/conda-forge/openkimmodels) | [](https://anaconda.org/conda-forge/openkimmodels) |
| [rocm-smi](https://anaconda.org/conda-forge/rocm-smi) | [](https://anaconda.org/conda-forge/rocm-smi) |
| [openkim-models](https://anaconda.org/conda-forge/openkim-models) | [](https://anaconda.org/conda-forge/openkim-models) |
| [numericalunits](https://anaconda.org/conda-forge/numericalunits) | [](https://anaconda.org/conda-forge/numericalunits) |
| [potentials](https://anaconda.org/conda-forge/potentials) | [](https://anaconda.org/conda-forge/potentials) |
| [datamodeldict](https://anaconda.org/conda-forge/datamodeldict) | [](https://anaconda.org/conda-forge/datamodeldict) |
| [atomman](https://anaconda.org/conda-forge/atomman) | [](https://anaconda.org/conda-forge/atomman) |
| [recordtype](https://anaconda.org/conda-forge/recordtype) | [](https://anaconda.org/conda-forge/recordtype) |
| [conda-merge](https://anaconda.org/conda-forge/conda-merge) | [](https://anaconda.org/conda-forge/conda-merge) |
| [kliff](https://anaconda.org/conda-forge/kliff) | [](https://anaconda.org/conda-forge/kliff) |
| [parsplice](https://anaconda.org/conda-forge/parsplice) | [](https://anaconda.org/conda-forge/parsplice) |
| [damask](https://anaconda.org/conda-forge/damask) | [](https://anaconda.org/conda-forge/damask) |
| [phono3py](https://anaconda.org/conda-forge/phono3py) | [](https://anaconda.org/conda-forge/phono3py) |
| [wulffpack](https://anaconda.org/conda-forge/wulffpack) | [](https://anaconda.org/conda-forge/wulffpack) |
| [dynasor](https://anaconda.org/conda-forge/dynasor) | [](https://anaconda.org/conda-forge/dynasor) |
| [hiphive](https://anaconda.org/conda-forge/hiphive) | [](https://anaconda.org/conda-forge/hiphive) |
| [pylammpsmpi](https://anaconda.org/conda-forge/pylammpsmpi) | [](https://anaconda.org/conda-forge/pylammpsmpi) |
| [quickff](https://anaconda.org/conda-forge/quickff) | [](https://anaconda.org/conda-forge/quickff) |
| [python-keycloak](https://anaconda.org/conda-forge/python-keycloak) | [](https://anaconda.org/conda-forge/python-keycloak) |
| [jupyterlab_latex](https://anaconda.org/conda-forge/jupyterlab_latex) | [](https://anaconda.org/conda-forge/jupyterlab_latex) |
| [gifsicle](https://anaconda.org/conda-forge/gifsicle) | [](https://anaconda.org/conda-forge/gifsicle) |
| [pymatnest](https://anaconda.org/conda-forge/pymatnest) | [](https://anaconda.org/conda-forge/pymatnest) |
| [fox](https://anaconda.org/conda-forge/fox) | [](https://anaconda.org/conda-forge/fox) |
| [jupyterlab-latex](https://anaconda.org/conda-forge/jupyterlab-latex) | [](https://anaconda.org/conda-forge/jupyterlab-latex) |
| [qe](https://anaconda.org/conda-forge/qe) | [](https://anaconda.org/conda-forge/qe) |
| [pwtools](https://anaconda.org/conda-forge/pwtools) | [](https://anaconda.org/conda-forge/pwtools) |
| [click-shell](https://anaconda.org/conda-forge/click-shell) | [](https://anaconda.org/conda-forge/click-shell) |
| [temmeta](https://anaconda.org/conda-forge/temmeta) | [](https://anaconda.org/conda-forge/temmeta) |
| [abipy](https://anaconda.org/conda-forge/abipy) | [](https://anaconda.org/conda-forge/abipy) |
| [periodic-table-plotter](https://anaconda.org/conda-forge/periodic-table-plotter) | [](https://anaconda.org/conda-forge/periodic-table-plotter) |
| [atomicfile](https://anaconda.org/conda-forge/atomicfile) | [](https://anaconda.org/conda-forge/atomicfile) |
| [pseudo_dojo](https://anaconda.org/conda-forge/pseudo_dojo) | [](https://anaconda.org/conda-forge/pseudo_dojo) |
| [abinit](https://anaconda.org/conda-forge/abinit) | [](https://anaconda.org/conda-forge/abinit) |
| [gpaw-data](https://anaconda.org/conda-forge/gpaw-data) | [](https://anaconda.org/conda-forge/gpaw-data) |
| [atompaw](https://anaconda.org/conda-forge/atompaw) | [](https://anaconda.org/conda-forge/atompaw) |
| [pytorch-lightning](https://anaconda.org/conda-forge/pytorch-lightning) | [](https://anaconda.org/conda-forge/pytorch-lightning) |
| [sphinxdft-data](https://anaconda.org/conda-forge/sphinxdft-data) | [](https://anaconda.org/conda-forge/sphinxdft-data) |
| [iprpy](https://anaconda.org/conda-forge/iprpy) | [](https://anaconda.org/conda-forge/iprpy) |
| [iprpy-data](https://anaconda.org/conda-forge/iprpy-data) | [](https://anaconda.org/conda-forge/iprpy-data) |
| [abinit-data](https://anaconda.org/conda-forge/abinit-data) | [](https://anaconda.org/conda-forge/abinit-data) |
| [pyiron-data](https://anaconda.org/conda-forge/pyiron-data) | [](https://anaconda.org/conda-forge/pyiron-data) |
| [pyprocar](https://anaconda.org/conda-forge/pyprocar) | [](https://anaconda.org/conda-forge/pyprocar) |
| [pychemia](https://anaconda.org/conda-forge/pychemia) | [](https://anaconda.org/conda-forge/pychemia) |
| [match-series](https://anaconda.org/conda-forge/match-series) | [](https://anaconda.org/conda-forge/match-series) |
| [scanf](https://anaconda.org/conda-forge/scanf) | [](https://anaconda.org/conda-forge/scanf) |
| [chemics](https://anaconda.org/conda-forge/chemics) | [](https://anaconda.org/conda-forge/chemics) |
| [pystem](https://anaconda.org/conda-forge/pystem) | [](https://anaconda.org/conda-forge/pystem) |
| [py4dstem](https://anaconda.org/conda-forge/py4dstem) | [](https://anaconda.org/conda-forge/py4dstem) |
| [rich](https://anaconda.org/conda-forge/rich) | [](https://anaconda.org/conda-forge/rich) |
| [lazy-property](https://anaconda.org/conda-forge/lazy-property) | [](https://anaconda.org/conda-forge/lazy-property) |
| [wannierberri](https://anaconda.org/conda-forge/wannierberri) | [](https://anaconda.org/conda-forge/wannierberri) |
| [povray](https://anaconda.org/conda-forge/povray) | [](https://anaconda.org/conda-forge/povray) |
| [sumo](https://anaconda.org/conda-forge/sumo) | [](https://anaconda.org/conda-forge/sumo) |
| [nptyping](https://anaconda.org/conda-forge/nptyping) | [](https://anaconda.org/conda-forge/nptyping) |
| [mir-flare](https://anaconda.org/conda-forge/mir-flare) | [](https://anaconda.org/conda-forge/mir-flare) |
| [typish](https://anaconda.org/conda-forge/typish) | [](https://anaconda.org/conda-forge/typish) |
| [vapory](https://anaconda.org/conda-forge/vapory) | [](https://anaconda.org/conda-forge/vapory) |
| [sidpy](https://anaconda.org/conda-forge/sidpy) | [](https://anaconda.org/conda-forge/sidpy) |
| [getent](https://anaconda.org/conda-forge/getent) | [](https://anaconda.org/conda-forge/getent) |
| [pyiron_contrib](https://anaconda.org/conda-forge/pyiron_contrib) | [](https://anaconda.org/conda-forge/pyiron_contrib) |
| [pslibrary](https://anaconda.org/conda-forge/pslibrary) | [](https://anaconda.org/conda-forge/pslibrary) |
| [abtem](https://anaconda.org/conda-forge/abtem) | [](https://anaconda.org/conda-forge/abtem) |
| [wannier90](https://anaconda.org/conda-forge/wannier90) | [](https://anaconda.org/conda-forge/wannier90) |
| [doi2bib](https://anaconda.org/conda-forge/doi2bib) | [](https://anaconda.org/conda-forge/doi2bib) |
| [aalto-boss](https://anaconda.org/conda-forge/aalto-boss) | [](https://anaconda.org/conda-forge/aalto-boss) |
| [djangorestframework-queryfields](https://anaconda.org/conda-forge/djangorestframework-queryfields) | [](https://anaconda.org/conda-forge/djangorestframework-queryfields) |
| [qmpy](https://anaconda.org/conda-forge/qmpy) | [](https://anaconda.org/conda-forge/qmpy) |
| [optimade](https://anaconda.org/conda-forge/optimade) | [](https://anaconda.org/conda-forge/optimade) |
| [matid](https://anaconda.org/conda-forge/matid) | [](https://anaconda.org/conda-forge/matid) |
| [chronic](https://anaconda.org/conda-forge/chronic) | [](https://anaconda.org/conda-forge/chronic) |
| [pynng](https://anaconda.org/conda-forge/pynng) | [](https://anaconda.org/conda-forge/pynng) |
| [mongogrant](https://anaconda.org/conda-forge/mongogrant) | [](https://anaconda.org/conda-forge/mongogrant) |
| [maggma](https://anaconda.org/conda-forge/maggma) | [](https://anaconda.org/conda-forge/maggma) |
| [fitsnap3](https://anaconda.org/conda-forge/fitsnap3) | [](https://anaconda.org/conda-forge/fitsnap3) |
| [tespy](https://anaconda.org/conda-forge/tespy) | [](https://anaconda.org/conda-forge/tespy) |
| [aimsgb](https://anaconda.org/conda-forge/aimsgb) | [](https://anaconda.org/conda-forge/aimsgb) |
| [gb-code](https://anaconda.org/conda-forge/gb-code) | [](https://anaconda.org/conda-forge/gb-code) |
| [maml](https://anaconda.org/conda-forge/maml) | [](https://anaconda.org/conda-forge/maml) |
| [amset](https://anaconda.org/conda-forge/amset) | [](https://anaconda.org/conda-forge/amset) |
| [orthopy](https://anaconda.org/conda-forge/orthopy) | [](https://anaconda.org/conda-forge/orthopy) |
| [boltztrap2](https://anaconda.org/conda-forge/boltztrap2) | [](https://anaconda.org/conda-forge/boltztrap2) |
| [quadpy](https://anaconda.org/conda-forge/quadpy) | [](https://anaconda.org/conda-forge/quadpy) |
| [ndim](https://anaconda.org/conda-forge/ndim) | [](https://anaconda.org/conda-forge/ndim) |
| [chempy](https://anaconda.org/conda-forge/chempy) | [](https://anaconda.org/conda-forge/chempy) |
| [pyneqsys](https://anaconda.org/conda-forge/pyneqsys) | [](https://anaconda.org/conda-forge/pyneqsys) |
| [grakel](https://anaconda.org/conda-forge/grakel) | [](https://anaconda.org/conda-forge/grakel) |
| [phasepy](https://anaconda.org/conda-forge/phasepy) | [](https://anaconda.org/conda-forge/phasepy) |
| [matscholar](https://anaconda.org/conda-forge/matscholar) | [](https://anaconda.org/conda-forge/matscholar) |
| [cexprtk](https://anaconda.org/conda-forge/cexprtk) | [](https://anaconda.org/conda-forge/cexprtk) |
| [atsim-potentials](https://anaconda.org/conda-forge/atsim-potentials) | [](https://anaconda.org/conda-forge/atsim-potentials) |
| [pyocr](https://anaconda.org/conda-forge/pyocr) | [](https://anaconda.org/conda-forge/pyocr) |
| [castepxbin](https://anaconda.org/conda-forge/castepxbin) | [](https://anaconda.org/conda-forge/castepxbin) |
| [bader](https://anaconda.org/conda-forge/bader) | [](https://anaconda.org/conda-forge/bader) |
| [pyiron-experimental](https://anaconda.org/conda-forge/pyiron-experimental) | [](https://anaconda.org/conda-forge/pyiron-experimental) |
| [pytradfri](https://anaconda.org/conda-forge/pytradfri) | [](https://anaconda.org/conda-forge/pytradfri) |
| [mlip](https://anaconda.org/conda-forge/mlip) | [](https://anaconda.org/conda-forge/mlip) |
| [pyiron_experimental](https://anaconda.org/conda-forge/pyiron_experimental) | [](https://anaconda.org/conda-forge/pyiron_experimental) |
| [n2p2](https://anaconda.org/conda-forge/n2p2) | [](https://anaconda.org/conda-forge/n2p2) |
| [jax-md](https://anaconda.org/conda-forge/jax-md) | [](https://anaconda.org/conda-forge/jax-md) |
| [dm-haiku](https://anaconda.org/conda-forge/dm-haiku) | [](https://anaconda.org/conda-forge/dm-haiku) |
| [google-colab](https://anaconda.org/conda-forge/google-colab) | [](https://anaconda.org/conda-forge/google-colab) |
| [memoization](https://anaconda.org/conda-forge/memoization) | [](https://anaconda.org/conda-forge/memoization) |
| [pyiron_gpl](https://anaconda.org/conda-forge/pyiron_gpl) | [](https://anaconda.org/conda-forge/pyiron_gpl) |
| [pyiron_continuum](https://anaconda.org/conda-forge/pyiron_continuum) | [](https://anaconda.org/conda-forge/pyiron_continuum) |
| [matscipy](https://anaconda.org/conda-forge/matscipy) | [](https://anaconda.org/conda-forge/matscipy) |
| [quippy](https://anaconda.org/conda-forge/quippy) | [](https://anaconda.org/conda-forge/quippy) |
| [runner](https://anaconda.org/conda-forge/runner) | [](https://anaconda.org/conda-forge/runner) |
| [structdbrest](https://anaconda.org/conda-forge/structdbrest) | [](https://anaconda.org/conda-forge/structdbrest) |
| [wquantiles](https://anaconda.org/conda-forge/wquantiles) | [](https://anaconda.org/conda-forge/wquantiles) |
| [dmba](https://anaconda.org/conda-forge/dmba) | [](https://anaconda.org/conda-forge/dmba) |
| [asr](https://anaconda.org/conda-forge/asr) | [](https://anaconda.org/conda-forge/asr) |
| [robocrys](https://anaconda.org/conda-forge/robocrys) | [](https://anaconda.org/conda-forge/robocrys) |
| [cite](https://anaconda.org/conda-forge/cite) | [](https://anaconda.org/conda-forge/cite) |
| [pdftotext](https://anaconda.org/conda-forge/pdftotext) | [](https://anaconda.org/conda-forge/pdftotext) |
| [mongita](https://anaconda.org/conda-forge/mongita) | [](https://anaconda.org/conda-forge/mongita) |
| [rdfpy](https://anaconda.org/conda-forge/rdfpy) | [](https://anaconda.org/conda-forge/rdfpy) |
| [imagedataextractor](https://anaconda.org/conda-forge/imagedataextractor) | [](https://anaconda.org/conda-forge/imagedataextractor) |
| [nomad-lab](https://anaconda.org/conda-forge/nomad-lab) | [](https://anaconda.org/conda-forge/nomad-lab) |
| [otpauth](https://anaconda.org/conda-forge/otpauth) | [](https://anaconda.org/conda-forge/otpauth) |
| [qrtools](https://anaconda.org/conda-forge/qrtools) | [](https://anaconda.org/conda-forge/qrtools) |
| [pyauthenticator](https://anaconda.org/conda-forge/pyauthenticator) | [](https://anaconda.org/conda-forge/pyauthenticator) |
| [ebcdic](https://anaconda.org/conda-forge/ebcdic) | [](https://anaconda.org/conda-forge/ebcdic) |
| [euporie](https://anaconda.org/conda-forge/euporie) | [](https://anaconda.org/conda-forge/euporie) |
| [pyiron_gui](https://anaconda.org/conda-forge/pyiron_gui) | [](https://anaconda.org/conda-forge/pyiron_gui) |
| [smartmontools](https://anaconda.org/conda-forge/smartmontools) | [](https://anaconda.org/conda-forge/smartmontools) |
| [yfinance](https://anaconda.org/conda-forge/yfinance) | [](https://anaconda.org/conda-forge/yfinance) |
| [kanapy](https://anaconda.org/conda-forge/kanapy) | [](https://anaconda.org/conda-forge/kanapy) |
| [chemprop](https://anaconda.org/conda-forge/chemprop) | [](https://anaconda.org/conda-forge/chemprop) |
| [typed-argument-parser](https://anaconda.org/conda-forge/typed-argument-parser) | [](https://anaconda.org/conda-forge/typed-argument-parser) |
| [lbmpy](https://anaconda.org/conda-forge/lbmpy) | [](https://anaconda.org/conda-forge/lbmpy) |
| [pystencils](https://anaconda.org/conda-forge/pystencils) | [](https://anaconda.org/conda-forge/pystencils) |
| [pystencils-walberla](https://anaconda.org/conda-forge/pystencils-walberla) | [](https://anaconda.org/conda-forge/pystencils-walberla) |
| [lbmpy-walberla](https://anaconda.org/conda-forge/lbmpy-walberla) | [](https://anaconda.org/conda-forge/lbmpy-walberla) |
| [p-tqdm](https://anaconda.org/conda-forge/p-tqdm) | [](https://anaconda.org/conda-forge/p-tqdm) |
| [pystencils-autodiff](https://anaconda.org/conda-forge/pystencils-autodiff) | [](https://anaconda.org/conda-forge/pystencils-autodiff) |
| [dtool_overlay](https://anaconda.org/conda-forge/dtool_overlay) | [](https://anaconda.org/conda-forge/dtool_overlay) |
| [pystencils-reco](https://anaconda.org/conda-forge/pystencils-reco) | [](https://anaconda.org/conda-forge/pystencils-reco) |
| [dtool_tag](https://anaconda.org/conda-forge/dtool_tag) | [](https://anaconda.org/conda-forge/dtool_tag) |
| [dtool_annotation](https://anaconda.org/conda-forge/dtool_annotation) | [](https://anaconda.org/conda-forge/dtool_annotation) |
| [alamode](https://anaconda.org/conda-forge/alamode) | [](https://anaconda.org/conda-forge/alamode) |
| [ifermi](https://anaconda.org/conda-forge/ifermi) | [](https://anaconda.org/conda-forge/ifermi) |
| [meshcut](https://anaconda.org/conda-forge/meshcut) | [](https://anaconda.org/conda-forge/meshcut) |
| [enumlib](https://anaconda.org/conda-forge/enumlib) | [](https://anaconda.org/conda-forge/enumlib) |
| [pymatgen-analysis-diffusion](https://anaconda.org/conda-forge/pymatgen-analysis-diffusion) | [](https://anaconda.org/conda-forge/pymatgen-analysis-diffusion) |
| [pymatgen-lammps](https://anaconda.org/conda-forge/pymatgen-lammps) | [](https://anaconda.org/conda-forge/pymatgen-lammps) |
| [lammps-interface](https://anaconda.org/conda-forge/lammps-interface) | [](https://anaconda.org/conda-forge/lammps-interface) |
| [pylatte](https://anaconda.org/conda-forge/pylatte) | [](https://anaconda.org/conda-forge/pylatte) |
| [codeclimate-test-reporter](https://anaconda.org/conda-forge/codeclimate-test-reporter) | [](https://anaconda.org/conda-forge/codeclimate-test-reporter) |
| [vasppy](https://anaconda.org/conda-forge/vasppy) | [](https://anaconda.org/conda-forge/vasppy) |
| [pynauty](https://anaconda.org/conda-forge/pynauty) | [](https://anaconda.org/conda-forge/pynauty) |
| [apav](https://anaconda.org/conda-forge/apav) | [](https://anaconda.org/conda-forge/apav) |
| [codepy](https://anaconda.org/conda-forge/codepy) | [](https://anaconda.org/conda-forge/codepy) |
| [loopy](https://anaconda.org/conda-forge/loopy) | [](https://anaconda.org/conda-forge/loopy) |
| [latte](https://anaconda.org/conda-forge/latte) | [](https://anaconda.org/conda-forge/latte) |
| [mail-parser](https://anaconda.org/conda-forge/mail-parser) | [](https://anaconda.org/conda-forge/mail-parser) |
| [rfc6555](https://anaconda.org/conda-forge/rfc6555) | [](https://anaconda.org/conda-forge/rfc6555) |
| [offlineimap](https://anaconda.org/conda-forge/offlineimap) | [](https://anaconda.org/conda-forge/offlineimap) |
| [pyoutlook](https://anaconda.org/conda-forge/pyoutlook) | [](https://anaconda.org/conda-forge/pyoutlook) |
| [dtool_smb](https://anaconda.org/conda-forge/dtool_smb) | [](https://anaconda.org/conda-forge/dtool_smb) |
| [dtool_lookup_server_annotation_filter_plugin](https://anaconda.org/conda-forge/dtool_lookup_server_annotation_filter_plugin) | [](https://anaconda.org/conda-forge/dtool_lookup_server_annotation_filter_plugin) |
| [dtool_lookup_server_dependency_graph_plugin](https://anaconda.org/conda-forge/dtool_lookup_server_dependency_graph_plugin) | [](https://anaconda.org/conda-forge/dtool_lookup_server_dependency_graph_plugin) |
| [dtool_lookup_server_notification_plugin](https://anaconda.org/conda-forge/dtool_lookup_server_notification_plugin) | [](https://anaconda.org/conda-forge/dtool_lookup_server_notification_plugin) |
| [flask-pymongo](https://anaconda.org/conda-forge/flask-pymongo) | [](https://anaconda.org/conda-forge/flask-pymongo) |
| [dtool_ecs](https://anaconda.org/conda-forge/dtool_ecs) | [](https://anaconda.org/conda-forge/dtool_ecs) |
| [dtool_lookup_server_direct_mongo_plugin](https://anaconda.org/conda-forge/dtool_lookup_server_direct_mongo_plugin) | [](https://anaconda.org/conda-forge/dtool_lookup_server_direct_mongo_plugin) |
| [dtool_lookup_server_plugin_scaffolding](https://anaconda.org/conda-forge/dtool_lookup_server_plugin_scaffolding) | [](https://anaconda.org/conda-forge/dtool_lookup_server_plugin_scaffolding) |
| [dtool_irods](https://anaconda.org/conda-forge/dtool_irods) | [](https://anaconda.org/conda-forge/dtool_irods) |
| [dtool_lookup_server](https://anaconda.org/conda-forge/dtool_lookup_server) | [](https://anaconda.org/conda-forge/dtool_lookup_server) |
| [dtool_lookup_api](https://anaconda.org/conda-forge/dtool_lookup_api) | [](https://anaconda.org/conda-forge/dtool_lookup_api) |
| [dtool_gui_tk](https://anaconda.org/conda-forge/dtool_gui_tk) | [](https://anaconda.org/conda-forge/dtool_gui_tk) |
| [dtool_utils](https://anaconda.org/conda-forge/dtool_utils) | [](https://anaconda.org/conda-forge/dtool_utils) |
| [dtoolai](https://anaconda.org/conda-forge/dtoolai) | [](https://anaconda.org/conda-forge/dtoolai) |
| [dtool-lookup-client](https://anaconda.org/conda-forge/dtool-lookup-client) | [](https://anaconda.org/conda-forge/dtool-lookup-client) |
| [dtool_azure](https://anaconda.org/conda-forge/dtool_azure) | [](https://anaconda.org/conda-forge/dtool_azure) |
| [dtool_ibeis](https://anaconda.org/conda-forge/dtool_ibeis) | [](https://anaconda.org/conda-forge/dtool_ibeis) |
| [ubelt](https://anaconda.org/conda-forge/ubelt) | [](https://anaconda.org/conda-forge/ubelt) |
| [utool](https://anaconda.org/conda-forge/utool) | [](https://anaconda.org/conda-forge/utool) |
| [chemml](https://anaconda.org/conda-forge/chemml) | [](https://anaconda.org/conda-forge/chemml) |
| [molsimplify](https://anaconda.org/conda-forge/molsimplify) | [](https://anaconda.org/conda-forge/molsimplify) |
| [crest](https://anaconda.org/conda-forge/crest) | [](https://anaconda.org/conda-forge/crest) |
| [awesomeversion](https://anaconda.org/conda-forge/awesomeversion) | [](https://anaconda.org/conda-forge/awesomeversion) |
| [mt940](https://anaconda.org/conda-forge/mt940) | [](https://anaconda.org/conda-forge/mt940) |
| [fints](https://anaconda.org/conda-forge/fints) | [](https://anaconda.org/conda-forge/fints) |
| [sepaxml](https://anaconda.org/conda-forge/sepaxml) | [](https://anaconda.org/conda-forge/sepaxml) |
| [homeassistant](https://anaconda.org/conda-forge/homeassistant) | [](https://anaconda.org/conda-forge/homeassistant) |
| [voluptuous-serialize](https://anaconda.org/conda-forge/voluptuous-serialize) | [](https://anaconda.org/conda-forge/voluptuous-serialize) |
| [pytube](https://anaconda.org/conda-forge/pytube) | [](https://anaconda.org/conda-forge/pytube) |
| [aiotube](https://anaconda.org/conda-forge/aiotube) | [](https://anaconda.org/conda-forge/aiotube) |
| [pure-python-adb](https://anaconda.org/conda-forge/pure-python-adb) | [](https://anaconda.org/conda-forge/pure-python-adb) |
| [tld](https://anaconda.org/conda-forge/tld) | [](https://anaconda.org/conda-forge/tld) |
| [adb-shell](https://anaconda.org/conda-forge/adb-shell) | [](https://anaconda.org/conda-forge/adb-shell) |
| [pycoingecko](https://anaconda.org/conda-forge/pycoingecko) | [](https://anaconda.org/conda-forge/pycoingecko) |
| [py-find-1st](https://anaconda.org/conda-forge/py-find-1st) | [](https://anaconda.org/conda-forge/py-find-1st) |
| [technical](https://anaconda.org/conda-forge/technical) | [](https://anaconda.org/conda-forge/technical) |
| [pandas-ta](https://anaconda.org/conda-forge/pandas-ta) | [](https://anaconda.org/conda-forge/pandas-ta) |
| [search-engine-parser](https://anaconda.org/conda-forge/search-engine-parser) | [](https://anaconda.org/conda-forge/search-engine-parser) |
| [social-analyzer](https://anaconda.org/conda-forge/social-analyzer) | [](https://anaconda.org/conda-forge/social-analyzer) |
| [tsase](https://anaconda.org/conda-forge/tsase) | [](https://anaconda.org/conda-forge/tsase) |
| [adb](https://anaconda.org/conda-forge/adb) | [](https://anaconda.org/conda-forge/adb) |
| [tensor-sensor](https://anaconda.org/conda-forge/tensor-sensor) | [](https://anaconda.org/conda-forge/tensor-sensor) |
| [batoms-api](https://anaconda.org/conda-forge/batoms-api) | [](https://anaconda.org/conda-forge/batoms-api) |
| [batoms](https://anaconda.org/conda-forge/batoms) | [](https://anaconda.org/conda-forge/batoms) |
| [calphy](https://anaconda.org/conda-forge/calphy) | [](https://anaconda.org/conda-forge/calphy) |
| [i-pi](https://anaconda.org/conda-forge/i-pi) | [](https://anaconda.org/conda-forge/i-pi) |
| [where](https://anaconda.org/conda-forge/where) | [](https://anaconda.org/conda-forge/where) |
| [itermate](https://anaconda.org/conda-forge/itermate) | [](https://anaconda.org/conda-forge/itermate) |
| [pumml](https://anaconda.org/conda-forge/pumml) | [](https://anaconda.org/conda-forge/pumml) |
| [trainstation](https://anaconda.org/conda-forge/trainstation) | [](https://anaconda.org/conda-forge/trainstation) |
| [python_version](https://anaconda.org/conda-forge/python_version) | [](https://anaconda.org/conda-forge/python_version) |
| [jraph](https://anaconda.org/conda-forge/jraph) | [](https://anaconda.org/conda-forge/jraph) |
| [jmp](https://anaconda.org/conda-forge/jmp) | [](https://anaconda.org/conda-forge/jmp) |
| [aproc](https://anaconda.org/conda-forge/aproc) | [](https://anaconda.org/conda-forge/aproc) |
| [fortio](https://anaconda.org/conda-forge/fortio) | [](https://anaconda.org/conda-forge/fortio) |
| [sty](https://anaconda.org/conda-forge/sty) | [](https://anaconda.org/conda-forge/sty) |
| [precice](https://anaconda.org/conda-forge/precice) | [](https://anaconda.org/conda-forge/precice) |
| [pyprecice](https://anaconda.org/conda-forge/pyprecice) | [](https://anaconda.org/conda-forge/pyprecice) |
| [fenicsprecice](https://anaconda.org/conda-forge/fenicsprecice) | [](https://anaconda.org/conda-forge/fenicsprecice) |
| [scienceplots](https://anaconda.org/conda-forge/scienceplots) | [](https://anaconda.org/conda-forge/scienceplots) |
| [sssp](https://anaconda.org/conda-forge/sssp) | [](https://anaconda.org/conda-forge/sssp) |
| [linkedin-scraper](https://anaconda.org/conda-forge/linkedin-scraper) | [](https://anaconda.org/conda-forge/linkedin-scraper) |
| [linkedin-api](https://anaconda.org/conda-forge/linkedin-api) | [](https://anaconda.org/conda-forge/linkedin-api) |
| [uncertainty-toolbox](https://anaconda.org/conda-forge/uncertainty-toolbox) | [](https://anaconda.org/conda-forge/uncertainty-toolbox) |
| [libecpint](https://anaconda.org/conda-forge/libecpint) | [](https://anaconda.org/conda-forge/libecpint) |
| [handcalcs](https://anaconda.org/conda-forge/handcalcs) | [](https://anaconda.org/conda-forge/handcalcs) |
| [votca](https://anaconda.org/conda-forge/votca) | [](https://anaconda.org/conda-forge/votca) |
| [torchmd](https://anaconda.org/conda-forge/torchmd) | [](https://anaconda.org/conda-forge/torchmd) |
| [jobflow](https://anaconda.org/conda-forge/jobflow) | [](https://anaconda.org/conda-forge/jobflow) |
| [python-binance](https://anaconda.org/conda-forge/python-binance) | [](https://anaconda.org/conda-forge/python-binance) |
| [waiting](https://anaconda.org/conda-forge/waiting) | [](https://anaconda.org/conda-forge/waiting) |
| [ryvencore](https://anaconda.org/conda-forge/ryvencore) | [](https://anaconda.org/conda-forge/ryvencore) |
| [ryvencore-qt](https://anaconda.org/conda-forge/ryvencore-qt) | [](https://anaconda.org/conda-forge/ryvencore-qt) |
| [ryven](https://anaconda.org/conda-forge/ryven) | [](https://anaconda.org/conda-forge/ryven) |
| [simplegmail](https://anaconda.org/conda-forge/simplegmail) | [](https://anaconda.org/conda-forge/simplegmail) |
| [google-drive](https://anaconda.org/conda-forge/google-drive) | [](https://anaconda.org/conda-forge/google-drive) |
| [rtfde](https://anaconda.org/conda-forge/rtfde) | [](https://anaconda.org/conda-forge/rtfde) |
| [msoffcrypto-tool](https://anaconda.org/conda-forge/msoffcrypto-tool) | [](https://anaconda.org/conda-forge/msoffcrypto-tool) |
| [pcodedmp](https://anaconda.org/conda-forge/pcodedmp) | [](https://anaconda.org/conda-forge/pcodedmp) |
| [flatlatex](https://anaconda.org/conda-forge/flatlatex) | [](https://anaconda.org/conda-forge/flatlatex) |
| [yabadaba](https://anaconda.org/conda-forge/yabadaba) | [](https://anaconda.org/conda-forge/yabadaba) |
| [matbench](https://anaconda.org/conda-forge/matbench) | [](https://anaconda.org/conda-forge/matbench) |
| [merge-args](https://anaconda.org/conda-forge/merge-args) | [](https://anaconda.org/conda-forge/merge-args) |
| [uvicore](https://anaconda.org/conda-forge/uvicore) | [](https://anaconda.org/conda-forge/uvicore) |
| [pydatamail_google](https://anaconda.org/conda-forge/pydatamail_google) | [](https://anaconda.org/conda-forge/pydatamail_google) |
| [pydatamail](https://anaconda.org/conda-forge/pydatamail) | [](https://anaconda.org/conda-forge/pydatamail) |
| [pymov2gif](https://anaconda.org/conda-forge/pymov2gif) | [](https://anaconda.org/conda-forge/pymov2gif) |
| [pyimapsync](https://anaconda.org/conda-forge/pyimapsync) | [](https://anaconda.org/conda-forge/pyimapsync) |
| [email2pdf2](https://anaconda.org/conda-forge/email2pdf2) | [](https://anaconda.org/conda-forge/email2pdf2) |
| [basisgen](https://anaconda.org/conda-forge/basisgen) | [](https://anaconda.org/conda-forge/basisgen) |
| [galeodes](https://anaconda.org/conda-forge/galeodes) | [](https://anaconda.org/conda-forge/galeodes) |
| [pycronserver](https://anaconda.org/conda-forge/pycronserver) | [](https://anaconda.org/conda-forge/pycronserver) |
| [timg](https://anaconda.org/conda-forge/timg) | [](https://anaconda.org/conda-forge/timg) |
| [astromodels](https://anaconda.org/conda-forge/astromodels) | [](https://anaconda.org/conda-forge/astromodels) |
| [gromacs](https://anaconda.org/conda-forge/gromacs) | [](https://anaconda.org/conda-forge/gromacs) |
| [moltemplate](https://anaconda.org/conda-forge/moltemplate) | [](https://anaconda.org/conda-forge/moltemplate) |
| [qforce](https://anaconda.org/conda-forge/qforce) | [](https://anaconda.org/conda-forge/qforce) |
| [blueqat](https://anaconda.org/conda-forge/blueqat) | [](https://anaconda.org/conda-forge/blueqat) |
| [pycolt](https://anaconda.org/conda-forge/pycolt) | [](https://anaconda.org/conda-forge/pycolt) |
| [ezyrb](https://anaconda.org/conda-forge/ezyrb) | [](https://anaconda.org/conda-forge/ezyrb) |
| [quacc](https://anaconda.org/conda-forge/quacc) | [](https://anaconda.org/conda-forge/quacc) |
| [atomate2](https://anaconda.org/conda-forge/atomate2) | [](https://anaconda.org/conda-forge/atomate2) |
| [turbo-seti](https://anaconda.org/conda-forge/turbo-seti) | [](https://anaconda.org/conda-forge/turbo-seti) |
| [blimpy](https://anaconda.org/conda-forge/blimpy) | [](https://anaconda.org/conda-forge/blimpy) |
| [alchemlyb](https://anaconda.org/conda-forge/alchemlyb) | [](https://anaconda.org/conda-forge/alchemlyb) |
| [e3nn](https://anaconda.org/conda-forge/e3nn) | [](https://anaconda.org/conda-forge/e3nn) |
| [nequip](https://anaconda.org/conda-forge/nequip) | [](https://anaconda.org/conda-forge/nequip) |
| [torch-runstats](https://anaconda.org/conda-forge/torch-runstats) | [](https://anaconda.org/conda-forge/torch-runstats) |
| [torch-ema](https://anaconda.org/conda-forge/torch-ema) | [](https://anaconda.org/conda-forge/torch-ema) |
| [opt_einsum_fx](https://anaconda.org/conda-forge/opt_einsum_fx) | [](https://anaconda.org/conda-forge/opt_einsum_fx) |
| [pybader](https://anaconda.org/conda-forge/pybader) | [](https://anaconda.org/conda-forge/pybader) |
| [mailbits](https://anaconda.org/conda-forge/mailbits) | [](https://anaconda.org/conda-forge/mailbits) |
| [quimb](https://anaconda.org/conda-forge/quimb) | [](https://anaconda.org/conda-forge/quimb) |
| [pytoolconfig](https://anaconda.org/conda-forge/pytoolconfig) | [](https://anaconda.org/conda-forge/pytoolconfig) |
| [cleantext](https://anaconda.org/conda-forge/cleantext) | [](https://anaconda.org/conda-forge/cleantext) |
| [pydatamail_ml](https://anaconda.org/conda-forge/pydatamail_ml) | [](https://anaconda.org/conda-forge/pydatamail_ml) |
| [funcx](https://anaconda.org/conda-forge/funcx) | [](https://anaconda.org/conda-forge/funcx) |
| [colmena](https://anaconda.org/conda-forge/colmena) | [](https://anaconda.org/conda-forge/colmena) |
| [proxystore](https://anaconda.org/conda-forge/proxystore) | [](https://anaconda.org/conda-forge/proxystore) |
| [mdf-connect-client](https://anaconda.org/conda-forge/mdf-connect-client) | [](https://anaconda.org/conda-forge/mdf-connect-client) |
| [dlhub-sdk](https://anaconda.org/conda-forge/dlhub-sdk) | [](https://anaconda.org/conda-forge/dlhub-sdk) |
| [foundry_ml](https://anaconda.org/conda-forge/foundry_ml) | [](https://anaconda.org/conda-forge/foundry_ml) |
| [json2table](https://anaconda.org/conda-forge/json2table) | [](https://anaconda.org/conda-forge/json2table) |
| [hypothesize](https://anaconda.org/conda-forge/hypothesize) | [](https://anaconda.org/conda-forge/hypothesize) |
| [fn](https://anaconda.org/conda-forge/fn) | [](https://anaconda.org/conda-forge/fn) |
| [cadcad](https://anaconda.org/conda-forge/cadcad) | [](https://anaconda.org/conda-forge/cadcad) |
| [skipatom](https://anaconda.org/conda-forge/skipatom) | [](https://anaconda.org/conda-forge/skipatom) |
| [pympipool](https://anaconda.org/conda-forge/pympipool) | [](https://anaconda.org/conda-forge/pympipool) |
| [crccheck](https://anaconda.org/conda-forge/crccheck) | [](https://anaconda.org/conda-forge/crccheck) |
| [adf2dms](https://anaconda.org/conda-forge/adf2dms) | [](https://anaconda.org/conda-forge/adf2dms) |
| [megnet](https://anaconda.org/conda-forge/megnet) | [](https://anaconda.org/conda-forge/megnet) |
| [mnemonic](https://anaconda.org/conda-forge/mnemonic) | [](https://anaconda.org/conda-forge/mnemonic) |
| [pbkdf2](https://anaconda.org/conda-forge/pbkdf2) | [](https://anaconda.org/conda-forge/pbkdf2) |
| [pystore](https://anaconda.org/conda-forge/pystore) | [](https://anaconda.org/conda-forge/pystore) |
| [sha256](https://anaconda.org/conda-forge/sha256) | [](https://anaconda.org/conda-forge/sha256) |
| [py4vasp](https://anaconda.org/conda-forge/py4vasp) | [](https://anaconda.org/conda-forge/py4vasp) |
| [imap-tools](https://anaconda.org/conda-forge/imap-tools) | [](https://anaconda.org/conda-forge/imap-tools) |
| [threeml](https://anaconda.org/conda-forge/threeml) | [](https://anaconda.org/conda-forge/threeml) |
| [nng](https://anaconda.org/conda-forge/nng) | [](https://anaconda.org/conda-forge/nng) |
| [pylgbst](https://anaconda.org/conda-forge/pylgbst) | [](https://anaconda.org/conda-forge/pylgbst) |
| [scanpdf](https://anaconda.org/conda-forge/scanpdf) | [](https://anaconda.org/conda-forge/scanpdf) |
| [bleak-winrt](https://anaconda.org/conda-forge/bleak-winrt) | [](https://anaconda.org/conda-forge/bleak-winrt) |
| [dbus-next](https://anaconda.org/conda-forge/dbus-next) | [](https://anaconda.org/conda-forge/dbus-next) |
| [getmail6](https://anaconda.org/conda-forge/getmail6) | [](https://anaconda.org/conda-forge/getmail6) |
| [bleak](https://anaconda.org/conda-forge/bleak) | [](https://anaconda.org/conda-forge/bleak) |
| [pyobjc-framework-corebluetooth](https://anaconda.org/conda-forge/pyobjc-framework-corebluetooth) | [](https://anaconda.org/conda-forge/pyobjc-framework-corebluetooth) |
| [stringbrewer](https://anaconda.org/conda-forge/stringbrewer) | [](https://anaconda.org/conda-forge/stringbrewer) |
| [ufolib2](https://anaconda.org/conda-forge/ufolib2) | [](https://anaconda.org/conda-forge/ufolib2) |
| [cu2qu](https://anaconda.org/conda-forge/cu2qu) | [](https://anaconda.org/conda-forge/cu2qu) |
| [booleanoperations](https://anaconda.org/conda-forge/booleanoperations) | [](https://anaconda.org/conda-forge/booleanoperations) |
| [fontparts](https://anaconda.org/conda-forge/fontparts) | [](https://anaconda.org/conda-forge/fontparts) |
| [font-v](https://anaconda.org/conda-forge/font-v) | [](https://anaconda.org/conda-forge/font-v) |
| [ufolint](https://anaconda.org/conda-forge/ufolint) | [](https://anaconda.org/conda-forge/ufolint) |
| [defcon](https://anaconda.org/conda-forge/defcon) | [](https://anaconda.org/conda-forge/defcon) |
| [uharfbuzz](https://anaconda.org/conda-forge/uharfbuzz) | [](https://anaconda.org/conda-forge/uharfbuzz) |
| [beziers](https://anaconda.org/conda-forge/beziers) | [](https://anaconda.org/conda-forge/beziers) |
| [opentype-sanitizer](https://anaconda.org/conda-forge/opentype-sanitizer) | [](https://anaconda.org/conda-forge/opentype-sanitizer) |
| [dehinter](https://anaconda.org/conda-forge/dehinter) | [](https://anaconda.org/conda-forge/dehinter) |
| [axisregistry](https://anaconda.org/conda-forge/axisregistry) | [](https://anaconda.org/conda-forge/axisregistry) |
| [opentypespec](https://anaconda.org/conda-forge/opentypespec) | [](https://anaconda.org/conda-forge/opentypespec) |
| [openstep-plist](https://anaconda.org/conda-forge/openstep-plist) | [](https://anaconda.org/conda-forge/openstep-plist) |
| [gflanguages](https://anaconda.org/conda-forge/gflanguages) | [](https://anaconda.org/conda-forge/gflanguages) |
| [fontmath](https://anaconda.org/conda-forge/fontmath) | [](https://anaconda.org/conda-forge/fontmath) |
| [vharfbuzz](https://anaconda.org/conda-forge/vharfbuzz) | [](https://anaconda.org/conda-forge/vharfbuzz) |
| [cffsubr](https://anaconda.org/conda-forge/cffsubr) | [](https://anaconda.org/conda-forge/cffsubr) |
| [pygmailsorter](https://anaconda.org/conda-forge/pygmailsorter) | [](https://anaconda.org/conda-forge/pygmailsorter) |
| [funcx-common](https://anaconda.org/conda-forge/funcx-common) | [](https://anaconda.org/conda-forge/funcx-common) |
| [pykmip](https://anaconda.org/conda-forge/pykmip) | [](https://anaconda.org/conda-forge/pykmip) |
| [global-chem](https://anaconda.org/conda-forge/global-chem) | [](https://anaconda.org/conda-forge/global-chem) |
| [molml](https://anaconda.org/conda-forge/molml) | [](https://anaconda.org/conda-forge/molml) |
| [odachi](https://anaconda.org/conda-forge/odachi) | [](https://anaconda.org/conda-forge/odachi) |
| [chemsolve](https://anaconda.org/conda-forge/chemsolve) | [](https://anaconda.org/conda-forge/chemsolve) |
| [pyscreener](https://anaconda.org/conda-forge/pyscreener) | [](https://anaconda.org/conda-forge/pyscreener) |
| [paddy](https://anaconda.org/conda-forge/paddy) | [](https://anaconda.org/conda-forge/paddy) |
| [plams](https://anaconda.org/conda-forge/plams) | [](https://anaconda.org/conda-forge/plams) |
| [mp-api](https://anaconda.org/conda-forge/mp-api) | [](https://anaconda.org/conda-forge/mp-api) |
| [emmet-core](https://anaconda.org/conda-forge/emmet-core) | [](https://anaconda.org/conda-forge/emmet-core) |
| [emmet-api](https://anaconda.org/conda-forge/emmet-api) | [](https://anaconda.org/conda-forge/emmet-api) |
| [emmet-builders](https://anaconda.org/conda-forge/emmet-builders) | [](https://anaconda.org/conda-forge/emmet-builders) |
| [de-interpol](https://anaconda.org/conda-forge/de-interpol) | [](https://anaconda.org/conda-forge/de-interpol) |
| [de-risikogebiete](https://anaconda.org/conda-forge/de-risikogebiete) | [](https://anaconda.org/conda-forge/de-risikogebiete) |
| [de-bundesrat](https://anaconda.org/conda-forge/de-bundesrat) | [](https://anaconda.org/conda-forge/de-bundesrat) |
| [de-smard](https://anaconda.org/conda-forge/de-smard) | [](https://anaconda.org/conda-forge/de-smard) |
| [de-jobsuche](https://anaconda.org/conda-forge/de-jobsuche) | [](https://anaconda.org/conda-forge/de-jobsuche) |
| [de-strahlenschutz](https://anaconda.org/conda-forge/de-strahlenschutz) | [](https://anaconda.org/conda-forge/de-strahlenschutz) |
| [de-mudab](https://anaconda.org/conda-forge/de-mudab) | [](https://anaconda.org/conda-forge/de-mudab) |
| [pypresseportal](https://anaconda.org/conda-forge/pypresseportal) | [](https://anaconda.org/conda-forge/pypresseportal) |
| [de-bundestag](https://anaconda.org/conda-forge/de-bundestag) | [](https://anaconda.org/conda-forge/de-bundestag) |
| [de-travelwarning](https://anaconda.org/conda-forge/de-travelwarning) | [](https://anaconda.org/conda-forge/de-travelwarning) |
| [de-zoll](https://anaconda.org/conda-forge/de-zoll) | [](https://anaconda.org/conda-forge/de-zoll) |
| [de-ladestationen](https://anaconda.org/conda-forge/de-ladestationen) | [](https://anaconda.org/conda-forge/de-ladestationen) |
| [de-polizei-brandenburg](https://anaconda.org/conda-forge/de-polizei-brandenburg) | [](https://anaconda.org/conda-forge/de-polizei-brandenburg) |
| [latexify-py](https://anaconda.org/conda-forge/latexify-py) | [](https://anaconda.org/conda-forge/latexify-py) |
| [de-autobahn](https://anaconda.org/conda-forge/de-autobahn) | [](https://anaconda.org/conda-forge/de-autobahn) |
| [de-dwd](https://anaconda.org/conda-forge/de-dwd) | [](https://anaconda.org/conda-forge/de-dwd) |
| [de-nina](https://anaconda.org/conda-forge/de-nina) | [](https://anaconda.org/conda-forge/de-nina) |
| [deutschland](https://anaconda.org/conda-forge/deutschland) | [](https://anaconda.org/conda-forge/deutschland) |
| [de-studiensuche](https://anaconda.org/conda-forge/de-studiensuche) | [](https://anaconda.org/conda-forge/de-studiensuche) |
| [de-vag](https://anaconda.org/conda-forge/de-vag) | [](https://anaconda.org/conda-forge/de-vag) |
| [de-hochwasserzentralen](https://anaconda.org/conda-forge/de-hochwasserzentralen) | [](https://anaconda.org/conda-forge/de-hochwasserzentralen) |
| [de-marktstammdaten](https://anaconda.org/conda-forge/de-marktstammdaten) | [](https://anaconda.org/conda-forge/de-marktstammdaten) |
| [de-entgeltatlas](https://anaconda.org/conda-forge/de-entgeltatlas) | [](https://anaconda.org/conda-forge/de-entgeltatlas) |
| [de-dip-bundestag](https://anaconda.org/conda-forge/de-dip-bundestag) | [](https://anaconda.org/conda-forge/de-dip-bundestag) |
| [de-weiterbildungssuche](https://anaconda.org/conda-forge/de-weiterbildungssuche) | [](https://anaconda.org/conda-forge/de-weiterbildungssuche) |
| [de-coachingangebote](https://anaconda.org/conda-forge/de-coachingangebote) | [](https://anaconda.org/conda-forge/de-coachingangebote) |
| [de-ausbildungssuche](https://anaconda.org/conda-forge/de-ausbildungssuche) | [](https://anaconda.org/conda-forge/de-ausbildungssuche) |
| [de-abfallnavi](https://anaconda.org/conda-forge/de-abfallnavi) | [](https://anaconda.org/conda-forge/de-abfallnavi) |
| [de-ecovisio](https://anaconda.org/conda-forge/de-ecovisio) | [](https://anaconda.org/conda-forge/de-ecovisio) |
| [de-pflanzenschutzmittelzulassung](https://anaconda.org/conda-forge/de-pflanzenschutzmittelzulassung) | [](https://anaconda.org/conda-forge/de-pflanzenschutzmittelzulassung) |
| [de-bundestag-lobbyregister](https://anaconda.org/conda-forge/de-bundestag-lobbyregister) | [](https://anaconda.org/conda-forge/de-bundestag-lobbyregister) |
| [de-bundeshaushalt](https://anaconda.org/conda-forge/de-bundeshaushalt) | [](https://anaconda.org/conda-forge/de-bundeshaushalt) |
| [de-dashboarddeutschland](https://anaconda.org/conda-forge/de-dashboarddeutschland) | [](https://anaconda.org/conda-forge/de-dashboarddeutschland) |
| [de-tagesschau](https://anaconda.org/conda-forge/de-tagesschau) | [](https://anaconda.org/conda-forge/de-tagesschau) |
| [de-berufssprachkurssuche](https://anaconda.org/conda-forge/de-berufssprachkurssuche) | [](https://anaconda.org/conda-forge/de-berufssprachkurssuche) |
| [de-feiertage](https://anaconda.org/conda-forge/de-feiertage) | [](https://anaconda.org/conda-forge/de-feiertage) |
| [de-pegel-online](https://anaconda.org/conda-forge/de-pegel-online) | [](https://anaconda.org/conda-forge/de-pegel-online) |
| [ironflow](https://anaconda.org/conda-forge/ironflow) | [](https://anaconda.org/conda-forge/ironflow) |
| [e3nn-jax](https://anaconda.org/conda-forge/e3nn-jax) | [](https://anaconda.org/conda-forge/e3nn-jax) |
| [red-black-tree-mod](https://anaconda.org/conda-forge/red-black-tree-mod) | [](https://anaconda.org/conda-forge/red-black-tree-mod) |
| [architector](https://anaconda.org/conda-forge/architector) | [](https://anaconda.org/conda-forge/architector) |
| [hippynn](https://anaconda.org/conda-forge/hippynn) | [](https://anaconda.org/conda-forge/hippynn) |
| [dirsync](https://anaconda.org/conda-forge/dirsync) | [](https://anaconda.org/conda-forge/dirsync) |
| [ssh-ipykernel](https://anaconda.org/conda-forge/ssh-ipykernel) | [](https://anaconda.org/conda-forge/ssh-ipykernel) |
| [open-clip-torch](https://anaconda.org/conda-forge/open-clip-torch) | [](https://anaconda.org/conda-forge/open-clip-torch) |
| [safetensors](https://anaconda.org/conda-forge/safetensors) | [](https://anaconda.org/conda-forge/safetensors) |
| [facexlib](https://anaconda.org/conda-forge/facexlib) | [](https://anaconda.org/conda-forge/facexlib) |
| [imaginairy](https://anaconda.org/conda-forge/imaginairy) | [](https://anaconda.org/conda-forge/imaginairy) |
| [timerit](https://anaconda.org/conda-forge/timerit) | [](https://anaconda.org/conda-forge/timerit) |
| [sixelcrop](https://anaconda.org/conda-forge/sixelcrop) | [](https://anaconda.org/conda-forge/sixelcrop) |
| [mamonca](https://anaconda.org/conda-forge/mamonca) | [](https://anaconda.org/conda-forge/mamonca) |
| [pyiron_ontology](https://anaconda.org/conda-forge/pyiron_ontology) | [](https://anaconda.org/conda-forge/pyiron_ontology) |
| [openphase](https://anaconda.org/conda-forge/openphase) | [](https://anaconda.org/conda-forge/openphase) |
| [units](https://anaconda.org/conda-forge/units) | [](https://anaconda.org/conda-forge/units) |
| [pyre-extensions](https://anaconda.org/conda-forge/pyre-extensions) | [](https://anaconda.org/conda-forge/pyre-extensions) |
| [xformers](https://anaconda.org/conda-forge/xformers) | [](https://anaconda.org/conda-forge/xformers) |
| [oxdna-analysis-tools](https://anaconda.org/conda-forge/oxdna-analysis-tools) | [](https://anaconda.org/conda-forge/oxdna-analysis-tools) |
| [hydra-colorlog](https://anaconda.org/conda-forge/hydra-colorlog) | [](https://anaconda.org/conda-forge/hydra-colorlog) |
| [read-version](https://anaconda.org/conda-forge/read-version) | [](https://anaconda.org/conda-forge/read-version) |
| [structuretoolkit](https://anaconda.org/conda-forge/structuretoolkit) | [](https://anaconda.org/conda-forge/structuretoolkit) |
| [pysipfenn](https://anaconda.org/conda-forge/pysipfenn) | [](https://anaconda.org/conda-forge/pysipfenn) |
| [pysmartdl](https://anaconda.org/conda-forge/pysmartdl) | [](https://anaconda.org/conda-forge/pysmartdl) |
| [rfc3161ng](https://anaconda.org/conda-forge/rfc3161ng) | [](https://anaconda.org/conda-forge/rfc3161ng) |
| [pdb-tools](https://anaconda.org/conda-forge/pdb-tools) | [](https://anaconda.org/conda-forge/pdb-tools) |
| [moleculekit](https://anaconda.org/conda-forge/moleculekit) | [](https://anaconda.org/conda-forge/moleculekit) |
| [pymace](https://anaconda.org/conda-forge/pymace) | [](https://anaconda.org/conda-forge/pymace) |
| [watchgha](https://anaconda.org/conda-forge/watchgha) | [](https://anaconda.org/conda-forge/watchgha) |
| [flux-core](https://anaconda.org/conda-forge/flux-core) | [](https://anaconda.org/conda-forge/flux-core) |
| [flux-sched](https://anaconda.org/conda-forge/flux-sched) | [](https://anaconda.org/conda-forge/flux-sched) |
| [fvgp](https://anaconda.org/conda-forge/fvgp) | [](https://anaconda.org/conda-forge/fvgp) |
| [hgdl](https://anaconda.org/conda-forge/hgdl) | [](https://anaconda.org/conda-forge/hgdl) |
| [gpcam](https://anaconda.org/conda-forge/gpcam) | [](https://anaconda.org/conda-forge/gpcam) |
| [pycrystal](https://anaconda.org/conda-forge/pycrystal) | [](https://anaconda.org/conda-forge/pycrystal) |
| [pyxtal](https://anaconda.org/conda-forge/pyxtal) | [](https://anaconda.org/conda-forge/pyxtal) |
| [flux-restful-client](https://anaconda.org/conda-forge/flux-restful-client) | [](https://anaconda.org/conda-forge/flux-restful-client) |
| [maxvolpy](https://anaconda.org/conda-forge/maxvolpy) | [](https://anaconda.org/conda-forge/maxvolpy) |
| [ttopt](https://anaconda.org/conda-forge/ttopt) | [](https://anaconda.org/conda-forge/ttopt) |
| [insightface](https://anaconda.org/conda-forge/insightface) | [](https://anaconda.org/conda-forge/insightface) |
| [psij-rest](https://anaconda.org/conda-forge/psij-rest) | [](https://anaconda.org/conda-forge/psij-rest) |
| [psij-zmq](https://anaconda.org/conda-forge/psij-zmq) | [](https://anaconda.org/conda-forge/psij-zmq) |
| [websocket](https://anaconda.org/conda-forge/websocket) | [](https://anaconda.org/conda-forge/websocket) |
| [psij-python](https://anaconda.org/conda-forge/psij-python) | [](https://anaconda.org/conda-forge/psij-python) |
| [emdfile](https://anaconda.org/conda-forge/emdfile) | [](https://anaconda.org/conda-forge/emdfile) |
| [covalent](https://anaconda.org/conda-forge/covalent) | [](https://anaconda.org/conda-forge/covalent) |
| [gmailsorter](https://anaconda.org/conda-forge/gmailsorter) | [](https://anaconda.org/conda-forge/gmailsorter) |
| [descriptastorus](https://anaconda.org/conda-forge/descriptastorus) | [](https://anaconda.org/conda-forge/descriptastorus) |
| [pyqalloy](https://anaconda.org/conda-forge/pyqalloy) | [](https://anaconda.org/conda-forge/pyqalloy) |
| [abacusutils](https://anaconda.org/conda-forge/abacusutils) | [](https://anaconda.org/conda-forge/abacusutils) |
| [sphinx-pdj-theme](https://anaconda.org/conda-forge/sphinx-pdj-theme) | [](https://anaconda.org/conda-forge/sphinx-pdj-theme) |
| [lobsterpy](https://anaconda.org/conda-forge/lobsterpy) | [](https://anaconda.org/conda-forge/lobsterpy) |
| [vermouth](https://anaconda.org/conda-forge/vermouth) | [](https://anaconda.org/conda-forge/vermouth) |
| [materials-learning-algorithms](https://anaconda.org/conda-forge/materials-learning-algorithms) | [](https://anaconda.org/conda-forge/materials-learning-algorithms) |
| [coexist](https://anaconda.org/conda-forge/coexist) | [](https://anaconda.org/conda-forge/coexist) |
<file_sep>/.ci_support/template.md
# List of Packages
| Package Name | Downloads |
|:-------------|:---------:|
{% for package in package_lst -%}
| [{{ package }}](https://anaconda.org/conda-forge/{{ package }}) | [](https://anaconda.org/conda-forge/{{ package }}) |
{% endfor %}
<file_sep>/.ci_support/run.py
import sys
import getopt
import requests
import pandas
from datetime import date
from jinja2 import Template
query_template = """
{
organization(login: "conda-forge") {
{%- if after %}
teams(first: 100, after: "{{ after }}", userLogins: ["{{ githubuser }}"]) {
{%- else %}
teams(first: 100, userLogins: ["{{ githubuser }}"]) {
{%- endif %}
totalCount
edges {
node {
name
description
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
"""
def get_all_package_names(username, token):
    """Return the conda-forge package (team) names *username* belongs to.

    Queries the GitHub GraphQL API page by page.  *token* must be the full
    Authorization header value (e.g. "bearer <token>").  Raises Exception on
    any non-200 response.  The organization-wide teams "all-members" and
    "Core" are filtered out because they are not packages.
    """
    t = Template(query_template)
    after = None
    next_page = True
    packages_lst = []
    while next_page:
        # Render the query with the current pagination cursor (None on the
        # first iteration selects the un-cursored branch of the template).
        query = t.render(githubuser=username, after=after)
        request = requests.post('https://api.github.com/graphql', json={'query': query}, headers={"Authorization": token})
        if request.status_code == 200:
            result_dict = request.json()
        else:
            raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
        next_page = result_dict['data']['organization']['teams']['pageInfo']['hasNextPage']
        after = result_dict['data']['organization']['teams']['pageInfo']['endCursor']
        for n in result_dict['data']['organization']['teams']['edges']:
            if n['node']['name'] not in ['all-members', 'Core']:
                packages_lst.append(n['node']['name'])
    return packages_lst
def read_template(file):
    """Return the entire text content of *file*."""
    with open(file) as handle:
        return handle.read()
def write_index(file, output):
    """Overwrite *file* with *output* (a string or iterable of strings)."""
    with open(file, mode='w') as handle:
        handle.writelines(output)
def write_files(total_packages):
    """Render the HTML and Markdown package listings from their templates.

    Reads the Jinja templates under .ci_support/ and writes index.html and
    packages.md in the working directory.
    """
    targets = [
        (".ci_support/template.html", "index.html"),
        (".ci_support/template.md", "packages.md"),
    ]
    for template_path, output_path in targets:
        rendered = Template(read_template(file=template_path)).render(package_lst=total_packages)
        write_index(file=output_path, output=rendered)
def command_line(argv):
    """Parse -u/--username, -t/--token and -g/--githubrepo from *argv*.

    Returns the (username, token, repo) triple; any option that was not
    supplied stays None.  -h/--help prints usage and exits successfully;
    a malformed command line prints usage and exits with status 2 (the
    original code exited with status 0 even on parse errors).
    """
    username = None
    token = None
    repo = None
    try:
        opts, args = getopt.getopt(
            argv[1:], "u:t:g:h", ["username=", "token=", "githubrepo=", "help"]
        )
    except getopt.GetoptError:
        print("run.py -u <username> -t <token> -g <githubrepo>")
        sys.exit(2)  # non-zero exit: the command line could not be parsed
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print("run.py -u <username> -t <token> -g <githubrepo>")
            sys.exit()
        elif opt in ("-u", "--username"):
            username = arg
        elif opt in ("-t", "--token"):
            token = arg
        elif opt in ("-g", "--githubrepo"):
            repo = arg
    return username, token, repo
def get_download_count_line(content_lst):
    """Scan HTML lines for the "total downloads" span and return its value.

    The anaconda.org page renders the count as ``...total downloads">N<...``;
    the first matching line's integer is returned, or None when no line
    matches.
    """
    for line in content_lst:
        if "total downloads" in line:
            inner = line.split(">")[1].split("<")[0]
            return int(inner)
    return None
def get_github_stats_url(repo, filename):
    """Build the GitHub Pages URL of *filename* for *repo* ("user/name")."""
    username, reponame = repo.split("/")
    return "http://{}.github.io/{}/{}".format(username, reponame, filename)
def get_package_download_count(package_name):
    """Scrape the anaconda.org page of *package_name* for its total
    download count.

    Returns the integer count, or None when the page has no
    "total downloads" line (see get_download_count_line).
    """
    r = requests.get('https://anaconda.org/conda-forge/' + package_name)
    return get_download_count_line(content_lst=r.content.decode().split("\n"))
def get_condaforge_contribution(package_lst):
    """Collect today's download counts for every package in *package_lst*.

    Returns a single-row pandas.DataFrame with a "Date" column, one column
    per package, a "number" column (count of packages) and a "sum" column
    (total downloads, ignoring packages whose count could not be scraped).

    Unlike the original implementation, the caller's list is NOT mutated,
    and "number" reports the true package count (the old code appended
    "number" to the list before taking len(), over-counting by one).
    """
    names = list(package_lst)  # work on a copy; never mutate the argument
    counts = [get_package_download_count(package_name=p) for p in names]
    columns = ["Date"] + names + ["number", "sum"]
    values = (
        [date.today().strftime("%Y/%m/%d")]
        + counts
        + [len(names), sum(c for c in counts if c is not None)]
    )
    return pandas.DataFrame({col: [val] for col, val in zip(columns, values)})
def download_existing_data(data_download):
    """Load the previously published statistics CSV (first column = index)."""
    frame = pandas.read_csv(data_download, index_col=0)
    return frame
def get_statistics(package_lst, repo, filename):
    """Append today's download statistics to the published history CSV.

    Downloads the existing *filename* from the repo's GitHub Pages site,
    appends one new row of current counts and writes the merged table to
    *filename* in the working directory.
    """
    data_download = get_github_stats_url(repo=repo, filename=filename)
    df_new = get_condaforge_contribution(package_lst=package_lst)
    df_old = download_existing_data(data_download=data_download)
    # DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
    # pandas.concat is the supported equivalent.
    df_merge = pandas.concat([df_old, df_new], sort=False)
    df_merge.to_csv(filename)
# Entry point: collect the team/package names for the configured GitHub
# user, regenerate the HTML/Markdown listings and append today's download
# statistics to stats.csv.
if __name__ == "__main__":
    username, token, repo = command_line(sys.argv)
    # GitHub's GraphQL API expects an "Authorization: bearer <token>" header.
    package_lst = get_all_package_names(username=username, token="bearer "+token)
    write_files(total_packages=package_lst)
    get_statistics(
        package_lst=package_lst,
        repo=repo,
        filename="stats.csv"
    )
| 9401d3d542e1c0dfbad4cb5d1bba1c1aba665eef | [
"Markdown",
"Python"
] | 4 | Markdown | jan-janssen/conda-forge-contribution | e037e26168a08a3daec6e941dbab96a88d8d0bf0 | c7c1f376eb691dcadfde785f50b7d3e31158aeac |
refs/heads/main | <repo_name>voomoo/Django_BloodHero<file_sep>/README.md
# <img src = 'static/images/bloodhero.png' alt = 'BloodHero logo' width = '20'> BloodHero <img src = 'static/images/bloodhero.png' alt = 'BloodHero logo' width = '20'>
A simple database-driven web application where a user can
- create an account
- log in to that account
- search for a required blood group
- message someone
[Video Demo!](https://drive.google.com/file/d/12lc8GMxcCAc2Tpr5UL5SKlbeoAGEraCq/view?usp=sharing)
|Desktop View | Mobile View|
|-------------|------------|
| <img src = 'static/images/landing_page.png' height = '300'> | <img src = 'static/images/landing_page_phn.png' height = '300'>|
| <img src = 'static/images/registration_page.png' height = '300'> | <img src = 'static/images/registration_page_phn.png' height = '300'>|
| <img src = 'static/images/dashboard.png' height = '300'> | <img src = 'static/images/dashboard_phn.png' height = '300'> |
| <img src = 'static/images/messages.png' height = '300'> | <img src = 'static/images/messages_phn.png' height = '300'>|
| <img src = 'static/images/profile.png' height = '300'> | <img src = 'static/images/profile_png.png' height = '300'>|
| <img src = 'static/images/send_msg.png' height = '300'> | <img src = 'static/images/send_msg_phn.png' height = '300'>|
<file_sep>/donors/migrations/0001_initial.py
# Generated by Django 3.1.4 on 2021-01-04 14:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the donors app: the custom user model
    # (DonorSignup), the in-app Messages table and the public Profile.
    # NOTE(review): Profile's FK targets settings.AUTH_USER_MODEL with an
    # empty dependency list -- this is fine only because AUTH_USER_MODEL
    # resolves to DonorSignup, created in this same migration; confirm if
    # the user model is ever moved elsewhere.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='DonorSignup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('email', models.EmailField(max_length=60, unique=True, verbose_name='email')),
                ('username', models.CharField(max_length=30, unique=True)),
                ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
                ('last_login', models.DateTimeField(auto_now=True, verbose_name='last login')),
                ('is_admin', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('is_superuser', models.BooleanField(default=False)),
                ('is_staff', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Messages are addressed purely by email strings, not FKs.
        migrations.CreateModel(
            name='Messages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email_from', models.EmailField(max_length=60)),
                ('email_to', models.EmailField(max_length=60)),
                ('message', models.TextField(max_length=400)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
                ('email', models.EmailField(max_length=60)),
                ('age', models.PositiveIntegerField()),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], default='Male', max_length=6)),
                ('phone', models.CharField(max_length=20)),
                ('blood_group', models.CharField(choices=[('a+', 'A RhD positive (A+)'), ('a-', 'A RhD negative (A-)'), ('b+', 'B RhD positive (B+)'), ('b-', 'B RhD negative (B-)'), ('o+', 'O RhD positive (O+)'), ('o-', 'O RhD negative (O-)'), ('ab+', 'AB RhD positive (AB+)'), ('ab-', 'AB RhD negative (AB-)')], max_length=21)),
                ('info', models.TextField(blank=True, max_length=400)),
                ('donor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
<file_sep>/mysite/urls.py
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from donors.views import signup_user, dashboard, logout_user, login_user, profile, mail_user, messages, home
# Route table: admin, the auth endpoints and the main navigation pages.
# "mail" takes the target donor's Profile primary key.
urlpatterns = [
    path('admin/', admin.site.urls),
    #authentication
    path('signup/', signup_user, name='signup_user'),
    path('logout/', logout_user, name='logout'),
    path('login/', login_user, name='login'),
    #navigation
    path('dashboard/', dashboard, name='dashboard'),
    path('profile/', profile, name='profile'),
    path('mail/<int:donor_pk>', mail_user, name='mail'),
    path('messages/', messages, name='messages'),
    path('', home, name='home')
]
<file_sep>/donors/forms.py
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate
from .models import DonorSignup, Profile, Messages
class DonorSignupForm(UserCreationForm):
    """Signup form for DonorSignup accounts.

    Extends Django's UserCreationForm (which supplies the two password
    fields as "password1"/"password2") with a required email field.
    """
    email = forms.EmailField(max_length=60, help_text='Required, Add a valid email')
    class Meta:
        model = DonorSignup
        # The fields tuple contained an anonymization placeholder twice;
        # restore the canonical UserCreationForm password field names.
        fields = ('email', 'username', 'password1', 'password2')
class DonorAuthenticationForm(forms.ModelForm):
    """Login form: collects email/password and validates the credentials."""
    password = forms.CharField(label='Password', widget=forms.PasswordInput)
    class Meta:
        model = DonorSignup
        fields = ('email', 'password')
    def clean(self):
        # NOTE(review): calling is_valid() inside clean() is unusual; it
        # works because full_clean() initialises self._errors before clean()
        # runs, so this only guards against field-level errors already found.
        if self.is_valid():
            email = self.cleaned_data['email']
            password = self.cleaned_data['password']
            # Reject the submission when the credentials do not authenticate.
            if not authenticate(email = email, password = password):
                raise forms.ValidationError("Invalid Login")
class ProfileForm(forms.ModelForm):
    """Create/edit form for a donor's public Profile.

    The donor FK and email are filled in by the view (see profile()), so
    they are deliberately not exposed here.
    """
    class Meta:
        model = Profile
        fields = ['name', 'gender', 'age', 'blood_group', 'phone', 'info']
        widgets = {
            'info': forms.Textarea(attrs={'rows':2, 'cols':23})
        }
class MessageForm(forms.ModelForm):
    """Compose form for an in-app Messages row.

    email_from is not a form field; the view stamps it with the sender's
    address before saving.
    """
    class Meta:
        model = Messages
        fields = ['email_to', 'message']
<file_sep>/donors/templates/home.html
{% extends 'base.html' %}
{% load static %}
{# Landing page shown to anonymous visitors: hero illustration plus marketing copy. #}
{% block content %}
<div class="container-fluid d-flex flex-wrap justify-content-around align-items-center p-5">
    <div class="row">
        <div class="col-md d-flex align-items-center">
            <img class="img img-fluid mb-4 ms-md-5" width="350px" src="{% static 'images/erythrocytes.svg' %}" alt="erythrocytes">
        </div>
        <div class="col">
            <div class="text-danger text-center container-fluid">
                <h1>Blood is meant to be circulated</h1>
                <h1>Pass it around</h1>
                <h1>Help others</h1>
                {# Placeholder copy -- replace before launch. #}
                <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Ducimus illum, ut molestiae error dignissimos cum fugiat. Tempora nostrum et assumenda saepe iusto voluptates soluta, cum voluptas, neque maiores nisi illum.</p>
                <p>Lorem ipsum dolor sit amet consectetur adipisicing elit. Vitae beatae, alias maiores, excepturi architecto nostrum perspiciatis dignissimos, illo praesentium qui atque accusantium voluptates! Velit amet consequatur laborum quisquam molestiae tempora.</p>
                <p>Lorem ipsum dolor sit amet consectetur adipisicing elit. Enim quia quas, eveniet odio ducimus consequatur quo voluptas deleniti harum nisi cumque amet tenetur facere quasi deserunt odit. Distinctio, nisi saepe.</p>
            </div>
        </div>
    </div>
</div>
{% endblock content %}<file_sep>/donors/admin.py
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import DonorSignup, Profile, Messages
# Register your models here.
class DonorAdmin(UserAdmin):
    """Admin config for the custom DonorSignup user model.

    The fieldsets/filters are blanked because the custom user model does
    not have the default auth.User field layout UserAdmin expects.
    """
    list_display = ('email', 'username', 'date_joined', 'last_login', 'is_admin', 'is_staff')
    search_fields = ('email', 'username')
    readonly_fields = ('date_joined', 'last_login')
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()
class ProfileAdmin(admin.ModelAdmin):
    """Admin list/search configuration for donor profiles."""
    list_display = ('email', 'name', 'age', 'gender', 'phone', 'blood_group')
    search_fields = ('email', 'name', 'age', 'phone', 'blood_group')
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()
class MessageAdmin(admin.ModelAdmin):
    """Admin list/search configuration for in-app messages."""
    list_display = ('email_from', 'message', 'email_to')
    search_fields = ('email_from', 'message', 'email_to')
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()
# Register the models with their admin configurations.
admin.site.register(DonorSignup, DonorAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Messages, MessageAdmin)<file_sep>/donors/views.py
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from .forms import DonorSignupForm, DonorAuthenticationForm, ProfileForm, MessageForm
from django.contrib.auth import authenticate, login, logout
from .models import Profile, Messages
from django.contrib.auth.decorators import login_required
# Create your views here.
def home(request):
    """Landing page: authenticated users go straight to the dashboard."""
    if not request.user.is_authenticated:
        return render(request, 'home.html')
    return redirect('dashboard')
def signup_user(request):
    """Register a new donor account and log the user in on success.

    On GET (or an invalid submission) the signup form is (re-)rendered
    with any validation errors attached.
    """
    context = {}
    if request.method == 'POST':
        form = DonorSignupForm(request.POST)
        if form.is_valid():
            form.save()
            email = form.cleaned_data.get('email')
            # UserCreationForm stores the chosen password as "password1"
            # (restored here from an anonymization placeholder).
            raw_password = form.cleaned_data.get('password1')
            account = authenticate(email=email, password=raw_password)
            login(request, account)
            return redirect('dashboard')
        context['form'] = form
    else:
        context['form'] = DonorSignupForm()
    return render(request, 'authentication/signup.html', context)
def login_user(request):
    """Authenticate a donor by email/password.

    Already-authenticated users are redirected to the dashboard.  A fresh
    login is redirected to the profile page until a Profile exists.  On an
    invalid submission the bound form (with its errors) is re-rendered --
    the original code dropped it, so errors were never shown.
    """
    context = {}
    if request.user.is_authenticated:
        return redirect("dashboard")
    if request.method == 'POST':
        form = DonorAuthenticationForm(request.POST)
        if form.is_valid():
            email = request.POST['email']
            # "password" restored from an anonymization placeholder.
            password = request.POST['password']
            user = authenticate(email=email, password=password)
            if user:
                login(request, user)
                if Profile.objects.filter(donor=request.user).exists():
                    return redirect("dashboard")
                return redirect('profile')
        # Pass the bound form back so validation errors are displayed.
        context['form'] = form
    else:
        context['form'] = DonorAuthenticationForm()
    return render(request, 'authentication/login.html', context)
@login_required
def profile(request):
    """Create or edit the signed-in user's Profile.

    GET renders the form (pre-filled when a Profile exists); POST saves it.
    ModelForm.save() raises ValueError when the submitted data is invalid,
    which is caught and redirected back to the profile page (errors are
    not shown -- NOTE(review): consider re-rendering the bound form).
    """
    # return render(request, 'navigation/profile.html', {'form':ProfileForm})
    if Profile.objects.filter(donor=request.user).exists():
        # Edit path: bind the form to the existing Profile row.
        data = get_object_or_404(Profile, donor=request.user)
        if request.method == 'GET':
            form = ProfileForm(instance=data)
            return render(request, 'navigation/profile.html', {'form': form})
        else:
            try:
                form = ProfileForm(request.POST, instance=data)
                form.save()
                return redirect('dashboard')
            except ValueError:
                return redirect('profile')
    else:
        # Create path: the donor FK and email are stamped from the session
        # user rather than trusted from the form.
        if request.method == 'GET':
            form = ProfileForm()
            return render(request, 'navigation/profile.html', {'form': form})
        else:
            try:
                form = ProfileForm(request.POST)
                new_profile = form.save(commit=False)
                new_profile.donor = request.user
                new_profile.email = request.user.email
                new_profile.save()
                return redirect('dashboard')
            except ValueError:
                return redirect('profile')
@login_required
def logout_user(request):
    """End the current session and send the user back to the login page."""
    logout(request)
    return redirect('login')
@login_required
def dashboard(request):
    """List donor profiles, optionally filtered by the "bl_group" query param.

    "all" or a missing parameter shows every profile.  Users who have not
    created their own Profile yet are redirected to the profile page.
    (Removed a leftover debug print and merged the duplicated branches.)
    """
    bl_group = request.GET.get('bl_group')
    if bl_group is None or bl_group == 'all':
        data = Profile.objects.all()
    else:
        data = Profile.objects.filter(blood_group=bl_group)
    if not Profile.objects.filter(donor=request.user).exists():
        return redirect('profile')
    return render(request, 'navigation/dashboard.html', {'data': data})
@login_required
def mail_user(request, donor_pk):
    """Compose or send an in-app message to the donor behind *donor_pk*.

    GET pre-fills a request-for-blood message using the sender's own
    Profile; POST stamps the sender's email onto the message and saves it.
    ModelForm.save() raises ValueError when the submitted data is invalid.
    """
    recipient = get_object_or_404(Profile, pk=donor_pk)
    if request.method == 'POST':
        form = MessageForm(request.POST)
        new_msg = form.save(commit=False)
        new_msg.email_from = request.user.email
        new_msg.save()
        return redirect('dashboard')
    # Fetch the sender's Profile once (the original queried it three times
    # and crashed with IndexError when missing; 404 is cleaner).
    sender = get_object_or_404(Profile, email=request.user.email)
    msg = (
        'Hello {},<br><br>I am {}. I am in emergency need of {} blood group. '
        'So it would be wonderful if you can call me at {} or mail me at {}. '
        '<br><br>Thank you'
    ).format(recipient.name, sender.name, recipient.blood_group, sender.phone, request.user.email)
    form = MessageForm({'email_to': recipient.email, 'message': msg})
    return render(request, 'navigation/mail.html', {'form': form, 'info': sender.info})
@login_required
def messages(request):
    """Inbox: every message addressed to the signed-in user, newest first."""
    inbox = Messages.objects.filter(email_to=request.user.email).order_by('-id')
    return render(request, 'navigation/messages.html', {'data': inbox})
| 392444ad4b73de488673af7240f3feb1a850628b | [
"Markdown",
"Python",
"HTML"
] | 7 | Markdown | voomoo/Django_BloodHero | 5dce64ea737ad6cfe7dca380a4ad328ee961da6e | 32e9e8ef925836662e7b7124eb8f6f70cf648cae |
refs/heads/master | <file_sep>import React from 'react';
import {YellowBox} from 'react-native';
import {createStackNavigator} from 'react-navigation';
import {Provider} from 'react-redux';
import AspectView from './lib/AspectView';
import createStore from './lib/store';
import styles, {color} from './lib/styles';
// Silence known, non-actionable warnings from React Navigation / RN modules.
YellowBox.ignoreWarnings(['Warning: isMounted(...) is deprecated', 'Module RCTImageLoader']);
// Single-screen stack navigator; shared styles keep the dark theme consistent.
const Navigator = createStackNavigator(
  {
    AspectView
  },
  {
    cardStyle: styles.navigationCard,
    navigationOptions: {
      headerStyle: styles.navigationHeader,
      headerTintColor: color.light,
      headerTitleStyle: styles.navigationHeaderTitle,
    },
  },
);
export default class App extends React.Component {
constructor() {
super();
this.store = null;
this.state = {loading: true};
}
async componentDidMount() {
this.store = await createStore();
this.setState({loading: false});
}
render() {
if (this.state.loading) {
return null;
}
return (
<Provider store={this.store}>
<Navigator />
</Provider>
);
}
}
<file_sep>import Actions from './actions';
// Plain FSA-style action creators for the aspect store.
export const addAspect = (aspect) => ({aspect, type: Actions.ADD_ASPECT});

export const deleteAspect = (id) => ({id, type: Actions.DELETE_ASPECT});

export const updateAspect = (id, props) => ({id, props, type: Actions.UPDATE_ASPECT});
<file_sep>import InputAccessoryView from 'InputAccessoryView';
import _ from 'lodash';
import React from 'react';
import {
Keyboard,
KeyboardAvoidingView,
Platform,
Text,
TextInput,
TouchableHighlight,
TouchableOpacity,
View,
} from 'react-native';
import {Icon} from 'react-native-elements'
import {SwipeListView} from 'react-native-swipe-list-view';
import {connect} from 'react-redux';
import {addAspect, deleteAspect, updateAspect} from './store/aspect/actionCreators';
import {ROOT_ID} from './store/aspect/constants';
import {getAspects} from './store/aspect/selectors';
import styles, {color, spacing} from './styles';
// Read the parent aspect id from the navigation route; the top level falls back to ROOT_ID.
const getParentId = (props) => (_.get(props.navigation.state, 'params.parentId', ROOT_ID));
class Aspect extends React.PureComponent {
render() {
return (
<TouchableHighlight
style={{backgroundColor: color.darkest, padding: spacing.medium}}
onPress={() => this.props.onPressAspect(this.props.id, this.props.name)}
underlayColor={color.darker}
>
<View style={{flexDirection: 'row', alignItems: 'center'}}>
<Icon color={color.light} name="triangle-right" size={28} type="entypo" />
<Text style={[styles.textPrimary, {flex: 1}]}>{this.props.name}</Text>
</View>
</TouchableHighlight>
);
}
}
class AspectHidden extends React.PureComponent {
renderIcon({color, containerStyle, name, onPress}) {
return (
<Icon
key={name}
containerStyle={containerStyle}
color={color}
name={name}
onPress={onPress}
size={64}
type="evilicon"
underlayColor="transparent"
/>
);
}
renderIcons() {
let icons = [
{
color: color.complementaryLighter,
name: 'close',
onPress: () => this.props.onPressDelete(this.props.id),
},
{
color: color.lighter,
name: 'pencil',
onPress: () => this.props.onPressEdit(this.props.id),
containerStyle: {
marginLeft: 'auto',
},
},
]
return icons.map(this.renderIcon);
}
render() {
return (
<View style={{alignItems: 'center', backgroundColor: color.darker, flex: 1, flexDirection: 'row'}}>
{this.renderIcons()}
</View>
);
}
}
/**
 * Main screen: a swipeable list of the current parent's child aspects plus a
 * sticky input bar for adding or editing entries.  State holds the text being
 * typed (`name`) and, while editing, the id of the aspect being edited.
 */
class AspectView extends React.Component {
  constructor(props) {
    super(props);
    // Refs kept outside state: the TextInput (to focus it imperatively)
    // and the currently open swipe row (to close it on edit).
    this.input = null;
    this.openRow = null;
    this.parentId = getParentId(props);
    this.state = this.getInitialState();
  }
  getInitialState() {
    return {editAspectId: null, name: ''};
  }
  componentDidMount() {
    // Expose a callback so the static header "+" button can focus the input.
    this.props.navigation.setParams({
      onPlusPressed: () => {
        this.resetState();
        if (this.input) {
          this.input.focus();
        }
      },
    });
    // TODO: This doesn't seem to allow tapping outside of the input to close it...
    // if (this.input && this.props.aspects.length === 0) {
    //   this.input.focus();
    // }
  }
  resetState() {
    this.setState(this.getInitialState());
  }
  // Clear the input bar and leave edit mode.
  _resetInput = () => {
    this.resetState();
    Keyboard.dismiss();
  }
  // Push a new screen instance scoped to the tapped aspect's children.
  _navigateToAspect = (parentId, name) => {
    this.props.navigation.push('AspectView', {parentId, name});
  }
  _deleteAspect = (id) => {
    this.props.deleteAspect(id);
  }
  // Enter edit mode: load the aspect's name into the input and focus it.
  _editAspect = (id) => {
    // Consider mapping.
    let {name} = this.props.aspects.find((a) => a.id === id);
    this.setState({editAspectId: id, name});
    if (this.input) {
      this.input.focus();
    }
    if (this.openRow) {
      this.openRow.closeRow();
    }
  }
  _renderAspect = (aspect) => {
    return (
      <Aspect
        id={aspect.id}
        name={aspect.name}
        onPressAspect={this._navigateToAspect}
      />
    );
  }
  _renderAspectHidden = (aspect) => {
    return (
      <AspectHidden
        id={aspect.id}
        onPressDelete={this._deleteAspect}
        onPressEdit={this._editAspect}
      />
    );
  }
  // Banner shown above the input bar only while editing an existing aspect.
  renderEditAccessory() {
    if (!this.state.editAspectId) {
      return null;
    }
    return (
      <View style={{
        alignItems: 'center',
        justifyContent: 'space-between',
        borderTopColor: color.dark,
        borderTopWidth: 1,
        flexDirection: 'row',
        padding: spacing.medium,
      }}>
        <Text style={styles.textSecondary}>
          Editing...
        </Text>
        <Icon
          iconStyle={styles.textSecondary}
          name='close'
          onPress={this._resetInput}
          size={48}
          type="evilicon"
          underlayColor="transparent"
        />
      </View>
    );
  }
  render() {
    // How far a row slides open (px) on swipe, both directions.
    let openValue = 75;
    return (
      <React.Fragment>
        <SwipeListView
          data={this.props.aspects}
          directionalDistanceChangeThreshold={1}
          keyExtractor={({id}) => id}
          closeOnRowBeginSwipe={true}
          leftOpenValue={openValue}
          onRowOpen={(rowKey, rowMap) => (this.openRow = rowMap[rowKey])}
          renderItem={({item}) => this._renderAspect(item)}
          renderHiddenItem={({item}) => this._renderAspectHidden(item)}
          rightOpenValue={openValue * -1}
          useFlatList
        />
        <KeyboardAvoidingView
          behavior={Platform.select({ios: 'padding'})}
          keyboardVerticalOffset={Platform.select({android: 0, ios: 64})}
        >
          {this.renderEditAccessory()}
          <View style={{
            alignItems: 'center',
            justifyContent: 'center',
            borderTopColor: color.dark,
            borderTopWidth: 1,
            flexDirection: 'row',
            padding: spacing.medium,
          }}>
            <TextInput
              style={[
                styles.textPrimary,
                {flex: 1, maxHeight: 200, paddingBottom: spacing.small, paddingTop: spacing.small},
              ]}
              ref={(input) => (this.input = input)}
              autoCorrect={false}
              multiline={true}
              onChangeText={(name) => this.setState({name})}
              placeholder="What's in your mind?"
              placeholderTextColor={color.light}
              selectionColor={color.lighter}
              underlineColorAndroid="transparent"
              value={this.state.name}
            />
            <Icon
              containerStyle={{alignSelf: 'flex-end', marginLeft: 'auto'}}
              color={this.state.name ? color.lighter : color.light}
              disabled={!this.state.name}
              size={64}
              onPress={() => {
                // Confirm: update when editing, otherwise add under parentId.
                if (this.state.editAspectId) {
                  this.props.updateAspect(this.state.editAspectId, {name: this.state.name});
                } else {
                  this.props.addAspect({name: this.state.name, parentId: this.parentId});
                }
                this._resetInput();
              }}
              name="check"
              type="evilicon"
              underlayColor="transparent"
            />
          </View>
        </KeyboardAvoidingView>
      </React.Fragment>
    );
  }
}
// Static navigator config: the header "+" button invokes the onPlusPressed
// param set in componentDidMount; the title is the current aspect's name.
AspectView.navigationOptions = ({navigation}) => {
  return {
    headerRight: (
      <Icon
        containerStyle={{paddingRight: 9, paddingTop: 3}}
        color={color.light}
        name="plus"
        onPress={() => _.invoke(navigation.state, 'params.onPlusPressed')}
        size={36}
        type="entypo"
        underlayColor="transparent"
      />
    ),
    title: _.get(navigation.state, 'params.name'),
  };
};
// Expose the aspect action creators as pre-bound props.
const mapDispatchToProps = (dispatch) => ({
  addAspect: (aspect) => dispatch(addAspect(aspect)),
  deleteAspect: (id) => dispatch(deleteAspect(id)),
  updateAspect: (id, props) => dispatch(updateAspect(id, props)),
});
// Select the children of the route's parent aspect (root when absent).
const mapStateToProps = (state, props) => ({
  aspects: getAspects(state.aspectState, getParentId(props)),
});

export default connect(mapStateToProps, mapDispatchToProps)(AspectView);
<file_sep>import {AsyncStorage} from 'react-native';
import {combineReducers, createStore} from 'redux';
import aspectReducer from './aspect/reducer';
const STORAGE_KEY = 'aspect:state';
/**
 * Build the Redux store, rehydrating persisted state from AsyncStorage and
 * persisting the full state after every dispatched action.
 */
export default async function() {
  let reducer = combineReducers({
    aspectState: aspectReducer,
  });
  // Corrupt persisted JSON previously threw here and bricked app startup;
  // fall back to a fresh store instead.
  let initialState;
  try {
    let json = await AsyncStorage.getItem(STORAGE_KEY);
    initialState = json ? JSON.parse(json) : undefined;
  } catch (err) {
    console.warn('Discarding unreadable persisted state', err);
    initialState = undefined;
  }
  let store = createStore(reducer, initialState);
  store.subscribe(async () => {
    await AsyncStorage.setItem(STORAGE_KEY, JSON.stringify(store.getState()));
  });
  return store;
}
<file_sep>export default {
ADD_ASPECT: 'ADD_ASPECT',
DELETE_ASPECT: 'DELETE_ASPECT',
UPDATE_ASPECT: 'UPDATE_ASPECT',
};
| b915886acc2b13b3cbf7e43759c24c8973ff2530 | [
"JavaScript"
] | 5 | JavaScript | wilimitis/aspect | aa1fda6320d4dfbb2863801438d5a87371576f54 | db636d2d6bf63fb0bc6555412a035b1c25162658 |
refs/heads/master | <repo_name>nutch31/SendDailyEmail<file_sep>/database/migrations/2018_08_22_081137_create_send_daily_email.php
<?php
use Illuminate\Support\Facades\Schema;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Database\Migrations\Migration;
class CreateSendDailyEmail extends Migration
{
    /**
     * Run the migrations.
     *
     * Creates the log table recording every daily e-mail dispatch:
     * attachment metadata, rendered content, delivery status and the
     * addressing fields.
     *
     * @return void
     */
    public function up()
    {
        Schema::create('log_send_daily_mail', function (Blueprint $table) {
            $table->increments('id');
            $table->string('file_url');
            $table->string('mime_type');
            $table->string('extension');
            $table->text('content');
            $table->string('status');
            $table->string('email_from');
            $table->string('email');
            $table->string('email_cc');
            $table->string('email_bcc');
            $table->timestamps();
        });
    }

    /**
     * Reverse the migrations.
     *
     * @return void
     */
    public function down()
    {
        // Must drop the same table up() creates; the original referenced the
        // non-existent table "send_daily_email", so rollback was a no-op.
        Schema::dropIfExists('log_send_daily_mail');
    }
}
<file_sep>/app/Http/Controllers/HomeController.php
<?php
namespace App\Http\Controllers;
use Illuminate\Http\Request;
use Illuminate\Support\Facades\Mail;
use App\Mail\SendMailable;
class HomeController extends Controller
{
    /**
     * Send a hard-coded demo e-mail using the "emails.name" view.
     *
     * NOTE(review): sender/recipient are fixed placeholders (anonymized in
     * this snapshot) -- they should come from configuration or the request.
     *
     * @param  \Illuminate\Http\Request  $request
     * @return string confirmation text
     */
    public function mail(Request $request)
    {
        $data = [
            'name' => '<NAME>',
        ];
        Mail::send('emails.name', $data, function ($message) use ($data)
        {
            $message->from('<EMAIL>');
            $message->to('<EMAIL>');
            //<EMAIL> '/'.App::getLocale().'/contact'
            $message->subject('Email from Website dittymusic.com | '.$data['name'].'');
        });
        return 'Email was sent';
    }
}
<file_sep>/routes/api.php
<?php
use Illuminate\Http\Request;
/*
|--------------------------------------------------------------------------
| API Routes
|--------------------------------------------------------------------------
|
| Here is where you can register API routes for your application. These
| routes are loaded by the RouteServiceProvider within a group which
| is assigned the "api" middleware group. Enjoy building your API!
|
*/
// Authenticated endpoint: returns the currently logged-in API user.
Route::middleware('auth:api')->get('/user', function (Request $request) {
    return $request->user();
});

// Daily-mail endpoints handled by the SendDailyEmail controller.
// 'sendEmail' is the entry point invoked by the scheduled console command / cron script.
Route::get('index', 'SendDailyEmail@index');
Route::post('sendEmail', 'SendDailyEmail@sendEmail');
Route::post('sendEmail2', 'SendDailyEmail@sendEmail2');
Route::post('sendEmail3', 'SendDailyEmail@sendEmail3');
Route::get('view', 'SendDailyEmail@view');
<file_sep>/app/Console/Commands/SendDailyEmail.php
<?php
namespace App\Console\Commands;
use Illuminate\Console\Command;
class SendDailyEmail extends Command
{
    /**
     * The name and signature of the console command.
     *
     * @var string
     */
    protected $signature = 'command:SendDailyEmail';

    /**
     * The console command description.
     *
     * @var string
     */
    protected $description = 'Send Daily Email Every 10.00 AM';

    /**
     * Create a new command instance.
     *
     * @return void
     */
    public function __construct()
    {
        parent::__construct();
    }

    /**
     * Execute the console command: POST the report URL and the recipient
     * list to the internal sendEmail API endpoint via cURL.
     *
     * @return mixed
     */
    public function handle()
    {
        $ch = curl_init();
        curl_setopt($ch, CURLOPT_URL, "http://message.heroleads.co.th/SendDailyEmail/public/index.php/api/sendEmail");
        curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);
        curl_setopt($ch, CURLOPT_POST, true);
        $data = array(
            'fileUrl' => 'https://docs.google.com/spreadsheets/d/1PediIgoD_LKppgwm4jF5srgQN_SqTHilKazkS_AYmaQ/edit?pli=1#gid=152349423',
            'mimeType' => 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
            'email_from' => '<EMAIL>',
            'email[0]' => '<EMAIL>',
            'email[1]' => '<EMAIL>',
            'email[2]' => '<EMAIL>',
            'email[3]' => '<EMAIL>'
        );
        curl_setopt($ch, CURLOPT_POSTFIELDS, $data);

        $output = curl_exec($ch);
        // The original discarded $output and curl_getinfo() entirely, so a
        // failed send was invisible. Report the outcome on the console.
        if ($output === false) {
            $this->error('SendDailyEmail request failed: '.curl_error($ch));
        } else {
            $this->info('SendDailyEmail response: '.$output);
        }
        curl_close($ch);
    }
}
<file_sep>/public/crontab.php
<?php
// Cron entry point: triggers the daily promo-report e-mail by POSTing the
// source document URL and recipient to the internal sendEmail API endpoint.

$ch = curl_init();
curl_setopt($ch, CURLOPT_URL, "http://message.heroleads.co.th/SendDailyEmail/public/index.php/api/sendEmail");
curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);
curl_setopt($ch, CURLOPT_POST, true);

$data = array(
    'fileUrl' => 'https://docs.google.com/spreadsheets/d/1PediIgoD_LKppgwm4jF5srgQN_SqTHilKazkS_AYmaQ/edit?pli=1#gid=152349423',
    'mimeType' => 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    'email' => '<EMAIL>'
);
curl_setopt($ch, CURLOPT_POSTFIELDS, $data);

$output = curl_exec($ch);

// Surface transport failures: the original printed "===> " followed by
// nothing when curl_exec() returned false, hiding the error completely.
// (The large commented-out experiments that followed were dead code and
// have been removed.)
if ($output === false) {
    echo 'cURL error: '.curl_error($ch);
} else {
    echo '===> '.$output;
}
curl_close($ch);
?>
"PHP"
] | 5 | PHP | nutch31/SendDailyEmail | 696ee985ca9c336467a97139e77df20d384c6aa9 | 33372dd2bc76332c0d955d759500fd3e40845f71 |
refs/heads/master | <repo_name>lunaQi/fenci_02<file_sep>/fenci_02.py
# !/usr/bin/env python
# -*- coding : utf-8 -*-
import jieba
import jieba.posseg as pseg
'''
def delete_space(stopwordspath):
stw_list = [line.strip()
for line in open(stopwordspath,'r')]
return stw_list
'''
f1 = open("150518it23974.txt")
f2 = open("fenci_150518it23974.txt", 'w')
lines = f1.readlines()
'''
for line in lines:
print line.decode('utf-8')
'''
text = ''
for line in lines:
line.replace('\t', '').replace('\n', '').replace(' ', '')
if len(line) > 0: # delete a null line
#seg_list = jieba.cut(line, cut_all=False)
words = pseg.cut(line)
#for word in seg_list:
for word, flag in words:
if (flag == 'Ng') or (flag == 'n') or (flag == 'nr') or (flag == 'ns') or (flag == 'nt') or (flag == 'nz') or (flag == 'vn'):
text = text + ',' + str(word.encode('utf8'))
#print(text)
f2.write(text)
f1.close()
f2.close()
word_lst = []
word_dict = {}
with open("fenci_150518it23974.txt") as wf, open("wfr_150518it23974.txt", 'w') as wf2:
for word in wf:
word_lst.append(word.split(','))
for item in word_lst:
for item2 in item:
if item2 not in word_dict:
word_dict[item2] = 1
else:
word_dict[item2] += 1
for key in word_dict:
print key,str(word_dict[key])
wf2.write(key + ' ' + str(word_dict[key]) + '\n')
| b123a2c8b7280cd1357ccdc265e80031ad6d5252 | [
"Python"
] | 1 | Python | lunaQi/fenci_02 | e449a796c23b2d9b88bc5b46887d19099527d680 | 27ff52010debfe26ab06769a312304e4c99f3ef5 |
refs/heads/master | <file_sep>#include<iostream>
#include<pthread.h>
#include<semaphore.h>
#include<unistd.h>
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
using namespace std;
int a[10];         // shared ring buffer of 10 slots
int rPtr = -1;     // index of the last slot read (-1 = nothing read yet)
int wPtr = -1;     // index of the last slot written (-1 = nothing written yet)
sem_t seqSem;      // signals "buffer state changed" between producer and consumer
sem_t prodSem;     // one post per produce request from the user
sem_t consSem;     // one post per consume request from the user
pthread_mutex_t mMutex;            // protects a[], rPtr and wPtr
pthread_t prodThread, consThread;  // worker thread handles
// Request one produce operation: wakes the producer thread via prodSem.
void produce()
{
    sem_post(&prodSem);
}
// Request one consume operation: wakes the consumer thread via consSem.
void consume()
{
    sem_post(&consSem);
}
// Advance a ring-buffer index: the sentinel -1 (unused) maps to slot 0,
// the last slot (9) wraps back to 0, anything else steps forward by one.
int getNextPtrVal(int currVal)
{
    if (currVal == -1 || currVal >= 9)
        return 0;
    return currVal + 1;
}
// Dump the buffer contents and both pointers while holding the mutex, so the
// snapshot is consistent with concurrent producer/consumer updates.
void display()
{
    pthread_mutex_lock(&mMutex);
    for (int idx = 0; idx < 10; ++idx)
        cout << "a[" << idx << "]=" << a[idx] << endl;
    cout << "rPtr is " << rPtr << "wPtr is " << wPtr << endl;
    pthread_mutex_unlock(&mMutex);
}
// Producer thread body: waits for a produce request (prodSem), then tries to
// claim the next write slot under the mutex. If the ring is full it releases
// the mutex and blocks on seqSem until the consumer frees a slot; on success
// it stores 10*slot and posts seqSem to wake a waiting consumer.
void * producer(void *arg )
{
    while (1)
    {
        cout << "Waiting for prod call" << endl;
        sem_wait(&prodSem);
        cout << "Got a prod call" << endl;
        do
        {
            pthread_mutex_lock(&mMutex);
            cout << "Got the sync mutex" << endl;
            int newWPtr;
            bool goodToWrite = false;
            if ( rPtr == -1 )
            {
                // Nothing consumed yet: may fill slots 0..9 sequentially.
                if ( wPtr < 9 )
                {
                    newWPtr = wPtr + 1;
                    goodToWrite = true;
                }
            }
            else
            {
                // Normal ring behaviour: the write pointer must not catch
                // up with the read pointer (that would mean "full").
                newWPtr = getNextPtrVal(wPtr);
                if ( newWPtr != rPtr )
                    goodToWrite = true;
            }
            if ( goodToWrite )
            {
                wPtr = newWPtr;
                a[wPtr] = 10*wPtr;  // demo payload derived from the slot index
                cout << "Wrote at [" << wPtr <<"] value [" << a[wPtr] <<"]" << endl;
                pthread_mutex_unlock(&mMutex);
                sem_post(&seqSem);  // notify the consumer that data is available
                break;
            }
            else
            {
                int semVal;
                cout << " Can't write as buff full with rPtr is " << rPtr << ", wPtr is:" << wPtr << endl;
                sem_getvalue(&seqSem, &semVal);
                cout << " Value of seq sem before is" << semVal << endl;
                pthread_mutex_unlock(&mMutex);
                // Block until the consumer signals a state change, then retry.
                sem_wait(&seqSem);
                cout << " Value of seq sem after is" << semVal << endl;
            }
        }while(1);
    }
}
// Consumer thread body: waits for a consume request (consSem), then tries to
// advance the read pointer under the mutex. If the ring is empty it releases
// the mutex and blocks on seqSem until the producer writes; on success it
// prints the value and posts seqSem to wake a waiting producer.
void * consumer(void * arg )
{
    sem_wait(&seqSem); //Reader can't start till the 1st write is done..
    while (1)
    {
        cout << "Waiting for consume call" << endl;
        sem_wait(&consSem);
        cout << "Got a consume call" << endl;
        do
        {
            pthread_mutex_lock(&mMutex);
            cout << "Got the sync mutex" << endl;
            int newRPtr;
            bool goodToRead = false;
            if ( wPtr == 9 )
            {
                // Writer has reached the last slot: reader may advance until
                // it also reaches slot 9.
                if ( rPtr < 9 )
                {
                    newRPtr = getNextPtrVal(rPtr);
                    goodToRead = true;
                }
            }
            else if ( wPtr != -1 )
            {
                // Normal case: reader may advance only up to the last
                // written slot.
                newRPtr = getNextPtrVal(rPtr);
                if (newRPtr <= wPtr )
                    goodToRead = true;
            }
            if ( goodToRead )
            {
                rPtr = newRPtr;
                cout << "Yipee - read done on rPtr[ " << rPtr << "] value is [" << a[rPtr] << "]" << endl;
                pthread_mutex_unlock(&mMutex);
                sem_post(&seqSem);  // notify the producer that a slot was freed
                break;
            }
            else
            {
                int semVal;
                cout << " Can't read as buff is empty with wPtr is " << wPtr << ", rPtr is:" << rPtr << endl;
                sem_getvalue(&seqSem, &semVal);
                cout << " Value of seq sem before is" << semVal << endl;
                pthread_mutex_unlock(&mMutex);
                // Block until the producer signals a state change, then retry.
                sem_wait(&seqSem);
                cout << " Value of seq sem after is" << semVal << endl;
            }
        }while(1);
    }
}
// Interactive driver: spawns the producer/consumer threads and converts
// keyboard commands into semaphore posts.
//   0 -> request a produce, 1 -> request a consume, 2 -> dump buffer state
int main()
{
    sem_init(&seqSem, 0, 0);
    sem_init(&prodSem, 0, 0);
    sem_init(&consSem, 0, 0);
    memset(a, 0, 10*sizeof(int));

    int retVal = pthread_create(&prodThread, NULL, producer, NULL);
    retVal = pthread_create(&consThread, NULL, consumer, NULL);

    int option;
    while(1)
    {
        cout << "EHllo1" << endl;
        cout << "Enter your option 0/1/2:";
        // BUG FIX: the original called fflush(stdin), which is undefined
        // behaviour per the C standard. Check the scanf result instead and,
        // on bad input, discard the rest of the line and re-prompt.
        if (scanf("%d", &option) != 1)
        {
            int c;
            while ((c = getchar()) != '\n' && c != EOF)
                ;
            if (c == EOF)
                break;
            continue;
        }
        cout << "Hello2, option"<< option << endl;
        switch(option)
        {
            case 0:
                produce();
                break;
            case 1:
                consume();
                break;
            case 2:
                display();
                break;
        }
        sleep(1);
    }
    // Unreachable in normal interactive use (the loop only exits on EOF),
    // kept for completeness.
    pthread_join(prodThread,NULL);
    pthread_join(consThread,NULL);
    return 0;
}
<file_sep>all:
	g++ *.cpp -lpthread

# Delete compiled object files left over from previous builds
# (note: does not remove the produced a.out binary).
clean:
	rm -rf *.o
"Makefile",
"C++"
] | 2 | C++ | ramakrishnan-gh/C-Apps | 176419a84bde829675d125757ea69c86585b6537 | f72156caac56aca9754159a42fc36bf0c1ec5094 |
refs/heads/main | <file_sep># -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re, sys
from bs4 import BeautifulSoup
def uprint(*objects, sep=' ', end='\n', file=sys.stdout):
    """Print that never raises UnicodeEncodeError on a narrow output stream.

    On a UTF-8 stream this behaves exactly like print(); on any other
    encoding each object is round-tripped through that encoding with
    backslashreplace, so unencodable characters become escape sequences.
    """
    enc = file.encoding
    if enc == 'UTF-8':
        print(*objects, sep=sep, end=end, file=file)
        return
    safe = (str(obj).encode(enc, errors='backslashreplace').decode(enc)
            for obj in objects)
    print(*safe, sep=sep, end=end, file=file)
def productInfoFound(infoText, productInfo):
    """Echo a labelled product field and append it to this run's output file."""
    record = infoText + productInfo
    print(record)
    # The output file is named after the month/year the user typed in
    # (module-level docTitle).
    with open("NutrispaceOnPromo_" + docTitle + ".txt", "a") as out:
        out.write(record + "\n")
def productInfoNotFound(infoTextNF):
    """Echo a 'field missing' message and append it to this run's output file."""
    print(infoTextNF)
    # Same per-run output file as productInfoFound (named via module-level docTitle).
    with open("NutrispaceOnPromo_" + docTitle + ".txt", "a") as out:
        out.write(infoTextNF + "\n")
def nutrispace_Login(base_url):
    """Open the Nutrispace login page and submit the (placeholder) credentials.

    Replace the username/password literals with real values before running.
    The sleeps give the ASP.NET page time to render between interactions.
    """
    driver.get(base_url + "/Nutrispace/Login.aspx")
    time.sleep(3)

    username_field = driver.find_element_by_id("p_lt_zoneLogin_NSLogonForm_UserName")
    username_field.clear()
    username_field.send_keys("0000000")
    time.sleep(1)

    password_field = driver.find_element_by_id("p_lt_zoneLogin_NSLogonForm_Password")
    password_field.clear()
    password_field.send_keys("<PASSWORD>")
    time.sleep(1)

    driver.find_element_by_id("p_lt_zoneLogin_NSLogonForm_LoginButton").click()
def get_dynamicOnPromotionProducts(docTitle):
    """Scrape every promo-product card on the current page.

    For each 'boxGradient' card, extract the name, promo price, regular
    price, description and promo code; found fields are echoed and appended
    to the per-run output file via productInfoFound/productInfoNotFound.
    Each field is wrapped in its own try so one missing element does not
    abort the rest of the card.
    """
    promoProducts = []
    promoProducts = driver.find_elements_by_xpath('.//div[@class="boxGradient"]')
    #print(promoProducts)
    #print(len(promoProducts))
    if len(promoProducts) > 0:
        for elem in promoProducts:
            # Promoted Product Titles and price
            try:
                #print (elem)
                title = elem.find_element_by_xpath(".//div[@class='highlightNorm']/div/h4/span").text
                infoText = "Promoted Product: "
                productInfo = title
                productInfoFound(infoText, productInfo)
            except: # catch *all* exceptions
                infoTextNF = "Couldn't retrieve this Promo Product Name."
                productInfoNotFound(infoTextNF)
            try:
                promo_price = elem.find_element_by_xpath(".//label[@class='promoPriceColor']").text
                infoText = "Promo Price: "
                productInfo = promo_price
                productInfoFound(infoText, productInfo)
            except: # catch *all* exceptions
                print("Couldn't retrieve Promo Product Price.")
            try:
                # Regular price text looks like "<price> ...": keep the first token only.
                reg_price = elem.find_element_by_xpath(".//div[@class='linkBox']/span")
                if reg_price.text:
                    price = reg_price.text.split()
                    infoText = "Regular Price: "
                    productInfo = price[0]
                    productInfoFound(infoText, productInfo)
            except: # catch *all* exceptions
                print("Couldn't retrieve Regular Product Price.")
            try:
                promo_description = elem.find_element_by_xpath(".//div[@class='productTxt']/div").text
                infoText = "Product Description: "
                productInfo = promo_description
                productInfoFound(infoText, productInfo)
            except: # catch *all* exceptions
                infoTextNF = "Couldn't retrieve this Promo Product Description."
                productInfoNotFound(infoTextNF)
            try:
                # The promo code is written directly (not via productInfoFound)
                # and is followed by a blank line to separate the cards.
                promo_code = elem.find_element_by_xpath(".//div[@class='linkBox']/strong").text
                print(promo_code)
                print("\n")
                f = open("NutrispaceOnPromo_"+docTitle+".txt", "a")
                f.write(promo_code + "\n"+"\n")
                f.close()
            except: # catch *all* exceptions
                print("Couldn't retrieve this Promo Code.")
                print("\n")
                # SAVE DATA to A FILE
                f = open("NutrispaceOnPromo_"+docTitle+".txt", "a")
                f.write("Couldn't retrieve this Promo Code" + "\n"+ "\n")
                f.close()
    else:
        print("No Products found on the Page")
def is_element_present(how, what):
    """Return True when the locator pair (how, what) matches an element on the page."""
    try:
        driver.find_element(by=how, value=what)
        return True
    except NoSuchElementException:
        return False
def is_alert_present():
    """Return True when switching to the current JavaScript alert succeeds."""
    try:
        driver.switch_to_alert()
        return True
    except NoAlertPresentException:
        return False
def close_alert_and_get_its_text():
    """Accept or dismiss the current alert and return its text.

    BUG FIX: this module-level function was pasted from a unittest class and
    still referenced self.accept_next_alert, which raised NameError when
    called. It now uses the module-level accept_next_alert flag, which is
    reset to True after every call (matching the original intent).
    """
    global accept_next_alert
    try:
        alert = driver.switch_to_alert()
        alert_text = alert.text
        if accept_next_alert:
            alert.accept()
        else:
            alert.dismiss()
        return alert_text
    finally:
        accept_next_alert = True
def tearDown():
    """Quit the browser and fail if any soft-verification errors were recorded.

    BUG FIX: the original (pasted from a unittest class) referenced
    self.verificationErrors and an undefined assertEqual — both NameErrors
    at module level. It now checks the module-level verificationErrors list.
    """
    driver.quit()
    assert verificationErrors == [], verificationErrors
# --- Main script: log in, then page through every "On Promotion" page ---

# Month/year typed by the user; used to name the output text file.
docTitle = input("Enter the Month and Year, for the Output file: ")

driver = webdriver.Firefox()
driver.implicitly_wait(30)
base_url = "https://www.nutrimetics.co.nz/"
start_url = "https://www.nutrimetics.co.nz/Nutrispace/OD/Ordering/OnPromotion.aspx"
verificationErrors = []
accept_next_alert = True

# Start Login process
nutrispace_Login(base_url)
time.sleep(5)

# Navigate to the Product Promotions Start page
#gen_soup = get_OnPromotionWebpage(start_url)
driver.get(start_url)
time.sleep(8)

# Setup the continuous Scraping until there are no more Promo Product pages left to scrape
continue_scraping = True
while continue_scraping:
    # Navigate to the next Product Promotions page.
    # When the "Last" pager button is disabled we are on the final page:
    # start_url is set to None as the stop marker checked below.
    try:
        if driver.find_element_by_css_selector("#p_lt_zoneSlavePages_pageplaceholder1_p_lt_zoneContent_pageplaceholder_p_lt_zonebody_NSDataList_pager_LinkButtonLast").get_attribute('disabled'):
            start_url= None
    except:
        print("Couldn't find Last Page Link.")
        print("\n")
    else:
        try:
            driver.find_element_by_id("p_lt_zoneSlavePages_pageplaceholder1_p_lt_zoneContent_pageplaceholder_p_lt_zonebody_NSDataList_pager_LinkButtonNext").click()
            time.sleep(8)
        except:
            print("Couldn't find Next Page Link.")
            print("\n")
    # Dynamic Content needs to be loaded before you can then find elements with Selenium
    get_dynamicOnPromotionProducts(docTitle)
    if (start_url is None):
        #input("Press Enter to exit, and open the Product Promotion text file :")
        continue_scraping = False
        print("No More Promotion Pages to View")
        print("\n")

print("Nutrispace webscraping has Finished. Open the Product Promotion text file.")
<file_sep># Nutrimetics-Promo-ProductScraper
Extract Nutrimetics promo products automatically - to create a simple text file price list for customers
## Instructions - April 2017
Windows Instructions - Python, Selenium, Firefox, Beautifulsoup
- Install Python, if not installed already.
- Install Anaconda (version used was Anaconda3-4.1.1-Windows-x86_64.exe)
o Run cmd
o Cd into the Anaconda directory
- Need to install selenium module
o $ pip install -U selenium
o If Selenium 3 is installed :
- download geckodriver from https://github.com/mozilla/geckodriver/releases for your OS. Unzip the downloaded file and keep it in one of your project folder (ie. D:\ drive or Python path). Now set the path to geckodriver as a system path property manually in Windows.
Search for PATH
Environment variables – system
Add geckodriver to System PATH variable (directory goes at end of list)
Reboot Windows
- $ pip install beautifulsoup4
- Install and setup Firefox (see Setup Section below) to work with Selenium
- Put the Python script file Python_Nutrispace_Login_OnPromotion_UserInput.py into a directory on the computer
- Create a batch file that runs the script, and place it on the desktop so the user can launch the script directly from the Windows Desktop
## Setup Firefox
### How to create Firefox profile for your Selenium?
1. Make sure all your firefox instance are closed
2. Click Start>Run
3. Type “firefox.exe -ProfileManager -no-remote”
4. Select “Create Profile” (i.e. selenium)
5. Click “Next”
6. Enter new profile name
7. Select a directory folder to store your new profile
8. Click “Finish”
9. Select “Don’t ask at startup”
10. Click “Start Firefox” and configure settings based on suggestion below***
11. Set Profile back to “default” (enable you to use your previous settings on your browser)
12. Add -firefoxProfileTemplate command line option as you start the Selenium Server
java -jar selenium-server.jar -firefoxProfileTemplate “<Selenium Profile Directory>”
### Suggested settings for your Selenium Profile
1.From “Menu –View - Toolbars” tab, uncheck “Bookmarks Toolbar”.
2.Right click from toolbar and click “Customize”
3.Remove “Google search” by dragging it to the “Customize Toolbar” window
4.Exit Customize.
5.Click “Options” then set the following:
a. “Main” Tab
– set Home Page to “about:blank”
b. “Tabs” option
– Select “a new window” for new pages
c. “Content” tab
– uncheck “Block pop-up” windows option
d. “Privacy” tab
– uncheck all “History” options – if restart then Win+R – “firefox.exe -ProfileManager -no-remote” in admin mode - use Firefox profile selenium and add settings to it.
e. “Security” tab
– uncheck all “Security” options
– click “Settings” and uncheck all warning options
f. “Advanced” tab
– Uncheck “autoscrolling” option from “General” tab
– uncheck “warn me …” and “Search Engines”option from “Update” tab
6. From the address bar type “about:config” and add the following by right-click anywhere on the page and selecting “new”
– extensions.update.notifyUser (type=boolean; value=false)
– extensions.newAddons (type=boolean; value=false)
- security.insecure_field_warning.contextual.enabled.
and
- security.insecure_password.ui.enabled
Double-click each to change their values to false.
7. From “Tools\Add-ons” install the following:
– Firebug: allows you to edit, debug, and monitor CSS, HTML, and JavaScript on your application under test
– Selenium IDE: allows you to record, edit, and debug Selenium tests
– ScreenGrab: saves entire webpages as images.
| 585e930411c69b5afcde070ae78d05835d0be222 | [
"Markdown",
"Python"
] | 2 | Python | bitrat/Nutrimetics-Promo-ProductScraper | 65730700d269e0f26f46d49e75a6e7d9a72da5fe | 171c03e8edb92f19721c3a0795b9a12cac1cc22d |
refs/heads/master | <repo_name>zemarcelo/Godocraft<file_sep>/3D/Chunk.cs
using System;
using System.Collections;
using Godot;
public class Chunk
{
    // Raw block data for this chunk, indexed [x, y, z] in chunk-local coordinates.
    public Block[,,] chunkData;
    // Shared atlas material applied to every generated quad.
    public SpatialMaterial material;
    // Scene node that parents every quad generated for this chunk.
    public Spatial chunk;
    public string Name;

    // Fills chunkData by sampling the world-space noise functions for every
    // cell: 3D noise below the threshold carves AIR (caves); otherwise the
    // type is chosen by height (stone, dirt, grass surface, air above).
    void BuildChunk()
    {
        chunkData = new Block[Mundo.chunkSize, Mundo.chunkSize, Mundo.chunkSize];
        for (int z = 0; z < Mundo.chunkSize; z++)
            for (int y = 0; y < Mundo.chunkSize; y++)
                for (int x = 0; x < Mundo.chunkSize; x++)
                {
                    Vector3 pos = new Vector3(x, y, z);
                    int worldX = (int)(x + chunk.Translation.x);
                    int worldY = (int)(y + chunk.Translation.y);
                    int worldZ = (int)(z + chunk.Translation.z);

                    // FIX: sample the 3D noise once per cell. The original
                    // sampled it twice — once only to GD.Print it — doubling
                    // the noise cost and spamming the console for every
                    // block of every chunk.
                    float caveNoise = Utils.Draw3DStones(worldX, worldY, worldZ);

                    if (caveNoise < 0.45f)
                        chunkData[x, y, z] = new Block(Block.TipoDeBloco.AIR, pos, chunk, material, this, ref chunkData);
                    else if (worldY <= Utils.GenerateStoneHeight(worldX, worldZ))
                        chunkData[x, y, z] = new Block(Block.TipoDeBloco.STONE, pos, chunk, material, this, ref chunkData);
                    else if (worldY < Utils.GenerateHeight(worldX, worldZ))
                        chunkData[x, y, z] = new Block(Block.TipoDeBloco.DIRT, pos, chunk, material, this, ref chunkData);
                    else if (worldY == Utils.GenerateHeight(worldX, worldZ))
                        chunkData[x, y, z] = new Block(Block.TipoDeBloco.GRASS, pos, chunk, material, this, ref chunkData);
                    else
                        chunkData[x, y, z] = new Block(Block.TipoDeBloco.AIR, pos, chunk, material, this, ref chunkData);
                }
    }

    // Emits the visible faces of every block, then merges all quads into a
    // single mesh for the chunk.
    public void DrawChunk()
    {
        for (int z = 0; z < Mundo.chunkSize; z++)
            for (int y = 0; y < Mundo.chunkSize; y++)
                for (int x = 0; x < Mundo.chunkSize; x++)
                {
                    chunkData[x, y, z].Draw();
                }
        CombinaQuads();
    }

    // Creates the chunk's scene node at the given world position and builds
    // its block data immediately. (Unused rnd field and commented-out test
    // fields from the original were removed.)
    public Chunk(Vector3 position, SpatialMaterial m)
    {
        chunk = new Spatial
        {
            Name = Mundo.MontaNomeDoChunk(position)
        };
        Name = chunk.Name;
        chunk.Translation = position;
        material = m;
        BuildChunk();
    }

    // Merges every child quad MeshInstance into one MeshInstance named
    // "Chunk": quad-local vertices are converted to chunk-local coordinates,
    // each quad's material is reapplied per surface, and the original quad
    // nodes are freed.
    void CombinaQuads()
    {
        MeshInstance cube = new MeshInstance
        {
            Name = "Chunk"
        };
        ArrayMesh cubeArray = new ArrayMesh();
        Godot.Collections.Array kids = chunk.GetChildren();
        Material[] materiais = new Material[kids.Count];
        int contador = 0;
        foreach (MeshInstance meshI in kids)
        {
            Vector3[] vertLocal = (Vector3[])meshI.Mesh.SurfaceGetArrays(0)[(int)ArrayMesh.ArrayType.Vertex];
            Vector3[] vertGlobal = new Vector3[vertLocal.Length];
            // Convert the quad's vertex coordinates from quad-local to
            // chunk-local space by adding the quad node's translation.
            for (int i = 0; i < vertLocal.Length; i++)
            {
                vertGlobal[i].x = vertLocal[i].x + meshI.Translation.x;
                vertGlobal[i].y = vertLocal[i].y + meshI.Translation.y;
                vertGlobal[i].z = vertLocal[i].z + meshI.Translation.z;
            }
            var arrays = new Godot.Collections.Array();
            arrays.Resize((int)ArrayMesh.ArrayType.Max);
            arrays[(int)ArrayMesh.ArrayType.Vertex] = vertGlobal;
            arrays[(int)ArrayMesh.ArrayType.Normal] = meshI.Mesh.SurfaceGetArrays(0)[(int)ArrayMesh.ArrayType.Normal];
            arrays[(int)ArrayMesh.ArrayType.TexUv] = meshI.Mesh.SurfaceGetArrays(0)[(int)ArrayMesh.ArrayType.TexUv];
            arrays[(int)ArrayMesh.ArrayType.Index] = meshI.Mesh.SurfaceGetArrays(0)[(int)ArrayMesh.ArrayType.Index];
            cubeArray.AddSurfaceFromArrays(Mesh.PrimitiveType.Triangles, arrays);
            materiais[contador] = meshI.GetSurfaceMaterial(0);
            meshI.QueueFree();
            contador++;
        }
        cube.Mesh = cubeArray;
        for (int i = 0; i < materiais.Length; i++) cube.SetSurfaceMaterial(i, materiais[i]);
        chunk.AddChild(cube);
    }
}
using System;
using Godot;
public class Utils
{
    // Terrain-generation tuning constants shared by all noise samplers.
    static int seed = 100;
    static int maxHeight = 150;
    static float period = 128f;
    static int octaves = 4;
    static float persistence = 0.5f;

    // Builds a configured OpenSimplexNoise instance. Extracted because the
    // original duplicated this four-line setup in every sampler below.
    static OpenSimplexNoise MakeNoise(int oct, float per)
    {
        OpenSimplexNoise noise = new OpenSimplexNoise();
        noise.Seed = seed;
        noise.Octaves = oct;
        noise.Persistence = persistence;
        noise.Period = per;
        return noise;
    }

    // Surface height (in blocks) of the dirt/grass terrain at world (x, z).
    public static int GenerateHeight(float x, float z)
    {
        OpenSimplexNoise noise = MakeNoise(octaves, period);
        float height = Map(0, maxHeight, -1, 1, noise.GetNoise2d(x, z));
        return (int)height;
    }

    // 3D noise value mapped into [0, 1] at world (x, y, z); the chunk builder
    // treats values below a threshold as empty space (caves).
    public static float Draw3DStones(float x, float y, float z)
    {
        OpenSimplexNoise noise = MakeNoise(1, period * 10f);
        float probability = Map(0, 1, -1, 1, noise.GetNoise3d(x, y, z));
        return probability;
    }

    // Height (in blocks) of the stone layer at world (x, z); kept under the
    // surface by using a lower ceiling and higher-frequency noise.
    public static int GenerateStoneHeight(float x, float z)
    {
        OpenSimplexNoise noise = MakeNoise(octaves + 2, period / 3);
        float height = Map(0, maxHeight - 5, -1, 1, noise.GetNoise2d(x, z));
        return (int)height;
    }

    // Linearly remaps value from [origMin, origMax] into [newMin, newMax].
    // (The commented-out hand-rolled fBM helper from the original was dead
    // code and has been removed.)
    static float Map(float newMin, float newMax, float origMin, float origMax, float value)
    {
        return Mathf.Lerp(newMin, newMax, Mathf.InverseLerp(origMin, origMax, value));
    }
}
<file_sep>/3D/Block.cs
using System;
using System.Collections.Generic;
using Godot;
public class Block
{
    // The six faces of a unit cube; selects which quad CriaQuad builds.
    enum LadoDoCubo { BOTTOM, TOP, LEFT, RIGHT, FRONT, BACK };
    // Block types. AIR is the only non-solid type and is never drawn.
    public enum TipoDeBloco { GRASS, DIRT, STONE, REDSTONE, TESTE, AIR };

    TipoDeBloco tBloco;        // this block's type
    Chunk Owner;               // chunk whose chunkData array contains this block
    Spatial parent;            // chunk scene node that receives the generated quads
    Vector3 position;          // block position in chunk-local coordinates
    SpatialMaterial material;  // shared atlas material applied to every quad
    Block[,,] blocks;          // reference to the owning chunk's block array
    public bool isSolid;       // false only for AIR; drives neighbour-face culling

    // Texture-atlas UV corners per block appearance, four corners per row in
    // the order uv00, uv10, uv01, uv11. Row 0 is the grass TOP; every other
    // type is looked up at row (tBloco + 1) in CriaQuad, so row 1 doubles as
    // the grass side texture.
    readonly Vector2[,] blocosUVs =
    {
        /* GRASS TOP */  {new Vector2( 0.1250f, 0.6250f ), new Vector2( 0.1875f, 0.6250f ),
                          new Vector2( 0.1250f, 0.5625f ), new Vector2( 0.1875f, 0.5625f ) },
        /* GRASS SIDE */ {new Vector2( 0.1875f, 0.0625f ), new Vector2( 0.2500f, 0.0625f ),
                          new Vector2( 0.1875f, 0.0000f ), new Vector2( 0.2500f, 0.0000f ) },
        /* DIRT */       {new Vector2( 0.1250f, 0.0000f ), new Vector2( 0.1875f, 0.0000f ),
                          new Vector2( 0.1250f, 0.0625f ), new Vector2( 0.1875f, 0.0625f ) },
        /* STONE */      {new Vector2( 0.0000f, 0.1250f ), new Vector2( 0.0625f, 0.1250f ),
                          new Vector2( 0.0000f, 0.0625f ), new Vector2( 0.0625f, 0.0625f ) },
        /* REDSTONE */   {new Vector2( 0.0000f, 0.1250f ), new Vector2( 0.0625f, 0.1250f ),
                          new Vector2( 0.0000f, 0.0625f ), new Vector2( 0.0625f, 0.0625f ) },
        /* TESTE */      {new Vector2( 0.1250f, 0.0000f ), new Vector2( 0.1875f, 0.0000f ),
                          new Vector2( 0.1250f, 0.0625f ), new Vector2( 0.1875f, 0.0625f )
        }
    };

    // Stores the block's type, location and owning chunk; a block is solid
    // unless its type is AIR.
    public Block(TipoDeBloco tipo, Vector3 pos, Spatial par, SpatialMaterial mat, Chunk c, ref Block[,,] b )
    {
        tBloco = tipo;
        parent = par;
        position = pos;
        blocks = b;
        if (tBloco == TipoDeBloco.AIR)
            isSolid = false;
        else
            isSolid = true;
        // Keep the owning chunk and the shared cube material.
        Owner = c;
        material = mat;
    }

    // Builds one face quad (two triangles) of this block as a MeshInstance
    // child of the chunk node, with atlas UVs chosen by block type and face.
    void CriaQuad(LadoDoCubo lado)
    {
        ArrayMesh quadArray;
        MeshInstance quad = new MeshInstance
        {
            Name = "Quad"
        };
        // Mesh arrays (filled below per face).
        Vector3[] normalArray = new Vector3[4];
        Vector2[] uvArray = new Vector2[4];
        Vector3[] vertexArray = new Vector3[4];
        int[] indexArray = new int[6];
        // Compute the UV corners inside the atlas: grass uses a dedicated
        // top row and the dirt texture for its bottom; everything else maps
        // through the (tBloco + 1) row offset described on blocosUVs.
        Vector2 uv00;
        Vector2 uv10;
        Vector2 uv01;
        Vector2 uv11;
        if (tBloco == TipoDeBloco.GRASS && lado == LadoDoCubo.TOP)
        {
            uv00 = blocosUVs[0, 0];
            uv10 = blocosUVs[0, 1];
            uv01 = blocosUVs[0, 2];
            uv11 = blocosUVs[0, 3];
        }
        else if (tBloco == TipoDeBloco.GRASS && lado == LadoDoCubo.BOTTOM)
        {
            uv00 = blocosUVs[(int)(TipoDeBloco.DIRT + 1), 0];
            uv10 = blocosUVs[(int)(TipoDeBloco.DIRT + 1), 1];
            uv01 = blocosUVs[(int)(TipoDeBloco.DIRT + 1), 2];
            uv11 = blocosUVs[(int)(TipoDeBloco.DIRT + 1), 3];
        }
        else
        {
            uv00 = blocosUVs[(int)(tBloco + 1), 0];
            uv10 = blocosUVs[(int)(tBloco + 1), 1];
            uv01 = blocosUVs[(int)(tBloco + 1), 2];
            uv11 = blocosUVs[(int)(tBloco + 1), 3];
        }
        // All eight possible vertices of the unit cube centred on the origin.
        Vector3 p0 = new Vector3(-0.5f, -0.5f, 0.5f);
        Vector3 p1 = new Vector3(0.5f, -0.5f, 0.5f);
        Vector3 p2 = new Vector3(0.5f, -0.5f, -0.5f);
        Vector3 p3 = new Vector3(-0.5f, -0.5f, -0.5f);
        Vector3 p4 = new Vector3(-0.5f, 0.5f, 0.5f);
        Vector3 p5 = new Vector3(0.5f, 0.5f, 0.5f);
        Vector3 p6 = new Vector3(0.5f, 0.5f, -0.5f);
        Vector3 p7 = new Vector3(-0.5f, 0.5f, -0.5f);
        switch (lado)
        {
            case LadoDoCubo.BOTTOM:
                vertexArray = new Vector3[] { p0, p1, p2, p3 };
                normalArray = new Vector3[] { Vector3.Down, Vector3.Down, Vector3.Down, Vector3.Down };
                break;
            case LadoDoCubo.TOP:
                vertexArray = new Vector3[] { p7, p6, p5, p4 };
                normalArray = new Vector3[] { Vector3.Up, Vector3.Up, Vector3.Up, Vector3.Up };
                break;
            case LadoDoCubo.LEFT:
                vertexArray = new Vector3[] { p7, p4, p0, p3 };
                normalArray = new Vector3[] { Vector3.Left, Vector3.Left, Vector3.Left, Vector3.Left };
                break;
            case LadoDoCubo.RIGHT:
                vertexArray = new Vector3[] { p5, p6, p2, p1 };
                normalArray = new Vector3[] { Vector3.Right, Vector3.Right, Vector3.Right, Vector3.Right };
                break;
            case LadoDoCubo.FRONT:
                vertexArray = new Vector3[] { p4, p5, p1, p0 };
                normalArray = new Vector3[] { Vector3.Forward, Vector3.Forward, Vector3.Forward, Vector3.Forward };
                break;
            case LadoDoCubo.BACK:
                vertexArray = new Vector3[] { p6, p7, p3, p2 };
                normalArray = new Vector3[] { Vector3.Back, Vector3.Back, Vector3.Back, Vector3.Back };
                break;
        }
        uvArray = new Vector2[] { uv11, uv01, uv00, uv10 };
        indexArray = new int[] { 0, 1, 2, 0, 2, 3 };  // two triangles per face
        quadArray = new ArrayMesh();
        var arrays = new Godot.Collections.Array();
        arrays.Resize((int)ArrayMesh.ArrayType.Max);
        arrays[(int)ArrayMesh.ArrayType.Vertex] = vertexArray;
        arrays[(int)ArrayMesh.ArrayType.Normal] = normalArray;
        arrays[(int)ArrayMesh.ArrayType.TexUv] = uvArray;
        arrays[(int)ArrayMesh.ArrayType.Index] = indexArray;
        quadArray.AddSurfaceFromArrays(Mesh.PrimitiveType.Triangles, arrays);
        quad.Mesh = quadArray;
        quad.SetSurfaceMaterial(0, material);
        quad.Translation = position;
        parent.AddChild(quad);
    }

    // Wraps a neighbour index that stepped one cell outside [0, chunkSize)
    // back into the adjacent chunk's local coordinate space.
    int ConvertBlockIndexToLocal(int i)
    {
        if ((i == -1))
            i = Mundo.chunkSize - 1;
        else if (i == Mundo.chunkSize)
            i = 0;
        return i;
    }

    // True when the block at chunk-local (x, y, z) exists and is solid.
    // Coordinates outside this chunk are resolved against the neighbouring
    // chunk looked up in the world map (Mundo.chunks); a missing neighbour
    // counts as non-solid so boundary faces get drawn.
    public bool HasSolidNeighbour(int x, int y, int z)
    {
        //var teste = (int)parent.GetNode("Mundo").;
        //GD.Print(teste);
        //Block[,,] chunks = (Block[,,]) parent.GetNode("chunkData");
        //chunks = owner.chunkData;
        Block[,,] chunks;
        if (x < 0 || x >= Mundo.chunkSize ||
            y < 0 || y >= Mundo.chunkSize ||
            z < 0 || z >= Mundo.chunkSize)
        {
            // Compute the neighbouring chunk's world position from the
            // out-of-range axis and look it up by name.
            Vector3 neighbourChunkpos = parent.Translation + new Vector3((x - (int)position.x) * Mundo.chunkSize,
                                                                         (y - (int)position.y) * Mundo.chunkSize,
                                                                         (z - (int)position.z) * Mundo.chunkSize);
            string nName = Mundo.MontaNomeDoChunk(neighbourChunkpos);
            int x1 = x; int y1 = y; int z1 = z;  // NOTE(review): unused leftovers from debugging
            x = ConvertBlockIndexToLocal(x);
            y = ConvertBlockIndexToLocal(y);
            z = ConvertBlockIndexToLocal(z);
            Chunk nChunk;
            if (Mundo.chunks.TryGetValue(nName, out nChunk))
                chunks = nChunk.chunkData;
            else
                return false;
        }
        else
        {
            chunks = Owner.chunkData;
        }
        try
        {
            return chunks[x, y, z].isSolid;
        }
        catch (System.IndexOutOfRangeException ) { }
        return false;
    }

    // Emits only the faces that touch a non-solid neighbour (face culling);
    // AIR blocks draw nothing.
    public void Draw()
    {
        if (tBloco == TipoDeBloco.AIR) return;
        if (!HasSolidNeighbour((int)position.x, (int)position.y, (int)position.z + 1))
            CriaQuad(LadoDoCubo.FRONT);
        if (!HasSolidNeighbour((int)position.x, (int)position.y, (int)position.z - 1))
            CriaQuad(LadoDoCubo.BACK);
        if (!HasSolidNeighbour((int)position.x, (int)position.y + 1, (int)position.z))
            CriaQuad(LadoDoCubo.TOP);
        if (!HasSolidNeighbour((int)position.x, (int)position.y - 1, (int)position.z))
            CriaQuad(LadoDoCubo.BOTTOM);
        if (!HasSolidNeighbour((int)position.x - 1, (int)position.y, (int)position.z))
            CriaQuad(LadoDoCubo.LEFT);
        if (!HasSolidNeighbour((int)position.x + 1, (int)position.y, (int)position.z))
            CriaQuad(LadoDoCubo.RIGHT);
    }
}
<file_sep>/Scripts/MoveCamera.cs
using Godot;
using System;
public class MoveCamera : Camera
{
    // Units per second for arrow-key translation.
    [Export] readonly float moveSpeed = 10f;

    // View variables
    [Export] readonly float mouseSensitivity = .3f;  // degrees per pixel of mouse motion
    [Export] readonly float mouseSpeed = 10f;        // rotation speed multiplier
    //[Export] readonly float anguloMaximo = 90.0f;
    //[Export] readonly float anguloMinimo = -90.0f;

    bool altIsPressed = false;        // free-look is active only while Alt is held
    float cameraVerticalAngle = 0;    // last relative mouse Y motion, in degrees
    float cameraHorizontalAngle = 0;  // last relative mouse X motion, in degrees

    public override void _Ready()
    {
        SetProcessInput(true);
    }

    // Per-frame update: translate with the ui_* actions and, while Alt is
    // held, rotate from the last recorded mouse motion.
    public override void _Process(float delta)
    {
        if (Input.IsActionPressed("ui_right"))
        {
            TranslateObjectLocal(Transform.basis.x * moveSpeed * delta);
        }
        if (Input.IsActionPressed("ui_left"))
        {
            TranslateObjectLocal(-Transform.basis.x * moveSpeed * delta);
        }
        if (Input.IsActionPressed("ui_up"))
        {
            TranslateObjectLocal(-Transform.basis.z * moveSpeed * delta);
        }
        if (Input.IsActionPressed("ui_down"))
        {
            TranslateObjectLocal(Transform.basis.z * moveSpeed * delta);
        }
        if (altIsPressed)
        {
            // NOTE(review): the relative angles are never cleared after a
            // motion event, so rotation keeps applying every frame until the
            // mouse moves again — confirm whether that is intended.
            RotateY(Mathf.Deg2Rad(cameraHorizontalAngle) * mouseSpeed * delta);
            RotateX(Mathf.Deg2Rad(cameraVerticalAngle) * mouseSpeed * delta);
        }
        // BUG FIX: Orthonormalized() returns a corrected copy; the original
        // discarded the return value, making the drift-correction a no-op.
        Transform = Transform.Orthonormalized();
    }

    public override void _Input(InputEvent @event)
    {
        UpdateCameraInput(@event);
    }

    // Tracks the Alt key (free-look modifier) and records the latest
    // relative mouse motion scaled by sensitivity.
    void UpdateCameraInput(InputEvent @event)
    {
        if (@event is InputEventKey eventKey)
            // Any key event other than "Alt pressed" clears the flag —
            // this covers Alt release, but also any other key press.
            if (eventKey.Pressed && eventKey.Scancode == (int)KeyList.Alt)
                altIsPressed = true;
            else
                altIsPressed = false;

        if (@event is InputEventMouseMotion mouseMotion)
        {
            cameraHorizontalAngle = -mouseMotion.Relative.x * mouseSensitivity;
            cameraVerticalAngle = -mouseMotion.Relative.y * mouseSensitivity;
        }
    }
}
<file_sep>/3D/Mundo.cs
/* Mundo: Arquivo que instancia o mundo de cubos.
* Ele determina a quantidade chunks, grupos de cubos, e a altura da
* coluna de cubos.
*/
using Godot;
using System;
using System.Collections.Generic;
/// <summary>
/// Root node that instantiates the cube world: worldSize x worldSize chunk
/// columns, each alturaDaColuna chunks tall.
/// </summary>
public class Mundo : Spatial
{
    // Editor-tunable settings.
    [Export] int tamanhoDoChunk = 4;
    [Export] int altura = 2;
    [Export] int tamamanhoDoMundo = 2;

    // Mirrors of the exported settings, published as statics so other
    // scripts can read them without a reference to this node.
    public static int chunkSize;       // cubes per chunk edge
    public static int worldSize;       // chunks per horizontal axis
    public static int alturaDaColuna;  // chunks stacked vertically

    // World map: chunk name (its position encoded as a string) -> chunk.
    public static Dictionary<String, Chunk> chunks;

    // Shared material carrying the block texture atlas.
    public SpatialMaterial materialDoMundo = new SpatialMaterial();

    public override void _Ready()
    {
        chunkSize = tamanhoDoChunk;
        alturaDaColuna = altura;
        worldSize = tamamanhoDoMundo;

        // Prepare the cube material by loading the texture atlas.
        materialDoMundo.AlbedoTexture = (Texture)GD.Load("res://imagens/blockatlas.png");

        chunks = new Dictionary<string, Chunk>();
        this.SetIdentity(); // park the root node at the grid origin first
        BuildWorld();
    }

    // Builds a single vertical column of chunks above this node.
    // (Not called from _Ready, which builds the full world instead.)
    void BuildChunkColumn()
    {
        for (int nivel = 0; nivel < alturaDaColuna; nivel++)
        {
            // Same x/z as the root node; only y climbs with the index.
            var posicao = new Vector3(this.Translation.x, nivel * chunkSize, this.Translation.z);
            var novoChunk = new Chunk(posicao, materialDoMundo);
            this.AddChild(novoChunk.chunk);     // attach to the scene tree
            chunks.Add(novoChunk.Name, novoChunk); // register in the map
        }

        // Draw every registered chunk.
        foreach (Chunk pronto in chunks.Values)
            pronto.DrawChunk();
    }

    // Builds the whole world grid of chunks.
    void BuildWorld()
    {
        for (int z = 0; z < worldSize; z++)
            for (int x = 0; x < worldSize; x++)
                for (int y = 0; y < alturaDaColuna; y++)
                {
                    // Chunk position on the grid, stepped by chunkSize.
                    var posicao = new Vector3(x * chunkSize, y * chunkSize, z * chunkSize);
                    var novoChunk = new Chunk(posicao, materialDoMundo);
                    this.AddChild(novoChunk.chunk);     // attach to the scene tree
                    chunks.Add(novoChunk.Name, novoChunk); // register in the map
                }

        // Draw every registered chunk.
        foreach (Chunk pronto in chunks.Values)
            pronto.DrawChunk();
    }

    // Encodes a chunk position as the "x_y_z" key used in the chunk map.
    public static String MontaNomeDoChunk(Vector3 pos)
    {
        return $"{(int)pos.x}_{(int)pos.y}_{(int)pos.z}";
    }
}
<file_sep>/3D/Cubo.cs
using Godot;
using System;
/// <summary>
/// Builds a unit cube out of six textured quads. Each quad samples the
/// block texture atlas according to the block type (tBloco) and face, and
/// CriaCubo finally merges the six quads into a single MeshInstance.
/// </summary>
public class Cubo : Spatial
{
    // Atlas texture shared by every quad of this cube.
    Texture texturaDoQuad;

    // Which face of the cube a quad belongs to.
    enum LadoDoCubo { BOTTOM, TOP, LEFT, RIGHT, FRONT, BACK };

    // Block kinds. The enum value, offset by one, selects a row in
    // blocosUVs (row 0 is the extra grass-top entry) — see CriaQuad.
    public enum TipoDeBloco { GRASS, DIRT, STONE, TESTE };

    public SpatialMaterial quadMaterial = new SpatialMaterial();

    // Block type of this cube, settable from the editor. // = TipoDeBloco.GRASS;
    [Export] public TipoDeBloco tBloco;

    // UV rectangles inside the atlas, one row per texture, stored as the
    // four corner coordinates in the order {uv00, uv10, uv01, uv11}.
    readonly Vector2[,] blocosUVs =
    {
        /* GRASS TOP */ {new Vector2( 0.1250f, 0.6250f ), new Vector2( 0.1875f, 0.6250f ),
        new Vector2( 0.1250f, 0.5625f ), new Vector2( 0.1875f, 0.5625f ) },
        /* GRASS SIDE */ {new Vector2( 0.1875f, 0.0625f ), new Vector2( 0.2500f, 0.0625f ),
        new Vector2( 0.1875f, 0.0000f ), new Vector2( 0.2500f, 0.0000f ) },
        /* DIRT */ {new Vector2( 0.1250f, 0.0000f ), new Vector2( 0.1875f, 0.0000f ),
        new Vector2( 0.1250f, 0.0625f ), new Vector2( 0.1875f, 0.0625f ) },
        /* STONE */ {new Vector2( 0.0000f, 0.1250f ), new Vector2( 0.0625f, 0.1250f ),
        new Vector2( 0.0000f, 0.0625f ), new Vector2( 0.0625f, 0.0625f ) },
        /* TESTE */ {new Vector2( 0.1250f, 0.0000f ), new Vector2( 0.1875f, 0.0000f ),
        new Vector2( 0.1250f, 0.0625f ), new Vector2( 0.1875f, 0.0625f )
        }
    };

    public override void _Ready()
    {
        CriaCubo();
    }

    /// Builds one face of the cube as a MeshInstance child named "Quad".
    void CriaQuad(LadoDoCubo lado)
    {
        ArrayMesh quadArray;
        MeshInstance quad = new MeshInstance
        {
            Name = "Quad"
        };

        // Per-vertex attribute arrays for the quad (4 vertices).
        Vector3[] normalArray = new Vector3[4];
        Vector2[] uvArray = new Vector2[4];
        Vector3[] vertexArray = new Vector3[4];
        int[] indexArray = new int[6];

        // Prepare the quad material by loading the texture atlas.
        texturaDoQuad = (Texture)GD.Load("res://imagens/blockatlas.png");
        quadMaterial.AlbedoTexture = texturaDoQuad;

        // Pick the UV rectangle in the atlas for this block type / face.
        Vector2 uv00;
        Vector2 uv10;
        Vector2 uv01;
        Vector2 uv11;
        if(tBloco == TipoDeBloco.GRASS && lado == LadoDoCubo.TOP)
        {
            // Grass top uses the dedicated row 0 of the table.
            uv00 = blocosUVs[0, 0];
            uv10 = blocosUVs[0, 1];
            uv01 = blocosUVs[0, 2];
            uv11 = blocosUVs[0, 3];
        }
        else if (tBloco == TipoDeBloco.GRASS && lado == LadoDoCubo.BOTTOM)
        {
            // Grass bottom is textured as plain dirt (DIRT + 1 = row 2).
            uv00 = blocosUVs[(int)(TipoDeBloco.DIRT + 1), 0];
            uv10 = blocosUVs[(int)(TipoDeBloco.DIRT + 1), 1];
            uv01 = blocosUVs[(int)(TipoDeBloco.DIRT + 1), 2];
            uv11 = blocosUVs[(int)(TipoDeBloco.DIRT + 1), 3];
        }
        else
        {
            // All other cases use the block's own row (enum value + 1);
            // for GRASS this yields row 1, the grass side texture.
            uv00 = blocosUVs[(int)(tBloco + 1), 0];
            uv10 = blocosUVs[(int)(tBloco + 1), 1];
            uv01 = blocosUVs[(int)(tBloco + 1), 2];
            uv11 = blocosUVs[(int)(tBloco + 1), 3];
        }

        // The eight corners of a unit cube centred on the origin.
        Vector3 p0 = new Vector3(-0.5f, -0.5f, 0.5f);
        Vector3 p1 = new Vector3( 0.5f, -0.5f, 0.5f);
        Vector3 p2 = new Vector3( 0.5f, -0.5f, -0.5f);
        Vector3 p3 = new Vector3(-0.5f, -0.5f, -0.5f);
        Vector3 p4 = new Vector3(-0.5f, 0.5f, 0.5f);
        Vector3 p5 = new Vector3( 0.5f, 0.5f, 0.5f);
        Vector3 p6 = new Vector3( 0.5f, 0.5f, -0.5f);
        Vector3 p7 = new Vector3(-0.5f, 0.5f, -0.5f);

        // Each face selects its four corners plus one uniform normal.
        switch (lado)
        {
            case LadoDoCubo.BOTTOM:
                vertexArray = new Vector3[] { p0, p1, p2, p3 };
                normalArray = new Vector3[] { Vector3.Down, Vector3.Down, Vector3.Down, Vector3.Down };
                break;
            case LadoDoCubo.TOP:
                vertexArray = new Vector3[] { p7, p6, p5, p4 };
                normalArray = new Vector3[] { Vector3.Up, Vector3.Up, Vector3.Up, Vector3.Up };
                break;
            case LadoDoCubo.LEFT:
                vertexArray = new Vector3[] { p7, p4, p0, p3 };
                normalArray = new Vector3[] { Vector3.Left, Vector3.Left, Vector3.Left, Vector3.Left };
                break;
            case LadoDoCubo.RIGHT:
                vertexArray = new Vector3[] { p5, p6, p2, p1 };
                normalArray = new Vector3[] { Vector3.Right, Vector3.Right, Vector3.Right, Vector3.Right };
                break;
            case LadoDoCubo.FRONT:
                vertexArray = new Vector3[] { p4, p5, p1, p0 };
                normalArray = new Vector3[] { Vector3.Forward, Vector3.Forward, Vector3.Forward, Vector3.Forward };
                break;
            case LadoDoCubo.BACK:
                vertexArray = new Vector3[] { p6, p7, p3, p2 };
                normalArray = new Vector3[] { Vector3.Back, Vector3.Back, Vector3.Back, Vector3.Back };
                break;
        }

        uvArray = new Vector2[] { uv11, uv01, uv00, uv10};
        // Two triangles per quad: (0,1,2) and (0,2,3).
        indexArray = new int[] { 0, 1, 2, 0, 2, 3 };

        // Pack the attribute arrays into an ArrayMesh surface.
        quadArray = new ArrayMesh();
        var arrays = new Godot.Collections.Array();
        arrays.Resize((int)ArrayMesh.ArrayType.Max);
        arrays[(int)ArrayMesh.ArrayType.Vertex] = vertexArray;
        arrays[(int)ArrayMesh.ArrayType.Normal] = normalArray;
        arrays[(int)ArrayMesh.ArrayType.TexUv] = uvArray;
        arrays[(int)ArrayMesh.ArrayType.Index] = indexArray;
        quadArray.AddSurfaceFromArrays(Mesh.PrimitiveType.Triangles, arrays);
        quad.Mesh = quadArray;
        quad.SetSurfaceMaterial(0, quadMaterial);
        AddChild(quad);
    }

    /// Merges every child quad into a single MeshInstance named `nome`:
    /// each quad becomes one surface of the combined mesh (with its
    /// material carried over), and the original quad nodes are freed.
    void CombinaQuads(String nome)
    {
        MeshInstance cube = new MeshInstance
        {
            Name = nome
        };
        ArrayMesh cubeArray = new ArrayMesh();
        Godot.Collections.Array kids = GetChildren();
        Material[] materiais = new Material[kids.Count];
        int contador = 0;
        foreach (MeshInstance meshI in kids)
        {
            // Copy the quad's surface data, remember its material, then
            // schedule the original node for deletion.
            cubeArray.AddSurfaceFromArrays(Mesh.PrimitiveType.Triangles, meshI.Mesh.SurfaceGetArrays(0));
            materiais[contador] = meshI.GetSurfaceMaterial(0);
            meshI.QueueFree();
            contador++;
        }
        cube.Mesh = cubeArray;
        // Re-apply each quad's material to its surface in the merged mesh.
        for (int i = 0; i < materiais.Length; i++) cube.SetSurfaceMaterial(i, materiais[i]);
        AddChild(cube);
    }

    /// Builds all six faces and merges them into one node named "Cubo".
    public void CriaCubo()
    {
        CriaQuad(LadoDoCubo.FRONT);
        CriaQuad(LadoDoCubo.BACK);
        CriaQuad(LadoDoCubo.TOP);
        CriaQuad(LadoDoCubo.BOTTOM);
        CriaQuad(LadoDoCubo.LEFT);
        CriaQuad(LadoDoCubo.RIGHT);
        CombinaQuads("Cubo");
    }
}
| 2be0b95c97d3aae11ce2979169630a390a05a801 | [
"C#"
] | 6 | C# | zemarcelo/Godocraft | 0301cb3ca2852a8a3b9edd5d099c0eaac78b516b | e459c313869fec0e95a5f91b64642eabd65bbf19 |
refs/heads/master | <repo_name>CodeMasterChef/zalo-master-chef<file_sep>/fullTechInfoAsking.js
module.exports = function(ZOAClient, userId , data, responseAI){
var module = {};
var common = require('./common.js')(ZOAClient);
var helper = require('./helper.js')();
module.execute = function () {
var productName = (responseAI.name) ? responseAI.name[0].value : helper.getCache(userId).productName;
var keyword = productName.toLowerCase();
var product = null;
for(let i = 0 ; i < data.length ; i++) {
if(data[i].name) {
var productName = data[i].name.toLowerCase();
if(productName.indexOf(keyword) !== -1) {
product = data[i];
break;
}
}
}
if(product) {
var stringData = JSON.stringify(product.fullTechInfo);
var stringResponse = stringData.replace(/[{}]/g, '\n');
stringResponse = stringResponse.replace(/\",/g, '\n');
stringResponse = stringResponse.replace(/\"/g, '');
stringResponse = "Thông số kĩ thuật của " + productName + " " + stringResponse;
common.sendTextMessage(userId , stringResponse);
}
else {
common.sendTextMessage(userId, "Rất tiếc. Không có thông tin kĩ thuật của sản phẩm.");
}
}
return module;
}<file_sep>/policyAsking.js
module.exports = function (ZOAClient, userId, data, responseAI) {
var module = {};
var common = require('./common.js')(ZOAClient);
var helper = require('./helper.js')();
module.execute = function () {
var productNameList = responseAI.name;
if(productNameList == undefined) {
var productName = helper.getCache(userId).productName;
var product = null;
for (let i = 0; i < data.length; i++) {
if (data[i].name) {
if (data[i].name.toLowerCase() === productName.toLowerCase()) {
product = data[i];
}
}
}
if (product && product.fullSaleInfo && product.fullSaleInfo.policy) {
var stringResponse = helper.convertObjectTParagraph(product.fullSaleInfo.policy);
stringResponse = "Thông tin bảo hành của " + product.name + "\n" + stringResponse;
common.sendTextMessage(userId, stringResponse);
} else {
common.sendTextMessage(userId, "Rất tiếc. Không có thông tin bảo hành của sản phẩm " + productName);
}
return;
}
// multiple select
productNameList.forEach(element => {
var queryProductName = element.value;
var keyword = element.value.toLowerCase();
var product = null;
for (let i = 0; i < data.length; i++) {
if (data[i].name) {
var productName = data[i].name.toLowerCase();
if (productName === keyword) {
product = data[i];
}
}
}
if (product && product.fullSaleInfo && product.fullSaleInfo.policy) {
var stringResponse = helper.convertObjectTParagraph(product.fullSaleInfo.policy);
stringResponse = "Thông tin bảo hành của " + product.name + "\n" + stringResponse;
common.sendTextMessage(userId, stringResponse);
}
else {
common.sendTextMessage(userId, "Rất tiếc. Không có thông tin bảo hành của sản phẩm " + queryProductName);
}
});
}
return module;
}<file_sep>/advisoryAsking.js
module.exports = function (ZOAClient, userId, data, responseAI) {
var module = {};
var common = require('./common.js')(ZOAClient);
var helper = require('./helper.js')();
module.execute = function () {
var productName = responseAI.name[0].value;
var message = `Bạn muốn tư vấn thông gì về ${productName} : \n `;
message+=`Thông tin kĩ thuật \n `;
message+=`Chính sách bảo hành \n `;
message+=`Giá sản phẩm \n `;
common.sendTextMessage(userId, message);
helper.setCache(userId, 'productName' , productName );
}
return module;
}<file_sep>/saleAsking.js
module.exports = function (ZOAClient, userId, data, responseAI) {
var module = {};
var common = require('./common.js')(ZOAClient);
var message = '';
module.excute = function () {
if (!responseAI.hasOwnProperty('name')) {
common.sendTextMessage(userId, 'Rất tiếc! Chúng tôi hiện không mặt hàng này.');
return;
}
var keyword = responseAI.name[0].value;
var isExisted = data.find(f => f.name ? f.name.toLowerCase().indexOf(keyword.toLowerCase()) !== -1 : false);
// console.log(isExisted);
if (isExisted) {
var actions = [{
action: 'oa.query.hide',
title: isExisted.category + ' - ' + isExisted.name,
description: isExisted.price,
thumb: 'https://zalo-hackathon.herokuapp.com/public/images/' + isExisted.imgUrl,
data: 'Tư vấn ' + keyword,
popup: {
title: 'Lựa chọn',
desc: 'Thích thì sao ngại click để rinh ngay về nhà.',
ok: 'Tư Vấn Thêm',
cancel: 'Gọi Đặt Hàng'
}
}]
common.sendInteractionMessage(userId, actions);
} else {
message = 'Rất tiếc! Chúng tôi hiện không mặt hàng ' + keyword + '.';
common.sendTextMessage(userId, message);
}
}
return module;
}<file_sep>/priceAskingEnhance.js
module.exports = function (ZOAClient, userId, data, responseAI) {
var module = {};
var common = require('./common.js')(ZOAClient);
var helper = require('./helper.js')();
module.execute = function (updatedMemory) {
var currentData = global.cache.find(p => p.userId == userId);
var queryProductName = (currentData.productName + ' ' + updatedMemory);
var product = null;
for (let i = 0; i < data.length; i++) {
if (data[i].name) {
var productName = data[i].name.toLowerCase();
if (productName.toLowerCase() == queryProductName.toLowerCase()) {
product = data[i];
}
}
}
if (product && product.price) {
var stringResponse = helper.convertObjectTParagraph(product.price);
stringResponse = "Giá của sản phẩm " + product.name + " là: " + stringResponse;
common.sendTextMessage(userId, stringResponse);
}
else {
common.sendTextMessage(userId, "Rất tiếc. Không có thông tin về giá của sản phẩm " + queryProductName);
}
}
return module;
}<file_sep>/priceAsking.js
module.exports = function (ZOAClient, userId, data, responseAI) {
var module = {};
var common = require('./common.js')(ZOAClient);
var helper = require('./helper.js')();
module.execute = function () {
var productNameList = responseAI.name;
if(productNameList == undefined) {
var productName = helper.getCache(userId).productName;
var product = null;
for (let i = 0; i < data.length; i++) {
if (data[i].name) {
if (data[i].name.toLowerCase() === productName.toLowerCase()) {
product = data[i];
}
}
}
if (product && product.price) {
var stringResponse = helper.convertObjectTParagraph(product.price);
stringResponse = "Giá của sản phẩm " + product.name + " là: " + stringResponse;
common.sendTextMessage(userId, stringResponse);
} else {
common.sendTextMessage(userId, "Rất tiếc. Không có thông tin về giá của sản phẩm " + productName);
}
return;
}
// multiple searching
productNameList.forEach(element => {
var queryProductName = element.value;
var keyword = element.value.toLowerCase();
var product = null;
for (let i = 0; i < data.length; i++) {
if (data[i].name) {
var productName = data[i].name.toLowerCase();
if (productName === keyword) {
product = data[i];
}
}
}
if (product && product.price) {
var stringResponse = helper.convertObjectTParagraph(product.price);
stringResponse = "Giá của sản phẩm " + product.name + " là: " + stringResponse;
common.sendTextMessage(userId, stringResponse);
}
else {
if(element.entities && element.entities.trademark
&& element.entities.trademark[0].value
&& element.entities.trademark[0].value.toLowerCase() =='iphone') {
let isExist = false;
for(var i = 0 ; i < global.cache.length ; i++) {
if(global.cache[i].userId == userId) {
global.cache[i].productName = queryProductName;
isExist = true;
break;
}
}
if(!isExist) {
global.cache.push( { userId : userId , productName :queryProductName });
}
common.sendTextMessage(userId, "Bạn có thể cho tôi biết thêm về dung lượng bộ nhớ là 32GB hay 64GB hay 128GB của " + queryProductName + " không?");
}
else {
common.sendTextMessage(userId, "Rất tiếc. Không có thông tin về giá của sản phẩm " + queryProductName);
}
}
});
}
return module;
} | 7f919517163d173a8cc14f7c1d1f9202051c1e8f | [
"JavaScript"
] | 6 | JavaScript | CodeMasterChef/zalo-master-chef | e9f92e9fdae9e06c622049d7473f7bca0e9f9703 | b33655e846186e40449978aa22eb6f1069b6450c |
refs/heads/master | <file_sep>#include<iostream>
#include<cstdio>
#include<cstring>
#include<string>
#include<algorithm>
using namespace std;
const int MAXN = 16+2;
const int INF = 0x3f3f3f3f;//INF 开得特别大会爆
int V, e;
int dp[1<<MAXN][MAXN], d[MAXN][MAXN];
void floyed() {
for(int k = 1; k <= V; k++) {
for(int i = 1; i <= V; i++) {
for(int j = 1; j <= V; j++) {
d[i][j] = min(d[i][j], d[i][k] + d[k][j]);
}
}
}
}
void slove() {
floyed();
for(int i = 0; i < (1<<V); i++) {
fill(dp[i], dp[i]+V+1, INF);
}
dp[(1<<V)-1][1] = 0;
for(int s = (1<<V)-2; s >= 0; s--) {
for(int v = 1; v <= V; v++) {
for(int u = 1; u <= V; u++) {
if(!(s>>(u-1) & 1)) {
dp[s][v] = min(dp[s][v], dp[s | 1<<(u-1)][u] + d[v][u]);
}
}
}
}
}
int main() {
int T;
cin >> T;
while(T--) {
cin >> V >> e;
//初始化
for(int i = 1; i <= V; i++) {
for(int j = 1; j <= V; j++) {
if(i == j) d[i][j] = 0;
else d[i][j] = INF;
}
}
for(int i = 0; i < e ; i++) {
int x, y, z;
cin >> x >> y >> z;
if(z < d[x][y]) d[x][y] = d[y][x] = z;
}
slove();
cout << dp[0][1] << endl;
}
}
<file_sep>#include<iostream>
#include<cstdio>
#include<cstring>
#include<string>
using namespace std;
int dp[10][10];//
void init() {
dp[0][0] = 1;
for(int i = 1; i < 10; i++) {
for(int j = 0; j < 10; j++) {
for(int k = 0; k < 10; k++) {
if(j != 4 && !(j == 6 && k == 2))
dp[i][j] += dp[i-1][k];
}
}
}
}
int solve(int x) {
int sum = 0;
int str[10], len = 0;
//memset(str, 0, sizeof(str));
while(x) {
str[len++] = x%10;
x/=10;
}
str[len] = 0;//下面取到了str[i+1]
for(int i = len-1; i >= 0; i--) {
for(int j = 0; j < str[i]; j++) {
if(j != 4 && !(j == 2 && str[i+1] == 6)) sum += dp[i+1][j];
}
if(str[i] == 4 || (str[i] == 2 && str[i+1] == 6)) break;
}
return sum;//个位少算一个,总数少算一个。
}
int main() {
init();
int a, b;
while(cin >> a >> b, a || b) {
cout << solve(b+1) - solve(a) << endl;
}
return 0;
}
<file_sep>#include<iostream>
#include<cstdio>
#include<cstring>
#include<string>
#include<algorithm>
using namespace std;
const int MAXN = 2005;
int M, N, W;
int w[MAXN], a[MAXN], b[MAXN];
int dp[MAXN];
void Dp() {
memset(dp, 0, sizeof(dp));
for(int i = 0; i < N ; i++) {
//01背包
for(int j = M; j >= w[i]; j--)
dp[j] = max(dp[j], dp[j-w[i]]+a[i]+b[i]);
//完全背包
for(int j = w[i]; j <= M; j++) {
dp[j] = max(dp[j], dp[j-w[i]]+a[i]);
}
}
}
int main() {
ios::sync_with_stdio(false);
int T;
cin >> T;
while(T--) {
cin >> M >> N;
for(int i = 0; i < N; i++) {
cin >> w[i] >> a[i] >> b[i];
}
Dp();
cout << dp[M] << endl;
}
}
<file_sep>#include<iostream>
#include<cstdio>
#include<cstring>
#include<string>
#include<algorithm>
#include<cmath>
#include<iomanip>
using namespace std;
const int MAXN = 60;
int T, N, V;
struct Bucket{
double a, b;
}s[MAXN];
bool cmp(Bucket x, Bucket y) {
return x.a*y.b > y.a*x.b;
}
double res() {
for(int i = 0; i < N; i++) {
cin >> s[i].a;
}
for(int i = 0; i < N; i++) {
cin >> s[i].b;
if(s[i].b && V <= s[i].a) {
return -1;
}
}
double t = 0;
sort(s, s+N, cmp);
for(int i = 0; i < N; i++) {
if(s[i].b == 0) continue;
t += (s[i].b + s[i].a * t) / ((double)V - s[i].a);
}
return t;
}
int main() {
//ios::sync_with_stdio(false);
cin >> T;
while(T--) {
cin >> N >> V;
printf("%.0lf\n", res());
}
return 0;
}
<file_sep>#include<iostream>
#include<cstdio>
#include<cstring>
#include<string>
using namespace std;
int str[10];
int dp[10][200000];
int getstr(int x) {
int len = 0;
while(x) {
str[len++] = x%10;
x/=10;
}
return len-1;
}
int getint(int x) {
int sum = 0;
int mod = 1;
while(x) {
sum += (x%10) * mod;
x/=10;
mod *= 2;
}
return sum;
}
int b[3];
int dfs(int pos, int sum, bool limit) {
//当前的sum = init_sum - f(x); (x为递归模拟取到的数)
if(pos == -1 ) return sum >= 0;
//不剪枝的话dp数组会越界
if(sum < 0) return 0;
//边界
if(limit == false && dp[pos][sum] != -1) return dp[pos][sum];
int up = limit ? str[pos] : 9;
int ans = 0;
for(int i = 0; i <= up; i++) {
ans += dfs(pos-1, sum-i*(1<<pos), limit && i == up);
}
if(limit == false) dp[pos][sum] = ans;
return ans;
}
int slove(int x, int y) {
int len = getstr(y);
int sum = getint(x);
return dfs(len, sum, true);
}
int main() {
ios::sync_with_stdio(false);
memset(dp, -1, sizeof(dp));
int T;
cin >> T;
for(int i = 1; i <= T; i++) {
int x, y;
cin >> x >> y;
cout << "Case #" << i << ": " << slove(x, y) << endl;
}
return 0;
}
<file_sep>#include<iostream>
#include<cstdio>
#include<cstring>
#include<string>
using namespace std;
const int MAXN = 100;
long long dp[MAXN][10];
//数位dp一般适合排除,即 不包含xxxxx条件
void init() {
dp[0][0] = 1;
for(int i = 1; i < MAXN; i++) {
for(int j = 0; j < 10; j++) {
for(int k = 0; k < 10; k++) {
if(j == 4 && k == 9) continue;
dp[i][j] += dp[i-1][k];
}
}
}
}
long long solve(long long n) {
long long sum = 0;
int a[100], len = 0;
while(n) {
a[len++] = n%10;
n /= 10;
}
a[len] = 0;//循环中取到了i+1
for(int i = len-1; i >= 0; i--) {
if(i == 0) a[i]++;//这里是取到了最后一个,其实可以再传参的时候往后一个数
for(int j = 0; j < a[i]; j++) {
if(a[i+1] == 4 && j == 9) continue;
sum += dp[i+1][j];
}
if(a[i] == 9 && a[i+1] == 4) break;
}
return sum-1;//去掉0, (0,n]
}
int main() {
ios::sync_with_stdio(false);
init();
int T;
cin >> T;
while(T--) {
long long n;
cin >> n;
cout << n - solve(n) << endl;
}
return 0;
}
<file_sep>#include<iostream>
#include<cstdio>
#include<cstring>
#include<string>
using namespace std;
const int MAXN = 18;
const int INF = 0x3f3f3f3f;
struct Course {
string name;
int end;
int cost;
}course[MAXN];
struct state{
int time; //当前状态话费的的时间点
int pre; //当前状态新加入的课程序号,即保留路径
int val;//当前状态的最少罚时
state() {
val = INF;
}
}dp[1<<MAXN];
void output(int s) {
if(!s) return;
else {
int index = dp[s].pre;
output(s - (1<<index));
cout << course[index].name << endl;
}
return;
}
int main() {
int T, N;
cin >> T;
while(T--) {
cin >> N;
for(int i = 0; i < N; i++) {
cin >> course[i].name >> course[i].end >> course[i].cost;
}
//初始化;
for(int i = 0; i < 1<<N; i++) {
dp[i].val = INF;
}
dp[0].val = 0;
dp[0].time = 0;
for(int i = 1; i < 1<<N; i++) {
//从后往前保证字典序
for(int j = N-1; j >= 0; j--) {
if(!(1<<j & i)) continue; //该课程不在该状态里
Course x = course[j];
state y = dp[i - (1<<j)];
int val = y.time + x.cost - x.end; //从没有学习j课程的状态y 转移到 状态i 增加的罚时
if(val < 0) val = 0;//罚时为0
if(dp[i].val > y.val + val) {//找到新的最少罚时的状态转移
dp[i].val = y.val+val;
dp[i].time = y.time + x.cost;
dp[i].pre = j;
}
}
}
cout << dp[(1<<N)-1].val << endl;
output((1<<N)-1);
}
}
<file_sep>#include<iostream>
#include<cstdio>
#include<cstring>
#include<string>
using namespace std;
int dp[15][15][3];//dp[i][j][k] = 取到i位,余数为j, 最后一个数是否为1 和 是否含有13
int N;
int str[10];
int getstr(int N) {
int len = 0;
while(N) {
str[len++] = N%10;
N /= 10;
}
return len-1;
}
int dfs(int pos, int s1, int s2, bool limit) {
if(pos == -1) return s1 == 0 && s2 == 2;
if(limit == false && dp[pos][s1][s2] != -1) return dp[pos][s1][s2];
int up = limit ? str[pos] : 9;
int ans = 0;
for(int i = 0; i <= up; i++) {
int temp;
if(s2 == 2 || (s2 == 1 && i == 3))
temp = 2;
else
temp = i == 1 ? 1 : 0;
ans += dfs(pos-1, (s1*10+i)%13, temp, limit && i == up);
}
if(limit == false) dp[pos][s1][s2] = ans;
return ans;
}
int solve(int N) {
int len = getstr(N);
return dfs(len, 0, 0, true);
}
int main() {
memset(dp, -1, sizeof(dp));
while(cin >> N) {
cout << solve(N) << endl;
}
return 0;
}
<file_sep>#include<iostream>
#include<cstdio>
#include<cstring>
#include<string>
using namespace std;
const int MAX_N = 105;
const int MAX_S = 65;
int vis[MAX_S];
int dp[MAX_N][MAX_S][MAX_S];
int main() {
ios::sync_with_stdio(false);
int n, m, tot = 0;
cin >> n >> m;
//打表
for(int s = 0; s < (1<<m); s++) {
bool flag = true;
//枚举每个位置
for(int i = 0; i < m; i++) {
if(s & (1<<i)) //当前状态s下第m个位置有炮塔
if( s&(1<<(i+1)) || s&(1<<(i+2)) ) flag = false;//后面两个位置是否有冲突
}
//当前状态s合法
if(flag) {
vis[tot++] = s; //记录所有不含地形的合法状态
}
}
memset(dp, 0, sizeof(dp));
int Max = 0;
for(int l = 1; l <= n; l++) {
string str;
cin >> str;
int d = 0;//地形状态
for(int i = 0; i < m; i++) {
if(str[i] == 'H') d |= 1;
d <<= 1;
}
d >>= 1; //多位移了一位
for(int i = 0; i < tot; i++) if(!(vis[i] & d)) //枚举加上地形后仍合法的状态
for(int j = 0; j < tot; j++) if(!(vis[j]&vis[i])) //枚举与当前行不冲突的上一行的状态
for(int k = 0; k < tot; k++) if( !(vis[k]&vis[j]) && !(vis[k]&vis[i]) ) {
//枚举与当前行和上一行都不冲突的上上一行的状态
dp[l][i][j] = max(dp[l][i][j], dp[l-1][j][k]+ __builtin_popcount(vis[i]) );
Max = max(Max, dp[l][i][j]);
}
}
cout << Max << endl;
}
<file_sep>#include<iostream>
#include<cstdio>
#include<cstring>
#include<string>
using namespace std;
const int MAXN = 15;
int n, m;
long long dp[2][1<<MAXN], vis[MAXN*MAXN][MAXN*MAXN];
int cur;
void update(int a , int b) {
if(b & (1<<m)) //保证第m位为1,即全部覆盖
dp[cur][b^(1<<m)] += dp[cur^1][a];//首位的1取反
}
long long solve() {
memset(dp, 0, sizeof(dp));
cur = 0;
dp[cur][(1<<m)-1] = 1;
for(int i = 0; i < n; i++) {
for(int j = 0; j < m; j++) {
cur ^= 1;
memset(dp[cur], 0, sizeof(dp[cur]));
for(int k = 0; k < (1<<m); k++) {
//不放
update(k, k<<1);
//竖着放
if(i && !(k&(1<<(m-1)))) update(k, (k<<1)^(1<<m)^1);//第一位最后一位置反
//横着放
if(j && !(k&1)) update(k, (k<<1)^3); //末两位置反
}
}
}
return dp[cur][(1<<m)-1];
}
int main() {
ios::sync_with_stdio(false);
memset(vis, -1, sizeof(vis));
while(cin >> n >> m) {
if(n < m) swap(n, m);
if(vis[n][m] < 0) vis[n][m] = solve();//记忆化
cout << vis[n][m] << endl;
}
return 0;
}
| 0b340de44168451e3770fbe40f4cf724e75e6e42 | [
"C++"
] | 10 | C++ | zhyjo/dp2 | e48c2edc9f8cb399435f227ee7423293879fe551 | 480fbb21fa552256973ec69a752255ddebd0dbc7 |
refs/heads/master | <file_sep>class Timer
#write your code here
@seconds
def initialize
@seconds = 0
end
def seconds=(val)
@seconds =val
end
def seconds
@seconds
end
def time_string
rest = @seconds
houres = rest /( 60 * 60)
if houres >=1
rest = rest - (houres * 3600)
end
minutes = rest/ 60
if minutes >=1
rest = rest - (minutes * 60)
end
seconds = rest
result = padded(houres) + ":" + padded(minutes) + ":" + padded(seconds)
return result
end
def padded number
if number >= 0 and number < 10
return '0' + number.to_s
else
return number.to_s
end
end
end
<file_sep>#write your code here
def translate str
def isVowel char
if char == "a" or char == "e" or char == "i" or char == "o" or char == "u"
return true
end
end
def pigLat string
word_arr = string.split(//)
if isVowel(word_arr[0])
result = word_arr.join + "ay"
elsif
if (not isVowel(word_arr[1]) and not isVowel(word_arr[2])) or (word_arr[1] == "q" and word_arr[2] == "u" and not isVowel(word_arr[0]))
firstlet = word_arr[0]
seconlet = word_arr[1]
thirdlet = word_arr[2]
word_arr.push(firstlet, seconlet,thirdlet)
3.times { word_arr.delete_at(0)}
result = word_arr.join + "ay"
elsif not isVowel(word_arr[1]) or (word_arr[0] == "q" and word_arr[1] == "u")
firstlet = word_arr[0]
seconlet = word_arr[1]
word_arr.push(firstlet, seconlet)
2.times {word_arr.delete_at(0)}
result = word_arr.join + "ay"
else
firstlet = word_arr[0]
word_arr.push(firstlet)
word_arr.delete_at(0)
result = word_arr.join + "ay"
end
end
return result
end
if str.include? " "
arrsent = str.split
i = 0
while i< arrsent.length
newElem = pigLat(arrsent[i])
arrsent[i] = newElem
i = i+1
end
main_result = arrsent.join(" ")
else
main_result = pigLat(str)
end
return main_result
end<file_sep>#write your code here
def echo var
var
end
def shout var
var.upcase!
var
end
def repeat var1 , var2=2
if var2 == 1
return var1 + " " + var1
else
result = (var1 + " ") * var2
return result.chop
end
end
def start_of_word var1 , var2
result = var1.slice(0..var2-1)
result
end
def first_word sentence
result = sentence.split
result[0]
end
def titleize title
result = title.split
result.each do |variable|
if variable != "and" and variable != "or" and variable != "the" and variable != "over"
variable.capitalize!
end
end
result[0].capitalize!
newres = result.join(" ")
return newres
end
<file_sep>class Book
# write your code here
def title=(val)
if val.include? " "
arrTitle = val.split
i = 0
while i < arrTitle.length
if arrTitle[i] == 'the' or arrTitle[i] == 'a' or arrTitle[i] == 'an' or arrTitle[i] == 'the' or arrTitle[i] == 'and' or arrTitle[i] == 'of' or arrTitle[i] == 'in'
else
new_val = arrTitle[i].capitalize
arrTitle[i] = new_val
end
i = i+1
end
firstWord = arrTitle[0].capitalize
arrTitle[0] = firstWord
val = arrTitle.join(" ")
else
val = val.capitalize
end
@title = val
end
def title
@title
end
end
<file_sep>My solution of Ruby project from The Odin project<file_sep>#write your code here
def ftoc numb
celsius = (5.00 / 9.00) * (numb - 32)
celsius
end
def ctof numb
fahrenheit = (9.00 / 5.00) * numb + 32
fahrenheit
end<file_sep>#write your code here
def add first_a, second_a
result_a = first_a+second_a
result_a
end
def subtract first_s, second_s
result_s = first_s-second_s
result_s
end
def sum array
if array.count == 0
return 0
end
result_arr = 0
i = 0
while i<array.length
result_arr = result_arr + array[i]
i = i + 1
end
result_arr
end
def multiply first_m, second_m
result_m = first_m * second_m
result_m
end
def pow first_p, second_p
result_p = 0
second_p.times do
result_p = first_p * first_p
end
result_p
end
def fact numb
if numb == 1 or numb == 0
result = 1
else
result = numb * fact(numb-1)
end
result
end | ed967be8eea3ed210ed3a08850b003c336e95b88 | [
"Markdown",
"Ruby"
] | 7 | Ruby | Sanandelo/ruby_test | 7ad93f564ae30ec2b0d0190d0d555f9ac0fceb3f | 2ddd1cb51495697361140300de3d625e22c280bc |
refs/heads/master | <file_sep>/**
* A simple, highly customizable logging plugin for node.js
*/
// Module imports
var dateFormat = require('dateformat'),
colors = require('colors'),
util = require("util");
// Container for Events registered by module
var events = { };
// Default Logging options
var options = {
level: 0,
format: "%timestamp% - %event%:%padding% %message%",
timestamp: "HH:MM:ss"
};
// Class Defintion of "log_event"
var LogEvent = (function() {
/**
* Constructor
*
* @param {Object} options
*/
function LogEvent( options ) {
this.event = options.event;
this.level = options.level || 0;
this.color = options.color || 'white';
};
/**
* Configuring an event
*
* @param {Object} config
* @return {LogEvent} this
*/
LogEvent.prototype.config = function( config ) {
for(var key in config) {
this[key] = config[key];
}
return this;
}
/**
* Calculates the padding space required for prettier output
*
* No parameter is required
* @return {String} padding_spaces
*/
LogEvent.prototype.__defineGetter__ ('padding', function() {
var maxLength = 0;
var paddingLength;
var padding = '';
for(var key in events) {
if (events.hasOwnProperty(key)) {
maxLength = maxLength < events[key].event.length
? events[key].event.length
: maxLength;
}
}
paddingLength = maxLength - this.event.length;
for(var i=0; i < paddingLength; i++) { padding += ' '; }
return padding;
});
/**
* Outputting the Event to Screen, only if its event is equal or above
* the configuration level. Uses `console.log`. Function signature
* resembles that of Node.js `util.format()`
*/
LogEvent.prototype.output = function(input) {
if(options.level <= this.level ) {
var message = util.format.apply(null, input);
var format = this.format || options.format;
var output = format
.replace( '%timestamp%', dateFormat( new Date(), this.timestamp || options.timestamp ) ) //timestamp
.replace( '%event%', this.event[ this.color ] ) //log event & color
.replace( '%padding%', this.padding )
.replace( '%message%', message );
console.log( output );
}
}
return LogEvent;
})();
/**
* Configures the Global Options
*
* @param {Object} config
*/
exports.config = function( config ) {
for(var key in config) {
if(options.hasOwnProperty(key)) {
options[key] = config[key];
}
}
return this;
}
/**
* High-order function. The returned function returns the "LogEvent" object
* registered on the "events" object with its key/property name as "e"
* if no arguments are passed. Otherwise it outputs it to console.
*
* @param {String} e
* @param {LogEvent|null}
*/
/**
 * Higher-order factory for per-event logger functions. The returned
 * function, called with no arguments, returns the registered LogEvent
 * (so it can be reconfigured); called with arguments, it formats and
 * prints them via LogEvent#output.
 *
 * @param {String} e - key of the event in the module-level `events` map
 * @return {Function}
 */
var nFn = function(e) {
  return function() {
    // strict comparison (was loose `==`)
    if (arguments.length === 0) {
      return events[e];
    }
    events[e].output(arguments);
  };
};
/**
* Defining new event types. "newEvents" is an object of options used to
* create new log events. The new event is defined on this "module" object
*
* @param {Object} newEvents
* @return {LogEvent} this
*/
exports.new = function(newEvents) {
for(event in newEvents) {
events[event] = new LogEvent( newEvents[event] );
this[event] = nFn(event);
}
return this;
}
/**
 * Definition of the built-in event types, in increasing severity order
 * (debug=0 < info=1 < warn=2 < error=3).
 */
exports.new({
  debug: { color: 'grey', level: 0, event: 'debug' },
  info: { color: 'green', level: 1, event: 'info' },
  warn: { color: 'yellow', level: 2, event: 'warning' },
  error: { color: 'red', level: 3, event: 'error' }
});
| 4fe679afa1c89293766aec4caba6b66724ab38bf | [
"JavaScript"
] | 1 | JavaScript | GochoMugo/custom-logger | db62575b5abdbb508784fed4bd1b52a0dcbe28dd | 09ddc4dc9e6800df9dbb5fba37a717d0f297a2aa |
refs/heads/master | <repo_name>Michele-web/app_cordova<file_sep>/cfa51/platforms/ios/platform_www/cordova_plugins.js
// Auto-generated by the Cordova CLI: registers the JS module of every
// installed plugin and the global names each one "clobbers".
// Do not edit by hand — regenerated on every prepare/build.
cordova.define('cordova/plugin_list', function(require, exports, module) {
module.exports = [
  {
    "id": "cordova-plugin-exit.exit",
    "file": "plugins/cordova-plugin-exit/www/exit.js",
    "pluginId": "cordova-plugin-exit",
    "clobbers": [
      "cordova.plugins.exit"
    ]
  },
  {
    "id": "cordova-plugin-inappbrowser.inappbrowser",
    "file": "plugins/cordova-plugin-inappbrowser/www/inappbrowser.js",
    "pluginId": "cordova-plugin-inappbrowser",
    "clobbers": [
      "cordova.InAppBrowser.open",
      "window.open"
    ]
  },
  {
    "id": "cordova-plugin-screen-orientation.screenorientation",
    "file": "plugins/cordova-plugin-screen-orientation/www/screenorientation.js",
    "pluginId": "cordova-plugin-screen-orientation",
    "clobbers": [
      "cordova.plugins.screenorientation"
    ]
  }
];
// Installed plugin versions, keyed by plugin id.
module.exports.metadata = {
  "cordova-plugin-exit": "1.0.3",
  "cordova-plugin-inappbrowser": "3.0.0",
  "cordova-plugin-whitelist": "1.3.3",
  "cordova-plugin-screen-orientation": "3.0.1"
};
});<file_sep>/cfa51/platforms/android/app/src/main/assets/www/js/theme.js
// Theme persistence: "1" = light (default), "0" = dark.
var DEFAULT_VALUE="1";
var DARK = "0";
// On page load, re-apply the theme saved in localStorage (falling back
// to the light default) and tick the matching radio button.
$(document).ready(function(){
    var valueTheme = localStorage.getItem('theme');
    // NOTE(review): `!= undefined` (loose) already matches null, so the
    // second test is redundant — kept as-is.
    if(valueTheme != undefined && valueTheme!= null){
        setTheme(valueTheme);
        setCheckTheme(valueTheme);
    }else{
        setTheme(DEFAULT_VALUE);
        setCheckTheme(DEFAULT_VALUE);
    }
});
// Theme switch: persist the currently checked radio's value and apply it.
$(document).on('click','#changeTheme',function(){
    var valueTheme = $('.radio_check').val();
    localStorage.setItem('theme',valueTheme);
    setTheme(valueTheme);
});
/**
 * Apply the given theme choice.
 *
 * @param valueTheme "0" selects the dark palette, anything else light.
 */
function setTheme(valueTheme){
    // strict comparison (was loose `==`): both operands are always
    // strings here (localStorage values / the string constants above).
    if(valueTheme === DARK){
        darkTheme();
    }else{
        lightTheme();
    }
}
// Tick the theme radio whose value matches the saved theme and tag it
// with the .radio_check class (read back by the #changeTheme handler).
function setCheckTheme(valueTheme){
    var check = $('input[type=radio].optionTheme').filter(function(){ return this.value==valueTheme});
    if(check[0]!= undefined){
        $(check[0]).addClass('radio_check');
        check[0].checked = true;
    }
}
// Dark palette: black backgrounds, white text, accent-coloured controls.
function darkTheme(){
    $('body').css({'background-color':'#000'});
    $('.bmd-layout-drawer').css({'background-color':'#000','color':'#fff'});
    $('hr.dividi').css({'border-top':'1px solid rgba(241, 241, 241, 0.99)'});
    $('label.bmd-label-floating').css({'color':'var(--colore_applicazione)'});
    $('.form-control, .custom-file-control, .is-focused .form-control, .is-focused .custom-file-control').css({'background-image':'linear-gradient(to top, var(--colore_applicazione) 2px, rgba(0, 150, 136, 0) 2px), linear-gradient(to top, var(--colore_applicazione) 1px, rgba(0, 0, 0, 0) 1px)'})
    $('.radio label, .is-focused .radio label, .radio-inline, .is-focused .radio-inline, .checkbox label, .is-focused .checkbox label, .checkbox-inline, .is-focused .checkbox-inline, .switch label, .is-focused .switch label').css({'color':'var(--colore_applicazione)'})
    $('.check').css({'border':'0.125rem solid var(--colore_applicazione)'});
    $('input[type=text]').css({'color':'#fff'});
    $('input[type=password]').css({'color':'#fff'});
    // Inputs sitting on light cards keep dark text.
    $('input[type=text].text_card').css({'color':'#000'});
    // BUG FIX: selector was 'input[type=password]text_card' (missing the
    // "." before the class), which matches nothing.
    $('input[type=password].text_card').css({'color':'#000'});
    $('.bmd-layout-backdrop').css({'background-color':'rgba(251, 248, 248, 0.5)'});
}
// Light palette: clears the inline overrides set by darkTheme() by
// assigning empty values, restoring the stylesheet defaults.
function lightTheme(){
    $('body').css({'background-color':''});
    $('.bmd-layout-drawer').css({'background-color':'','color':''});
    // $('hr.dividi').css({'border-top':'1px solid rgba(241, 241, 241, 0.99)'});
    $('label.bmd-label-floating').css({'color':''});
    $('.form-control, .custom-file-control, .is-focused .form-control, .is-focused .custom-file-control').css({'background-image':'linear-gradient(to top, var(--colore_applicazione) 2px, rgba(0, 150, 136, 0) 2px), linear-gradient(to top, rgba(0, 0, 0, 0.26) 1px, rgba(0, 0, 0, 0) 1px)'})
    $('.radio label, .is-focused .radio label, .radio-inline, .is-focused .radio-inline, .checkbox label, .is-focused .checkbox label, .checkbox-inline, .is-focused .checkbox-inline, .switch label, .is-focused .switch label').css({'color':'rgba(0, 0, 0, 0.26)'})
    $('input[type=text]').css({'color':''});
    $('input[type=password]').css({'color':''});
    $('.bmd-layout-backdrop').css({'background-color':'rgba(0, 0, 0, 0.5)'});
}
<file_sep>/cfa51/www/js/news.js
// Card context identifier consumed by creaCards() (defined elsewhere).
var context = 'news';
// Render a placeholder news card; real feed fetching is not wired up yet.
function verificaNews(){
    creaCards('Titolo informazione','contenuto informazione',context);
}<file_sep>/cfa51/platforms/android/app/src/main/assets/www/js/custom.js
// Wait for device API libraries to load
//
// Double-back-press-to-exit state flag, reset 2 s after the first press.
var doubleBackToExitPressedOnce = false;
// Leaves the splash view for the main content page.
function hideSplash() {
    window.location.href ="contenuto.html";
}
function onLoad() {
    document.addEventListener("deviceready", onDeviceReady, false);
}
//device APIs are available
//
function onDeviceReady() {
    document.addEventListener("pause", onPause, false);
    document.addEventListener("resume", onResume, false);
    document.addEventListener("menubutton", onMenuKeyDown, false);
    document.addEventListener('backbutton',onBackPress, false);
}
function onPause() {
    // Handle the pause event
}
function onResume() {
    // Handle the resume event
}
function onMenuKeyDown() {
    // Handle the menubutton event
}
// Android back button: first press warns, second press within 2 s exits.
function onBackPress() {
    if (cordova.platformId !== 'windows') {
        if(doubleBackToExitPressedOnce){
            cordova.plugins.exit();
        }
        doubleBackToExitPressedOnce = true;
        // NOTE(review): toast() is not defined in this file — presumably
        // a global helper; verify it exists, or the warning will throw.
        toast("Clicca ancora INDIETRO per uscire");
        setTimeout(function(){
            doubleBackToExitPressedOnce = false;
        }, 2000);
    }
}
// toolbar persistence
// Re-select the last visited section on page load.
$(document).ready(function(){
    // $("[data-role='header'],[data-role='footer']").toolbar();
    // $("#panelMenu").panel();
    if(localStorage.getItem('href')!= null){
        // NOTE(review): `href` is read but only #home is ever triggered —
        // the per-section dispatch below is commented out; confirm intent.
        var href = localStorage.getItem('href');
        $("#home").trigger( "click" );
        // if(href == '#prenotazioni'){
        // $("#prenota").trigger( "click" );
        // }
    }
});
// Home: open the gym website in the Cordova in-app browser.
$(document).on('click','#home',function(){
    var ref = cordova.InAppBrowser.open('http://www.crossfitarea51.com/rome', '_blank', 'location=yes');
    ref.show();
    //window.open("tel:+393926039664", "_blank");
});
// Booking: show login form or booking list depending on session state.
$(document).on('click','#prenota',function(){
    $.mobile.loading( "show" );
    // NOTE(review): fixed 2.5 s artificial delay before deciding which
    // panel to show — presumably simulating a network call; confirm.
    setTimeout( function(){
        var logged = localStorage.getItem("logged");
        if(!logged){
            $('#login').show();
        }else{
            $('#prenotati').show();
        }
        $.mobile.loading( "hide" );
    }, 2500);
});
// Login submit: validates that both fields are filled, marks the local
// session as logged-in, or shows a snackbar asking for credentials.
// (Removed a large block of dead, commented-out AJAX scaffolding; the
// real server-side authentication call is still to be implemented.)
$(document).on('click','#submitButton',function(){
    if($('#username').val().length > 0 && $('#password').val().length > 0){
        // TODO: perform the actual authentication request here.
        localStorage.setItem('logged',true);
    } else {
        // Material Design Lite snackbar prompting for the missing fields.
        var notification = document.querySelector('.mdl-js-snackbar');
        var data = {
            message: 'Inserire Username e Password',
            timeout: 10000
        };
        notification.MaterialSnackbar.showSnackbar(data);
    }
    return false; // cancel original event to prevent form submitting
});
// Highlight the clicked drawer entry and remember its target href.
function checkSelected(value){
    unselectListView();
    if($(value).children('span').hasClass('no_select')){
        $(value).children('span').addClass('clicked').addClass('select_listview');
        localStorage.setItem('href',$(value).attr('href'));
    }
}
// Clear any selection and navigate back to the landing page.
function goHome(){
    unselectListView();
    window.location.href ="index.html";
}
// Remove the selection styling from every drawer entry.
function unselectListView(){
    $('a span').removeClass('clicked').removeClass('select_listview');
}
//$(document).on('swipeleft swiperight',function(event){
//
//
//
// if ( $( ".ui-page-active" ).jqmData( "panel" ) !== "open" ) {
// if ( event.type === "swipeleft" ) {
// $( "#panelMenu" ).panel( "close" );
//
// } else if ( event.type === "swiperight" ) {
// $( "#panelMenu" ).panel( "open" );
//
// }
// }
//
//});
<file_sep>/cfa51/www/js/login.js
// "Remember me" checkbox: manually toggles both the `checked` attribute
// and the value ('on'/'off') that the submit handler reads back.
// NOTE(review): toggling via attr()/removeAttr() is fragile with modern
// jQuery (prop() is the usual API) — verify against the jQuery version.
$(document).on('click','#salva_credenziali',function(){
    var checked = $(this).attr('checked');
    if(checked == undefined){
        $(this).attr("checked", "checked");
        $(this).val('on');
    }else{
        $(this).removeAttr("checked");
        $(this).val('off');
    }
});
// Login submit: with both fields filled, optionally persist the session
// ("remember me") and go to the home page; otherwise prompt the user.
$(document).on('click','#submitButton',function(){
    if($('#username').val().length > 0 && $('#password').val().length > 0){
        // TODO: real login logic (server call) is not implemented yet.
        var checked = $('#salva_credenziali').val();
        if(checked == 'on'){
            localStorage.setItem('logged',true);
            localStorage.setItem('username',$('#username').val());
        }
        document.location.href ="home.html";
    } else {
        mostraToast('Inserire Username e Password');
    }
});
// Password recovery: validates the username field only; the actual
// recovery request is not implemented yet.
$(document).on('click','#recuperaButton',function(){
    if($('#username').val().length > 0){
        // TODO: recovery logic goes here.
        // localStorage.setItem('logged',true);
    } else {
        mostraToast('Inserire Username/Email');
    }
});
$(document).on('click','#logout',function(){
    logout();
});
// Clear the persisted session and return to the login page.
function logout(){
    localStorage.removeItem('logged');
    localStorage.removeItem('username');
    document.location.href ="login.html";
}<file_sep>/cfa51/platforms/ios/www/js/area51.js
// Wait for device API libraries to load
//
// Double-back-press-to-exit state flag, reset 2 s after the first press.
var doubleBackToExitPressedOnce = false;
function onLoad() {
    document.addEventListener("deviceready", onDeviceReady, false);
}
//device APIs are available
//
function onDeviceReady() {
    document.addEventListener("pause", onPause, false);
    document.addEventListener("resume", onResume, false);
    document.addEventListener("menubutton", onMenuKeyDown, false);
    document.addEventListener('backbutton',onBackPress, false);
}
function onPause() {
    // Handle the pause event
}
function onResume() {
    // Handle the resume event
}
function onMenuKeyDown() {
    // Handle the menubutton event
}
// Android back button: first press warns, second press within 2 s exits.
function onBackPress() {
    if (cordova.platformId !== 'windows') {
        if(doubleBackToExitPressedOnce){
            cordova.plugins.exit();
        }
        doubleBackToExitPressedOnce = true;
        // NOTE(review): toast() is not defined in this file; mostraToast()
        // below may be the intended helper — verify.
        toast("Clicca ancora INDIETRO per uscire");
        setTimeout(function(){
            doubleBackToExitPressedOnce = false;
        }, 2000);
    }
}
// Show a Bootstrap Material snackbar with the given message.
function mostraToast(messaggio){
    $.snackbar({content: messaggio});
}
$(document).ready(function(){
    $('body').bootstrapMaterialDesign();
});
// Login submit: validate fields and mark the local session logged-in.
$(document).on('click','#submitButton',function(){
    if($('#username').val().length > 0 && $('#password').val().length > 0){
        // TODO: real login logic (server call) is not implemented yet.
        localStorage.setItem('logged',true);
    } else {
        mostraToast('Inserire Username e Password');
    }
});
// Password recovery: validates the username field only.
$(document).on('click','#recuperaButton',function(){
    if($('#username').val().length > 0){
        // TODO: recovery logic goes here.
        // localStorage.setItem('logged',true);
    } else {
        mostraToast('Inserire Username/Email');
    }
});
"JavaScript"
] | 6 | JavaScript | Michele-web/app_cordova | b0b833cfd9c604c77e421ba646aa31e8bcaacb9a | e8e47c4c2de3dffe18650fb38bd81ddd8430bdfa |
refs/heads/master | <file_sep>package net.zsygfddsd.qujing.base.common;
/**
* Created by mac on 16/3/3.
*/
/**
 * Generic wrapper for the common JSON envelope every API response uses.
 *
 * Wire example: { "result": 0, "resultcode": 2, "msg": "参数错误", "data": null }
 *
 * @param <T> type of the payload carried in {@code data}
 */
public class ComRespInfo<T> {
    /** Overall success flag of the call. */
    private Boolean result;
    /** Numeric status code returned by the server. */
    private int resultcode;
    /** Human-readable status text. */
    private String message;
    /** Typed payload; may be null on failure. */
    private T data;

    public Boolean getResult() {
        return result;
    }

    public void setResult(Boolean result) {
        this.result = result;
    }

    public int getResultcode() {
        return resultcode;
    }

    public void setResultcode(int resultcode) {
        this.resultcode = resultcode;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    public T getData() {
        return data;
    }

    public void setData(T data) {
        this.data = data;
    }
}
<file_sep>apply plugin: 'com.android.application'
// android-apt: enables annotation processing (Dagger, ButterKnife, etc.).
apply plugin: 'com.neenbedankt.android-apt'
android {
    compileSdkVersion 23
    buildToolsVersion "23.0.3"
    defaultConfig {
        applicationId "net.zsygfddsd.qujing"
        minSdkVersion 14
        targetSdkVersion 23
        versionCode 1
        versionName "1.0"
    }
    buildTypes {
        release {
            minifyEnabled false
            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
        }
    }
    // Layout resources are split into per-kind subfolders; list them all
    // so the resource merger still finds every layout.
    sourceSets {
        main {
            res.srcDirs = [
                    'src/main/res/layouts/activity',
                    'src/main/res/layouts/fragment',
                    'src/main/res/layouts/view',
                    'src/main/res/layouts/cell',
                    'src/main/res/layouts',
                    'src/main/res'
            ]
        }
    }
}
dependencies {
    compile fileTree(include: ['*.jar'], dir: 'libs')
    testCompile 'junit:junit:4.12'
    compile 'com.android.support:appcompat-v7:24.2.1'
    compile 'com.android.support:recyclerview-v7:24.2.1'
    //dagger 2
    compile 'com.google.dagger:dagger:2.7'
    apt 'com.google.dagger:dagger-compiler:2.7'
    //butterknife
    compile 'com.jakewharton:butterknife:8.2.1'
    apt 'com.jakewharton:butterknife-compiler:8.2.1'
    //picasso
    compile 'com.squareup.picasso:picasso:2.5.2'
    //Rx stack: Retrofit 2 + Gson converter + RxJava adapter + RxLifecycle
    compile 'com.squareup.retrofit2:retrofit:2.1.0'
    compile 'com.squareup.retrofit2:converter-gson:2.1.0'
    compile 'io.reactivex:rxandroid:1.2.1'
    compile 'com.squareup.retrofit2:adapter-rxjava:2.1.0'
    compile 'com.trello:rxlifecycle:0.6.1'
    compile 'com.trello:rxlifecycle-components:0.6.1'
    compile 'com.squareup.okhttp3:logging-interceptor:3.4.1'
    //eventbus
    compile 'org.greenrobot:eventbus:3.0.0'
    //permissionsDispatcher
    compile 'com.github.hotchemi:permissionsdispatcher:2.1.3'
    apt 'com.github.hotchemi:permissionsdispatcher-processor:2.1.3'
    //leakcanary: real detector only in debug builds, no-ops elsewhere
    debugCompile 'com.squareup.leakcanary:leakcanary-android:1.4-beta2'
    releaseCompile 'com.squareup.leakcanary:leakcanary-android-no-op:1.4-beta2'
    testCompile 'com.squareup.leakcanary:leakcanary-android-no-op:1.4-beta2'
    //Bugly
    //    compile 'com.tencent.bugly:crashreport:2.2.2'
    //    compile 'com.tencent.bugly:nativecrashreport:3.0'
    //    compile 'com.android.support.constraint:constraint-layout:1.0.0-alpha8'
}
<file_sep>package net.zsygfddsd.qujing.modules.WelfareList;
import android.graphics.Bitmap;
import android.text.TextUtils;
import android.util.Log;
import android.widget.ImageView;
import android.widget.TextView;
import com.squareup.picasso.Picasso;
import com.squareup.picasso.Transformation;
import com.trello.rxlifecycle.FragmentEvent;
import net.zsygfddsd.qujing.R;
import net.zsygfddsd.qujing.base.adapter.GeneralRecyclerViewHolder;
import net.zsygfddsd.qujing.base.fragment.BaseRecyclerViewFragment;
import net.zsygfddsd.qujing.bean.ComRespInfo;
import net.zsygfddsd.qujing.bean.Welfare;
import net.zsygfddsd.qujing.common.utils.DeviceUtils;
import net.zsygfddsd.qujing.components.httpLoader.HttpLoader;
import net.zsygfddsd.qujing.components.httpLoader.RequestInfo;
import java.util.List;
import rx.Observable;
import rx.Subscriber;
import rx.android.schedulers.AndroidSchedulers;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
/**
* Created by mac on 16/5/12.
*/
/**
 * Paged list of {@link Welfare} items backed by the welfare HTTP API.
 * Loads pages via RxJava/Retrofit, binds each item's image (scaled to the
 * cell width via a Picasso transformation) and description text.
 */
public class WelfareListFragment extends BaseRecyclerViewFragment<Welfare> {
    @Override
    public void loadData(RequestInfo reqInfo, String pageSize, String page, boolean showDialog, boolean canCancel) {
        if (DeviceUtils.isHasNetWork()) {
            String type = reqInfo.getBodyParams().get("type");
            Observable<ComRespInfo<List<Welfare>>> observable = HttpLoader.getInstance().welfareHttp().getWelfareList(type, pageSize, page);
            // Unsubscribe automatically when the fragment is destroyed.
            observable.compose(this.<ComRespInfo<List<Welfare>>>bindUntilEvent(FragmentEvent.DESTROY))
                    .observeOn(AndroidSchedulers.mainThread())
                    .subscribeOn(Schedulers.io())
                    .subscribe(new Subscriber<ComRespInfo<List<Welfare>>>() {
                        @Override
                        public void onCompleted() {
                            if (isRefreshing()) {
                                completeRefreshing();
                            }
                        }
                        @Override
                        public void onError(Throwable e) {
                            showToast("获取失败!");
                            if (isRefreshing()) {
                                completeRefreshing();
                            }
                        }
                        @Override
                        public void onNext(ComRespInfo<List<Welfare>> listComRespInfo) {
                            if (!listComRespInfo.isError()) {
                                setHasNextPage(true);
                                items.clear();
                                items = listComRespInfo.getResults();
                                // isClear is set by pull-to-refresh: drop old pages first.
                                if (isClear) {
                                    itemDatas.clear();
                                    isClear = false;
                                }
                                itemDatas.addAll(items);
                                updateData();
                                if (itemDatas.size() == 0) {
                                    // TODO: 16/1/6 show the empty-state image here
                                }
                            } else {
                                showToast("获取失败!");
                            }
                        }
                    });
        } else {
            if (isRefreshing()) {
                completeRefreshing();
            }
            showToast("请检查网络连接");
        }
    }
    /** Binds one Welfare item: image (width-fitted) and description. */
    @Override
    public void bindChildViewsData(GeneralRecyclerViewHolder mViewHolder, Welfare itemData, int position) {
        final ImageView welfareImg = mViewHolder.getChildView(R.id.iv_welfare);
        TextView welfareDec = mViewHolder.getChildView(R.id.tv_welfare_dec);
        // Scale the bitmap down to the ImageView's width, preserving the
        // aspect ratio; bitmaps already narrower are returned untouched.
        // NOTE(review): welfareImg.getWidth() is read inside transform(),
        // which Picasso presumably runs off the main thread and possibly
        // before layout (width 0) — verify this is intended.
        Transformation transformation = new Transformation() {
            @Override
            public Bitmap transform(Bitmap source) {
                int targetWidth = welfareImg.getWidth();
                Log.i("welfareImg", "source.getHeight()=" + source.getHeight() + ",source.getWidth()=" + source.getWidth() + ",targetWidth=" + targetWidth);
                if (source.getWidth() == 0) {
                    return source;
                }
                // if the bitmap is narrower than the target width, keep it unscaled
                if (source.getWidth() < targetWidth) {
                    return source;
                } else {
                    // otherwise scale it down proportionally to the target width
                    double aspectRatio = (double) source.getHeight() / (double) source.getWidth();
                    int targetHeight = (int) (targetWidth * aspectRatio);
                    if (targetHeight != 0 && targetWidth != 0) {
                        Bitmap result = Bitmap.createScaledBitmap(source, targetWidth, targetHeight, false);
                        if (result != source) {
                            // Same bitmap is returned if sizes are the same
                            source.recycle();
                        }
                        return result;
                    } else {
                        return source;
                    }
                }
            }
            @Override
            public String key() {
                return "transformation" + " desiredWidth";
            }
        };
        if (!TextUtils.isEmpty(itemData.getUrl())) {
            Picasso.with(ct).load(itemData.getUrl()).transform(transformation)/*.resize(ScreenUtils.getScreenWidth(ct), DensityUtils.dp2px(ct, 200f))*/.into(welfareImg);
        }
        welfareDec.setText(itemData.getDesc());
    }
    /** Demo click handler: shows the clicked position in a toast via Rx. */
    @Override
    public void OnItemClicked(final Welfare itemData, final int position) {
        //        Observable<Welfare> clickObservable = Observable.create(new Observable.OnSubscribe<Welfare>() {
        //            @Override
        //            public void call(Subscriber<? super Welfare> subscriber) {
        //                try {
        //                    if (!subscriber.isUnsubscribed()) {
        //                        subscriber.onStart();
        //                        subscriber.onNext(itemData);
        //                        subscriber.onCompleted();
        //                    }
        //                } catch (Exception e) {
        //                    subscriber.onError(e);
        //                }
        //            }
        //        });
        Observable.just(position)
                .map(new Func1<Integer, String>() {
                    @Override
                    public String call(Integer integer) {
                        return "当前点击的位置是" + position;
                    }
                })
                .subscribeOn(AndroidSchedulers.mainThread())
                .observeOn(AndroidSchedulers.mainThread())
                .subscribe(new Subscriber<String>() {
                    @Override
                    public void onCompleted() {
                    }
                    @Override
                    public void onError(Throwable e) {
                    }
                    @Override
                    public void onNext(String s) {
                        showToast(s);
                    }
                });
    }
}
<file_sep>package net.zsygfddsd.qujing.modules.welfarelist;
import net.zsygfddsd.qujing.base.module.network_recyclerview.BasePageContract;
/**
* Created by mac on 16/7/24.
*/
/**
 * MVP contract for the welfare-list screen. Both roles inherit all of
 * their behaviour from the generic paged recycler-view contract; the
 * empty interfaces exist only to give this screen concrete type names.
 */
public class WelfareListContract {
    public interface View extends BasePageContract.IBaseRecyclerView<Presenter> {
    }
    public interface Presenter extends BasePageContract.IBaseRecyclerViewPresenter {
    }
}
<file_sep>package net.zsygfddsd.qujing.modules.WelfareList;
import android.graphics.Bitmap;
import android.support.annotation.LayoutRes;
import android.text.TextUtils;
import android.util.Log;
import android.widget.ImageView;
import android.widget.TextView;
import com.squareup.picasso.Picasso;
import com.squareup.picasso.Transformation;
import net.zsygfddsd.qujing.R;
import net.zsygfddsd.qujing.base.adapter.GeneralRecyclerViewHolder;
import net.zsygfddsd.qujing.base.module.network_recyclerview.BaseRecyclerViewNetFragment;
import net.zsygfddsd.qujing.bean.Welfare;
import rx.Observable;
import rx.Subscriber;
import rx.android.schedulers.AndroidSchedulers;
import rx.functions.Func1;
/**
* Created by mac on 16/5/12.
*/
/**
 * MVP version of the welfare list: the page loading lives in the
 * presenter; this fragment only binds item views and handles clicks.
 */
public class WelfareListFragment extends BaseRecyclerViewNetFragment<WelfareListContract.Presenter, Welfare> implements WelfareListContract.View{
    /** Factory: creates the fragment configured with the item cell layout. */
    public static WelfareListFragment newInstance(@LayoutRes int itemLayoutId){
        WelfareListFragment welfareListFragment = new WelfareListFragment();
        welfareListFragment.init(itemLayoutId);
        return welfareListFragment;
    }
    @Override
    public void setPresenter(WelfareListContract.Presenter presenter) {
        super.setPresenter(presenter);
    }
    /** Binds one Welfare item: image (width-fitted) and description. */
    @Override
    public void bindChildViewsData(GeneralRecyclerViewHolder mViewHolder, Welfare itemData, int position) {
        final ImageView welfareImg = mViewHolder.getChildView(R.id.iv_welfare);
        TextView welfareDec = mViewHolder.getChildView(R.id.tv_welfare_dec);
        // Scale the bitmap down to the ImageView's width, keeping aspect
        // ratio; narrower bitmaps pass through untouched.
        // NOTE(review): welfareImg.getWidth() is read inside transform(),
        // which Picasso presumably runs off the main thread and possibly
        // before layout (width 0) — verify this is intended.
        Transformation transformation = new Transformation() {
            @Override
            public Bitmap transform(Bitmap source) {
                int targetWidth = welfareImg.getWidth();
                Log.i("welfareImg", "source.getHeight()=" + source.getHeight() + ",source.getWidth()=" + source.getWidth() + ",targetWidth=" + targetWidth);
                if (source.getWidth() == 0) {
                    return source;
                }
                // if the bitmap is narrower than the target width, keep it unscaled
                if (source.getWidth() < targetWidth) {
                    return source;
                } else {
                    // otherwise scale it down proportionally to the target width
                    double aspectRatio = (double) source.getHeight() / (double) source.getWidth();
                    int targetHeight = (int) (targetWidth * aspectRatio);
                    if (targetHeight != 0 && targetWidth != 0) {
                        Bitmap result = Bitmap.createScaledBitmap(source, targetWidth, targetHeight, false);
                        if (result != source) {
                            // Same bitmap is returned if sizes are the same
                            source.recycle();
                        }
                        return result;
                    } else {
                        return source;
                    }
                }
            }
            @Override
            public String key() {
                return "transformation" + " desiredWidth";
            }
        };
        if (!TextUtils.isEmpty(itemData.getUrl())) {
            Picasso.with(ct).load(itemData.getUrl()).transform(transformation)/*.resize(ScreenUtils.getScreenWidth(ct), DensityUtils.dp2px(ct, 200f))*/.into(welfareImg);
        }
        welfareDec.setText(itemData.getDesc());
    }
    /** Demo click handler: shows the clicked position in a toast via Rx. */
    @Override
    public void OnItemClicked(final Welfare itemData, final int position) {
        Observable.just(position)
                .map(new Func1<Integer, String>() {
                    @Override
                    public String call(Integer integer) {
                        return "当前点击的位置是" + position;
                    }
                })
                .subscribeOn(AndroidSchedulers.mainThread())
                .observeOn(AndroidSchedulers.mainThread())
                .subscribe(new Subscriber<String>() {
                    @Override
                    public void onCompleted() {
                    }
                    @Override
                    public void onError(Throwable e) {
                    }
                    @Override
                    public void onNext(String s) {
                        showToast(s);
                    }
                });
    }
}
<file_sep>package com.zsygfddsd.spacestation.base.module.network_recyclerview;
import com.zsygfddsd.spacestation.base.adapter.multirecycler.ItemEntityList;
import com.zsygfddsd.spacestation.base.module.base.BaseContract;
import com.zsygfddsd.spacestation.base.module.network.BaseNetContract;
/**
* Created by mac on 16/6/11.
*/
/**
 * MVP contract shared by all paged recycler-view screens: the view
 * exposes list/refresh plumbing, the presenter drives initial load,
 * load-more and pull-to-refresh.
 */
public class BasePageContract {
    public interface IBaseRecyclerView<T extends IBaseRecyclerViewPresenter> extends BaseNetContract.IBaseNetView<T> {
        void setHasNextPage(boolean hasNext);
        int getItemLayoutId();
        int getBottomViewLayoutId();
        void setRefreshEnable(boolean enable);
        ItemEntityList getItemEntityList();
        //        List<D> getItemDatas();
        void updateData();
        //
        //        void updateData(List<D> itemdatas);
        //
        //        void updateData(int position);
        void showRefreshIndication();
        // NOTE(review): misspelled ("Infication") but kept for source
        // compatibility with every implementing view.
        void hideRefreshInfication();
    }
    public interface IBaseRecyclerViewPresenter extends BaseContract.IBasePresenter {
        void onInitData();
        void onLoadMore();
        void onLoadRefresh();
    }
}
<file_sep>package com.zsygfddsd.spacestation.base.activity;
import android.app.ProgressDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.content.pm.ActivityInfo;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v7.app.AlertDialog;
import android.util.Log;
import android.widget.Toast;
import com.trello.rxlifecycle.components.support.RxAppCompatActivity;
import com.zsygfddsd.spacestation.base.BaseApplication;
import com.zsygfddsd.spacestation.base.module.network.BaseNetContract;
/**
 * Base activity: registers itself with the application's activity stack,
 * locks orientation to portrait and provides shared UI helpers (toast,
 * loading dialog, common error/empty/no-network messages).
 */
public class BaseActivity extends RxAppCompatActivity implements BaseNetContract.INetView {
    public Context mContext;
    public ProgressDialog pDialog;
    // Single reusable toast so repeated messages replace each other.
    private Toast toast;
    private AlertDialog toLoginDialog;
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        this.mContext = this;
        ((BaseApplication) getApplication()).addActivity(this);
        /**
         * Lock the activity to portrait orientation.
         */
        setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT);
    }
    @Override
    protected void onDestroy() {
        super.onDestroy();
        ((BaseApplication) getApplication()).removeActivity(this);
    }
    public void showToast(String content) {
        if (toast == null) {
            toast = Toast.makeText(mContext, content, Toast.LENGTH_SHORT);
        } else {
            toast.setText(content);
        }
        toast.show();
    }
    /**
     * Show the shared loading dialog; cancelling it notifies the optional
     * listener (used e.g. to unsubscribe an in-flight request).
     */
    @Override
    public void showLoading(boolean cancelable, @Nullable final ILoadingCancelListener listener) {
        if (pDialog == null) {
            pDialog = new ProgressDialog(mContext);
        }
        pDialog.setCancelable(cancelable);
        pDialog.setOnCancelListener(new DialogInterface.OnCancelListener() {
            @Override
            public void onCancel(DialogInterface dialog) {
                if (listener != null) {
                    listener.onLoadCancelListener();
                }
                hideLoading();
            }
        });
        pDialog.setMessage("Loading...");
        Log.e("Thread", "showLoading线程的名字是---------" + Thread.currentThread().getName());
        pDialog.show();
    }
    @Override
    public void hideLoading() {
        if (pDialog != null && pDialog.isShowing()) {
            Log.e("Thread", "hideLoading线程的名字是---------" + Thread.currentThread().getName());
            pDialog.dismiss();
            pDialog = null; // drop the dialog so it is rebuilt next time
        }
    }
    public void showLoadingError() {
        showToast("获取失败");
    }
    public void showEmptyPage() {
        showToast("暂无数据");
    }
    public void showNoNetWork() {
        showToast("网络连接失败");
    }
    // Re-login dialog: implementation currently disabled pending the
    // login flow being wired up.
    @Override
    public void showToLoginDialog() {
        //        if (toLoginDialog == null) {
        //            toLoginDialog = new AlertDialog.Builder(mContext)
        //                    .setTitle("重新登录")
        //                    .setMessage("登录过期,请重新登录")
        //                    .setNegativeButton("取消", new DialogInterface.OnClickListener() {
        //                        @Override
        //                        public void onClick(DialogInterface dialog, int which) {
        //                            dialog.dismiss();
        //                        }
        //                    })
        //                    .setPositiveButton("确定", new DialogInterface.OnClickListener() {
        //                        @Override
        //                        public void onClick(DialogInterface dialog, int which) {
        //                            Intent toLogin = new Intent(mContext, LoginActivity.class);
        //                            mContext.startActivity(toLogin);
        //                            dialog.dismiss();
        //                        }
        //                    }).create();
        //        }
        //        toLoginDialog.show();
    }
}
<file_sep>package net.zsygfddsd.qujing.components.httpLoader;
import java.util.concurrent.TimeUnit;
import okhttp3.OkHttpClient;
import retrofit2.Retrofit;
import retrofit2.adapter.rxjava.RxJavaCallAdapterFactory;
import retrofit2.converter.gson.GsonConverterFactory;
/**
* Created by mac on 16/7/19.
*/
/**
 * Singleton entry point to the Retrofit HTTP layer. Builds one Retrofit
 * instance (Gson + RxJava adapters, 5 s connect timeout) and hands out
 * lazily-created service interfaces.
 */
public class HttpLoader implements HttpContract {
    private static final int DEFAULT_TIMEOUT = 5;
    private Retrofit retrofit;
    // Lazily created in welfareHttp(); reused afterwards.
    private WelfareService welfareHttp;
    private HttpLoader() {
        // Build the OkHttpClient by hand so the connect timeout can be set.
        OkHttpClient.Builder builder = new OkHttpClient.Builder();
        builder.connectTimeout(DEFAULT_TIMEOUT, TimeUnit.SECONDS);
        retrofit = new Retrofit.Builder()
                .client(builder.build())
                .addConverterFactory(GsonConverterFactory.create())
                .addCallAdapterFactory(RxJavaCallAdapterFactory.create())
                //                .addCallAdapterFactory(RxJavaCallAdapterFactory.create())
                .baseUrl(WelfareService.BaseUrl)
                .build();
    }
    public static HttpLoader getInstance() {
        return HttpLoaderHolder.instance;
    }
    // Initialization-on-demand holder idiom: thread-safe lazy singleton.
    private static class HttpLoaderHolder {
        private static final HttpLoader instance = new HttpLoader();
    }
    @Override
    public WelfareService welfareHttp() {
        if (welfareHttp == null) {
            welfareHttp = retrofit.create(WelfareService.class);
        }
        return welfareHttp;
    }
}
<file_sep>package net.zsygfddsd.qujing.common.helpers.ImgLoadHelper;
/**
* Created by mac on 16/5/12.
*/
// Placeholder for image-loading helpers; no implementation yet.
public class ImageLoadHelper {
}
<file_sep>package com.zsygfddsd.spacestation.base.module.network_refresh;
import android.os.Bundle;
import android.support.annotation.CallSuper;
import android.support.annotation.Nullable;
import android.support.v4.widget.SwipeRefreshLayout;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.FrameLayout;
import com.zsygfddsd.spacestation.R;
import com.zsygfddsd.spacestation.base.module.network.BaseNetFragment;
/**
* Created by mac on 15/12/19.
* T: 是IBaseRecyclerViewPresenter
* D: 是item的bean
*/
/**
 * Base fragment that wraps subclass content in a SwipeRefreshLayout.
 * Subclasses supply the inner view (initView) and its data (initData);
 * pull-to-refresh is forwarded to the presenter.
 *
 * T: the presenter type; DATA: the item bean type.
 */
public abstract class BaseRefreshFragment<T extends BaseRefreshContract.IBaseRefreshPresenter, DATA> extends BaseNetFragment<T> implements BaseRefreshContract.IBaseRefreshView<T, DATA>, SwipeRefreshLayout.OnRefreshListener {
    protected SwipeRefreshLayout refreshView;
    private BaseRefreshContract.IBaseRefreshPresenter mPresenter;
    // Container the subclass view is injected into.
    private FrameLayout refreshContentView;
    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
    }
    @Nullable
    @Override
    public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
        // Inflate the refresh shell, then mount the subclass content inside it.
        View view = inflater.inflate(R.layout.yys_frag_com_refresh, null);
        refreshView = (SwipeRefreshLayout) view.findViewById(R.id.com_refreshLayout);
        refreshView.setOnRefreshListener(this);
        refreshContentView = (FrameLayout) view.findViewById(R.id.frame_refresh_content);
        refreshContentView.addView(initView(inflater, container, savedInstanceState));
        return view;
    }
    /** Builds the content view placed inside the refresh layout. */
    protected abstract View initView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState);
    /** Loads the content's initial data (called from onActivityCreated). */
    protected abstract void initData(Bundle savedInstanceState);
    @Override
    public void onActivityCreated(@Nullable Bundle savedInstanceState) {
        super.onActivityCreated(savedInstanceState);
        initData(savedInstanceState);
    }
    @CallSuper
    @Override
    public void setPresenter(T presenter) {
        super.setPresenter(presenter);
        mPresenter = presenter;
    }
    @Override
    public void onRefresh() {
        mPresenter.onRefreshData();
    }
    @Override
    public void showRefreshIndication() {
        if (!refreshView.isRefreshing()) {
            refreshView.setRefreshing(true);
        }
    }
    @Override
    public void hideRefreshInfication() {
        if (refreshView.isRefreshing()) {
            refreshView.setRefreshing(false);
        }
    }
}
<file_sep>package net.zsygfddsd.qujing.components.httpLoader;
/**
* Created by mac on 16/7/19.
*/
/** Contract of the HTTP layer: one accessor per Retrofit service. */
public interface HttpContract {
    // Lazily-created Retrofit service for the welfare API endpoints.
    WelfareService welfareHttp();
}
<file_sep>package net.zsygfddsd.qujing.common.helpers.http.transformer;
import net.zsygfddsd.qujing.base.common.ComRespInfo;
import net.zsygfddsd.qujing.base.module.network.BaseNetContract;
import rx.Observable;
import rx.Subscriber;
import rx.functions.Action0;
/**
* Created by mac on 16/7/26.
* subscriber订阅subscribe之前和onCompleted()/onError()执行时的操作,
* 比如:
* 1,showLoading(),hideLoading(),etc
*/
/**
 * Observable.Transformer that brackets a network stream with UI work:
 * shows the view's loading dialog on subscribe and hides it on
 * termination (both onCompleted and onError). Cancelling the dialog
 * unsubscribes the in-flight request.
 */
public class EmitBeforeAndAfterTransformer<T> implements Observable.Transformer<ComRespInfo<T>, ComRespInfo<T>> {
    private final BaseNetContract.IBaseNetView netView;
    private final Subscriber subscriber;
    private final boolean canShowLoading;
    private final boolean canLoadCelable;

    /**
     * @param netView        view owning the loading indicator
     * @param subscriber     subscription to cancel if the dialog is dismissed
     * @param canShowLoading whether to show the loading dialog at all
     * @param canLoadCelable whether that dialog is user-cancelable
     */
    public EmitBeforeAndAfterTransformer(BaseNetContract.IBaseNetView netView, Subscriber subscriber, boolean canShowLoading, boolean canLoadCelable) {
        this.netView = netView;
        this.subscriber = subscriber;
        this.canShowLoading = canShowLoading;
        this.canLoadCelable = canLoadCelable;
    }

    /**
     * Convenience overload: non-cancelable loading dialog, always shown.
     * Delegates to the main constructor instead of duplicating the field
     * assignments (the previous copy risked drifting out of sync).
     */
    public EmitBeforeAndAfterTransformer(BaseNetContract.IBaseNetView netView, Subscriber subscriber) {
        this(netView, subscriber, true, false);
    }

    @Override
    public Observable<ComRespInfo<T>> call(Observable<ComRespInfo<T>> comRespInfoObservable) {
        return comRespInfoObservable.doOnSubscribe(new Action0() {
            @Override
            public void call() {
                if (canShowLoading) {
                    netView.showLoading(canLoadCelable, new BaseNetContract.IBaseNetView.ILoadingCancelListener() {
                        @Override
                        public void onLoadCancelListener() {
                            // user dismissed the dialog: stop the request
                            if (!subscriber.isUnsubscribed()) {
                                subscriber.unsubscribe();
                            }
                        }
                    });
                }
            }
        }).doOnTerminate(new Action0() {
            @Override
            public void call() {
                // fires on both onCompleted and onError
                if (canShowLoading) {
                    netView.hideLoading();
                }
            }
        });
    }
}
<file_sep>package net.zsygfddsd.qujing.base.adapter;
import android.content.Context;
import android.view.View;
import android.view.ViewGroup;
import android.widget.BaseAdapter;
import java.util.ArrayList;
import java.util.List;
/***********************************************
 * Class:   GeneralListAdapter (abstract)
 * Purpose: A ListView adapter that removes the usual boilerplate.
 * Author:  yanys
 * Date:    2015-07-10 10:01:11
 * Version: 1.0
 * Note:    Subclass this adapter and implement SetChildViewData.
 ***********************************************/
public abstract class GeneralListAdapter<T> extends BaseAdapter {

    public Context ct;
    public List<T> data;
    private int itemLayoutId;

    /**
     * Generic ListView adapter.
     *
     * @param ct           context
     * @param data         list data backing the adapter, e.g. List<HashMap<K, V>>
     * @param itemLayoutId layout resource id for one item view
     */
    public GeneralListAdapter (Context ct, List<T> data, int itemLayoutId) {
        this.ct = ct;
        this.data = data;
        this.itemLayoutId = itemLayoutId;
    }

    /** Starts with an empty list; fill it later via update(). */
    public GeneralListAdapter (Context ct, int itemLayoutId) {
        this (ct, new ArrayList<T> (), itemLayoutId);
    }

    @Override
    public int getCount () {
        if (data == null) {
            return 0;
        }
        return data.size ();
    }

    @Override
    public T getItem (int position) {
        // getCount() treats a null list as empty, so getItem/getItemId must
        // also null-check: a null list can arrive via the constructor or
        // update(null) and would otherwise throw an NPE here.
        if (data == null || data.isEmpty ()) {
            return null;
        }
        return data.get (position);
    }

    @Override
    public long getItemId (int position) {
        if (data == null || data.isEmpty ()) {
            return 0;
        }
        return position;
    }

    @Override
    public View getView (int position, View convertView, ViewGroup parent) {
        // Recycle/inflate via the ViewHolder helper, then let the subclass
        // bind this row's data.
        final ViewHolder mViewHolder = getViewHolder (position, convertView, parent);
        SetChildViewData (mViewHolder, data.get (position), position);
        return mViewHolder.getView ();
    }

    protected ViewHolder getViewHolder (int position, View convertView, ViewGroup parent) {
        return ViewHolder.Create (ct, convertView, parent, itemLayoutId, position);
    }

    /** Replaces the backing list and redraws. */
    public void update (List<T> data) {
        this.data = data;
        notifyDataSetChanged ();
    }

    /**
     * Binds this item's data to the child views of the item layout.
     *
     * @param mViewHolder ViewHolder helper for the row
     * @param itemData    the data assigned to this item
     * @param position    the item's position
     */
    public abstract void SetChildViewData (ViewHolder mViewHolder, T itemData, int position);
}
<file_sep>package net.zsygfddsd.qujing.common.utils;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.net.Uri;
import android.telephony.TelephonyManager;
import android.text.TextUtils;
import android.util.Log;
/** Small collection of device/system helpers bound to the application Context. */
public class DeviceUtils {
    private Context context;

    public DeviceUtils(Context context) {
        // Hold the application context so an Activity is never leaked.
        this.context = context.getApplicationContext();
    }

    /**
     * Checks whether the network can be used.
     * NOTE(review): isRoaming()/isAvailable() do not imply an active
     * connection — confirm whether isConnected() alone was intended.
     */
    public boolean isHasNetWork() {
        ConnectivityManager cm = (ConnectivityManager) context
                .getSystemService(Context.CONNECTIVITY_SERVICE);
        NetworkInfo network = cm.getActiveNetworkInfo();
        return network != null && (network.isConnected() || network.isRoaming() || network.isAvailable());
    }

    /**
     * Returns the telephony device id.
     * NOTE(review): may return null on devices without telephony and requires
     * READ_PHONE_STATE — verify callers handle both cases.
     */
    public String getDeviceId() {
        TelephonyManager tm = (TelephonyManager) context
                .getSystemService(Context.TELEPHONY_SERVICE);
        return tm.getDeviceId();
    }

    /** Opens the dialer pre-filled with phoneNum (ACTION_DIAL does not place the call). */
    public void callPhone(Activity activity, String phoneNum) {
        if (!TextUtils.isEmpty(phoneNum)) {
            Intent call = new Intent(Intent.ACTION_DIAL, Uri.parse("tel:" + phoneNum));
            activity.startActivity(call);
        } else {
            Log.e("DeviceUtils", "电话号码为空");
        }
    }
}
<file_sep>package net.zsygfddsd.qujing.common.helpers.http.Subscriber;
import android.content.Context;
import android.support.annotation.CallSuper;
import net.zsygfddsd.qujing.base.common.ComRespInfo;
import net.zsygfddsd.qujing.base.module.network.BaseNetContract;
import net.zsygfddsd.qujing.common.utils.DeviceUtils;
import rx.Subscriber;
/**
 * Created by mac on 16/7/27.
 *
 * Base subscriber that (a) aborts the subscription up-front when there is no
 * network connectivity, and (b) is the hook point for shared response-error
 * handling in onNext.
 */
public abstract class NetAndErrorCheckerSubscriber<T> extends Subscriber<ComRespInfo<T>> {
    private Context context;
    private BaseNetContract.INetView netView;

    public NetAndErrorCheckerSubscriber(Context context, BaseNetContract.INetView netView) {
        this.context = context;
        this.netView = netView;
    }

    @Override
    public void onStart() {
        super.onStart();
        // No network: cancel the request immediately and tell the view.
        if (!new DeviceUtils(context).isHasNetWork()) {
            if (!isUnsubscribed()) {
                unsubscribe();
            }
            netView.showNoNetWork();
        }
    }

    @CallSuper
    @Override
    public void onNext(ComRespInfo<T> tComRespInfo) {
        // TODO: 2016/10/17 shared errorCode handling, e.g. force re-login on
        // result codes 97/99 — see the commented-out sketch below.
        // if ((tComRespInfo.getResult() != 1) && tComRespInfo.getResultcode() == 99 || tComRespInfo.getResultcode() == 97) {
        // netView.showToLoginDialog();
        // }
    }
}
<file_sep>package net.zsygfddsd.qujing.modules.WelfareList;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.v7.app.AppCompatActivity;
import android.widget.FrameLayout;
import net.zsygfddsd.qujing.R;
import net.zsygfddsd.qujing.common.URLs;
import net.zsygfddsd.qujing.common.utils.FragUtils;
import net.zsygfddsd.qujing.components.HttpVolley.RequestInfo;
import butterknife.BindView;
import butterknife.ButterKnife;
/**
 * Hosts the welfare list screen: installs a WelfareListFragment into the
 * main frame via FragUtils.
 */
public class WelfareListActivity extends AppCompatActivity {

    private static String Tag_WelfareListFragment = "WelfareListFragment";

    @BindView(R.id.mainFrame)
    FrameLayout mainFrame;

    private FragUtils fragUtils;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_welfare_list);
        ButterKnife.bind(this);
        // Use the tag constant for the fragment registry (instead of a second
        // literal) and compare tags with equals(): reference comparison (==)
        // only happens to work for compile-time interned strings and breaks
        // for any tag constructed at runtime.
        fragUtils = new FragUtils(getSupportFragmentManager(), R.id.mainFrame, new String[]{Tag_WelfareListFragment}, new FragUtils.IFragmentInitMethods() {
            @Override
            public Fragment initFrag(String fragTag) {
                Fragment tempFrag = null;
                if (Tag_WelfareListFragment.equals(fragTag)) {
                    WelfareListFragment welfareListFragment = new WelfareListFragment();
                    welfareListFragment.init(Tag_WelfareListFragment, new RequestInfo(URLs.GET_LIST_WELFARE), true, false, R.layout.item_welfare);
                    tempFrag = welfareListFragment;
                }
                return tempFrag;
            }
        });
        fragUtils.showFragment(Tag_WelfareListFragment);
    }
}
<file_sep># android-architectures-demos
A set of demo apps for learning the major Android app architectures:
* qujing : Volley
* qujingRetrofit : Retrofit2
* qujingRx : Retrofit 2 + RxJava + RxAndroid + RxLifecycle
* qujingRxReMVP : MVP + Retrofit 2 + RxJava + RxAndroid + RxLifecycle
* qujingRxReMVP+ : a mature MVP architecture proven in real-world projects
* qujingRxReMVPDagger :Rxjava + MVP + Retrofit 2 + Dagger 2
<file_sep>package net.zsygfddsd.qujing.components.httpLoader.transformer;
import net.zsygfddsd.qujing.bean.ComRespInfo;
import rx.Observable;
import rx.android.schedulers.AndroidSchedulers;
import rx.schedulers.Schedulers;
/**
 * Created by mac on 16/7/26.
 *
 * Centralised Scheduler handling: subscribe (and unsubscribe) on the IO
 * pool, deliver results on the Android main thread.
 */
public class SchedulerTransformer<T> implements Observable.Transformer<ComRespInfo<T>, ComRespInfo<T>> {
    public SchedulerTransformer() {
    }

    @Override
    public Observable<ComRespInfo<T>> call(Observable<ComRespInfo<T>> comRespInfoObservable) {
        return comRespInfoObservable
                .subscribeOn(Schedulers.io())
                .observeOn(AndroidSchedulers.mainThread())
                .unsubscribeOn(Schedulers.io());
    }
}
<file_sep>package net.zsygfddsd.qujing.common.widgets;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.graphics.Rect;
import android.support.annotation.ColorInt;
import android.support.v7.widget.RecyclerView;
import android.util.TypedValue;
import android.view.View;
/**
 * Divider drawing rules: lines extend past the child on all four sides, so
 * the divider colour must be fully opaque, otherwise the crossings visibly
 * overlap.
 */
public abstract class DividerGridItemDecoration extends RecyclerView.ItemDecoration {

    private Paint mPaint;
    private int lineWidth; // divider width in px

    /** A single color value in the form 0xAARRGGBB. */
    private int colorRGB;

    private boolean isLastItemShowDivider = true;
    private boolean isLastItemShowTopDivider = true;

    public DividerGridItemDecoration(Context context, int lineWidthDp, @ColorInt int mColorRGB) {
        // Delegate to the float overload — the two constructors were identical.
        this(context, (float) lineWidthDp, mColorRGB);
    }

    public DividerGridItemDecoration(Context context, float lineWidthDp, @ColorInt int mColorRGB) {
        this.colorRGB = mColorRGB;
        this.lineWidth = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, lineWidthDp, context.getResources().getDisplayMetrics());
        mPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
        mPaint.setColor(colorRGB);
        mPaint.setStyle(Paint.Style.FILL);
    }

    @Override
    public void onDraw(Canvas c, RecyclerView parent, RecyclerView.State state) {
        // Draw all four divider lines for every visible child.
        drawChildTopHorizontal(c, parent);
        drawChildBottomHorizontal(c, parent);
        drawChildLeftVertical(c, parent);
        drawChildRightVertical(c, parent);
    }

    public void drawChildBottomHorizontal(Canvas c, RecyclerView parent) {
        int childCount = parent.getChildCount();
        for (int i = 0; i < childCount; i++) {
            View child = parent.getChildAt(i);
            RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
                    .getLayoutParams();
            int left = child.getLeft() - params.leftMargin - lineWidth;
            int right = child.getRight() + params.rightMargin + lineWidth;
            int top = child.getBottom() + params.bottomMargin;
            int bottom = top + lineWidth;
            // Skip the last child's bottom line when configured to hide it.
            if (!isLastItemShowDivider && i == childCount - 1) {
                continue;
            }
            c.drawRect(left, top, right, bottom, mPaint);
        }
    }

    /** Configures whether the last item draws its bottom divider. */
    public DividerGridItemDecoration configLastItemShowDivider(boolean isLastItemShowDivider) {
        this.isLastItemShowDivider = isLastItemShowDivider;
        return this;
    }

    /** Configures whether the last item draws its top divider. */
    public DividerGridItemDecoration configLastItemShowTopDivider(boolean isLastItemShowTopDivider) {
        this.isLastItemShowTopDivider = isLastItemShowTopDivider;
        return this;
    }

    public void drawChildTopHorizontal(Canvas c, RecyclerView parent) {
        int childCount = parent.getChildCount();
        for (int i = 0; i < childCount; i++) {
            View child = parent.getChildAt(i);
            RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
                    .getLayoutParams();
            int left = child.getLeft() - params.leftMargin - lineWidth;
            int right = child.getRight() + params.rightMargin + lineWidth;
            int bottom = child.getTop() - params.topMargin;
            int top = bottom - lineWidth;
            // Skip the last child's top line when configured to hide it.
            if (!isLastItemShowTopDivider && i == childCount - 1) {
                continue;
            }
            c.drawRect(left, top, right, bottom, mPaint);
        }
    }

    public void drawChildLeftVertical(Canvas c, RecyclerView parent) {
        int childCount = parent.getChildCount();
        for (int i = 0; i < childCount; i++) {
            View child = parent.getChildAt(i);
            RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
                    .getLayoutParams();
            int top = child.getTop() - params.topMargin - lineWidth;
            int bottom = child.getBottom() + params.bottomMargin + lineWidth;
            int right = child.getLeft() - params.leftMargin;
            int left = right - lineWidth;
            c.drawRect(left, top, right, bottom, mPaint);
        }
    }

    public void drawChildRightVertical(Canvas c, RecyclerView parent) {
        int childCount = parent.getChildCount();
        for (int i = 0; i < childCount; i++) {
            View child = parent.getChildAt(i);
            RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
                    .getLayoutParams();
            int top = child.getTop() - params.topMargin - lineWidth;
            int bottom = child.getBottom() + params.bottomMargin + lineWidth;
            int left = child.getRight() + params.rightMargin;
            int right = left + lineWidth;
            c.drawRect(left, top, right, bottom, mPaint);
        }
    }

    @Override
    public void getItemOffsets(Rect outRect, View view, RecyclerView parent, RecyclerView.State state) {
        // outRect is only a carrier for the left/right/top/bottom offsets the
        // item should reserve for its dividers (see the framework source).
        int itemPosition = ((RecyclerView.LayoutParams) view.getLayoutParams()).getViewLayoutPosition();
        boolean[] sideOffsetBooleans = getItemSidesIsHaveOffsets(itemPosition);
        int left = sideOffsetBooleans[0] ? lineWidth : 0;
        int top = sideOffsetBooleans[1] ? lineWidth : 0;
        int right = sideOffsetBooleans[2] ? lineWidth : 0;
        int bottom = sideOffsetBooleans[3] ? lineWidth : 0;
        outRect.set(left, top, right, bottom);
    }

    /**
     * Which sides of the given item reserve divider space.
     * Order: left, top, right, bottom.
     *
     * @return boolean[4]
     */
    public abstract boolean[] getItemSidesIsHaveOffsets(int itemPosition);
}
<file_sep>package net.zsygfddsd.qujing.base.module.network_recyclerview;
/**
 * Created by mac on 16/7/27.
 *
 * Paging constants shared by the paged list components.
 */
public interface PageConfig {
    // Number of items requested per page.
    int PageSize = 4;
}
<file_sep>package net.zsygfddsd.qujing.base.adapter;
import android.content.Context;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.ViewGroup;
import java.util.HashMap;
import java.util.List;
/**
 * Created by mac on 16/1/6.
 *
 * RecyclerView adapter supporting an optional single header and footer view
 * in addition to the content items.
 */
public abstract class GeneralRecyclerAdapter<T> extends RecyclerView.Adapter<GeneralRecyclerViewHolder> {

    protected Context ct;
    private int itemLayoutId;
    private List<T> itemDatas;
    private int headerLayoutId;   // -1 means "no header"
    private HashMap<String, Object> headerData;
    private int footerLayoutId;   // -1 means "no footer"
    private HashMap<String, Object> footerData;
    private LayoutInflater mLayoutInflater;
    protected int mHeaderCount; // number of header views (0 or 1)
    protected int mFooterCount; // number of footer views (0 or 1)

    public enum ITEM_TYPE {
        ITEM_TYPE_HEADER,
        ITEM_TYPE_CONTENT,
        ITEM_TYPE_BOTTOM
    }

    /**
     * @param headerLayoutId layout id for the header, or -1 for none
     * @param footerLayoutId layout id for the footer, or -1 for none
     */
    public GeneralRecyclerAdapter (Context ct, int itemLayoutId, List<T> itemDatas, int headerLayoutId, HashMap<String, Object> headerData, int footerLayoutId, HashMap<String, Object> footerData) {
        this.ct = ct;
        this.itemLayoutId = itemLayoutId;
        this.itemDatas = itemDatas;
        this.headerLayoutId = headerLayoutId;
        this.headerData = headerData;
        this.footerLayoutId = footerLayoutId;
        this.footerData = footerData;
        mLayoutInflater = LayoutInflater.from (ct);
    }

    /** Content-only adapter: no header, no footer. */
    public GeneralRecyclerAdapter (Context ct, int itemLayoutId, List<T> itemDatas) {
        this (ct, itemLayoutId, itemDatas, -1, null, -1, null);
    }

    @Override
    public GeneralRecyclerViewHolder onCreateViewHolder (ViewGroup parent, int viewType) {
        if (viewType == ITEM_TYPE.ITEM_TYPE_CONTENT.ordinal ()) {
            return new GeneralRecyclerViewHolder (mLayoutInflater.inflate (itemLayoutId, parent, false));
        } else if (viewType == ITEM_TYPE.ITEM_TYPE_HEADER.ordinal ()) {
            return new GeneralRecyclerViewHolder (mLayoutInflater.inflate (headerLayoutId, parent, false));
        } else if (viewType == ITEM_TYPE.ITEM_TYPE_BOTTOM.ordinal ()) {
            return new GeneralRecyclerViewHolder (mLayoutInflater.inflate (footerLayoutId, parent, false));
        }
        return null;
    }

    @Override
    public void onBindViewHolder (GeneralRecyclerViewHolder holder, int position) {
        if (holder != null) {
            if (getItemViewType (position) == ITEM_TYPE.ITEM_TYPE_CONTENT.ordinal ()) {
                // Content positions are shifted down by the header count.
                T itemData = itemDatas.get (position - getHeadItemCount ());
                onBindChildViewData (holder, itemData, position);
            } else if (getItemViewType (position) == ITEM_TYPE.ITEM_TYPE_BOTTOM.ordinal ()) {
                onBindFootViewData (holder, footerData);
            } else if (getItemViewType (position) == ITEM_TYPE.ITEM_TYPE_HEADER.ordinal ()) {
                onBindHeadViewData (holder, headerData);
            }
        }
    }

    @Override
    public int getItemViewType (int position) {
        int dataItemCount = getContentItemCount ();
        if (mHeaderCount != 0 && position < mHeaderCount) { // header view
            return ITEM_TYPE.ITEM_TYPE_HEADER.ordinal ();
        } else if (mFooterCount != 0 && position >= (mHeaderCount + dataItemCount)) { // footer view
            return ITEM_TYPE.ITEM_TYPE_BOTTOM.ordinal ();
        } else {
            return ITEM_TYPE.ITEM_TYPE_CONTENT.ordinal ();
        }
    }

    @Override
    public int getItemCount () {
        return getHeadItemCount () + getContentItemCount () + getFootItemCount ();
    }

    public void notifyHeaderDataChanged () {
        if (mHeaderCount > 0) {
            // The header occupies position 0 (see getItemViewType), not 1.
            notifyItemChanged (0);
        }
    }

    public void notifyFooterDataChanged () {
        if (mFooterCount > 0) {
            notifyItemChanged (getHeadItemCount () + getContentItemCount ());
        }
    }

    /** Number of content (non-header/footer) items. */
    public int getContentItemCount () {
        return itemDatas.size ();
    }

    public int getHeadItemCount () {
        if (headerLayoutId == -1) {
            mHeaderCount = 0;
        } else {
            mHeaderCount = 1;
        }
        return mHeaderCount;
    }

    public int getFootItemCount () {
        if (footerLayoutId == -1) {
            mFooterCount = 0;
        } else {
            mFooterCount = 1;
        }
        // (Removed a stray "mFooterCount = 1;" that unconditionally forced a
        // footer even when footerLayoutId == -1, which made
        // onCreateViewHolder inflate an invalid layout id.)
        return mFooterCount;
    }

    public abstract void onBindChildViewData (GeneralRecyclerViewHolder holder, T itemData, int position);

    public abstract void onBindHeadViewData (GeneralRecyclerViewHolder headViewHolder, HashMap<String, Object> headerData);

    public abstract void onBindFootViewData (GeneralRecyclerViewHolder footViewHolder, HashMap<String, Object> footerData);
}
<file_sep>package net.zsygfddsd.qujing.modules.common;
import android.content.Context;
import net.zsygfddsd.qujing.common.helpers.dagger.ActivityScoped;
import net.zsygfddsd.qujing.data.DataSource;
import net.zsygfddsd.qujing.modules.welfarelist.WelfareListContract;
import net.zsygfddsd.qujing.modules.welfarelist.WelfareListPresenter;
import dagger.Module;
import dagger.Provides;
/**
 * Created by mac on 2016/10/14.
 *
 * Dagger module providing screen presenters, scoped to the Activity.
 */
@Module
public class PresenterModule {
    @ActivityScoped
    @Provides
    WelfareListPresenter provideWelfareListPresenter(Context context, WelfareListContract.View mView, DataSource.WelfareDataSource mRepository) {
        return new WelfareListPresenter(context, mView, mRepository);
    }
}
<file_sep>package net.zsygfddsd.qujing.modules.welfarelist;
import android.content.Context;
import net.zsygfddsd.qujing.base.common.ComRespInfo;
import net.zsygfddsd.qujing.base.module.network_recyclerview.BasePagePresenter;
import net.zsygfddsd.qujing.data.DataSource;
import net.zsygfddsd.qujing.data.bean.Welfare;
import java.util.List;
import rx.Observable;
/**
 * Created by mac on 16/7/24.
 *
 * Presenter for the welfare list screen: plugs the welfare repository into
 * the generic paging presenter (BasePagePresenter).
 */
public class WelfareListPresenter extends BasePagePresenter<List<Welfare>, Welfare> implements WelfareListContract.Presenter {
    private Context _context;
    private WelfareListContract.View _view;
    private DataSource.WelfareDataSource _repository;

    // @Inject
    public WelfareListPresenter(Context context, WelfareListContract.View mView, DataSource.WelfareDataSource mRepository) {
        super(context, mView);
        _context = context;
        _view = mView;
        _repository = mRepository;
        // Two-way binding: give the view a back-reference to its presenter.
        _view.setPresenter(this);
    }

    /**
     * NOTE(review): always reports another page, so client-side paging never
     * terminates — confirm the API really has no end-of-data signal.
     */
    @Override
    public boolean getIsHasNextFromResponse(List<Welfare> result) {
        return true;
    }

    /** The response payload is itself the item list; no unwrapping needed. */
    @Override
    public List<Welfare> getListFromResponse(List<Welfare> result) {
        return result;
    }

    /** Requests one page of welfare items from the repository. */
    @Override
    public Observable<ComRespInfo<List<Welfare>>> getRequestObservable(int page, int pageSize) {
        return _repository.getWelfareList("福利", pageSize + "", page + "");
    }
}
<file_sep>package net.zsygfddsd.qujing.data.repository;
import com.zsygfddsd.spacestation.data.bean.ComRespInfo;
import net.zsygfddsd.qujing.common.helpers.ResponseTransformer;
import net.zsygfddsd.qujing.data.DataSource;
import net.zsygfddsd.qujing.data.bean.Welfare;
import net.zsygfddsd.qujing.data.http.HttpLoader;
import java.util.List;
import rx.Observable;
/**
 * Created by mac on 2016/10/11.
 *
 * Concrete welfare data source backed by the remote HTTP API.
 */
public final class WelfareRepository implements DataSource.WelfareDataSource {

    // @Inject
    public WelfareRepository() {
    }

    /**
     * Fetches one page of the welfare list from the network and applies the
     * shared response transformation.
     */
    @Override
    public Observable<ComRespInfo<List<Welfare>>> getWelfareList(String type, String pageSize, String page) {
        return HttpLoader.getInstance().welfareHttp().getWelfareList(type, pageSize, page)
                .compose(new ResponseTransformer<List<Welfare>>());
    }
}
<file_sep>package com.zsygfddsd.spacestation.common.helpers.http;
import android.content.Context;
import com.trello.rxlifecycle.FragmentEvent;
import com.trello.rxlifecycle.components.support.RxFragment;
import com.zsygfddsd.spacestation.common.helpers.http.transformer.SchedulerTransformer;
import com.zsygfddsd.spacestation.data.bean.ComRespInfo;
import rx.Observable;
/**
 * Created by mac on 16/7/26.
 *
 * Builds network Observables pre-composed with the shared scheduler setup
 * and bound to the fragment lifecycle (auto-unsubscribe at DESTROY).
 */
public class ObservableFactory {
    // NOTE(review): the context parameter is currently unused — confirm
    // whether it can be dropped or is reserved for future use.
    public static <T> Observable<ComRespInfo<T>> createNetObservable(Context context, Observable<ComRespInfo<T>> observable, RxFragment rxFragment) {
        return observable
                .compose(new SchedulerTransformer<T>())
                .compose(rxFragment.<ComRespInfo<T>>bindUntilEvent(FragmentEvent.DESTROY));
    }
}
<file_sep>package net.zsygfddsd.qujing.modules.WelfareDetail;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v7.app.AppCompatActivity;
import android.widget.TextView;
import net.zsygfddsd.qujing.R;
import butterknife.BindView;
import butterknife.ButterKnife;
import butterknife.OnClick;
/**
 * Created by mac on 16/7/20.
 *
 * Detail screen for a single welfare entry.
 */
public class WelfareDetailActivity extends AppCompatActivity {
    @BindView(R.id.textview)
    TextView textview;

    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // NOTE(review): this detail activity inflates R.layout.activity_main —
        // confirm it should not use a dedicated detail layout instead.
        setContentView(R.layout.activity_main);
        ButterKnife.bind(this);
    }

    @OnClick(R.id.textview)
    public void onClick() {
        // Intentionally empty for now.
    }
}
<file_sep>package net.zsygfddsd.qujing.components.httpLoader.transformer;
import android.content.Context;
import net.zsygfddsd.qujing.bean.ComRespInfo;
import rx.Observable;
import rx.functions.Func1;
/**
 * Created by mac on 16/7/26.
 *
 * Pre-screens every response for its error flag before it reaches the
 * subscriber; the response itself is passed through unchanged.
 */
public class ErrorCheckerTransformer<T> implements Observable.Transformer<ComRespInfo<T>, ComRespInfo<T>> {

    private Context context;

    public ErrorCheckerTransformer(Context context) {
        this.context = context;
    }

    @Override
    public Observable<ComRespInfo<T>> call(Observable<ComRespInfo<T>> upstream) {
        return upstream.map(new Func1<ComRespInfo<T>, ComRespInfo<T>>() {
            @Override
            public ComRespInfo<T> call(ComRespInfo<T> response) {
                if (response.isError()) {
                    // TODO (16/7/26): central error handling goes here.
                }
                return response;
            }
        });
    }
}
<file_sep>apply plugin: 'com.android.application'
// android-apt wires annotation processors (Dagger, ButterKnife, ...) into the build.
apply plugin: 'com.neenbedankt.android-apt'

android {
    // SDK/build-tool versions are centralised in the root project's ext.android block.
    compileSdkVersion rootProject.ext.android.compileSdkVersion
    buildToolsVersion rootProject.ext.android.buildToolsVersion
    defaultConfig {
        applicationId "net.zsygfddsd.qujing"
        minSdkVersion rootProject.ext.android.minSdkVersion
        targetSdkVersion rootProject.ext.android.targetSdkVersion
        versionCode 1
        versionName "1.0"
    }
    buildTypes {
        release {
            minifyEnabled false
            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
        }
    }
    sourceSets {
        main {
            // Layout resources are split per kind (activity/fragment/view/cell)
            // for easier navigation.
            res.srcDirs = [
                    'src/main/res/layouts/activity',
                    'src/main/res/layouts/fragment',
                    'src/main/res/layouts/view',
                    'src/main/res/layouts/cell',
                    'src/main/res/layouts',
                    'src/main/res'
            ]
        }
    }
}

dependencies {
    compile fileTree(include: ['*.jar'], dir: 'libs')
    testCompile 'junit:junit:4.12'
    compile rootProject.ext.dependencies["appcompat-v7"]
    //dagger 2
    compile rootProject.ext.dependencies["dagger2"]
    apt rootProject.ext.dependencies["dagger-compiler"]
    //butterknife
    compile rootProject.ext.dependencies["butterknife"]
    apt rootProject.ext.dependencies["butterknife-compiler"]
    //recyclerview
    compile rootProject.ext.dependencies["recyclerview-v7"]
    //picasso
    compile rootProject.ext.dependencies["picasso"]
    //Rx
    compile rootProject.ext.dependencies["retrofit2"]
    compile rootProject.ext.dependencies["retrofit2-converter-gson"]
    compile rootProject.ext.dependencies["rxandroid"]
    compile rootProject.ext.dependencies["retrofit2-adapter-rxjava"]
    compile rootProject.ext.dependencies["rxlifecycle"]
    compile rootProject.ext.dependencies["rxlifecycle-components"]
    compile rootProject.ext.dependencies["okhttp3-logging-interceptor"]
    //eventbus
    compile rootProject.ext.dependencies["eventbus3"]
    //permissionsDispatcher
    compile rootProject.ext.dependencies["permissionsdispatcher"]
    apt rootProject.ext.dependencies["permissionsdispatcher-processor"]
    //leakcanary
    debugCompile rootProject.ext.dependencies["leakcanary"]
    releaseCompile rootProject.ext.dependencies["leakcanary-no-op"]
    testCompile rootProject.ext.dependencies["leakcanary-no-op"]
    //Bugly
    // compile 'com.tencent.bugly:crashreport:2.2.2'
    // compile 'com.tencent.bugly:nativecrashreport:3.0'
    // compile 'com.android.support.constraint:constraint-layout:1.0.0-alpha8'
    compile project(':spacestation')
}
<file_sep>package net.zsygfddsd.qujing.base.module.network_recyclerview;
import android.content.Context;
import android.os.Bundle;
import android.support.annotation.CallSuper;
import android.support.annotation.Nullable;
import android.support.v4.widget.SwipeRefreshLayout;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.StaggeredGridLayoutManager;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import net.zsygfddsd.qujing.R;
import net.zsygfddsd.qujing.base.adapter.GeneralRecyclerViewHolder;
import net.zsygfddsd.qujing.base.adapter.multirecycler.ItemEntityList;
import net.zsygfddsd.qujing.base.adapter.multirecycler.MultiRecyclerAdapter;
import net.zsygfddsd.qujing.base.adapter.multirecycler.OnBind;
import net.zsygfddsd.qujing.base.module.network.BaseNetFragment;
import net.zsygfddsd.qujing.common.widgets.DividerGridItemDecoration;
/**
 * Created by mac on 15/12/19.
 *
 * Base fragment owning a SwipeRefreshLayout + RecyclerView pair; wires
 * pull-to-refresh and endless scrolling ("load more") to its presenter.
 *
 * T: the IBaseRecyclerViewPresenter driving this view.
 */
public abstract class BaseRecyclerViewNetFragment<T extends BasePageContract.IBaseRecyclerViewPresenter> extends BaseNetFragment<T> implements BasePageContract.IBaseRecyclerView<T>, SwipeRefreshLayout.OnRefreshListener {

    protected static final String ITEM_LAYOUT_ID = "itemLayoutId";

    protected SwipeRefreshLayout refreshView;
    protected RecyclerView recyclerView;
    protected MultiRecyclerAdapter adapter;
    private T mPresenter;
    protected ItemEntityList itemEntityList = new ItemEntityList();
    // Item layout id; defaults to a single-TextView layout.
    protected int itemLayoutId = android.R.layout.simple_list_item_1;
    // "Load more" footer layout id; defaults to a single-TextView layout.
    protected int bottomItemLayoutId = android.R.layout.simple_list_item_1;
    protected boolean hasNextPage = true; // whether another page of data exists
    private boolean canLoadMore = true;
    // Intended: start loading this many items before the bottom. Currently
    // unused — the scroll listener triggers on the very last item.
    private int loadOffset = 2;
    private LinearLayoutManager layoutManager;
    private RecyclerView.ItemDecoration itemDecoration = null;

    /** Packs the item layout id into an arguments Bundle. */
    protected Bundle data2Bundle(int itemLayoutId) {
        Bundle bundle = new Bundle();
        bundle.putInt(ITEM_LAYOUT_ID, itemLayoutId);
        return bundle;
    }

    /** Call before the fragment is attached to supply the item layout id. */
    protected void init(int itemLayoutId) {
        Bundle bundle = data2Bundle(itemLayoutId);
        setArguments(bundle);
    }

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        Bundle args = getArguments();
        if (args != null) {
            this.itemLayoutId = args.getInt(ITEM_LAYOUT_ID) == -1 ? android.R.layout.simple_list_item_1 : args.getInt(ITEM_LAYOUT_ID);
        }
        this.bottomItemLayoutId = getBottomViewLayoutId();
    }

    @Nullable
    @Override
    public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
        return initView(inflater, container, savedInstanceState);
    }

    @Override
    public void onActivityCreated(@Nullable Bundle savedInstanceState) {
        super.onActivityCreated(savedInstanceState);
        initData(savedInstanceState);
    }

    private View initView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
        // Inflate against the real parent with attachToRoot = false so the
        // layout root's LayoutParams are honoured; inflating with a null
        // parent silently discards them.
        View view = inflater.inflate(R.layout.frag_com_recyclerview, container, false);
        refreshView = (SwipeRefreshLayout) view.findViewById(R.id.com_refreshLayout);
        recyclerView = (RecyclerView) view.findViewById(R.id.com_recyclerView);
        layoutManager = new LinearLayoutManager(ct);
        recyclerView.setLayoutManager(layoutManager);
        // Use the subclass-supplied divider if any; otherwise a 1dp light-grey
        // bottom-only divider that skips the last item.
        RecyclerView.ItemDecoration divider = getItemDecoration(ct);
        if (divider == null) {
            itemDecoration = new DividerGridItemDecoration(ct, 1, 0xffEBEBF1) {
                @Override
                public boolean[] getItemSidesIsHaveOffsets(int itemPosition) {
                    boolean[] temp = {false, false, false, true};
                    return temp;
                }
            }.configLastItemShowDivider(false);
        } else {
            itemDecoration = divider;
        }
        recyclerView.addItemDecoration(itemDecoration);
        initRecyclerView(recyclerView);
        refreshView.setOnRefreshListener(this);
        return view;
    }

    /** Subclasses may return their own divider; null selects the default. */
    protected RecyclerView.ItemDecoration getItemDecoration(Context ct) {
        return null;
    }

    /** Enables/disables the pull-to-refresh gesture. */
    public void setRefreshEnable(boolean enable) {
        refreshView.setEnabled(enable);
    }

    private void initData(Bundle savedInstanceState) {
        // Register binders: one for content items, one for the footer that
        // shows the loading / end-of-list state.
        itemEntityList
                .addOnBind(itemLayoutId, new OnBind() {
                    @Override
                    public void onBindChildViewData(GeneralRecyclerViewHolder holder, Object itemData, int position) {
                        bindChildViewsData(holder, itemData, position);
                    }
                })
                .addOnBind(bottomItemLayoutId, new OnBind() {
                    @Override
                    public void onBindChildViewData(GeneralRecyclerViewHolder holder, Object itemData, int position) {
                        if (hasNextPage) {
                            holder.setText(R.id.item_bottom_text, "正在加载中...");
                        } else {
                            holder.setText(R.id.item_bottom_text, "您已滚动到最底部了");
                        }
                    }
                });
        adapter = new MultiRecyclerAdapter(ct, itemEntityList);
        recyclerView.setAdapter(adapter);
        canLoadMore = getCanLoadMore();
        onInitData();
        if (canLoadMore) {
            final RecyclerView.LayoutManager layoutManager = recyclerView.getLayoutManager();
            final int[] lastVisibleItemPos = new int[1];
            recyclerView.addOnScrollListener(new RecyclerView.OnScrollListener() {
                        @Override
                        public void onScrollStateChanged(RecyclerView recyclerView, int newState) {
                            super.onScrollStateChanged(recyclerView, newState);
                        }

                        @Override
                        public void onScrolled(RecyclerView recyclerView, int dx, int dy) {
                            super.onScrolled(recyclerView, dx, dy);
                            if (!(layoutManager instanceof StaggeredGridLayoutManager)) {
                                lastVisibleItemPos[0] = ((LinearLayoutManager) layoutManager).findLastCompletelyVisibleItemPosition();
                            }
                            int totalCount = layoutManager.getItemCount();
                            // Reached the last (footer) position: fetch the
                            // next page if one exists.
                            if (lastVisibleItemPos[0] == totalCount - 1) {
                                if (isHasNextPage()) {
                                    onLoadMore();
                                }
                            }
                        }
                    }
            );
        }
    }

    @CallSuper
    @Override
    public void setPresenter(T presenter) {
        super.setPresenter(presenter);
        mPresenter = presenter;
    }

    /**
     * Whether "load more" is enabled. Defaults to true; override and return
     * false to disable endless scrolling.
     */
    protected boolean getCanLoadMore() {
        return true;
    }

    @Override
    public void onRefresh() {
        onLoadRefresh();
    }

    @Override
    public void showRefreshIndication() {
        if (!refreshView.isRefreshing()) {
            refreshView.setRefreshing(true);
        }
    }

    @Override
    public void hideRefreshInfication() {
        if (refreshView.isRefreshing()) {
            refreshView.setRefreshing(false);
        }
    }

    /**
     * Override to customise the RecyclerView's properties.
     *
     * @param mRecyclerView this fragment's default RecyclerView
     */
    public void initRecyclerView(RecyclerView mRecyclerView) {
    }

    /** Whether another page of data exists. */
    public boolean isHasNextPage() {
        return hasNextPage;
    }

    /** Sets whether another page of data exists. */
    @Override
    public void setHasNextPage(boolean hasNext) {
        this.hasNextPage = hasNext;
    }

    @Override
    public void updateData() {
        adapter.notifyDataSetChanged();
    }

    @Override
    public ItemEntityList getItemEntityList() {
        return itemEntityList;
    }

    @Override
    public int getItemLayoutId() {
        return itemLayoutId;
    }

    @Override
    public int getBottomViewLayoutId() {
        return R.layout.item_recycler_bottom_view;
    }

    /** Loads the first page of data. */
    public void onInitData() {
        mPresenter.onInitData();
    }

    /** Loads the next page. */
    public void onLoadMore() {
        mPresenter.onLoadMore();
    }

    /** Pull-to-refresh reload. */
    public void onLoadRefresh() {
        mPresenter.onLoadRefresh();
    }

    /**
     * Binds the item's data to the child views of the item layout.
     *
     * @param holder   item holder; use getChildView(viewId) to reach children
     * @param itemData data assigned to this item (HashMap, model object, ...)
     * @param position the item's position
     */
    public abstract void bindChildViewsData(GeneralRecyclerViewHolder holder, Object itemData, int position);
}
<file_sep>package net.zsygfddsd.qujing.base.module.network_recyclerview;
import android.content.Context;
import net.zsygfddsd.qujing.base.module.network.BaseNetPresenter;
import net.zsygfddsd.qujing.bean.ComRespInfo;
import net.zsygfddsd.qujing.components.httpLoader.ObservableFactory;
import net.zsygfddsd.qujing.components.httpLoader.Subscriber.NetCheckerSubscriber;
import net.zsygfddsd.qujing.components.httpLoader.transformer.EmitBeforeAndAfterTransformer;
import java.util.ArrayList;
import java.util.List;
import rx.Observable;
/**
* Created by mac on 16/6/11.
* DATA:表示ComRespInfo<DATA> 中的DATA的bean
* D:表示每一个item的bean
*/
public abstract class BasePagePresenter<DATA, D> extends BaseNetPresenter implements BasePageContract.IBaseRecyclerViewPresenter {
private Context context;
private BasePageContract.IBaseRecyclerView mView;
private int page = 1;
private int pageSize = PageConfig.PageSize;
private volatile boolean isClear = false;//是否清空列表所有数据
private volatile List<D> items = new ArrayList<>();// list中当前最新页的数据
private DefaultLoadingDialogShowConfig defaultLoadingShowConfig;
public BasePagePresenter(Context context, BasePageContract.IBaseRecyclerView mView) {
super(mView);
this.context = context;
this.mView = mView;
}
@Override
public void start() {
super.start();
defaultLoadingShowConfig = getDefaultLoadingShowConfig();
}
public NetCheckerSubscriber getDefaultSubscriber() {
return new NetCheckerSubscriber<ComRespInfo<DATA>>(context) {
@Override
public void onCompleted() {
}
@Override
public void onError(Throwable e) {
}
@Override
public void onNext(ComRespInfo<DATA> dataComRespInfo) {
if (!dataComRespInfo.isError()) {
boolean hasnext = getIsHasNextFromResponse(dataComRespInfo.getResults());
mView.setHasNextPage(hasnext);
if (hasnext) {
mView.showLoadMoreIndication();
} else {
mView.showScrolledToBottom();
}
items.clear();
items = getListFromResponse(dataComRespInfo.getResults());
if (isClear) {
mView.getItemDatas().clear();
isClear = false;
}
mView.getItemDatas().addAll(items);
mView.updateData();
if (mView.getItemDatas().size() == 0) {
mView.showEmptyPage();
}
} else {
mView.showLoadingError();
}
}
};
}
public DefaultLoadingDialogShowConfig getDefaultLoadingShowConfig() {
return new DefaultLoadingDialogShowConfig(false, false, false);
}
public abstract boolean getIsHasNextFromResponse(DATA result);
public abstract List<D> getListFromResponse(DATA result);
public abstract Observable<ComRespInfo<DATA>> getRequestObservable(int page, int pageSize);
public void loadData(Observable<ComRespInfo<DATA>> observable, boolean canShowLoading, boolean canLoadCelable) {
NetCheckerSubscriber subscriber = getDefaultSubscriber();
ObservableFactory.createNetObservable(context, observable, mView.getRxView())
.compose(new EmitBeforeAndAfterTransformer<DATA>(mView, subscriber, canShowLoading, canLoadCelable))
.subscribe(subscriber);
}
@Override
public void onInitData() {
page = 1;
isClear = true;
loadData(getRequestObservable(page, pageSize), defaultLoadingShowConfig.isInitShow, false);
}
@Override
public void onLoadMore() {
page++;
isClear = false;
loadData(getRequestObservable(page, pageSize), defaultLoadingShowConfig.isLoadMoreShow, false);
}
@Override
public void onLoadRefresh() {
page = 1;
isClear = true;
loadData(getRequestObservable(page, pageSize), defaultLoadingShowConfig.isRefreshShow, false);
}
class DefaultLoadingDialogShowConfig {
boolean isInitShow = false;
boolean isRefreshShow = false;
boolean isLoadMoreShow = false;
public DefaultLoadingDialogShowConfig() {
}
public DefaultLoadingDialogShowConfig(boolean isInitShow, boolean isRefreshShow, boolean isLoadMoreShow) {
this.isInitShow = isInitShow;
this.isRefreshShow = isRefreshShow;
this.isLoadMoreShow = isLoadMoreShow;
}
}
}
<file_sep>package net.zsygfddsd.qujing.base.fragment;
import android.content.Context;
import android.os.Bundle;
import android.os.Handler;
import android.support.v4.widget.SwipeRefreshLayout;
import android.support.v7.widget.GridLayoutManager;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.StaggeredGridLayoutManager;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.LinearLayout;
import android.widget.TextView;
import net.zsygfddsd.qujing.R;
import net.zsygfddsd.qujing.base.adapter.GeneralRecyclerAdapter;
import net.zsygfddsd.qujing.base.adapter.GeneralRecyclerViewHolder;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
/**
* Created by mac on 15/12/19.
*/
public abstract class RecyclerViewFragment<T> extends BaseFragment implements SwipeRefreshLayout.OnRefreshListener {
protected static String Tag_footer_text = "footertext";
protected static final String ITEM_LAYOUT_ID = "itemLayoutId";
protected SwipeRefreshLayout refreshView;
protected RecyclerView recyclerView;
protected GeneralRecyclerAdapter adapter;
protected List<T> itemDatas = new ArrayList<>();
protected int itemLayoutId = android.R.layout.simple_list_item_1;// item的布局id,默认是只有一个textview
protected int headViewLayoutId = -1;//header布局id
protected HashMap<String, Object> headerData = new HashMap<>();//headerview的数据
protected HashMap<String, Object> footerData = new HashMap<>();//footerview的数据
protected boolean hasNextPage = true;//是否还有下一页数据
private boolean canLoadMore = true;
private int loadOffset = 2;//设置滚动到倒数第几个时开始加载下一页,默认是倒数第2个
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
}
@Override
public View initView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
View view = inflater.inflate(R.layout.frag_com_recyclerview, null);
refreshView = (SwipeRefreshLayout) view.findViewById(R.id.com_refreshLayout);
recyclerView = (RecyclerView) view.findViewById(R.id.com_recyclerView);
// refreshView.setColorSchemeResources();
// recyclerView.setHasFixedSize(true);//如果item大小不会因为内容变化而变化,则设为true,提高绘制效率
recyclerView.setLayoutManager(new LinearLayoutManager(ct, LinearLayout.VERTICAL, false));
initRecyclerView(recyclerView);
refreshView.setOnRefreshListener(this);
return view;
}
// public void setLoadOffset(int mLoadOffset) {
// this.loadOffset = mLoadOffset;
// }
@Override
public void initData(Bundle savedInstanceState) {
adapter = initAdapter(ct, itemDatas, itemLayoutId);
if (adapter == null) {
iHeaderViewSetting = initHeadViewSetting();
if (iHeaderViewSetting != null) {
headViewLayoutId = iHeaderViewSetting.getHeadViewLayoutId();
headerData = iHeaderViewSetting.getHeadViewData();
} else {
headViewLayoutId = -1;
headerData = null;
}
footerData.put(Tag_footer_text, "正在加载中...");
adapter = new RecyclerViewAdapter(ct, itemLayoutId, itemDatas, headViewLayoutId, headerData, R.layout.item_recycler_bottom_view, footerData);
}
recyclerView.setAdapter(adapter);
onInitData();
canLoadMore = getCanLoadMore();
if (canLoadMore) {
final RecyclerView.LayoutManager layoutManager = recyclerView.getLayoutManager();
final int[] lastVisibleItemPos = new int[1];
recyclerView.addOnScrollListener(new RecyclerView.OnScrollListener() {
@Override
public void onScrollStateChanged(RecyclerView recyclerView, int newState) {
super.onScrollStateChanged(recyclerView, newState);
Log.e("scroll", "Scroll State--------" + newState);
// if (!(layoutManager instanceof StaggeredGridLayoutManager)) {
// lastVisibleItemPos[0] = ((LinearLayoutManager) layoutManager).findLastCompletelyVisibleItemPosition();
// }
// int totalCount = layoutManager.getItemCount();
// if (newState == RecyclerView.SCROLL_STATE_IDLE && lastVisibleItemPos[0] == totalCount - 1) {
// if (isHasNextPage()) {
// footerData.put(Tag_footer_text, "正在加载中...");
// adapter.notifyFooterDataChanged();
// onLoadMore();
// } else {
// footerData.put(Tag_footer_text, "您已滚动到最底部了");
// adapter.notifyFooterDataChanged();
// }
// }
}
@Override
public void onScrolled(RecyclerView recyclerView, int dx, int dy) {
super.onScrolled(recyclerView, dx, dy);
Log.e("scroll", "Scroll vertically--------" + dy);
if (!(layoutManager instanceof StaggeredGridLayoutManager)) {
lastVisibleItemPos[0] = ((LinearLayoutManager) layoutManager).findLastCompletelyVisibleItemPosition();
}
int totalCount = layoutManager.getItemCount();
if (lastVisibleItemPos[0] == totalCount - 1) {
if (isHasNextPage()) {
// footerData.put(Tag_footer_text, "正在加载中...");
// adapter.notifyFooterDataChanged();
// onLoadMore();
updateFooterHandler.postDelayed(new Runnable() {
@Override
public void run() {
footerData.put(Tag_footer_text, "正在加载中...");
adapter.notifyFooterDataChanged();
onLoadMore();
}
},0);
} else {
updateFooterHandler.postDelayed(new Runnable() {
@Override
public void run() {
footerData.put(Tag_footer_text, "您已滚动到最底部了");
adapter.notifyFooterDataChanged();
}
},0);
}
}
}
}
);
} else {
updateFooterHandler.postDelayed(new Runnable() {
@Override
public void run() {
footerData.put(Tag_footer_text, "您已滚动到最底部了");
adapter.notifyFooterDataChanged();
}
},0);
}
}
Handler updateFooterHandler = new Handler();
protected void changeFooterText(boolean isToBottom){
if (isToBottom) {
footerData.put(Tag_footer_text, "您已滚动到最底部了");
adapter.notifyFooterDataChanged();
}else{
footerData.put(Tag_footer_text, "正在加载中...");
adapter.notifyFooterDataChanged();
}
} /*******
* headerview -start
**************/
public interface IHeaderViewSetting {
int getHeadViewLayoutId();
HashMap<String, Object> getHeadViewData();
void onBindHeadViewData(GeneralRecyclerViewHolder headViewHolder, HashMap<String, Object> headerData);
}
private IHeaderViewSetting iHeaderViewSetting = null;
/**
* 若要添加一个headerview则重写此方法
*/
public IHeaderViewSetting initHeadViewSetting() {
return null;
}
/*******
* headerview -end
**************/
/*******
* footerview -start
**************/
// TODO: 16/3/3
/*******
* footerview -end
**************/
/**
* 设置是否可以加载更多,默认true可以加载,
* 重写它改false,没有加载更多功能
*
* @return
*/
protected boolean getCanLoadMore() {
return true;
}
@Override
public void onRefresh() {
onLoadRefresh();
if (canLoadMore) {
footerData.put(Tag_footer_text, "正在加载中...");
adapter.notifyFooterDataChanged();
} else {
footerData.put(Tag_footer_text, "您已滚动到最底部了");
adapter.notifyFooterDataChanged();
}
}
/**
* 若想改变RecyclerView的某些属性,只需重写此方法
*
* @param mRecyclerView 该fragment中默认的RecyclerView
*/
public void initRecyclerView(RecyclerView mRecyclerView) {
}
/**
* 得到当前列表中数据的数目
*
* @return
*/
public int getItemDatasCount() {
return itemDatas.size();
}
/**
* 若想改变listview的adapter,只需重写此方法
*
* @param ct
* @param data
* @param itemLayoutId
* @return BaseAdapter
*/
public GeneralRecyclerAdapter initAdapter(Context ct, List<T> data, int itemLayoutId) {
return null;
}
/**
* 模拟手下拉,自动刷新
* 要在oncreate方法中调用必须开一个子线程
*/
public void performAutoRefresh() {
refreshView.setRefreshing(true);
onRefresh();
}
/*
* 列表上的操作-》更新界面列表
*/
public void updateData() {
adapter.notifyDataSetChanged();
}
public void updateData(List<T> datas) {
itemDatas = datas;
adapter.notifyDataSetChanged();
}
public void updateData(int position) {
adapter.notifyItemChanged(position);
}
/**
* 是否还有下一页
*
* @return
*/
public boolean isHasNextPage() {
return hasNextPage;
}
/**
* 设置是否还有下一页
*
* @param hasNextPage
*/
public void setHasNextPage(boolean hasNextPage) {
this.hasNextPage = hasNextPage;
}
public void completeRefreshing() {
refreshView.setRefreshing(false);
}
public boolean isRefreshing(){
return refreshView.isRefreshing();
}
private class RecyclerViewAdapter extends GeneralRecyclerAdapter<T> {
public RecyclerViewAdapter(Context ct, int itemLayoutId, List<T> itemDatas, int headerLayoutId, HashMap<String, Object> headerData, int footerLayoutId, HashMap<String, Object> footerData) {
super(ct, itemLayoutId, itemDatas, headerLayoutId, headerData, footerLayoutId, footerData);
// this.imageOptions = initImageOptions();
}
// public ImageOptions initImageOptions() {
// return new ImageOptions.Builder()
// // .setSize(DensityUtil.dip2px(120), DensityUtil.dip2px(120))
// // .setRadius(DensityUtil.dip2px(5))
// .setCrop(true)
// // 加载中或错误图片的ScaleType
// //.setPlaceholderScaleType(ImageView.ScaleType.MATRIX)
// .setImageScaleType(ImageView.ScaleType.CENTER_CROP)
// .setLoadingDrawableId(R.mipmap.ic_launcher)
// .setFailureDrawableId(R.mipmap.ic_launcher)
// .build();
// }
@Override
public void onAttachedToRecyclerView(RecyclerView recyclerView) {
super.onAttachedToRecyclerView(recyclerView);
RecyclerView.LayoutManager manager = recyclerView.getLayoutManager();
if (manager instanceof GridLayoutManager) {
final GridLayoutManager gridManager = ((GridLayoutManager) manager);
gridManager.setSpanSizeLookup(new GridLayoutManager.SpanSizeLookup() {
@Override
public int getSpanSize(int position) {
if (getItemViewType(position) == ITEM_TYPE.ITEM_TYPE_HEADER.ordinal()) {
return gridManager.getSpanCount();
} else if (getItemViewType(position) == ITEM_TYPE.ITEM_TYPE_BOTTOM.ordinal()) {
if ((getContentItemCount() % gridManager.getSpanCount()) == 0) {
return gridManager.getSpanCount();
} else {
return 1;
}
} else {
return 1;
}
}
});
}
}
@Override
public void onBindChildViewData(GeneralRecyclerViewHolder holder, final T itemData, final int position) {
holder.itemView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
OnItemClicked(itemData, position);
}
});
bindChildViewsData(holder, itemData, position/*, imageOptions*/);
}
@Override
public void onBindHeadViewData(GeneralRecyclerViewHolder headViewHolder, HashMap<String, Object> headerData) {
if (iHeaderViewSetting != null) {
iHeaderViewSetting.onBindHeadViewData(headViewHolder, headerData);
}
}
@Override
public void onBindFootViewData(GeneralRecyclerViewHolder footViewHolder, HashMap<String, Object> footerData) {
TextView textview = footViewHolder.getChildView(R.id.item_bottom_text);
if (itemDatas.size() > 13) {
String text = (String) footerData.get(Tag_footer_text);
textview.setText(text);
} else {
textview.setText("");
}
}
}
/**
* 第一页的数据加载
*/
public abstract void onInitData();
/**
* 加载更多
*/
public abstract void onLoadMore();
/**
* 下拉刷新
*/
public abstract void onLoadRefresh();
/**
* 给Item布局的各个控件设置分配好的数据
*
* @param mViewHolder item的holder,利用getChildView(eg:控件id)的方法得到该控件
* @param itemData 封装好的分配给该item的数据,数据一般为Hashmap<K,V>或者Modle等类型
* @param position 当前item的position
* // * @param imageOptions 定义好的xUtils中的图片加载工具的配置
*/
public abstract void bindChildViewsData(GeneralRecyclerViewHolder mViewHolder, T itemData,
int position/*, ImageOptions imageOptions*/);
/*
* item的点击事件
* @param parent
* @param itemDatas
* @param view
* @param position
* @param id
*/
public abstract void OnItemClicked(T itemData, int position);
}
<file_sep>package net.zsygfddsd.qujing.base.module.network_refresh;
import net.zsygfddsd.qujing.base.common.ComRespInfo;
import net.zsygfddsd.qujing.base.module.base.BaseContract;
import net.zsygfddsd.qujing.base.module.network.BaseNetContract;
/**
* Created by mac on 16/6/11.
*/
public class BaseRefreshContract {
public interface IBaseRefreshView<T extends IBaseRefreshPresenter, DATA> extends BaseNetContract.IBaseNetView<T> {
void onBindViewData(ComRespInfo<DATA> dataComRespInfo);
void showRefreshIndication();
void hideRefreshInfication();
}
public interface IBaseRefreshPresenter extends BaseContract.IBasePresenter {
void onRefreshData();
}
}
<file_sep>package net.zsygfddsd.qujing.data;
import com.zsygfddsd.spacestation.data.bean.ComRespInfo;
import net.zsygfddsd.qujing.data.bean.Welfare;
import java.util.List;
import rx.Observable;
/**
* Created by mac on 2016/10/11.
*/
public class DataSource {
public interface WelfareDataSource {
Observable<ComRespInfo<List<Welfare>>> getWelfareList(String type, String pageSize, String page);
}
}
<file_sep>package com.zsygfddsd.spacestation.base.activity;
import android.os.Bundle;
import android.support.annotation.LayoutRes;
import android.support.v7.widget.Toolbar;
import android.view.View;
import android.view.Window;
import android.widget.FrameLayout;
import android.widget.TextView;
import com.zsygfddsd.spacestation.R;
/**
* 使用原生toolbar的兼容
* Created by Clock on 2016/2/3.
*/
public abstract class BaseToolBarActivity extends BaseActivity {
protected Toolbar mToolbar;
protected TextView mToolbarTitle;
protected FrameLayout mContentView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
supportRequestWindowFeature(Window.FEATURE_NO_TITLE);
setContentView(R.layout.yys_activity_main);
mToolbar = (Toolbar) findViewById(R.id.toolbar);
mToolbarTitle = (TextView) findViewById(R.id.toolbar_title);
mContentView = (FrameLayout) findViewById(R.id.contentView);
setSupportActionBar(mToolbar);
getSupportActionBar().setDisplayShowTitleEnabled(false);
mToolbar.setTitleTextColor(0xffffffff);//白色
}
public void setToolBarTitle(String title) {
mToolbarTitle.setText(title);
}
public Toolbar getToolbar() {
return mToolbar;
}
public TextView getToolbarTitleTextView() {
return mToolbarTitle;
}
public void addViewToContent(@LayoutRes int layoutId) {
mContentView.addView(View.inflate(this, layoutId, null));
}
}
<file_sep>package net.zsygfddsd.qujing.common.utils;
import android.app.Activity;
import android.app.ActivityManager;
import android.app.ActivityManager.RunningTaskInfo;
import android.app.Application;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageInfo;
import android.content.pm.PackageManager;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.net.Uri;
import android.telephony.TelephonyManager;
import android.text.TextUtils;
import android.util.Log;
import net.zsygfddsd.qujing.QJapplication;
import java.util.ArrayList;
import java.util.List;
public class DeviceUtils {
private static final String mDataBaseName = "QJ";
private static Application application = QJapplication.getInstance();
private static Context context = application.getBaseContext ();
/**
* 判断能否使用网络
*/
public static boolean isHasNetWork () {
ConnectivityManager cm = (ConnectivityManager) context
.getSystemService (Context.CONNECTIVITY_SERVICE);
NetworkInfo network = cm.getActiveNetworkInfo ();
return network != null && (network.isConnected () || network.isRoaming () || network.isAvailable ());
}
public static String getDeviceId () {
TelephonyManager tm = (TelephonyManager) context
.getSystemService (Context.TELEPHONY_SERVICE);
return tm.getDeviceId ();
}
/**
* 获取程序是否在后台运行
*
* @param mContext
* @return
*/
public static boolean isRunBackground (Context mContext) {
ActivityManager activityManager = (ActivityManager) mContext
.getSystemService (Context.ACTIVITY_SERVICE);
List<RunningTaskInfo> tasksInfo = activityManager.getRunningTasks (1);
if (tasksInfo.size () > 0) {
// 应用程序位于堆栈的顶层
if (mContext.getPackageName ().equals (
tasksInfo.get (0).topActivity.getPackageName ())) {
return false;
}
}
return true;
}
/**
* 判断手机内是否安装某个包的应用
*
* @param packageName 应用包名
* @return 安装则为true,没有安装则为false
*/
public static boolean isInstalled (String packageName) {
PackageManager packageManager = application.getPackageManager ();
List<PackageInfo> pinfo = packageManager.getInstalledPackages (0);
List<String> pName = new ArrayList<> ();// 用于存储所有已安装程序的包名
// 从pinfo中将包名字逐一取出,压入pName list中
if (pinfo != null) {
for (PackageInfo aPinfo : pinfo) {
String pn = aPinfo.packageName;
pName.add (pn);
}
}
return pName.contains (packageName);// 判断pName中是否有目标程序的包名,有TRUE,没有FALSE
}
public static String getDataBaseName () {
return mDataBaseName;
}
public static void callPhone (Activity activity, String phoneNum) {
if (!TextUtils.isEmpty (phoneNum)) {
Intent call = new Intent (Intent.ACTION_DIAL, Uri.parse ("tel:" + phoneNum));
activity.startActivity (call);
} else {
Log.i("DeviceUtils","电话号码为空");
}
}
}
<file_sep>package com.zsygfddsd.spacestation.base.module.network;
import com.zsygfddsd.spacestation.base.module.base.BaseContract;
/**
* Created by mac on 16/6/13.
*/
public class BaseNetPresenter implements BaseContract.IBasePresenter {
private BaseNetContract.IBaseNetView mView;
public BaseNetPresenter(BaseNetContract.IBaseNetView mView) {
this.mView = mView;
}
@Override
public void start() {
}
@Override
public void destroy() {
}
}
<file_sep>ext {
android = [compileSdkVersion: 24,
buildToolsVersion: "23.0.3",
minSdkVersion : 15,
targetSdkVersion : 23,
]
dependencies = [
//common
"support-v4" : 'com.android.support:support-v4:23.1.1',
"appcompat-v7" : 'com.android.support:appcompat-v7:23.1.1',
"design" : 'com.android.support:design:23.1.1',
"butterknife" : 'com.jakewharton:butterknife:8.2.1',
"butterknife-compiler" : 'com.jakewharton:butterknife-compiler:8.2.1',
"dagger2" : 'com.google.dagger:dagger:2.7',
"dagger-compiler" : 'com.google.dagger:dagger-compiler:2.7',
"eventbus3" : 'org.greenrobot:eventbus:3.0.0',
"picasso" : 'com.squareup.picasso:picasso:2.5.2',
//analysis
//leakcanary
"leakcanary" : 'com.squareup.leakcanary:leakcanary-android:1.4-beta2',
"leakcanary-no-op" : 'com.squareup.leakcanary:leakcanary-android-no-op:1.4-beta2',
//permissionsDispatcher
"permissionsdispatcher" : 'com.github.hotchemi:permissionsdispatcher:2.1.3',
"permissionsdispatcher-processor": 'com.github.hotchemi:permissionsdispatcher-processor:2.1.3',
//Rx+Retrofit2+okhttp
"retrofit2" : 'com.squareup.retrofit2:retrofit:2.1.0',
"retrofit2-converter-gson" : 'com.squareup.retrofit2:converter-gson:2.1.0',
"rxandroid" : 'io.reactivex:rxandroid:1.2.1',
"retrofit2-adapter-rxjava" : 'com.squareup.retrofit2:adapter-rxjava:2.1.0',
"rxlifecycle" : 'com.trello:rxlifecycle:0.6.1',
"rxlifecycle-components" : 'com.trello:rxlifecycle-components:0.6.1',
"okhttp3-logging-interceptor" : 'com.squareup.okhttp3:logging-interceptor:3.4.1',
//widget
"cardview-v7" : 'com.android.support:cardview-v7:23.1.1',
"recyclerview-v7" : 'com.android.support:recyclerview-v7:24.2.1',
//utils
]
}<file_sep>package net.zsygfddsd.qujing.components.HttpVolley;
import com.android.volley.VolleyError;
/**
* Created by mac on 16/3/3.
*/
public class VolleyResponse {
//string请求
public interface strReqCallback{
void success(String response);
void error(VolleyError error);
}
//jsonObject请求
//jsonArray请求
//...
}
<file_sep>package net.zsygfddsd.qujing.modules.WelfareList;
import android.graphics.Bitmap;
import android.text.TextUtils;
import android.util.Log;
import android.widget.ImageView;
import android.widget.TextView;
import com.alibaba.fastjson.JSON;
import com.squareup.picasso.Picasso;
import com.squareup.picasso.Transformation;
import net.zsygfddsd.qujing.R;
import net.zsygfddsd.qujing.base.adapter.GeneralRecyclerViewHolder;
import net.zsygfddsd.qujing.base.fragment.BaseRecyclerViewFragment;
import net.zsygfddsd.qujing.bean.PageModel;
import net.zsygfddsd.qujing.bean.Welfare;
import java.util.List;
/**
* Created by mac on 16/5/12.
*/
public class WelfareListFragment extends BaseRecyclerViewFragment<Welfare> {
@Override
public List<Welfare> handleData(PageModel pageModel) {
List<Welfare> welfareList = JSON.parseArray(pageModel.getList(), Welfare.class);
return welfareList;
}
@Override
public void bindChildViewsData(GeneralRecyclerViewHolder mViewHolder, Welfare itemData, int position) {
final ImageView welfareImg = mViewHolder.getChildView(R.id.iv_welfare);
TextView welfareDec = mViewHolder.getChildView(R.id.tv_welfare_dec);
Transformation transformation = new Transformation() {
@Override
public Bitmap transform(Bitmap source) {
int targetWidth = welfareImg.getWidth();
Log.i("welfareImg","source.getHeight()="+source.getHeight()+",source.getWidth()="+source.getWidth()+",targetWidth="+targetWidth);
if(source.getWidth()==0){
return source;
}
//如果图片小于设置的宽度,则返回原图
if(source.getWidth()<targetWidth){
return source;
}else{
//如果图片大小大于等于设置的宽度,则按照设置的宽度比例来缩放
double aspectRatio = (double) source.getHeight() / (double) source.getWidth();
int targetHeight = (int) (targetWidth * aspectRatio);
if (targetHeight != 0 && targetWidth != 0) {
Bitmap result = Bitmap.createScaledBitmap(source, targetWidth, targetHeight, false);
if (result != source) {
// Same bitmap is returned if sizes are the same
source.recycle();
}
return result;
} else {
return source;
}
}
}
@Override
public String key() {
return "transformation" + " desiredWidth";
}
};
if (!TextUtils.isEmpty(itemData.getUrl())) {
Picasso.with(ct).load(itemData.getUrl()).transform(transformation)/*.resize(ScreenUtils.getScreenWidth(ct), DensityUtils.dp2px(ct, 200f))*/.into(welfareImg);
}
welfareDec.setText(itemData.getDesc());
}
@Override
public void OnItemClicked(Welfare itemData, int position) {
}
}
<file_sep>package net.zsygfddsd.qujing.base.module.network_refresh;
import android.content.Context;
import net.zsygfddsd.qujing.base.module.network.BaseNetPresenter;
import net.zsygfddsd.qujing.data.bean.ComRespInfo;
import net.zsygfddsd.qujing.common.helpers.ObservableFactory;
import net.zsygfddsd.qujing.common.helpers.Subscriber.NetCheckerSubscriber;
import rx.Observable;
/**
* Created by mac on 16/6/11.
* DATA:表示ComRespInfo<DATA> 中的DATA的bean
* D:表示每一个item的bean
*/
public abstract class BaseRefreshPresenter<DATA> extends BaseNetPresenter implements BaseRefreshContract.IBaseRefreshPresenter {
private Context context;
private BaseRefreshContract.IBaseRefreshView mView;
public BaseRefreshPresenter(Context context, BaseRefreshContract.IBaseRefreshView mView) {
super(mView);
this.context = context;
this.mView = mView;
}
@Override
public void start() {
super.start();
}
public NetCheckerSubscriber getDefaultSubscriber() {
return new NetCheckerSubscriber<DATA>(context, mView) {
@Override
public void onCompleted() {
mView.hideRefreshInfication();
}
@Override
public void onError(Throwable e) {
mView.showLoadingError();
mView.hideRefreshInfication();
}
@Override
public void onNext(ComRespInfo<DATA> dataComRespInfo) {
super.onNext(dataComRespInfo);
if (!dataComRespInfo.isError()) {
mView.onBindViewData(dataComRespInfo);
} else {
mView.showToast("刷新失败!");
}
}
};
}
public abstract Observable<ComRespInfo<DATA>> getRequestObservable();
public void loadData(Observable<ComRespInfo<DATA>> observable) {
NetCheckerSubscriber subscriber = getDefaultSubscriber();
ObservableFactory.createNetObservable(context, observable, mView.getRxView())
.subscribe(subscriber);
}
@Override
public void onRefreshData() {
loadData(getRequestObservable());
}
}
<file_sep>package net.zsygfddsd.qujing.common.widgets;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.graphics.Rect;
import android.support.annotation.ColorInt;
import android.support.v7.widget.RecyclerView;
import android.util.SparseArray;
import android.util.TypedValue;
import android.view.View;
/**
* <p/>
* 分割线绘制规则,
* 上下左右都出头,分割线要求完全不透明,不然交叉处会出现重叠
*/
public class DividerItemDecoration extends RecyclerView.ItemDecoration {
// private Drawable mDrawable;
private Paint mPaint;
private int lineWidth;//px 分割线宽
/**
* A single color value in the form 0xAARRGGBB.
**/
private int colorRGB;
private SparseArray<Boolean[]> isDrawDivider = new SparseArray<>();
// private List<Boolean[]> isDrawDivider = new ArrayList<>();
public DividerItemDecoration(Context context, int lineWidthDp, @ColorInt int mColorRGB) {
this.colorRGB = mColorRGB;
this.lineWidth = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, lineWidthDp, context.getResources().getDisplayMetrics());
mPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
mPaint.setColor(colorRGB);
mPaint.setStyle(Paint.Style.FILL);
}
public DividerItemDecoration(Context context, float lineWidthDp, @ColorInt int mColorRGB) {
this.colorRGB = mColorRGB;
this.lineWidth = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, lineWidthDp, context.getResources().getDisplayMetrics());
mPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
mPaint.setColor(colorRGB);
mPaint.setStyle(Paint.Style.FILL);
}
@Override
public void onDraw(Canvas c, RecyclerView parent, RecyclerView.State state) {
//上下左右
drawChildTopHorizontal(c, parent);
drawChildBottomHorizontal(c, parent);
drawChildLeftVertical(c, parent);
drawChildRightVertical(c, parent);
}
public void drawChildBottomHorizontal(Canvas c, RecyclerView parent) {
int childCount = parent.getChildCount();
for (int i = 0; i < childCount; i++) {
if (isDrawDivider.get(i, new Boolean[]{false, false, false, false})[3]) {
View child = parent.getChildAt(i);
RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
.getLayoutParams();
int left = child.getLeft() - params.leftMargin - lineWidth;
int right = child.getRight() + params.rightMargin + lineWidth;
int top = child.getBottom() + params.bottomMargin;
int bottom = top + lineWidth;
c.drawRect(left, top, right, bottom, mPaint);
}
}
}
public void drawChildTopHorizontal(Canvas c, RecyclerView parent) {
int childCount = parent.getChildCount();
for (int i = 0; i < childCount; i++) {
if (isDrawDivider.get(i, new Boolean[]{false, false, false, false})[1]) {
View child = parent.getChildAt(i);
RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
.getLayoutParams();
int left = child.getLeft() - params.leftMargin - lineWidth;
int right = child.getRight() + params.rightMargin + lineWidth;
int bottom = child.getTop() - params.topMargin;
int top = bottom - lineWidth;
c.drawRect(left, top, right, bottom, mPaint);
}
}
}
public void drawChildLeftVertical(Canvas c, RecyclerView parent) {
int childCount = parent.getChildCount();
for (int i = 0; i < childCount; i++) {
if (isDrawDivider.get(i, new Boolean[]{false, false, false, false})[0]) {
View child = parent.getChildAt(i);
RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
.getLayoutParams();
int top = child.getTop() - params.topMargin - lineWidth;
int bottom = child.getBottom() + params.bottomMargin + lineWidth;
int right = child.getLeft() - params.leftMargin;
int left = right - lineWidth;
c.drawRect(left, top, right, bottom, mPaint);
}
}
}
public void drawChildRightVertical(Canvas c, RecyclerView parent) {
int childCount = parent.getChildCount();
for (int i = 0; i < childCount; i++) {
if (isDrawDivider.get(i, new Boolean[]{false, false, false, false})[2]) {
View child = parent.getChildAt(i);
RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
.getLayoutParams();
int top = child.getTop() - params.topMargin - lineWidth;
int bottom = child.getBottom() + params.bottomMargin + lineWidth;
int left = child.getRight() + params.rightMargin;
int right = left + lineWidth;
c.drawRect(left, top, right, bottom, mPaint);
}
}
}
@Override
public void getItemOffsets(Rect outRect, View view, RecyclerView parent, RecyclerView.State state) {
//outRect 看源码可知这里只是把Rect类型的outRect作为一个封装了left,right,top,bottom的数据结构,
//作为传递left,right,top,bottom的偏移值来用的
int itemPosition = ((RecyclerView.LayoutParams) view.getLayoutParams()).getViewLayoutPosition();
//
Boolean[] sideOffsetBooleans = isDrawDivider.get(itemPosition, new Boolean[]{false, false, false, false});
int left = sideOffsetBooleans[0] ? lineWidth : 0;
int top = sideOffsetBooleans[1] ? lineWidth : 0;
int right = sideOffsetBooleans[2] ? lineWidth : 0;
int bottom = sideOffsetBooleans[3] ? lineWidth : 0;
outRect.set(left, top, right, bottom);
}
}
<file_sep>apply plugin: 'com.android.library'
apply plugin: 'com.neenbedankt.android-apt'
// Android library module configuration; SDK/tool versions are centralised
// in the root project's ext.android block.
android {
    compileSdkVersion rootProject.ext.android.compileSdkVersion
    buildToolsVersion rootProject.ext.android.buildToolsVersion
    defaultConfig {
        minSdkVersion rootProject.ext.android.minSdkVersion
        targetSdkVersion rootProject.ext.android.targetSdkVersion
        versionCode 1
        versionName "1.0"
        testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
    }
    sourceSets {
        main {
            // Layout resources are split per screen type; directories are
            // searched in order, most specific first.
            res.srcDirs = [
                    'src/main/res/layouts/activity',
                    'src/main/res/layouts/fragment',
                    'src/main/res/layouts/view',
                    'src/main/res/layouts/cell',
                    'src/main/res/layouts',
                    'src/main/res'
            ]
        }
    }
    buildTypes {
        release {
            minifyEnabled false
            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
        }
    }
}

// Module dependencies; artifact coordinates come from the root project's
// ext.dependencies map so all modules share one version.
dependencies {
    compile fileTree(dir: 'libs', include: ['*.jar'])
    androidTestCompile('com.android.support.test.espresso:espresso-core:2.2.2', {
        exclude group: 'com.android.support', module: 'support-annotations'
    })
    testCompile 'junit:junit:4.12'
    compile rootProject.ext.dependencies["appcompat-v7"]
    //Rx
    compile rootProject.ext.dependencies["retrofit2"]
    compile rootProject.ext.dependencies["retrofit2-converter-gson"]
    compile rootProject.ext.dependencies["rxandroid"]
    compile rootProject.ext.dependencies["retrofit2-adapter-rxjava"]
    compile rootProject.ext.dependencies["rxlifecycle"]
    compile rootProject.ext.dependencies["rxlifecycle-components"]
    compile rootProject.ext.dependencies["okhttp3-logging-interceptor"]
    //recyclerview
    compile rootProject.ext.dependencies["recyclerview-v7"]
    //dagger 2
    compile rootProject.ext.dependencies["dagger2"]
    apt rootProject.ext.dependencies["dagger-compiler"]
    //butterknife
    compile rootProject.ext.dependencies["butterknife"]
    apt rootProject.ext.dependencies["butterknife-compiler"]
}
| 68cf8d0d1ff1953464193acf04a991a2739dc706 | [
"Markdown",
"Java",
"Gradle"
] | 42 | Java | yanyusong/android-architectures-demos | 4239333bf8a164a2e135e263e79ec6515db2f993 | 80ed6b34346a877ed17a2533c6c4963ebe3e2643 |
refs/heads/master | <file_sep>from goripser import *
from plot import *
from scipy.integrate import simps
from multiprocessing import Pool, Manager, Lock
import os
def predict_avg_experi(dir,limit):
sgf_files = []
for files,_ in zip(os.listdir(dir), range(limit)):
if files.endswith(".sgf"):
sgf_files.append(os.path.join(dir,files))
man = Manager()
correct_pred = man.Value('i',0)
l = man.Lock()
pool = Pool(processes=6)
for game_file_paths in sgf_files:
pool.apply_async(predict_worker,args=(game_file_paths,correct_pred,l,))
pool.close()
pool.join()
print("Success Rate For" + dir + ": " + str(correct_pred.value/len(sgf_files)) + "\n")
def predict_worker(game_file_paths,counter,lock):
proc = SGFProcessor(game_file_paths)
x = np.arange(proc.num_of_moves())
black_conn = []
white_conn = []
for _, dgms in proc.filter_game(0,400):
black_h1, white_h1 = dgms[0][1], dgms[1][1]
black_mean, white_mean = np.mean(black_h1,axis=0), np.mean(white_h1,axis=0)
black_conn.append(black_mean[0])
white_conn.append(white_mean[0])
score = simps(np.asarray(black_conn) - np.asarray(white_conn),x)
lock.acquire()
print("Score for " + game_file_paths + " : " + str(score) + " Winner: " + proc.winner_name + " \n")
lock.release()
winner = 'b' if (score < 0) else 'w'
if winner == proc.winner:
counter.value += 1
def run_wass_routine(dir):
sgf_files = []
for files in os.listdir(dir):
if files.endswith(".sgf"):
sgf_files.append(os.path.join(dir,files))
for game_file_paths in sgf_files:
Analytics(game_file_paths).game_wdist()
def worker_conn(game_file_paths):
Analytics(game_file_paths).game_avg_conn()
def run_conn_routine(dir):
sgf_files = []
for files in os.listdir(dir):
if files.endswith(".sgf"):
sgf_files.append(os.path.join(dir,files))
#pool = Pool(processes=6)
for game_file_paths in sgf_files:
worker_conn(game_file_paths)
#pool.close()
#pool.join()
def test_anim_routine(dir):
"""
sgf_files = []
for files in os.listdir(dir):
if files.endswith(".sgf"):
sgf_files.append(os.path.join(dir,files))
for game_file_paths in sgf_files:
"""
GameAnimator(dir).animate()
def test_save_routine(dir):
sgf_files = []
for files in os.listdir(dir):
if files.endswith(".sgf"):
sgf_files.append(os.path.join(dir,files))
for game_file_paths in sgf_files:
SaveGameProg(game_file_paths).plot()
def test_score_routine(dir):
sgf_files = []
for files in os.listdir(dir):
if files.endswith(".sgf"):
sgf_files.append(os.path.join(dir,files))
for game_file_paths in sgf_files:
Analytics(game_file_paths).game_scoring()
def test_scroll_routine(file):
GameScroll(file).scroll()
<file_sep># TDAGo
Rudimentary analysis of progressions of Go games using Persistence Homology.
<file_sep>import os
import matplotlib.pyplot as plt
import matplotlib.animation as animate
from matplotlib.widgets import Slider, TextBox
import numpy as np
from routines import *
from multiprocessing import Pool
from copy import copy
class PlotFSHandler: #Handles all parsing and file handling for plot printing
def __init__(self,pathname):
self.plot_root_dir = os.path.join(os.environ['HOME'], "/Work/TDAGo/testplots/")
dir_name = os.path.dirname(pathname)
dir_name_index = dir_name.rfind('/')+1
self.plot_dir = os.path.join(self.plot_root_dir,dir_name[dir_name_index:])
self.file_name = os.path.split(pathname)[1]
if(not os.path.isdir(self.plot_dir)):
os.mkdir(self.plot_dir)
def get_save_loc(self):
return os.path.join(self.plot_dir,self.file_name)
class GameAnimator: #animates the persistence diagrams and board to see how game progresses
def __init__(self,pathname):
self.pathname = pathname
self.figure = plt.figure(figsize=(25,25))
self.proc = SGFProcessor(pathname)
self.board_rout = BoardPHAniRoutine(self.figure,[(321,322),(323,324)],self.proc) #Testing these routines for now. Animator will have to be more abstracted later
self.wdist = WassAniRoutine(self.figure,325,self.pathname)
self.move_box = MoveBoxRoutine(self.figure,326)
self.save_loc = PlotFSHandler(pathname).get_save_loc()
def init(self):
self.line = self.wdist.init_routine()
def update(self,i,line):
self.board_rout.update(i)
self.move_box.update(i)
return self.wdist.update(i)
def animate(self):
num_moves = self.proc.num_of_moves()
self.init()
ani = animate.FuncAnimation(self.figure,self.update,frames=num_moves,fargs=[self.line], save_count=400)
Writer = animate.writers['ffmpeg']
writer = Writer(fps=10,bitrate=-1)
ani.save(self.save_loc + ".mp4",writer=writer) #save animation
class StaticPlot: #Simply outputs one routine as a png plot
def __init__(self,pathname,routine,start=0,finish=400):
#assert
self.save_loc = PlotFSHandler(pathname).get_save_loc()
self.figure = plt.Figure((20,20))
name = os.path.split(pathname)[1]
self.routine = routine(self.figure,111,pathname,start=start,finish=finish,name=name)
def plot(self): #We simply run the plotting routines and save them to disk
self.routine.run()
self.figure.savefig(self.save_loc + ".png") #Save file in corresponding directory
class SaveGameProg: #animates the persistence diagrams and board to see how game progresses
def __init__(self,pathname):
self.pathname = pathname
self.proc = SGFProcessor(pathname)
self.figure = plt.figure(figsize=(20,20))
self.white_board = self.figure.add_subplot(321)
self.white_board.set_xticks(np.arange(20))
self.white_board.set_yticks(np.arange(20))
self.white_board.set_title('White Stone Positions')
self.white_dgms = self.figure.add_subplot(322)
self.white_dgms.set_title('White PH')
self.black_board = self.figure.add_subplot(323) #Make plots wider for better visibility
self.black_board.set_xticks(np.arange(20))
self.black_board.set_yticks(np.arange(20))
self.black_board.set_title('Black Stone Positions')
self.black_dgms = self.figure.add_subplot(324)
self.black_dgms.set_title('Black PH')
self.save_loc = PlotFSHandler(pathname).get_save_loc()
self.draw_board(self.black_board)
self.draw_board(self.white_board)
def draw_board(self,ax):
for i in range(19):
ax.axhline(i,color="black")
ax.axvline(i,color="black")
ax.set_facecolor('burlywood')
def update(self,data_tup):
(black_stones, white_stones), (black_dgms,white_dgms) = data_tup
self.white_board.scatter(white_stones[:,0], white_stones[:,1], color='red',s=150) #anything more efficient then scattering it every time?
self.black_board.scatter(black_stones[:,0], black_stones[:,1],color='black',s=150)
plt.sca(self.black_dgms)
plt.cla()
self.black_dgms.set_title('Black PH') #Terribly way of just referencing the plot and clearing the plot to update Persistence Diagrams. Dnot feel like wrestlin with matplotlib right now.
plot_diagrams(black_dgms)
plt.sca(self.white_dgms)
plt.cla()
self.white_dgms.set_title('White PH')
plot_diagrams(white_dgms)
def plot(self):
if(not os.path.isdir(self.save_loc)):
os.mkdir(self.save_loc)
#pool = Pool(processes=6)
for data_tup, num in zip(self.proc.filter_game(0,400),range(400)):
self.update(data_tup)
self.figure.savefig(os.path.join(self.save_loc,str(num) + ".png"))
#pool.apply(fig.savefig, args=(os.path.join(self.save_loc,str(num) + ".png"),))
#pool.close()
#pool.join()
class Analytics: #Simply outputs evolution of distance as a plot.
def __init__(self,pathname,start=0,finish=400):
self.disarr = DistanceArray(pathname,start,finish)
self.pathname = pathname
self.save_loc = PlotFSHandler(pathname).get_save_loc()
self.name = os.path.split(pathname)[1]
self.start_num = start
self.finish_num = finish
def game_avg_conn(self):
proc = SGFProcessor(self.pathname)
black_conn = []
white_conn = []
for _, dgms in proc.filter_game(0,400):
black_h1, white_h1 = dgms[0][1], dgms[1][1]
black_mean, white_mean = np.mean(black_h1,axis=0), np.mean(white_h1,axis=0)
black_conn.append(black_mean[0])
white_conn.append(white_mean[0])
x = np.arange(proc.num_of_moves())
fig, ax = plt.subplots()
ax.plot(x,black_conn,color="black",label=proc.black_player)
ax.plot(x,white_conn,color="red",label=proc.white_player)
ax.legend()
ax.set(xlabel="Move #",ylabel="Avg Distance of detected groups",title="Plot Of Avg Distance as " + self.name + " progresses. (Winner:" + proc.winner_name + ")" )
plt.savefig(self.save_loc + "_conngraph.png")
def game_scoring(self):
def score(h1_dgms):
acc = 0
for i in h1_dgms:
d = i[1]
b = i[0]
# acc +=
return acc
proc = SGFProcessor(self.pathname)
black_score = []
white_score = []
x = np.arange(proc.num_of_moves())
for _, dgms in proc.filter_game(0,400):
black_h1, white_h1 = dgms[0][1], dgms[1][1]
black_score.append(score(black_h1))
white_score.append(score(white_h1))
fig, ax = plt.subplots()
ax.plot(x,black_score,color="black",label=proc.black_player)
ax.plot(x,white_score,color="red",label=proc.white_player)
ax.legend()
ax.set(xlabel="Move #",ylabel="Score",title="Plot Of Score as " + self.name + " progresses. (Winner:" + proc.winner + ")" )
plt.savefig(self.save_loc + "_scoregraph.png")
def game_wdist(self):
x,y = self.disarr.get_wass_array()
fig,ax = plt.subplots()
ax.plot(x,y,1)
ax.set(xlabel='Move #',ylabel='Wasserstein Dist',title="Plot Of WDist as " + self.name + " progresses")
plt.savefig(self.save_loc + "_wdist.png") #Save file in corresponding directory
def game_bdist(self):
x,y = self.disarr.get_bottle_array()
ax.plot(x,y,1)
ax.set(xlabel='Move #',ylabel='Bottleneck Dist',title="Plot Of BDist as " + self.name + " progresses")
plt.savefig(self.save_loc + ".png") #Save file in corresponding directory
<file_sep>from plot import *
from experiments import *
import warnings
warnings.filterwarnings("ignore") #Ignore warnings for now
import sys
import os
import argparse
def main():
parser = argparse.ArgumentParser(description='Analysis of Go Games')
parser.add_argument('dir',nargs='*')
parser.add_argument('--conn',dest="conn",action='store_true')
parser.add_argument('--avg',dest="avg",action='store_true')
parser.add_argument('--score',dest="score",action='store_true')
parser.add_argument('--anim',dest="anim",action='store_true')
args = parser.parse_args()
if args.conn:
run_conn_routine(args.dir[0])
if args.avg:
predict_avg_experi(args.dir[0],args.dir[1])
if args.score:
test_score_routine(args.dir[0])
if args.anim:
test_anim_routine(args.dir[0])
#test_save_routine(str(argv[0]))
if __name__ == '__main__':
main()
#Test routines
#Animation routines
#Persistence Diagrams?\
#Go analysis features.
#Ideas
#How to interpret H_1 points on DGMS? For example, if a point has a earlier,later birthtime vs earlier,later deathtime? How do we interpret this as properties of possible enclosed territory.
#We can now start to add points to the white/black board to model obstructions to building territory. A good idea would be to find ways to create "meaningful" boards for analysis of specific advantage properties.
#Research more about Go fighting strategies and early,late game caveats
#Create a modular framework such that you have TDA-DATA -> plot modules -> customizable plot figure -> analysis interface
#Create a caching scheme to cache all sequential computations and diagrams made. See cache-tools
<file_sep>from abc import ABC, abstractmethod
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animate
from matplotlib.widgets import Slider, TextBox
from goripser import *
from persim import plot_diagrams
class Routine():
def __init__(self,figure, plot_cood, title=None):
self.figure = figure
self.ax = self.figure.add_subplot(plot_cood)
self.title = title
if self.title is not None:
self.ax.set_title(self.title)
class StaticRoutine(Routine,ABC):
@abstractmethod
def run(self):
pass
class AniRoutine(Routine,ABC):
@abstractmethod
def init_routine(self):
pass
@abstractmethod
def update(self,index):
pass
#Routine responsible for calculating and plotting WDist of a game
class WassRoutine(StaticRoutine):
def __init__(self, figure, plot_cood, pathname,start=0,finish=400,name='game'):
assert isinstance(figure,plt.Figure) #Gotta work with figures here.
super().__init__(figure,plot_cood)
self.x, self.y = (DistanceArray(pathname,start,finish)).get_wass_array() #Get the wass array for game at pathname
self.name = name
def run(self):
self.ax.plot(self.x,self.y,1)
self.ax.set(xlabel='Move #',ylabel='Wasserstein Dist',title="Plot Of WDist as " + self.name + " progresses")
class BottleRoutine(StaticRoutine):
def __init__(self, figure, plot_cood, pathname,start=0,finish=400,name='game'):
assert isinstance(figure,plt.Figure) #Gotta work with figures here.
super().__init__(figure,plot_cood)
self.x, self.y = (DistanceArray(pathname,start,finish)).get_bottle_array() #Get the wass array for game at pathname
self.name = name
def run(self):
self.ax.plot(self.x,self.y,1)
self.ax.set(xlabel='Move #',ylabel='Wasserstein Dist',title="Plot Of BDist as " + self.name + " progresses")
class WassAniRoutine(AniRoutine):
def __init__(self,figure,plot_cood,pathname,start=0,finish=400):
super().__init__(figure, plot_cood,title="WDist Progression")
self.ax.set_xlabel("Move #")
self.ax.set_ylabel("WDist between White and Black")
self.x, self.y = (DistanceArray(pathname,start,finish)).get_wass_array() #Get the wass array for game at pathname
def init_routine(self):
self.ax, = self.ax.plot(self.x[:1],self.y[:1])
return self.ax
def update(self,index):
self.ax.set_data(self.x[:index],self.y[:index])
return self.ax
class DGMSAniRoutine(AniRoutine):
def __init__(self,figure,plot_cood,color):
self.color = "White" if color == 'w' else "Black"
super().__init__(figure, plot_cood,title= self.color + " PH")
def init_routine(self): #Don't have to init anything for PH
pass
def update(self,dgms):
plt.sca(self.ax) #change active subplot to relevant one
plt.cla() #figure out how to use gca for this
self.ax.set_title(self.title)
plot_diagrams(dgms)
class BoardDrawRoutine(AniRoutine): #Only supports single color for now.
def __init__(self,figure,plot_cood,color):
super().__init__(figure, plot_cood,title= color + " Stone Positions")
self.ax.set_xticks(np.arange(20))
self.ax.set_yticks(np.arange(20))
self.__draw_board(self.ax)
self.color = 'red' if color == 'w' else 'black' #using red color scheme for white stones for constrast on board
def __draw_board(self,ax):
for i in range(19):
ax.axhline(i,color="black")
ax.axvline(i,color="black")
ax.set_facecolor('burlywood')
def init_routine(self):
pass
def update(self,board_pos):
self.ax.scatter(board_pos[:,0], board_pos[:,1], color=self.color,s=250)
class BoardPHAniRoutine(AniRoutine):
def __init__(self,figure,plot_cood_tups,proc): #(black,white)
assert isinstance(proc,SGFProcessor)
self.rout_list = [BoardDrawRoutine(figure,plot_cood_tups[0][0],'b'), DGMSAniRoutine(figure,plot_cood_tups[0][1],'b'),
BoardDrawRoutine(figure,plot_cood_tups[1][0],'w'), DGMSAniRoutine(figure,plot_cood_tups[1][1],'w')]
self.proc = proc
self.board_list = [i for i in (self.proc).filter_game(0,400)] #Comsumes a lot of memory. I'm going to have to revuse filter_game to ensure modularity here
def init_routine(self):
pass
def update(self,index):
board, dgms = self.board_list[index]
self.rout_list[0].update(board[0])
self.rout_list[1].update(dgms[0])
self.rout_list[2].update(board[1])
self.rout_list[3].update(dgms[1])
class MoveBoxRoutine(AniRoutine):
def __init__(self,figure,plot_cood):
super().__init__(figure,plot_cood)
self.move_box = TextBox(self.ax,'Move #: ',0)
def update(self,index):
self.move_box.set_val(index)
<file_sep>from sgfmill import sgf
from sgfmill import sgf_moves
from ripser import ripser
from persim import wasserstein, wasserstein_matching
from persim import bottleneck, bottleneck_matching
import numpy as np
import scipy.spatial.distance as dist
from copy import copy
class SGFProcessor:
def __init__(self,pathname):
self.pathname = pathname
num_of_threads = 100
with open(self.pathname, "rb") as f:
sgf_src = f.read()
try:
self.sgf_game = sgf.Sgf_game.from_bytes(sgf_src)
except:
raise Exception("SGF file format error")
try:
_ , self.play_seq = sgf_moves.get_setup_and_moves(self.sgf_game)
except:
raise Exception("")
self.initial_board = []
for i in range(19):
self.initial_board.append([0,i])
self.initial_board.append([18,i])
self.initial_board.append([i,0])
self.initial_board.append([i,18])
@property
def white_player(self):
return self.sgf_game.get_player_name("w")
@property
def black_player(self):
return self.sgf_game.get_player_name("b")
@property
def winner(self):
return self.sgf_game.get_winner()
@property
def winner_name(self):
return self.sgf_game.get_player_name(self.sgf_game.get_winner())
self.black_move_pos = []
self.white_move_pos = []
black_moves = np.empty([0,2]) #accumlator variables
white_moves = np.empty([0,2])
pool = Pool(processes=6)
process_results = [] #list of processes
black_move_pos = np.asarray(self.initial_board)
white_move_pos = np.asarray(self.initial_board)
for (color, move), num in zip(self.play_seq, range(self.num_of_moves())):
row, col = move
if (color == 'b'):
black_moves = np.append(black_moves,[[row,col]],axis=0)
process_results.append(pool.apply_async(self.cal_dgms,args=((num,color,black_moves),)))
else:
white_moves = np.append(white_moves,[[row,col]],axis=0)
process_results.append(pool.apply_async(self.cal_dgms,args=((num,color,white_moves),)))
self.black_move_pos.append(copy(black_moves)) #Using a lot of memory for the sake of convienence.
self.white_move_pos.append(copy(white_moves))
pool.close()
pool.join()
self.dgms_list = [p.get() for p in process_results]
self.dgms_list.sort()
self.empty_dgms = TDATools.filter_rips(np.empty([0,2]))
def cal_dgms(self, board_tup):
index, color, board = board_tup
return (index, color, TDATools.filter_rips(board))
def num_of_moves(self): #Gives total number of moves in a game.
return len(self.play_seq)
def filter_game(self, start, finish):
finish = min(finish, self.num_of_moves())
for i in range(start, finish):
j = max(0, i-1)
yield (self.black_move_pos[i], self.white_move_pos[i]), (self.dgms_list[i][2], self.dgms_list[j][2] if j > 0 else self.empty_dgms)
class DistanceArray:
def __init__(self,pathname,start,finish):
self.start = start
self.finish = finish
self.proc = SGFProcessor(pathname)
def get_wass_array(self):
y = []
for _, dgms in (self.proc).filter_game(self.start,self.finish):
wdist, _ = TDATools.match_wasserstein(dgms[0],dgms[1])
y.append(wdist)
x = np.arange(self.start,
min(self.proc.num_of_moves(),self.finish))
return x,y
def get_bottle_array(self):
y = []
for _, dgms in (self.proc).filter_game(start_num=self.start_num,finish_num=self.finish_num):
wdist, _ = TDATools.match_bottleneck(dgms[0],dgms[1])
y.append(wdist)
x = np.arange(self.start,
min(self.proc.num_of_moves(),self.finish))
return x,y
class TDATools: #Revise this later.
#returns relevant dgms for matrix of stone positions
@staticmethod
def filter_rips(move_pos):
pdis = dist.squareform(dist.pdist(move_pos,'cityblock'))
return ripser(pdis,distance_matrix=True)['dgms']
@staticmethod
def match_wasserstein(dgms1, dgms2):
return wasserstein(dgms1[1],dgms2[1],matching=True)
@staticmethod
def match_bottleneck(dgms1,dgms2):
return bottleneck(dgms1[1],dgms2[1], matching=True)
| 0e8c4e6d121dd98c26ba6a04a1ce588006d3ab38 | [
"Markdown",
"Python"
] | 6 | Python | ekim1919/TDAGo | 014db546dae3dedb4f7206288333756fc358ed8a | b8ffcb5c9dbdd17a8d91096e298f6d714210bc57 |
refs/heads/master | <file_sep>/*
Execution command: g++ main.cpp -lfreeglut -lopengl32
*/
#include<stdio.h>
#include<GL/glu.h>
#include<GL/glut.h>
void display();
int main(int argc, char *argv[]) {
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB);
glutInitWindowPosition(0, 0);
glutInitWindowSize(500, 300);
glutCreateWindow("Window");
glutDisplayFunc(display);
glutMainLoop();
/* code */
return 0;
}
void display() {
}<file_sep># (OpenGL) Freeglut simple C++ example
In this repo, I'm using Freeglut to introduce C++ graphical basics.
## Instalation
### MinGW
You may know that I'm using the MinGW compiler for C/C++ on Windows OS. On the internet, there are a lot of videos and tutorials where explain how to install MinGW and for that reason, I'm gonna skip that part.
### OpenGL in C/C++
This part may be the most difficult (in my own experience) because there are no good guides to install OpenGL, especially on Windows. However, I could help by giving you some useful links to install OpenGL.
First of all, we won't install explicitly OpenGL, instead, we will install freeglut. In general, that should not be a problem and the functionalities are the same.
An excellent tutorial to install is the following, especially the 'MinGW, OpenGL, GLU and GLUT' section.
https://www3.ntu.edu.sg/home/ehchua/programming/opengl/HowTo_OpenGL_C.html
But if you are more visual, the following video may help you to manage the files and folders properly and some extra details for CLion IDE.
https://www.youtube.com/watch?v=AUFZnA3lW_Q&t=288s
I hope those links can help you.
## Run the code
At this point, you may have all the dependencies from the Freeglut library in the correct way. So let's start with the code.
By cloning this repository you will able to run the main.cpp file with the following command in your terminal:
```
g++ main.cpp -lfreeglut -lopengl32
```
Note that to run this command you must be located in the root of the project folder (inside your terminal) otherwise, you will get an error.
The `-lfreeglut` and`-lopengl32` are the dependencies that will be linked by the compiler and use the Freeglut properly.
After running the command you'll have an executable `a.exe`. You only need to run it with the command
```
a.exe
```
And that's all, hopefully, you can see the window produced by the program.

As you can see this is a very basic example of Freeglut, but now you can make amazing projects with some graphic details, just be sure to keep learning about Freeglut and its different applications.
Successes
| ca47b6e3fc4b46d1da38b4b5c302b6e0e5ca6acd | [
"Markdown",
"C++"
] | 2 | C++ | JeanleeRoy/freeglut-cpp | e6bd98b4f4842421ef1e8caa5a248c4647a4280c | 974e223b1f1e1d92e8556c2438208545a9715098 |
refs/heads/master | <repo_name>alvin921/FishChatServer<file_sep>/monitor/monitor.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"github.com/astaxie/beego"
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/monitor/controllers"
_ "github.com/oikomi/FishChatServer/monitor/routers"
)
/*
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
const char* build_time(void) {
static const char* psz_build_time = "["__DATE__ " " __TIME__ "]";
return psz_build_time;
}
*/
import "C"
var (
buildTime = C.GoString(C.build_time())
)
func BuildTime() string {
return buildTime
}
const VERSION string = "0.10"
func init() {
flag.Set("alsologtostderr", "false")
flag.Set("log_dir", "false")
}
func version() {
fmt.Printf("monitor version %s Copyright (c) 2014-2015 <NAME> (<EMAIL>) \n", VERSION)
}
var InputConfFile = flag.String("conf_file", "monitor.json", "input conf file name")
func main() {
version()
fmt.Printf("built on %s\n", BuildTime())
flag.Parse()
cfg := NewMonitorConfig(*InputConfFile)
err := cfg.LoadConfig()
if err != nil {
log.Error(err.Error())
return
}
server, err := libnet.Listen(cfg.TransportProtocols, cfg.Listen)
if err != nil {
log.Error(err.Error())
return
}
log.Info("server start: ", server.Listener().Addr().String())
m := NewMonitor(cfg)
//TODO not use go
m.subscribeChannels()
go server.Serve(func(session *libnet.Session) {
})
beego.Router("api/v1/monitor", &controllers.MonitorController{})
//beego.SetStaticPath("/views", "/mh/mygo/src/github.com/oikomi/FishChatServer/monitor/views")
beego.SetStaticPath("/views", "views")
beego.Run()
}
<file_sep>/libnet/buffer.go
package libnet
import (
"encoding/binary"
"io"
"math"
"sync/atomic"
"unicode/utf8"
"unsafe"
)
var (
enableBufferPool = true
globalPool = newBufferPool()
)
// Turn On/Off buffer pool. Default is enable.
func BufferPoolEnable(enable bool) {
enableBufferPool = enable
}
// Limit buffer pool memory usage. Default is 10M.
func BufferPoolLimit(size int) int {
if size == 0 {
return int(globalPool.sizeLimit)
}
old := globalPool.sizeLimit
globalPool.sizeLimit = int64(size)
return int(old)
}
// Get/Set initialization capacity for new buffer. Default is 4096.
func BufferInitSize(size int) int {
if size == 0 {
return globalPool.bufferInitSize
}
old := globalPool.bufferInitSize
globalPool.bufferInitSize = size
return old
}
// Limit buffer size in object pool.
// Large buffer will not return to object pool when it freed. Default is 102400.
func BufferSizeLimit(size int) int {
if size == 0 {
return globalPool.bufferSizeLimit
}
old := globalPool.bufferSizeLimit
globalPool.bufferSizeLimit = size
return old
}
// Buffer pool state.
type PoolState struct {
InHitRate float64 // Hit rate of InBuffer.
InFreeRate float64 // InBuffer free rate.
InDropRate float64 // Drop rate of large OutBuffer.
OutHitRate float64 // Hit rate of OutBuffer.
OutFreeRate float64 // OutBuffer free rate.
OutDropRate float64 // Drop rate of large OutBuffer.
}
// Get buffer pool state.
func BufferPoolState() PoolState {
var (
inGet = float64(atomic.LoadUint64(&globalPool.inGet))
inNew = float64(atomic.LoadUint64(&globalPool.inNew))
inFree = float64(atomic.LoadUint64(&globalPool.inFree))
inDrop = float64(atomic.LoadUint64(&globalPool.inDrop))
)
var (
outGet = float64(atomic.LoadUint64(&globalPool.outGet))
outNew = float64(atomic.LoadUint64(&globalPool.outNew))
outFree = float64(atomic.LoadUint64(&globalPool.outFree))
outDrop = float64(atomic.LoadUint64(&globalPool.outDrop))
)
return PoolState{
InHitRate: (inGet - inNew) / inGet,
InFreeRate: inFree / inGet,
InDropRate: inDrop / inGet,
OutHitRate: (outGet - outNew) / outGet,
OutFreeRate: outFree / outGet,
OutDropRate: outDrop / outGet,
}
}
type bufferPool struct {
size int64
// InBuffer
inGet uint64
inNew uint64
inFree uint64
inDrop uint64
in unsafe.Pointer
// OutBuffer
out unsafe.Pointer
outGet uint64
outNew uint64
outFree uint64
outDrop uint64
sizeLimit int64
bufferInitSize int
bufferSizeLimit int
}
func newBufferPool() *bufferPool {
return &bufferPool{
sizeLimit: 10240000,
bufferInitSize: 4096,
bufferSizeLimit: 102400,
}
}
func (pool *bufferPool) GetInBuffer() (in *InBuffer) {
var ptr unsafe.Pointer
for {
ptr = atomic.LoadPointer(&pool.in)
if ptr == nil {
break
}
if atomic.CompareAndSwapPointer(&pool.in, ptr, ((*InBuffer)(ptr)).next) {
break
}
}
atomic.AddUint64(&pool.inGet, 1)
if ptr == nil {
atomic.AddUint64(&pool.inNew, 1)
in = &InBuffer{Data: make([]byte, 0, pool.bufferInitSize), pool: pool}
} else {
in = (*InBuffer)(ptr)
atomic.AddInt64(&pool.size, -int64(cap(in.Data)))
}
in.isFreed = false
return in
}
func (pool *bufferPool) GetOutBuffer() (out *OutBuffer) {
var ptr unsafe.Pointer
for {
ptr = atomic.LoadPointer(&pool.out)
if ptr == nil {
break
}
if atomic.CompareAndSwapPointer(&pool.out, ptr, ((*OutBuffer)(ptr)).next) {
break
}
}
atomic.AddUint64(&pool.outGet, 1)
if ptr == nil {
atomic.AddUint64(&pool.outNew, 1)
out = &OutBuffer{Data: make([]byte, 0, pool.bufferInitSize), pool: pool}
} else {
out = (*OutBuffer)(ptr)
atomic.AddInt64(&pool.size, -int64(cap(out.Data)))
}
out.isFreed = false
out.isBroadcast = false
out.refCount = 0
return out
}
func (pool *bufferPool) PutInBuffer(in *InBuffer) {
atomic.AddUint64(&pool.inFree, 1)
if cap(in.Data) >= pool.bufferSizeLimit || atomic.LoadInt64(&pool.size) >= pool.sizeLimit {
atomic.AddUint64(&pool.inDrop, 1)
return
}
in.Data = in.Data[0:0]
in.ReadPos = 0
in.isFreed = true
for {
in.next = atomic.LoadPointer(&pool.in)
if atomic.CompareAndSwapPointer(&pool.in, in.next, unsafe.Pointer(in)) {
atomic.AddInt64(&pool.size, int64(cap(in.Data)))
break
}
}
}
func (pool *bufferPool) PutOutBuffer(out *OutBuffer) {
atomic.AddUint64(&pool.outFree, 1)
if cap(out.Data) >= pool.bufferSizeLimit || atomic.LoadInt64(&pool.size) >= pool.sizeLimit {
atomic.AddUint64(&pool.outDrop, 1)
return
}
out.Data = out.Data[0:0]
out.isFreed = true
for {
out.next = atomic.LoadPointer(&pool.out)
if atomic.CompareAndSwapPointer(&pool.out, out.next, unsafe.Pointer(out)) {
atomic.AddInt64(&pool.size, int64(cap(out.Data)))
break
}
}
}
// Incomming message buffer.
type InBuffer struct {
Data []byte // Buffer data.
ReadPos int // Read position.
isFreed bool
pool *bufferPool
next unsafe.Pointer
}
func newInBuffer() *InBuffer {
if enableBufferPool {
return globalPool.GetInBuffer()
}
return &InBuffer{Data: make([]byte, 0, globalPool.bufferInitSize)}
}
// Return the buffer to buffer pool.
func (in *InBuffer) free() {
if enableBufferPool {
if in.isFreed {
panic("link.InBuffer: double free")
}
in.pool.PutInBuffer(in)
}
}
// Prepare buffer for next message.
// This method is for custom protocol only.
// Dont' use it in application logic.
func (in *InBuffer) Prepare(size int) {
if cap(in.Data) < size {
in.Data = make([]byte, size)
} else {
in.Data = in.Data[0:size]
}
}
// Slice some bytes from buffer.
func (in *InBuffer) Slice(n int) []byte {
r := in.Data[in.ReadPos : in.ReadPos+n]
in.ReadPos += n
return r
}
// Implement io.Reader interface
func (in *InBuffer) Read(b []byte) (int, error) {
if in.ReadPos == len(in.Data) {
return 0, io.EOF
}
n := len(b)
if n+in.ReadPos > len(in.Data) {
n = len(in.Data) - in.ReadPos
}
copy(b, in.Data[in.ReadPos:])
in.ReadPos += n
return n, nil
}
// Read some bytes from buffer.
func (in *InBuffer) ReadBytes(n int) []byte {
x := make([]byte, n)
copy(x, in.Slice(n))
return x
}
// Read a string from buffer.
func (in *InBuffer) ReadString(n int) string {
return string(in.Slice(n))
}
// Read a rune from buffer.
func (in *InBuffer) ReadRune() rune {
x, size := utf8.DecodeRune(in.Data[in.ReadPos:])
in.ReadPos += size
return x
}
// Read a uint8 value from buffer.
func (in *InBuffer) ReadUint8() uint8 {
return uint8(in.Slice(1)[0])
}
// ReadUint16LE reads a little-endian uint16.
func (in *InBuffer) ReadUint16LE() uint16 {
	b := in.Slice(2)
	return binary.LittleEndian.Uint16(b)
}

// ReadUint16BE reads a big-endian uint16.
func (in *InBuffer) ReadUint16BE() uint16 {
	b := in.Slice(2)
	return binary.BigEndian.Uint16(b)
}

// ReadUint32LE reads a little-endian uint32.
func (in *InBuffer) ReadUint32LE() uint32 {
	b := in.Slice(4)
	return binary.LittleEndian.Uint32(b)
}

// ReadUint32BE reads a big-endian uint32.
func (in *InBuffer) ReadUint32BE() uint32 {
	b := in.Slice(4)
	return binary.BigEndian.Uint32(b)
}

// ReadUint64LE reads a little-endian uint64.
func (in *InBuffer) ReadUint64LE() uint64 {
	b := in.Slice(8)
	return binary.LittleEndian.Uint64(b)
}

// ReadUint64BE reads a big-endian uint64.
func (in *InBuffer) ReadUint64BE() uint64 {
	b := in.Slice(8)
	return binary.BigEndian.Uint64(b)
}

// ReadFloat32LE reads a little-endian IEEE-754 float32.
func (in *InBuffer) ReadFloat32LE() float32 {
	bits := in.ReadUint32LE()
	return math.Float32frombits(bits)
}

// ReadFloat32BE reads a big-endian IEEE-754 float32.
func (in *InBuffer) ReadFloat32BE() float32 {
	bits := in.ReadUint32BE()
	return math.Float32frombits(bits)
}

// ReadFloat64LE reads a little-endian IEEE-754 float64.
func (in *InBuffer) ReadFloat64LE() float64 {
	bits := in.ReadUint64LE()
	return math.Float64frombits(bits)
}

// ReadFloat64BE reads a big-endian IEEE-754 float64.
func (in *InBuffer) ReadFloat64BE() float64 {
	bits := in.ReadUint64BE()
	return math.Float64frombits(bits)
}
// ReadVarint decodes a zig-zag varint-encoded signed integer and
// advances the read position past the consumed bytes.
func (in *InBuffer) ReadVarint() int64 {
	value, consumed := binary.Varint(in.Data[in.ReadPos:])
	in.ReadPos += consumed
	return value
}

// ReadUvarint decodes a varint-encoded unsigned integer and advances
// the read position past the consumed bytes.
func (in *InBuffer) ReadUvarint() uint64 {
	value, consumed := binary.Uvarint(in.Data[in.ReadPos:])
	in.ReadPos += consumed
	return value
}
// Outgoing message buffer, optionally recycled through the global
// buffer pool.
type OutBuffer struct {
	Data []byte // Buffer data.
	isFreed bool // guards against returning the buffer to the pool twice
	isBroadcast bool // shared by a broadcast; freed only when refCount drains
	refCount int32 // outstanding broadcast users (updated atomically)
	pool *bufferPool // owning pool; PutOutBuffer returns the buffer here
	next unsafe.Pointer // presumably the pool's intrusive free-list link — confirm in bufferPool
}
// newOutBuffer returns an OutBuffer, recycled from the global pool
// when pooling is enabled, otherwise freshly allocated with the
// pool's configured initial capacity.
func newOutBuffer() *OutBuffer {
	if enableBufferPool {
		return globalPool.GetOutBuffer()
	}
	return &OutBuffer{Data: make([]byte, 0, globalPool.bufferInitSize)}
}
// broadcastUse takes one broadcast reference on the buffer. Only
// meaningful when pooling is enabled; otherwise the GC reclaims it.
func (out *OutBuffer) broadcastUse() {
	if enableBufferPool {
		atomic.AddInt32(&out.refCount, 1)
	}
}
// broadcastFree drops one broadcast reference; the last reference
// returns the buffer to the pool. Non-broadcast buffers are untouched.
func (out *OutBuffer) broadcastFree() {
	if enableBufferPool {
		if out.isBroadcast && atomic.AddInt32(&out.refCount, -1) == 0 {
			out.free()
		}
	}
}
// Return the buffer to buffer pool.
// Panics on a second free of the same buffer; isFreed is presumably
// set inside PutOutBuffer — confirm in bufferPool.
func (out *OutBuffer) free() {
	if enableBufferPool {
		if out.isFreed {
			panic("link.OutBuffer: double free")
		}
		out.pool.PutOutBuffer(out)
	}
}
// Prepare resets the buffer so it can hold at least size bytes,
// reusing the existing backing array when it is large enough.
// For custom protocol implementations only; do not call from
// application logic.
func (out *OutBuffer) Prepare(size int) {
	if cap(out.Data) >= size {
		out.Data = out.Data[:0]
		return
	}
	out.Data = make([]byte, 0, size)
}

// Append adds the given bytes to the end of the buffer.
func (out *OutBuffer) Append(p ...byte) {
	out.Data = append(out.Data, p...)
}

// Write implements io.Writer; it never fails.
func (out *OutBuffer) Write(p []byte) (int, error) {
	out.Append(p...)
	return len(p), nil
}

// WriteBytes appends a byte slice to the buffer.
func (out *OutBuffer) WriteBytes(d []byte) {
	out.Data = append(out.Data, d...)
}

// WriteString appends the raw bytes of s to the buffer.
func (out *OutBuffer) WriteString(s string) {
	out.Data = append(out.Data, s...)
}

// WriteRune appends the UTF-8 encoding of r to the buffer.
func (out *OutBuffer) WriteRune(r rune) {
	var tmp [utf8.UTFMax]byte
	n := utf8.EncodeRune(tmp[:], r)
	out.Append(tmp[:n]...)
}
// WriteUint8 appends a single byte.
func (out *OutBuffer) WriteUint8(v uint8) {
	out.Append(byte(v))
}

// WriteUint16LE appends v in little-endian byte order.
func (out *OutBuffer) WriteUint16LE(v uint16) {
	var tmp [2]byte
	binary.LittleEndian.PutUint16(tmp[:], v)
	out.Append(tmp[:]...)
}

// WriteUint16BE appends v in big-endian byte order.
func (out *OutBuffer) WriteUint16BE(v uint16) {
	var tmp [2]byte
	binary.BigEndian.PutUint16(tmp[:], v)
	out.Append(tmp[:]...)
}

// WriteUint32LE appends v in little-endian byte order.
func (out *OutBuffer) WriteUint32LE(v uint32) {
	var tmp [4]byte
	binary.LittleEndian.PutUint32(tmp[:], v)
	out.Append(tmp[:]...)
}

// WriteUint32BE appends v in big-endian byte order.
func (out *OutBuffer) WriteUint32BE(v uint32) {
	var tmp [4]byte
	binary.BigEndian.PutUint32(tmp[:], v)
	out.Append(tmp[:]...)
}

// WriteUint64LE appends v in little-endian byte order.
func (out *OutBuffer) WriteUint64LE(v uint64) {
	var tmp [8]byte
	binary.LittleEndian.PutUint64(tmp[:], v)
	out.Append(tmp[:]...)
}

// WriteUint64BE appends v in big-endian byte order.
func (out *OutBuffer) WriteUint64BE(v uint64) {
	var tmp [8]byte
	binary.BigEndian.PutUint64(tmp[:], v)
	out.Append(tmp[:]...)
}

// WriteFloat32LE appends the IEEE-754 bits of v, little-endian.
func (out *OutBuffer) WriteFloat32LE(v float32) {
	bits := math.Float32bits(v)
	out.WriteUint32LE(bits)
}

// WriteFloat32BE appends the IEEE-754 bits of v, big-endian.
func (out *OutBuffer) WriteFloat32BE(v float32) {
	bits := math.Float32bits(v)
	out.WriteUint32BE(bits)
}

// WriteFloat64LE appends the IEEE-754 bits of v, little-endian.
func (out *OutBuffer) WriteFloat64LE(v float64) {
	bits := math.Float64bits(v)
	out.WriteUint64LE(bits)
}

// WriteFloat64BE appends the IEEE-754 bits of v, big-endian.
func (out *OutBuffer) WriteFloat64BE(v float64) {
	bits := math.Float64bits(v)
	out.WriteUint64BE(bits)
}
// WriteUvarint appends v as an unsigned varint (7 bits per byte,
// high bit marks continuation) — the same wire format produced by
// binary.PutUvarint.
func (out *OutBuffer) WriteUvarint(v uint64) {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], v)
	out.Append(tmp[:n]...)
}

// WriteVarint appends v as a zig-zag encoded signed varint — the
// same wire format produced by binary.PutVarint.
func (out *OutBuffer) WriteVarint(v int64) {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutVarint(tmp[:], v)
	out.Append(tmp[:n]...)
}
<file_sep>/storage/redis_store/session_cache.go
//
// Copyright 2014 <NAME>. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis_store
import (
"encoding/json"
"sync"
"time"
"github.com/garyburd/redigo/redis"
"github.com/oikomi/FishChatServer/storage/mongo_store"
)
// SessionCache is a Redis-backed cache of live client sessions.
// Every operation takes rwMutex, serializing use of the single
// shared redis connection held by RS.
type SessionCache struct {
	RS *RedisStore
	rwMutex sync.Mutex
}
// NewSessionCache wraps an existing RedisStore in a SessionCache.
func NewSessionCache(RS *RedisStore) *SessionCache {
	return &SessionCache{
		RS: RS,
	}
}
// SessionCacheData is the Redis-cached view of one client session:
// the persistent registration record (embedded from the Mongo store)
// plus volatile routing state set at login time.
type SessionCacheData struct {
	/*
	ClientID string
	ClientPwd string
	ClientName string
	ClientType string
	TopicList []string
	*/
	mongo_store.SessionStoreData
	Alive bool // liveness flag — presumably toggled by heartbeat handling; confirm against msg server
	ClientAddr string // client's remote address as seen at the gateway
	MsgServerAddr string // message server this client was routed to
	ID string // session uuid allocated at login
	MaxAge time.Duration // cache TTL; 0 means "use the store's default TTL"
}
// NewSessionCacheData builds cache data from a persistent session
// record plus the routing information decided at login time.
func NewSessionCacheData(store_data *mongo_store.SessionStoreData, ClientAddr string, MsgServerAddr string, ID string) *SessionCacheData {
	cacheData := &SessionCacheData{
		ClientAddr: ClientAddr,
		MsgServerAddr: MsgServerAddr,
		ID: ID,
	}
	// Copy the persistent fields individually into the embedded record.
	cacheData.ClientID = store_data.ClientID
	cacheData.ClientPwd = store_data.ClientPwd
	cacheData.ClientName = store_data.ClientName
	cacheData.ClientType = store_data.ClientType
	cacheData.TopicList = store_data.TopicList
	return cacheData
}
// checkClientID is a stub: it accepts every client ID.
// NOTE(review): always returns true — confirm whether real validation
// was intended here.
func (self *SessionCacheData) checkClientID(clientID string) bool {
	return true
}
// StoreKey returns the Redis key component for this session (the
// client ID; the SESSION_UNIQ_PREFIX suffix is added by the cache).
func (self *SessionCacheData) StoreKey() string {
	return self.ClientID
}
// Get fetches and decodes the cached session for client k. Returns
// an error when the key is absent or the stored JSON is invalid.
func (self *SessionCache) Get(k string) (*SessionCacheData, error) {
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	key := k + SESSION_UNIQ_PREFIX
	if prefix := self.RS.opts.KeyPrefix; prefix != "" {
		key = prefix + ":" + key
	}
	raw, err := redis.Bytes(self.RS.conn.Do("GET", key))
	if err != nil {
		return nil, err
	}
	sess := new(SessionCacheData)
	if err := json.Unmarshal(raw, sess); err != nil {
		return nil, err
	}
	return sess, nil
}
// Set stores the session as JSON under its client-ID key with a TTL:
// sess.MaxAge when set, otherwise the store's configured browser TTL,
// otherwise 2 days.
func (self *SessionCache) Set(sess *SessionCacheData) error {
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	payload, err := json.Marshal(sess)
	if err != nil {
		return err
	}
	key := sess.ClientID + SESSION_UNIQ_PREFIX
	if prefix := self.RS.opts.KeyPrefix; prefix != "" {
		key = prefix + ":" + key
	}
	ttl := sess.MaxAge
	if ttl == 0 {
		// No explicit lifetime on the session: fall back to defaults.
		if ttl = self.RS.opts.BrowserSessServerTTL; ttl == 0 {
			ttl = 2 * 24 * time.Hour
		}
	}
	_, err = self.RS.conn.Do("SETEX", key, int(ttl.Seconds()), payload)
	return err
}
// Delete removes the session stored under the given client ID.
func (self *SessionCache) Delete(id string) error {
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	key := id + SESSION_UNIQ_PREFIX
	if prefix := self.RS.opts.KeyPrefix; prefix != "" {
		key = prefix + ":" + key
	}
	_, err := self.RS.conn.Do("DEL", key)
	return err
}
// Clear all sessions from the store. Requires the use of a key
// prefix in the store options, otherwise the method refuses to delete all keys.
// Holds rwMutex for the duration; getSessionKeys must therefore not
// attempt to re-acquire it (sync.Mutex is not reentrant).
func (self *SessionCache) Clear() error {
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	vals, err := self.getSessionKeys()
	if err != nil {
		return err
	}
	if len(vals) > 0 {
		// Batch every DEL into a single MULTI/EXEC transaction.
		self.RS.conn.Send("MULTI")
		for _, v := range vals {
			self.RS.conn.Send("DEL", v)
		}
		_, err = self.RS.conn.Do("EXEC")
		if err != nil {
			return err
		}
	}
	return nil
}
// Get the number of session keys in the store. Requires the use of a
// key prefix in the store options, otherwise returns -1 (cannot tell
// session keys from other keys).
// Holds rwMutex for the duration; getSessionKeys must therefore not
// attempt to re-acquire it (sync.Mutex is not reentrant).
func (self *SessionCache) Len() int {
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	vals, err := self.getSessionKeys()
	if err != nil {
		return -1
	}
	return len(vals)
}
// getSessionKeys returns every key under the configured key prefix,
// or ErrNoKeyPrefix when no prefix is configured (without one,
// session keys cannot be distinguished from other keys).
//
// Fix: this helper previously locked rwMutex itself, but its only
// callers (Clear and Len) already hold that mutex. sync.Mutex is not
// reentrant, so the second Lock deadlocked the calling goroutine.
// The caller must hold rwMutex; this function no longer locks.
func (self *SessionCache) getSessionKeys() ([]interface{}, error) {
	if self.RS.opts.KeyPrefix != "" {
		return redis.Values(self.RS.conn.Do("KEYS", self.RS.opts.KeyPrefix+":*"))
	}
	return nil, ErrNoKeyPrefix
}
<file_sep>/monitor/static/js/app/controllers.js
//
// Copyright 2014-2015 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// LoginController posts the entered credentials to the backend; on a
// "0" status it marks the session as logged in via cookie and routes
// to the root view.
var loginControllerModule = angular.module("LoginControllerModule", ['ngCookies', 'ServicesModule']);
loginControllerModule.controller('LoginController', function($scope, $location, $cookies, loginService) {
    var reqParams = {
        action: "login"
    };
    $scope.login = function(user) {
        var postData = {
            username: user.name,
            // FIX: this field previously held a corrupted placeholder;
            // send the password the user actually typed.
            password: user.password
        };
        loginService.events(reqParams, postData).success(function(data, status, headers, config) {
            console.log(data);
            // Strict comparison: the backend reports success as the string "0".
            if (data.status === "0") {
                $cookies.isLogin = true;
                $location.path("/root");
            } else {
                alert("登录失败!");
            }
        }).error(function(data, status, headers, config) {
            alert("登录失败!");
        });
    };
});
var controllersModule = angular.module("ControllersModule", ['ngCookies', 'ServicesModule']);
// RootController: auth guard — bounce to the login view when the
// isLogin cookie is not set.
controllersModule.controller('RootController', function($scope, $location, $cookies, getServerDataService) {
    var isLogin = $cookies.isLogin;
    if (!isLogin) {
        $location.path("/");
    }
});
// MsgServerController: same auth guard as RootController.
// FIX: removed an unused `reqParams` local that was never sent anywhere.
controllersModule.controller('MsgServerController', function($scope, $location, $cookies, getServerDataService) {
    var isLogin = $cookies.isLogin;
    if (!isLogin) {
        $location.path("/");
    }
});
<file_sep>/storage/redis_store/redis_cache.go
//
// Copyright 2014 <NAME>. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis_store
import (
"time"
"sync"
"errors"
"github.com/garyburd/redigo/redis"
)
var (
	// ErrNoKeyPrefix is returned by key-enumeration helpers when no
	// KeyPrefix is configured (all keys would otherwise match).
	ErrNoKeyPrefix = errors.New("cannot get session keys without a key prefix")
)
// RedisStoreOptions configures the connection and key layout of a
// RedisStore.
type RedisStoreOptions struct {
	Network string // e.g. "tcp"
	Address string // host:port of the Redis server
	ConnectTimeout time.Duration
	ReadTimeout time.Duration
	WriteTimeout time.Duration
	Database int // Redis database to use for session keys
	KeyPrefix string // If set, keys will be KeyPrefix:SessionID (semicolon added)
	BrowserSessServerTTL time.Duration // Defaults to 2 days
}
// RedisStore holds one redis connection shared by the caches built
// on top of it; callers serialize access with rwMutex.
type RedisStore struct {
	opts *RedisStoreOptions
	conn redis.Conn
	rwMutex sync.Mutex
}
// NewRedisStore dials Redis with the configured timeouts and returns
// a store wrapping the live connection. Panics if the dial fails —
// the process cannot run without its session store.
func NewRedisStore(opts *RedisStoreOptions) *RedisStore {
	conn, err := redis.DialTimeout(opts.Network, opts.Address, opts.ConnectTimeout,
		opts.ReadTimeout, opts.WriteTimeout)
	if err != nil {
		panic(err)
	}
	return &RedisStore{
		opts: opts,
		conn: conn,
	}
}
<file_sep>/gateway/proto_proc.go
//
// Copyright 2014 <NAME>. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"github.com/oikomi/FishChatServer/common"
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/protocol"
"github.com/oikomi/FishChatServer/storage/mongo_store"
"github.com/oikomi/FishChatServer/storage/redis_store"
)
func init() {
	// Mirror glog output to stderr in addition to log files.
	// NOTE(review): "log_dir" expects a directory path; the literal
	// "false" here looks like a flag misuse — confirm intent.
	flag.Set("alsologtostderr", "true")
	flag.Set("log_dir", "false")
}
// ProtoProc implements the gateway's protocol command handlers.
type ProtoProc struct {
	gateway *Gateway
}
// NewProtoProc binds a handler set to its owning gateway.
func NewProtoProc(gateway *Gateway) *ProtoProc {
	return &ProtoProc{
		gateway: gateway,
	}
}
// procGetMinLoadMsgServer returns the address of the message server
// currently carrying the fewest sessions according to the gateway's
// load map. Returns "" when the map is empty.
func (self *ProtoProc) procGetMinLoadMsgServer() string {
	best := ""
	bestLoad := uint64(0xFFFFFFFFFFFFFFFF)
	for server, load := range self.gateway.msgServerNumMap {
		if load < bestLoad {
			bestLoad = load
			best = server
		}
	}
	return best
}
// procLogin handles a client login request:
//  1. if a cached session already exists, reuse its msg server and uuid;
//  2. otherwise pick the least-loaded msg server, allocate a new uuid,
//     auto-register the client in Mongo when unknown, and cache the
//     new session;
//  3. reply (success, uuid, msg server addr) and close the gateway
//     connection — the client is expected to reconnect to the msg
//     server directly.
// Always returns nil; transport errors are only logged.
func (self *ProtoProc) procLogin(cmd protocol.Cmd, session *libnet.Session) error {
	//log.Info("procLogin")
	var err error
	var uuid string
	var msgServer string
	// args: [0]=client id, [1]=client type, [2]=optional password
	ClientID := cmd.GetArgs()[0]
	ClientType := cmd.GetArgs()[1]
	ClientPwd := ""
	if len(cmd.GetArgs()) == 3 {
		ClientPwd = cmd.GetArgs()[2]
	}
	// get the session cache
	// NOTE(review): the Get error is discarded — a cache failure is
	// treated the same as "not logged in"; confirm that is acceptable.
	sessionCacheData, err := self.gateway.sessionCache.Get(ClientID)
	if sessionCacheData != nil {
		log.Warningf("ID %s already login", ClientID)
		msgServer = sessionCacheData.MsgServerAddr
		uuid = sessionCacheData.ID
	} else {
		// choose msg server and allocate UUID
		msgServer = self.procGetMinLoadMsgServer()
		uuid = common.NewV4().String()
		// get the session store to check whether registered
		sessionStoreData, _ := self.gateway.mongoStore.GetSessionFromCid(ClientID)
		if sessionStoreData == nil {
			log.Warningf("ID %s not registered", ClientID)
			// for store data — unknown clients are auto-registered
			sessionStoreData = mongo_store.NewSessionStoreData(ClientID, ClientPwd, ClientType)
			log.Info(sessionStoreData)
			common.StoreData(self.gateway.mongoStore, sessionStoreData)
		}
		// for cache data, MsgServer MUST update local & remote addr.
		sessionCacheData = redis_store.NewSessionCacheData(sessionStoreData, session.Conn().RemoteAddr().String(), msgServer, uuid)
		log.Info(sessionCacheData)
		self.gateway.sessionCache.Set(sessionCacheData)
	}
	//
	resp := protocol.NewCmdSimple(protocol.RSP_LOGIN_CMD)
	resp.AddArg(protocol.RSP_SUCCESS)
	resp.AddArg(uuid)
	resp.AddArg(msgServer)
	log.Info("Resp | ", resp)
	if session != nil {
		err = session.Send(libnet.Json(resp))
		if err != nil {
			log.Error(err.Error())
		}
		session.Close()
		log.Info("client ", session.Conn().RemoteAddr().String(), " | close")
	}
	return nil
}
<file_sep>/monitor/controllers/json_data.go
//
// Copyright 2015-2099 <NAME>. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controllers
import (
)
type LoginPostData struct {
Username string `json:"username"`
Password string `json:"<PASSWORD>"`
}
// LoginStatus is the JSON reply to a login request ("0" = success,
// per the monitor front-end).
type LoginStatus struct {
	Status string `json:"status"`
}
// NewLoginStatus returns a zero-valued LoginStatus.
func NewLoginStatus() LoginStatus {
	return LoginStatus{}
}
// RebootStatus is the JSON reply to a reboot request.
type RebootStatus struct {
	Status string `json:"status"`
}
// NewRebootStatus returns a zero-valued RebootStatus.
func NewRebootStatus() RebootStatus {
	return RebootStatus{}
}
// TotalStatus summarizes one server for the monitor dashboard.
type TotalStatus struct {
	Ip string `json:"ip"`
	Mac string `json:"mac"`
	AllStorage uint64 `json:"allStorage"`
	UsedStorage uint64 `json:"usedStorage"`
	Modify int64 `json:"modify"`
	Type string `json:"type"`
}
// NewTotalStatus returns a zero-valued TotalStatus.
func NewTotalStatus() TotalStatus {
	return TotalStatus{}
}
// MsgServerData reports one message server's status and session count.
type MsgServerData struct {
	Status string `json:"status"`
	Num uint32 `json:"num"`
}
// NewMsgServerData returns a zero-valued MsgServerData.
func NewMsgServerData() MsgServerData {
	return MsgServerData{}
}
<file_sep>/base/base.go
//
// Copyright 2014 <NAME>. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package base
import (
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/protocol"
)
// ChannelMap indexes channel state by channel name.
type ChannelMap map[string]*ChannelState
// SessionMap indexes live sessions by client ID.
type SessionMap map[string]*libnet.Session
// AckMap: nested string map used for ack bookkeeping.
// NOTE(review): key semantics are not visible in this file — confirm
// against the ack-handling code.
type AckMap map[string]map[string]string
// COMM_PREFIX is the common prefix for IM command/channel naming.
const COMM_PREFIX = "IM"
// ChannleList (sic — the name is exported, so the typo cannot be
// fixed without breaking callers) holds every system channel name.
var ChannleList []string
func init() {
	// Register the full set of system control and storage channels.
	ChannleList = []string{protocol.SYSCTRL_CLIENT_STATUS, protocol.SYSCTRL_TOPIC_STATUS, protocol.SYSCTRL_TOPIC_SYNC,
		protocol.SYSCTRL_SEND, protocol.SYSCTRL_MONITOR, protocol.STORE_CLIENT_INFO, protocol.STORE_TOPIC_INFO}
}
// ChannelState pairs a broadcast channel with the list of client
// uuids subscribed to it.
type ChannelState struct {
	ChannelName string
	Channel *libnet.Channel
	ClientIDlist []string
}
// NewChannelState wraps a channel with an empty subscriber list.
func NewChannelState(channelName string, channel *libnet.Channel) *ChannelState {
	return &ChannelState{
		ChannelName: channelName,
		Channel: channel,
		ClientIDlist: make([]string, 0),
	}
}
// SessionState is per-connection client state attached to a session.
type SessionState struct {
	ClientID string
	Alive bool // set by heartbeat handling (see procPing)
	ClientType string
}
// NewSessionState builds session state for a connected client.
func NewSessionState(alive bool, cid string, clienttype string) *SessionState {
	return &SessionState{
		ClientID: cid,
		Alive: alive,
		ClientType: clienttype,
	}
}
// Config is implemented by per-service configuration loaders.
// NOTE(review): returning *Config (pointer to interface) is unusual —
// confirm whether implementations actually satisfy this signature.
type Config interface {
	LoadConfig(configfile string) (*Config, error)
}
<file_sep>/msg_server/util.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"net"
"sync"
"time"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/protocol"
)
// MonitorBeat tracks heartbeat exchanges with a monitor: how often to
// beat, how long before giving up, and how many consecutive misses
// are tolerated.
type MonitorBeat struct {
	name string
	session *libnet.Session
	mu sync.Mutex // guards fails and threshold
	timeout time.Duration // interval between beats / receive deadline
	expire time.Duration // overall expiry — currently unused here; confirm
	fails uint64 // consecutive missed heartbeats
	threshold uint64 // misses tolerated before giving up
}
// NewMonitorBeat builds a beat tracker; the session field is left nil
// and is presumably assigned by the caller — confirm.
func NewMonitorBeat(name string, timeout time.Duration, expire time.Duration, limit uint64) *MonitorBeat {
	return &MonitorBeat {
		name : name,
		timeout : timeout,
		expire : expire,
		threshold : limit,
	}
}
// ResetFailures zeroes the consecutive-failure counter.
func (self *MonitorBeat) ResetFailures() {
	self.mu.Lock()
	self.fails = 0
	self.mu.Unlock()
}

// ChangeThreshold replaces the tolerated-miss limit.
func (self *MonitorBeat) ChangeThreshold(thres uint64) {
	self.mu.Lock()
	self.threshold = thres
	self.mu.Unlock()
}
// Beat waits one heartbeat interval, then broadcasts the monitor
// payload to every session on the channel (asynchronously; broadcast
// errors are only logged).
//
// Fix: the ticker was never stopped, leaking its underlying runtime
// timer on every call; it is now stopped before returning.
func (self *MonitorBeat) Beat(c *libnet.Channel, data *protocol.CmdMonitor) {
	timer := time.NewTicker(self.timeout * time.Second)
	defer timer.Stop()
	<-timer.C
	go func() {
		_, err := c.Broadcast(libnet.Json(data))
		if err != nil {
			log.Error(err.Error())
		}
	}()
}
// Receive counts missed heartbeats: each elapsed timeout interval
// increments the failure counter, and the function returns once the
// count exceeds the configured threshold.
//
// Fixes: (1) time.After was created once outside the loop — it fires
// exactly once, so every later iteration blocked forever on a drained
// channel; the timer is now re-armed per iteration. (2) fails is now
// updated under mu, the same mutex ResetFailures/ChangeThreshold use.
func (self *MonitorBeat) Receive() {
	for {
		<-time.After(self.timeout)
		self.mu.Lock()
		self.fails++
		exceeded := self.fails > self.threshold
		self.mu.Unlock()
		if exceeded {
			return
		}
	}
}
// getHostIP prints every address configured on the host's network
// interfaces, one per line. TODO: currently unused.
func getHostIP() {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		panic(err)
	}
	for _, a := range addrs {
		fmt.Println(a.String())
	}
}
<file_sep>/storage/redis_store/p2p_status_cache.go
//
// Copyright 2014 <NAME>. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis_store
import (
"encoding/json"
"sync"
"time"
"github.com/garyburd/redigo/redis"
)
// P2pStatusCache is a Redis-backed cache of per-user p2p message
// delivery statuses. rwMutex serializes use of the shared connection.
type P2pStatusCache struct {
	RS *RedisStore
	rwMutex sync.Mutex
}
// NewP2pStatusCache wraps an existing RedisStore.
func NewP2pStatusCache(RS *RedisStore) *P2pStatusCache {
	return &P2pStatusCache{
		RS: RS,
	}
}
// P2pStatusCacheData tracks per-message delivery status for one
// user, keyed by message uuid.
//
// NOTE(review): uuid is unexported, so encoding/json (used by the
// cache's Get/Set) will not serialize it — statuses round-tripped
// through Redis are lost. Confirm whether the field should be
// exported.
type P2pStatusCacheData struct {
	OwnerName string
	uuid map[string]string
	MaxAge time.Duration
}
// Set records the delivery status for a message uuid.
func (self *P2pStatusCacheData) Set(uuid string, status string) {
	self.uuid[uuid] = status
}
// Get returns the recorded status for a message uuid ("" if absent).
func (self *P2pStatusCacheData) Get(uuid string) string {
	status := self.uuid[uuid]
	return status
}
// Clear forgets the status entry for a message uuid.
func (self *P2pStatusCacheData) Clear(uuid string) {
	delete(self.uuid, uuid)
}
// NewP2pStatusCacheData builds empty status data for the given owner.
func NewP2pStatusCacheData(ownerName string) *P2pStatusCacheData {
	data := new(P2pStatusCacheData)
	data.OwnerName = ownerName
	data.uuid = map[string]string{}
	return data
}
// Get fetches and decodes the cached p2p status data for owner k.
// Returns an error when the key is absent or the JSON is invalid.
func (self *P2pStatusCache) Get(k string) (*P2pStatusCacheData, error) {
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	key := k + P2P_STATUS_UNIQ_PREFIX
	if prefix := self.RS.opts.KeyPrefix; prefix != "" {
		key = prefix + ":" + key
	}
	raw, err := redis.Bytes(self.RS.conn.Do("GET", key))
	if err != nil {
		return nil, err
	}
	sess := new(P2pStatusCacheData)
	if err := json.Unmarshal(raw, sess); err != nil {
		return nil, err
	}
	return sess, nil
}
// Set stores the status data as JSON under its owner key with a TTL:
// sess.MaxAge when set, otherwise the store's browser TTL, otherwise
// 2 days.
func (self *P2pStatusCache) Set(sess *P2pStatusCacheData) error {
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	payload, err := json.Marshal(sess)
	if err != nil {
		return err
	}
	key := sess.OwnerName + P2P_STATUS_UNIQ_PREFIX
	if prefix := self.RS.opts.KeyPrefix; prefix != "" {
		key = prefix + ":" + key
	}
	ttl := sess.MaxAge
	if ttl == 0 {
		// No explicit lifetime: fall back to configured / default TTL.
		if ttl = self.RS.opts.BrowserSessServerTTL; ttl == 0 {
			ttl = 2 * 24 * time.Hour
		}
	}
	_, err = self.RS.conn.Do("SETEX", key, int(ttl.Seconds()), payload)
	return err
}
// Delete removes the status data stored under the given owner id.
func (self *P2pStatusCache) Delete(id string) error {
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	key := id + P2P_STATUS_UNIQ_PREFIX
	if prefix := self.RS.opts.KeyPrefix; prefix != "" {
		key = prefix + ":" + key
	}
	_, err := self.RS.conn.Do("DEL", key)
	return err
}
// Clear all sessions from the store. Requires the use of a key
// prefix in the store options, otherwise the method refuses to delete all keys.
// Holds rwMutex for the duration; getSessionKeys must therefore not
// attempt to re-acquire it (sync.Mutex is not reentrant).
func (self *P2pStatusCache) Clear() error {
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	vals, err := self.getSessionKeys()
	if err != nil {
		return err
	}
	if len(vals) > 0 {
		// Batch every DEL into a single MULTI/EXEC transaction.
		self.RS.conn.Send("MULTI")
		for _, v := range vals {
			self.RS.conn.Send("DEL", v)
		}
		_, err = self.RS.conn.Do("EXEC")
		if err != nil {
			return err
		}
	}
	return nil
}
// Get the number of session keys in the store. Requires the use of a
// key prefix in the store options, otherwise returns -1 (cannot tell
// session keys from other keys).
// Holds rwMutex for the duration; getSessionKeys must therefore not
// attempt to re-acquire it (sync.Mutex is not reentrant).
func (self *P2pStatusCache) Len() int {
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	vals, err := self.getSessionKeys()
	if err != nil {
		return -1
	}
	return len(vals)
}
// getSessionKeys returns every key under the configured key prefix,
// or ErrNoKeyPrefix when no prefix is configured.
//
// Fix: this helper previously locked rwMutex itself, but its only
// callers (Clear and Len) already hold that mutex. sync.Mutex is not
// reentrant, so the second Lock deadlocked the calling goroutine.
// The caller must hold rwMutex; this function no longer locks.
func (self *P2pStatusCache) getSessionKeys() ([]interface{}, error) {
	if self.RS.opts.KeyPrefix != "" {
		return redis.Values(self.RS.conn.Do("KEYS", self.RS.opts.KeyPrefix+":*"))
	}
	return nil, ErrNoKeyPrefix
}
// IsKeyExist reports whether a p2p-status entry exists for k via the
// Redis EXISTS command. The reply is redigo's raw value (callers in
// this codebase assert it to int64 0/1).
//
// Fix: collapsed an if/else that returned identical values on both
// branches into a single return.
func (self *P2pStatusCache) IsKeyExist(k string) (interface{}, error) {
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	key := k + P2P_STATUS_UNIQ_PREFIX
	if self.RS.opts.KeyPrefix != "" {
		key = self.RS.opts.KeyPrefix + ":" + k + P2P_STATUS_UNIQ_PREFIX
	}
	return self.RS.conn.Do("EXISTS", key)
}
<file_sep>/libnet/session.go
package libnet
import (
"bufio"
"container/list"
"net"
"sync/atomic"
"time"
"github.com/oikomi/FishChatServer/libnet/syncs"
)
// dialSessionId hands out unique ids for client-side sessions.
var dialSessionId uint64

// Dial opens a client connection and wraps it in a Session using the
// package defaults for protocol, send-queue size and read buffer.
func Dial(network, address string) (*Session, error) {
	conn, err := net.Dial(network, address)
	if err != nil {
		return nil, err
	}
	id := atomic.AddUint64(&dialSessionId, 1)
	return NewSession(id, conn, DefaultProtocol, DefaultSendChanSize, DefaultConnBufferSize), nil
}

// DialTimeout is Dial with a connect timeout.
func DialTimeout(network, address string, timeout time.Duration) (*Session, error) {
	conn, err := net.DialTimeout(network, address, timeout)
	if err != nil {
		return nil, err
	}
	id := atomic.AddUint64(&dialSessionId, 1)
	return NewSession(id, conn, DefaultProtocol, DefaultSendChanSize, DefaultConnBufferSize), nil
}
// Decoder consumes one framed message from an input buffer.
type Decoder func(*InBuffer) error
// Encoder writes one message into an output buffer.
type Encoder func(*OutBuffer) error
// Session.
// A Session owns one connection plus a background send loop; reads
// and writes are each serialized by their own mutex.
type Session struct {
	id uint64
	// About network
	conn net.Conn
	protocol ProtocolState
	// About send and receive
	readMutex syncs.Mutex // serializes ProcessOnce
	sendMutex syncs.Mutex // serializes writes to the connection
	asyncSendChan chan asyncMessage // queue for AsyncSend
	asyncSendBufferChan chan asyncBuffer // queue for pre-encoded buffers
	// About session close
	closeChan chan int // closed (never written) to signal shutdown
	closeFlag int32 // 0 = open, 1 = closed; set once via CAS
	closeEventMutex syncs.Mutex // guards closeCallbacks
	closeCallbacks *list.List // of closeCallback, run on Close
	// Put your session state here.
	State interface{}
}
// Buffered connection.
type bufferConn struct {
net.Conn
reader *bufio.Reader
}
func newBufferConn(conn net.Conn, readBufferSize int) *bufferConn {
return &bufferConn{
conn,
bufio.NewReaderSize(conn, readBufferSize),
}
}
func (conn *bufferConn) Read(d []byte) (int, error) {
return conn.reader.Read(d)
}
// NewSession builds a Session around conn. When readBufferSize > 0
// the connection is wrapped in a buffered reader. The background
// send loop goroutine is started before returning.
func NewSession(id uint64, conn net.Conn, protocol Protocol, sendChanSize int, readBufferSize int) *Session {
	if readBufferSize > 0 {
		conn = newBufferConn(conn, readBufferSize)
	}
	s := &Session{
		id:                  id,
		conn:                conn,
		asyncSendChan:       make(chan asyncMessage, sendChanSize),
		asyncSendBufferChan: make(chan asyncBuffer, sendChanSize),
		closeChan:           make(chan int),
		closeCallbacks:      list.New(),
	}
	// Protocol state may need the session back-reference, so it is
	// created after the struct exists.
	s.protocol = protocol.New(s)
	go s.sendLoop()
	return s
}
// Id returns the session's unique identifier.
func (session *Session) Id() uint64 { return session.id }

// Conn exposes the underlying network connection.
func (session *Session) Conn() net.Conn { return session.conn }

// IsClosed reports whether Close has already run.
func (session *Session) IsClosed() bool {
	return atomic.LoadInt32(&session.closeFlag) != 0
}
// Close session.
// Idempotent: the CAS guarantees the shutdown sequence runs exactly
// once. Order matters — the connection is closed first (unblocking
// any in-flight read/write), then closeChan is closed to stop the
// send loop and cancel pending async sends, then callbacks fire.
func (session *Session) Close() {
	if atomic.CompareAndSwapInt32(&session.closeFlag, 0, 1) {
		session.conn.Close()
		// exit send loop and cancel async send
		close(session.closeChan)
		session.invokeCloseCallbacks()
	}
}
// Send synchronously encodes a message into a fresh out-buffer and
// writes it to the connection. Blocks on IO; the buffer is always
// returned to the pool.
func (session *Session) Send(encoder Encoder) error {
	buffer := newOutBuffer()
	defer buffer.free()
	session.protocol.PrepareOutBuffer(buffer, 1024)
	if err := encoder(buffer); err != nil {
		return err
	}
	return session.sendBuffer(buffer)
}

// sendBuffer writes one prepared buffer to the connection; the send
// mutex keeps concurrent writers from interleaving frames.
func (session *Session) sendBuffer(buffer *OutBuffer) error {
	session.sendMutex.Lock()
	defer session.sendMutex.Unlock()
	return session.protocol.Write(session.conn, buffer)
}
// ProcessOnce reads exactly one request from the connection and
// passes it to decoder. A transport-level read error closes the
// session and is returned.
//
// Fix: the decoder's error was previously discarded — the function
// always returned nil after a successful read, so Process could
// never stop on a decode failure. The error is now propagated.
func (session *Session) ProcessOnce(decoder Decoder) error {
	session.readMutex.Lock()
	defer session.readMutex.Unlock()
	buffer := newInBuffer()
	defer buffer.free()
	if err := session.protocol.Read(session.conn, buffer); err != nil {
		session.Close()
		return err
	}
	return decoder(buffer)
}
// Process reads and decodes requests until ProcessOnce reports an
// error, which is returned to the caller.
func (session *Session) Process(decoder Decoder) error {
	for {
		if err := session.ProcessOnce(decoder); err != nil {
			return err
		}
	}
}
// Async work.
// AsyncWork is a handle on an in-flight asynchronous send.
type AsyncWork struct {
	c <-chan error
}
// Wait work done. Returns error when work failed. Blocks until the
// send loop (or a cancellation path) reports a result.
func (aw AsyncWork) Wait() error {
	return <-aw.c
}
// asyncMessage pairs an encoder with the channel its result is
// reported on.
type asyncMessage struct {
	C chan<- error
	E Encoder
}
// asyncBuffer pairs a pre-encoded buffer with its result channel.
type asyncBuffer struct {
	C chan<- error
	B *OutBuffer
}
// Loop and transport responses.
// sendLoop drains both async queues, reporting each send's result on
// the work's channel, until closeChan is closed. Buffers from the
// broadcast path release their pool reference after the write.
func (session *Session) sendLoop() {
	for {
		select {
		case buffer := <-session.asyncSendBufferChan:
			buffer.C <- session.sendBuffer(buffer.B)
			buffer.B.broadcastFree()
		case message := <-session.asyncSendChan:
			message.C <- session.Send(message.E)
		case <-session.closeChan:
			return
		}
	}
}
// Async send a message.
// AsyncSend queues encoder for the send loop and returns immediately.
// Fast path: a non-blocking enqueue. When the queue is full, a helper
// goroutine blocks on the enqueue with a 5-second deadline; on
// timeout the whole session is closed and AsyncSendTimeoutError is
// reported. The returned AsyncWork's channel is buffered, so the
// result is never lost even if nobody calls Wait.
func (session *Session) AsyncSend(encoder Encoder) AsyncWork {
	c := make(chan error, 1)
	if session.IsClosed() {
		c <- SendToClosedError
	} else {
		select {
		case session.asyncSendChan <- asyncMessage{c, encoder}:
		default:
			// Queue full: retry in the background rather than block
			// the caller.
			go func() {
				select {
				case session.asyncSendChan <- asyncMessage{c, encoder}:
				case <-session.closeChan:
					c <- SendToClosedError
				case <-time.After(time.Second * 5):
					session.Close()
					c <- AsyncSendTimeoutError
				}
			}()
		}
	}
	return AsyncWork{c}
}
// Async send a packet.
// asyncSendBuffer is AsyncSend for pre-encoded buffers (the broadcast
// path): non-blocking enqueue first, then a background retry with a
// 5-second deadline that closes the session on timeout.
func (session *Session) asyncSendBuffer(buffer *OutBuffer) AsyncWork {
	c := make(chan error, 1)
	if session.IsClosed() {
		c <- SendToClosedError
	} else {
		select {
		case session.asyncSendBufferChan <- asyncBuffer{c, buffer}:
		default:
			// Queue full: retry in the background rather than block
			// the caller.
			go func() {
				select {
				case session.asyncSendBufferChan <- asyncBuffer{c, buffer}:
				case <-session.closeChan:
					c <- SendToClosedError
				case <-time.After(time.Second * 5):
					session.Close()
					c <- AsyncSendTimeoutError
				}
			}()
		}
	}
	return AsyncWork{c}
}
// closeCallback pairs a registration key (Handler) with the function
// to run when the session closes.
type closeCallback struct {
	Handler interface{}
	Func    func()
}

// AddCloseCallback registers callback to run when the session closes.
// No-op when the session is already closed.
func (session *Session) AddCloseCallback(handler interface{}, callback func()) {
	if session.IsClosed() {
		return
	}
	session.closeEventMutex.Lock()
	defer session.closeEventMutex.Unlock()
	session.closeCallbacks.PushBack(closeCallback{Handler: handler, Func: callback})
}
// RemoveCloseCallback drops the first callback registered under
// handler. No-op when the session is already closed or the handler
// was never registered.
func (session *Session) RemoveCloseCallback(handler interface{}) {
	if session.IsClosed() {
		return
	}
	session.closeEventMutex.Lock()
	defer session.closeEventMutex.Unlock()
	for e := session.closeCallbacks.Front(); e != nil; e = e.Next() {
		if e.Value.(closeCallback).Handler == handler {
			session.closeCallbacks.Remove(e)
			break
		}
	}
}

// invokeCloseCallbacks runs every registered close callback in
// registration order.
func (session *Session) invokeCloseCallbacks() {
	session.closeEventMutex.Lock()
	defer session.closeEventMutex.Unlock()
	for e := session.closeCallbacks.Front(); e != nil; e = e.Next() {
		e.Value.(closeCallback).Func()
	}
}
<file_sep>/msg_server/proto_proc.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"strconv"
"github.com/oikomi/FishChatServer/base"
"github.com/oikomi/FishChatServer/common"
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/protocol"
"github.com/oikomi/FishChatServer/storage/mongo_store"
"github.com/oikomi/FishChatServer/storage/redis_store"
)
// init configures glog-style logging flags before main runs: log to
// stderr in addition to log files.
// NOTE(review): "log_dir" is set to the literal string "false", which a
// file-logging backend would treat as a directory name (logs land in
// ./false). Confirm whether the intent was to disable file logging.
func init() {
	flag.Set("alsologtostderr", "true")
	flag.Set("log_dir", "false")
}
// ProtoProc dispatches protocol commands received by the message
// server. Each procXxx method handles one command type on behalf of
// msgServer.
type ProtoProc struct {
	msgServer *MsgServer // owning server: sessions, caches, channels, config
}
// NewProtoProc builds a command processor bound to the given message
// server.
func NewProtoProc(msgServer *MsgServer) *ProtoProc {
	p := new(ProtoProc)
	p.msgServer = msgServer
	return p
}
// procSubscribeChannel joins the session to a named broadcast channel
// and records the subscriber's UUID in that channel's client list.
//
// Expected args: [0] channel name, [1] client UUID.
//
// Fix: a malformed command with fewer than two args previously caused
// an index-out-of-range panic; it is now logged and ignored, matching
// the argument validation done by the other handlers.
func (self *ProtoProc) procSubscribeChannel(cmd protocol.Cmd, session *libnet.Session) {
	log.Info("procSubscribeChannel")
	args := cmd.GetArgs()
	if len(args) < 2 {
		log.Warning("procSubscribeChannel: syntax error: (channelName, cUUID) needed")
		return
	}
	channelName := args[0]
	cUUID := args[1]
	log.Info(channelName)
	if self.msgServer.channels[channelName] != nil {
		self.msgServer.channels[channelName].Channel.Join(session, nil)
		self.msgServer.channels[channelName].ClientIDlist = append(self.msgServer.channels[channelName].ClientIDlist, cUUID)
	} else {
		log.Warning(channelName + " is not exist")
	}
	log.Info(self.msgServer.channels)
}
// procPing marks the session alive in response to a heartbeat PING.
// Sessions that have not completed login (nil State) are ignored.
func (self *ProtoProc) procPing(cmd protocol.Cmd, session *libnet.Session) error {
	if session.State == nil {
		return nil
	}
	self.msgServer.scanSessionMutex.Lock()
	defer self.msgServer.scanSessionMutex.Unlock()
	session.State.(*base.SessionState).Alive = true
	return nil
}
/*
Message delivered to the receiver:
MsgServer -> device/client
    IND_SEND_P2P_MSG_CMD
    arg0: Msg    // message body
    arg1: FromID // sender's user ID
    arg2: uuid   // uuid assigned by the MsgServer; optional — if present
                 // the receiver must answer IND_ACK_P2P_STATUS_CMD(ClientID, uuid)
*/
// procOfflineMsg replays cached offline messages to a client that has
// just come online. Status acknowledgements are replayed as
// IND_ACK_P2P_STATUS_CMD(uuid, status); ordinary messages as
// IND_SEND_P2P_MSG_CMD(msg, fromID, uuid), each acknowledged back to
// its sender as SENT. The cache entry is cleared once drained.
//
// Fixes:
//   - the IsKeyExist result was type-asserted before its error was
//     checked, panicking on a nil interface when the lookup failed;
//   - Session.Send errors were discarded while a stale err was tested.
func (self *ProtoProc) procOfflineMsg(session *libnet.Session, ID string) error {
	exist, err := self.msgServer.offlineMsgCache.IsKeyExist(ID)
	if err != nil {
		log.Error(err.Error())
		return err
	}
	if exist.(int64) == 0 {
		// no offline messages queued for this client
		return nil
	}
	omrd, err := common.GetOfflineMsgFromOwnerName(self.msgServer.offlineMsgCache, ID)
	if err != nil {
		log.Error(err.Error())
		return err
	}
	for _, v := range omrd.MsgList {
		if v.Msg == protocol.IND_ACK_P2P_STATUS_CMD {
			resp := protocol.NewCmdSimple(protocol.IND_ACK_P2P_STATUS_CMD)
			resp.AddArg(v.Uuid)
			resp.AddArg(v.FromID) // v.FromID carries the status for ack entries
			if self.msgServer.sessions[ID] != nil {
				if err = self.msgServer.sessions[ID].Send(libnet.Json(resp)); err != nil {
					log.Error(err.Error())
					return err
				}
			}
		} else {
			resp := protocol.NewCmdSimple(protocol.IND_SEND_P2P_MSG_CMD)
			resp.AddArg(v.Msg)
			resp.AddArg(v.FromID)
			resp.AddArg(v.Uuid)
			if self.msgServer.sessions[ID] != nil {
				if err = self.msgServer.sessions[ID].Send(libnet.Json(resp)); err != nil {
					log.Error(err.Error())
					return err
				}
				// delivered: tell the original sender
				self.procP2PAckStatus(v.FromID, v.Uuid, protocol.P2P_ACK_SENT)
			}
		}
	}
	omrd.ClearMsg()
	self.msgServer.offlineMsgCache.Set(omrd)
	return nil
}
/*
device/client -> MsgServer
    REQ_LOGIN_CMD
    arg0: ClientID // user ID
    arg1: uuid

MsgServer -> device/client
    RSP_LOGIN_CMD
    arg0: SUCCESS/ERROR
*/
// procLogin validates the client's (ClientID, uuid) pair against the
// session cache populated at gateway login and answers RSP_LOGIN_CMD.
// On success it refreshes the cache entry with this server's address,
// flags the client online, records the live session, and replays any
// queued offline messages.
func (self *ProtoProc) procLogin(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procLogin")
	var err error
	ClientID := cmd.GetArgs()[0]
	uuid := cmd.GetArgs()[1]
	resp := protocol.NewCmdSimple(protocol.RSP_LOGIN_CMD)
	// for cache data
	sessionCacheData, err := self.msgServer.sessionCache.Get(ClientID)
	if err != nil {
		// cache miss: client never passed through the gateway login
		log.Warningf("no ID : %s", ClientID)
	} else if sessionCacheData.ID != uuid {
		// uuid must match the one issued by the gateway
		log.Warningf("ID(%s) & uuid(%s) not matched", ClientID, uuid)
		err = common.NOT_LOGIN
	}
	if err == nil {
		resp.AddArg(protocol.RSP_SUCCESS)
	} else {
		resp.AddArg(protocol.RSP_ERROR)
	}
	err2 := session.Send(libnet.Json(resp))
	if err2 != nil {
		log.Error(err2.Error())
		return err2
	}
	if err != nil {
		return err
	}
	// update the session cache (sessionCacheData is non-nil on this path)
	sessionCacheData.ClientAddr = session.Conn().RemoteAddr().String()
	sessionCacheData.MsgServerAddr = self.msgServer.cfg.LocalIP
	sessionCacheData.Alive = true
	self.msgServer.sessionCache.Set(sessionCacheData)
	log.Info(sessionCacheData)
	// reset any stale online state, then mark the client online
	self.msgServer.procOffline(ClientID)
	self.msgServer.procOnline(ClientID)
	/*
		args := make([]string, 0)
		args = append(args, cmd.GetArgs()[0])
		CCmd := protocol.NewCmdInternal(protocol.CACHE_SESSION_CMD, args, sessionCacheData)
		log.Info(CCmd)
		if self.msgServer.channels[protocol.SYSCTRL_CLIENT_STATUS] != nil {
			_, err = self.msgServer.channels[protocol.SYSCTRL_CLIENT_STATUS].Channel.Broadcast(libnet.Json(CCmd))
			if err != nil {
				log.Error(err.Error())
				return err
			}
		}
		// for store data
		sessionStoreData := mongo_store.SessionStoreData{ID, session.Conn().RemoteAddr().String(),
			self.msgServer.cfg.LocalIP, true}
		log.Info(sessionStoreData)
		args = make([]string, 0)
		args = append(args, cmd.GetArgs()[0])
		CCmd = protocol.NewCmdInternal(protocol.STORE_SESSION_CMD, args, sessionStoreData)
		log.Info(CCmd)
		if self.msgServer.channels[protocol.STORE_CLIENT_INFO] != nil {
			_, err = self.msgServer.channels[protocol.STORE_CLIENT_INFO].Channel.Broadcast(libnet.Json(CCmd))
			if err != nil {
				log.Error(err.Error())
				return err
			}
		}
	*/
	// remember the live session and its login state
	self.msgServer.sessions[ClientID] = session
	self.msgServer.sessions[ClientID].State = base.NewSessionState(true, ClientID, sessionCacheData.ClientType)
	// deliver messages queued while the client was offline
	err = self.procOfflineMsg(session, ClientID)
	if err != nil {
		log.Error(err.Error())
		return err
	}
	return nil
}
/*
device/client -> MsgServer
    REQ_LOGOUT_CMD

MsgServer -> device/client
    RSP_LOGOUT_CMD
    arg0: SUCCESS/ERROR
*/
// procLogout acknowledges the logout, marks the client offline, and
// removes its session-cache entry.
func (self *ProtoProc) procLogout(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procLogout")
	ClientID := session.State.(*base.SessionState).ClientID
	resp := protocol.NewCmdSimple(protocol.RSP_LOGOUT_CMD)
	resp.AddArg(protocol.RSP_SUCCESS)
	err := session.Send(libnet.Json(resp))
	if err != nil {
		log.Error(err.Error())
	}
	self.msgServer.procOffline(ClientID)
	self.msgServer.sessionCache.Delete(ClientID)
	return err
}
/*
IND_ACK_P2P_STATUS_CMD
    arg0: uuid      // tells the sender the message with this uuid was delivered
    arg1: SENT/READ // that message's status: delivered / read
*/
// procP2PAckStatus records the delivery status of message `uuid` for
// sender `fromID` and, unless the status is the initial P2P_ACK_FALSE,
// notifies the sender — directly when connected to this server, via the
// router when connected elsewhere, or through the sender's
// offline-message queue when offline.
func (self *ProtoProc) procP2PAckStatus(fromID string, uuid string, status string) error {
	log.Info("procP2PAckStatus")
	//var err error
	// NOTE(review): the error from this Get is effectively ignored;
	// p2psd may be nil on a plain cache miss as well as a real failure.
	p2psd, err := self.msgServer.p2pStatusCache.Get(fromID)
	if p2psd == nil {
		p2psd = redis_store.NewP2pStatusCacheData(fromID)
	}
	p2psd.Set(uuid, status)
	if status == protocol.P2P_ACK_FALSE {
		// initial "accepted by server" state: nothing to tell the sender yet
		return nil
	}
	sessionCacheData, err := self.msgServer.sessionCache.Get(fromID)
	if sessionCacheData == nil {
		// NOTE(review): err.Error() assumes Get never returns (nil, nil);
		// confirm, otherwise this line can panic.
		log.Warningf("no cache ID : %s, err: %s", fromID, err.Error())
		// := shadows err here: the return below reports the store error
		sessionStoreData, err := self.msgServer.mongoStore.GetSessionFromCid(fromID)
		if sessionStoreData == nil {
			// not registered
			log.Warningf("no store ID : %s, err: %s", fromID, err.Error())
			self.msgServer.p2pStatusCache.Delete(fromID)
			return err
		}
	}
	if sessionCacheData == nil || sessionCacheData.Alive == false {
		// offline: queue the ack into the sender's offline messages
		log.Info(fromID + " | is offline")
		omrd, err := common.GetOfflineMsgFromOwnerName(self.msgServer.offlineMsgCache, fromID)
		log.Info(omrd)
		if err != nil {
			log.Error(err.Error())
			return err
		}
		if omrd == nil {
			omrd = redis_store.NewOfflineMsgCacheData(fromID)
		}
		omrd.AddMsg(redis_store.NewOfflineMsgData(protocol.IND_ACK_P2P_STATUS_CMD /*fromID*/, status, uuid))
		err = self.msgServer.offlineMsgCache.Set(omrd)
		if err != nil {
			log.Error(err.Error())
			return err
		}
	} else {
		// online
		resp := protocol.NewCmdSimple(protocol.IND_ACK_P2P_STATUS_CMD)
		resp.AddArg(uuid)
		resp.AddArg(status)
		log.Info(fromID + " | is online")
		if sessionCacheData.MsgServerAddr == self.msgServer.cfg.LocalIP {
			// sender connected to this very server: push directly
			log.Info("in the same server")
			if self.msgServer.sessions[fromID] != nil {
				// NOTE(review): Send's result is discarded; the err tested
				// below is stale (left over from the cache lookup).
				self.msgServer.sessions[fromID].Send(libnet.Json(resp))
				if err != nil {
					log.Error(err.Error())
					return err
				}
			}
		} else {
			// sender on another msg server: route via the broadcast channel
			log.Info("not in the same server")
			if self.msgServer.channels[protocol.SYSCTRL_SEND] != nil {
				resp.AddArg(fromID)
				_, err = self.msgServer.channels[protocol.SYSCTRL_SEND].Channel.Broadcast(libnet.Json(resp))
				if err != nil {
					log.Error(err.Error())
					return err
				}
			}
		}
	}
	return nil
}
/*
device/client -> MsgServer -> Router
    REQ_SEND_P2P_MSG_CMD
    arg0: Send2ID // receiver's user ID
    arg1: Msg     // message body

MsgServer -> device/client (back to the sender)
    RSP_SEND_P2P_MSG_CMD
    arg0: SUCCESS/FAILED
    arg1: uuid // uuid assigned by the MsgServer; the sender tracks status by it
    IND_ACK_P2P_STATUS_CMD
    arg0: uuid
    arg1: SENT/READ // delivered / read

(forwarded through the Router; invisible to client developers)
MsgServer -> Router
    REQ_SEND_P2P_MSG_CMD
    arg0: Send2ID
    arg1: Msg
    arg2: FromID // sender's user ID
    arg3: uuid
Router -> MsgServer
    ROUTE_SEND_P2P_MSG_CMD
    arg0: Send2ID
    arg1: Msg
    arg2: FromID
    arg3: uuid

MsgServer -> device/client (to the receiver)
    IND_SEND_P2P_MSG_CMD
    arg0: Msg
    arg1: FromID
    arg2: uuid // optional; if present the receiver must answer IND_ACK_P2P_STATUS_CMD
*/
// procSendMessageP2P accepts a P2P message from a logged-in sender,
// assigns it a uuid, and either queues it offline, delivers it locally,
// or routes it to the receiver's message server.
//
// Fixes:
//   - the local-delivery Send's error was discarded while a stale err
//     was tested; it is now captured;
//   - the offline branch shadowed err with :=, so a failed offline
//     cache write still reported SUCCESS to the sender.
func (self *ProtoProc) procSendMessageP2P(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procSendMessageP2P")
	var err error
	var sessionCacheData *redis_store.SessionCacheData
	var sessionStoreData *mongo_store.SessionStoreData
	var uuid string
	var send2ID string
	var send2Msg string
	fromID := session.State.(*base.SessionState).ClientID
	resp := protocol.NewCmdSimple(protocol.RSP_SEND_P2P_MSG_CMD)
	if len(cmd.GetArgs()) != 2 {
		log.Warningf("syntax error: (id,msg) needed")
		err = common.SYNTAX_ERROR
		goto errout
	}
	send2ID = cmd.GetArgs()[0]
	send2Msg = cmd.GetArgs()[1]
	sessionCacheData, err = self.msgServer.sessionCache.Get(send2ID)
	if sessionCacheData == nil {
		// not cached: fall back to the persistent store; a successful
		// store lookup overwrites (clears) err
		sessionStoreData, err = self.msgServer.mongoStore.GetSessionFromCid(send2ID)
		if sessionStoreData == nil {
			log.Warningf("send2ID %s not found", send2ID)
			err = common.NOTFOUNT
			goto errout
		}
	}
	uuid = common.NewV4().String()
	log.Info("uuid : ", uuid)
	// record the message as accepted but not yet delivered
	self.procP2PAckStatus(fromID, uuid, protocol.P2P_ACK_FALSE)
	if sessionCacheData == nil || sessionCacheData.Alive == false {
		// receiver offline: queue for later delivery
		log.Info("procSendMessageP2P: " + send2ID + " | is offline")
		omrd, lerr := self.msgServer.offlineMsgCache.Get(send2ID)
		log.Info(omrd)
		if lerr != nil {
			log.Error(lerr.Error())
		}
		if omrd == nil {
			omrd = redis_store.NewOfflineMsgCacheData(send2ID)
		}
		omrd.AddMsg(redis_store.NewOfflineMsgData(send2Msg, fromID, uuid))
		// assign the OUTER err so a cache-write failure reaches errout
		err = self.msgServer.offlineMsgCache.Set(omrd)
		if err != nil {
			log.Error(err.Error())
			goto errout
		}
	} else if sessionCacheData.MsgServerAddr == self.msgServer.cfg.LocalIP {
		// receiver online on this server: deliver directly
		log.Info("procSendMessageP2P: in the same server")
		req := protocol.NewCmdSimple(protocol.IND_SEND_P2P_MSG_CMD)
		req.AddArg(send2Msg)
		req.AddArg(fromID)
		// add uuid
		req.AddArg(uuid)
		if self.msgServer.sessions[send2ID] != nil {
			// capture Send's error (a stale err used to be tested here)
			err = self.msgServer.sessions[send2ID].Send(libnet.Json(req))
			if err != nil {
				log.Error(err.Error())
				goto errout
			}
			self.procP2PAckStatus(fromID, uuid, protocol.P2P_ACK_SENT)
		}
	} else {
		// receiver online on another msg server: route via the Router
		log.Info("procSendMessageP2P: not in the same server")
		if self.msgServer.channels[protocol.SYSCTRL_SEND] != nil {
			cmd.AddArg(fromID)
			//add uuid
			cmd.AddArg(uuid)
			_, err = self.msgServer.channels[protocol.SYSCTRL_SEND].Channel.Broadcast(libnet.Json(cmd))
			if err != nil {
				log.Error(err.Error())
				goto errout
			}
			//self.procP2PAckStatus(fromID, uuid, protocol.P2P_ACK_SENT)
		}
	}
errout:
	if err != nil {
		resp.AddArg(err.Error())
	} else {
		resp.AddArg(protocol.RSP_SUCCESS)
		resp.AddArg(uuid)
	}
	err = session.Send(libnet.Json(resp))
	if err != nil {
		log.Error(err.Error())
	}
	return err
}
/*
Router -> MsgServer
    ROUTE_SEND_P2P_MSG_CMD
    arg0: Send2ID // receiver's user ID
    arg1: Msg     // message body
    arg2: FromID  // sender's user ID
    arg3: uuid    // message uuid assigned by the MsgServer

MsgServer -> device/client (to the receiver)
    IND_SEND_P2P_MSG_CMD
    arg0: Msg
    arg1: FromID
    arg2: uuid // optional; if present the receiver must answer IND_ACK_P2P_STATUS_CMD
*/
// procRouteMessageP2P delivers a router-forwarded P2P message to a
// receiver whose session lives on this server, then acks SENT back to
// the original sender.
//
// Fix: a failed Send previously called log.Fatalln, killing the whole
// message server because of one broken client connection; the failure
// is now logged and returned instead.
func (self *ProtoProc) procRouteMessageP2P(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procRouteMessageP2P")
	var err error
	send2ID := cmd.GetArgs()[0]
	send2Msg := cmd.GetArgs()[1]
	fromID := cmd.GetArgs()[2]
	uuid := cmd.GetArgs()[3]
	if self.msgServer.sessions[send2ID] != nil {
		resp := protocol.NewCmdSimple(protocol.IND_SEND_P2P_MSG_CMD)
		resp.AddArg(send2Msg)
		resp.AddArg(fromID)
		// add uuid
		resp.AddArg(uuid)
		err = self.msgServer.sessions[send2ID].Send(libnet.Json(resp))
		if err != nil {
			log.Error(err.Error())
			return err
		}
		self.procP2PAckStatus(fromID, uuid, protocol.P2P_ACK_SENT)
	}
	return nil
}
/*
REQ_CREATE_TOPIC_CMD
    arg0: TopicName  // topic (group) name
    arg1: ClientName // creator's display name inside the topic, e.g. Dad/Mom
*/
// procCreateTopic creates a topic in the persistent store and the
// cache, then joins the creator as its first member. Only
// DEV_TYPE_CLIENT terminals may create topics. Replies
// RSP_CREATE_TOPIC_CMD with (SUCCESS, topicName, clientName) on success
// or (errorText, topicName) on failure.
//
// Fixes:
//   - the command's args were indexed before the length check, so a
//     malformed command panicked instead of yielding SYNTAX_ERROR;
//   - removed the unreachable trailing "return nil" and the dead block
//     of commented-out legacy broadcast/store code.
func (self *ProtoProc) procCreateTopic(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procCreateTopic")
	var err error
	var topicName string
	var ClientName string
	ClientID := session.State.(*base.SessionState).ClientID
	ClientType := session.State.(*base.SessionState).ClientType
	resp := protocol.NewCmdSimple(protocol.RSP_CREATE_TOPIC_CMD)
	if len(cmd.GetArgs()) != 2 {
		err = common.SYNTAX_ERROR
	} else {
		topicName = cmd.GetArgs()[0]
		ClientName = cmd.GetArgs()[1]
		// only DEV_TYPE_CLIENT can create a topic
		if ClientType != protocol.DEV_TYPE_CLIENT {
			err = common.DENY_ACCESS
		} else {
			// check whether the topic already exists (cache first, then store)
			topicCacheData, _ := self.msgServer.topicCache.Get(topicName)
			if topicCacheData != nil {
				log.Infof("TOPIC %s exist in CACHE", topicName)
				err = common.TOPIC_EXIST
			} else {
				log.Infof("TOPIC %s not exist in CACHE", topicName)
				topicStoreData, _ := self.msgServer.mongoStore.GetTopicFromCid(topicName)
				if topicStoreData != nil {
					log.Infof("TOPIC %s exist in STORE", topicName)
					err = common.TOPIC_EXIST
				} else {
					log.Infof("TOPIC %s not exist in STORE", topicName)
					// create the topic in the persistent store first ...
					log.Infof("Create topic %s in STORE", topicName)
					topicStoreData = mongo_store.NewTopicStoreData(topicName, ClientID)
					err = self.msgServer.mongoStore.Set(topicStoreData)
					if err != nil {
						log.Error(err.Error())
						goto ErrOut
					}
					log.Infof("topic %s created in STORE", topicName)
					// ... then mirror it into the cache
					log.Infof("Create topic %s in CACHE", topicName)
					topicCacheData = redis_store.NewTopicCacheData(topicStoreData)
					err = self.msgServer.topicCache.Set(topicCacheData)
					if err != nil {
						log.Error(err.Error())
						goto ErrOut
					}
					log.Infof("topic %s created in CACHE", topicName)
					// the creator becomes the first member
					member := mongo_store.NewMember(ClientID, ClientName, ClientType)
					err = self.msgServer.procJoinTopic(member, topicName)
				}
			}
		}
	}
ErrOut:
	if err != nil {
		resp.AddArg(err.Error())
		resp.AddArg(topicName)
	} else {
		resp.AddArg(protocol.RSP_SUCCESS)
		resp.AddArg(topicName)
		resp.AddArg(ClientName)
	}
	err = session.Send(libnet.Json(resp))
	if err != nil {
		log.Error(err.Error())
	}
	return err
}
/*
client -> MsgServer
    REQ_ADD_2_TOPIC_CMD
    arg0: TopicName     // topic (group) name
    arg1: NewClientID   // user ID to add
    arg2: NewClientName // that user's display name inside the topic
                        // (for a device, e.g. Son/Daughter)

MsgServer -> client
    RSP_ADD_2_TOPIC_CMD
    arg0: SUCCESS/FAILED
    arg1: TopicName
    arg2: ClientID
    arg3: ClientType
*/
// procAdd2Topic lets a topic's creator add another, currently online,
// client to the topic.
//
// Fix: the command's args were indexed before the length check, so a
// malformed command panicked instead of yielding SYNTAX_ERROR.
func (self *ProtoProc) procAdd2Topic(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procAdd2Topic")
	var err error
	var topicName string
	var mID string
	var mName string
	ClientID := session.State.(*base.SessionState).ClientID
	ClientType := session.State.(*base.SessionState).ClientType
	mType := protocol.DEV_TYPE_CLIENT
	resp := protocol.NewCmdSimple(protocol.RSP_ADD_2_TOPIC_CMD)
	if len(cmd.GetArgs()) != 3 {
		err = common.SYNTAX_ERROR
	} else {
		topicName = cmd.GetArgs()[0]
		mID = cmd.GetArgs()[1]
		mName = cmd.GetArgs()[2]
		// only DEV_TYPE_CLIENT can manage topic membership
		if ClientType != protocol.DEV_TYPE_CLIENT {
			err = common.DENY_ACCESS
		} else {
			// the topic must exist ...
			topicCacheData, _ := self.msgServer.topicCache.Get(topicName)
			if topicCacheData == nil {
				log.Infof("TOPIC %s not exist in CACHE", topicName)
				err = common.TOPIC_NOT_EXIST
			} else if topicCacheData.CreaterID != ClientID {
				// ... and only its creator may add members
				log.Warningf("ClientID %s is not creater of topic %s", ClientID, topicName)
				err = common.DENY_ACCESS
			} else {
				// the new member must be online
				sessionCacheData, _ := self.msgServer.sessionCache.Get(mID)
				if sessionCacheData == nil {
					log.Warningf("Client %s not online", mID)
					err = common.NOT_ONLINE
				} else {
					mType = sessionCacheData.ClientType
					member := mongo_store.NewMember(mID, mName, mType)
					err = self.msgServer.procJoinTopic(member, topicName)
				}
			}
		}
	}
	if err != nil {
		resp.AddArg(err.Error())
	} else {
		resp.AddArg(protocol.RSP_SUCCESS)
	}
	resp.AddArg(topicName)
	resp.AddArg(mID)
	resp.AddArg(mType)
	err = session.Send(libnet.Json(resp))
	if err != nil {
		log.Error(err.Error())
	}
	return err
}
/*
client -> MsgServer
    REQ_KICK_TOPIC_CMD
    arg0: TopicName   // topic (group) name
    arg1: NewClientID // member user ID to remove

MsgServer -> client
    RSP_KICK_TOPIC_CMD
    arg0: SUCCESS/FAILED
*/
// procKickTopic lets a topic's creator remove a member from the topic.
//
// Fix: the command's args were indexed before the length check, so a
// malformed command panicked instead of yielding SYNTAX_ERROR.
func (self *ProtoProc) procKickTopic(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procKickTopic")
	var err error
	ClientID := session.State.(*base.SessionState).ClientID
	ClientType := session.State.(*base.SessionState).ClientType
	resp := protocol.NewCmdSimple(protocol.RSP_KICK_TOPIC_CMD)
	if len(cmd.GetArgs()) != 2 {
		err = common.SYNTAX_ERROR
	} else if ClientType != protocol.DEV_TYPE_CLIENT {
		// only DEV_TYPE_CLIENT can manage topic membership
		err = common.DENY_ACCESS
	} else {
		topicName := cmd.GetArgs()[0]
		mID := cmd.GetArgs()[1]
		// the topic must exist and the caller must be its creator
		var topicCacheData *redis_store.TopicCacheData
		topicCacheData, err = self.msgServer.topicCache.Get(topicName)
		if topicCacheData == nil {
			log.Warningf("TOPIC %s not exist", topicName)
			err = common.TOPIC_NOT_EXIST
		} else if topicCacheData.CreaterID != ClientID {
			log.Warningf("ClientID %s is not creater of topic %s", ClientID, topicName)
			err = common.DENY_ACCESS
		} else {
			err = self.msgServer.procQuitTopic(mID, topicName)
		}
	}
	if err != nil {
		resp.AddArg(err.Error())
	} else {
		resp.AddArg(protocol.RSP_SUCCESS)
	}
	err = session.Send(libnet.Json(resp))
	if err != nil {
		log.Error(err.Error())
	}
	return err
}
/*
client -> MsgServer
    REQ_JOIN_TOPIC_CMD
    arg0: TopicName  // topic (group) name
    arg1: ClientName // caller's display name inside the topic, e.g. Dad/Mom

MsgServer -> client
    RSP_JOIN_TOPIC_CMD
    arg0: SUCCESS/FAILED
*/
// procJoinTopic adds the calling client to an existing topic and
// reports the outcome back on the same session.
func (self *ProtoProc) procJoinTopic(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procJoinTopic")
	var err error
	args := cmd.GetArgs()
	if len(args) == 2 {
		state := session.State.(*base.SessionState)
		member := mongo_store.NewMember(state.ClientID, args[1], state.ClientType)
		err = self.msgServer.procJoinTopic(member, args[0])
	} else {
		err = common.SYNTAX_ERROR
	}
	resp := protocol.NewCmdSimple(protocol.RSP_JOIN_TOPIC_CMD)
	if err == nil {
		resp.AddArg(protocol.RSP_SUCCESS)
	} else {
		resp.AddArg(err.Error())
	}
	err = session.Send(libnet.Json(resp))
	if err != nil {
		log.Error(err.Error())
	}
	return err
}
/*
client -> MsgServer
    REQ_QUIT_TOPIC_CMD
    arg0: TopicName // topic (group) name

MsgServer -> client
    RSP_QUIT_TOPIC_CMD
    arg0: SUCCESS/ERROR
*/
// procQuitTopic removes the calling client from a topic and reports the
// outcome back on the same session.
func (self *ProtoProc) procQuitTopic(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procQuitTopic")
	var err error
	args := cmd.GetArgs()
	if len(args) == 1 {
		clientID := session.State.(*base.SessionState).ClientID
		err = self.msgServer.procQuitTopic(clientID, args[0])
	} else {
		err = common.SYNTAX_ERROR
	}
	resp := protocol.NewCmdSimple(protocol.RSP_QUIT_TOPIC_CMD)
	if err == nil {
		resp.AddArg(protocol.RSP_SUCCESS)
	} else {
		resp.AddArg(err.Error())
	}
	err = session.Send(libnet.Json(resp))
	if err != nil {
		log.Error(err.Error())
	}
	return err
}
/*
device/client -> MsgServer -> Router
    REQ_SEND_TOPIC_MSG_CMD
    arg0: Msg       // message body
    arg1: TopicName // topic name; a device need not supply it

MsgServer -> device/client (back to the sender)
    RSP_SEND_TOPIC_MSG_CMD
    arg0: SUCCESS/FAILED

(forwarded through the Router; invisible to client developers)
Router -> MsgServer
    ROUTE_SEND_TOPIC_MSG_CMD
    arg0: Msg        // message body
    arg1: TopicName  // topic name
    arg2: ClientID   // sender's user ID
    arg3: ClientType // sender's terminal type, client or device

MsgServer -> device/client (to the receivers)
    IND_SEND_TOPIC_MSG_CMD
    arg0: Msg
    arg1: TopicName
    arg2: ClientID
    arg3: ClientType
*/
// procSendTopicMsg fans a topic message out to local members and, when
// the topic has members on other servers, hands it to the router.
// Clients supply (msg, topicName); watch devices supply only (msg) —
// their topic is taken from their session-cache entry.
//
// Fixes:
//   - cmd.GetArgs()[0] was read before the argument-count validation,
//     panicking on an empty command instead of yielding SYNTAX_ERROR;
//   - the per-member Send's error was discarded while a stale err was
//     tested and routed into log.Fatalln, which would kill the whole
//     server over one broken client connection.
func (self *ProtoProc) procSendTopicMsg(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procSendTopicMsg")
	var err error
	var msg string
	var topicName string
	var topicCacheData *redis_store.TopicCacheData
	var sessionCacheData *redis_store.SessionCacheData
	resp := protocol.NewCmdSimple(protocol.RSP_SEND_TOPIC_MSG_CMD)
	ClientID := session.State.(*base.SessionState).ClientID
	ClientType := session.State.(*base.SessionState).ClientType
	if ClientType == protocol.DEV_TYPE_CLIENT {
		if len(cmd.GetArgs()) != 2 {
			err = common.SYNTAX_ERROR
			goto ErrOut
		}
	} else if len(cmd.GetArgs()) != 1 {
		err = common.SYNTAX_ERROR
		goto ErrOut
	}
	msg = cmd.GetArgs()[0]
	// get session cache
	sessionCacheData, err = self.msgServer.sessionCache.Get(ClientID)
	if sessionCacheData == nil {
		log.Errorf("ID %s cache missing", ClientID)
		err = common.NOT_ONLINE
		goto ErrOut
	} else if ClientType == protocol.DEV_TYPE_WATCH {
		// a watch device implicitly talks to its first (only) topic
		topicName = sessionCacheData.GetTopics()[0]
	} else {
		topicName = cmd.GetArgs()[1]
	}
	// check whether the topic exists
	topicCacheData, err = self.msgServer.topicCache.Get(topicName)
	if topicCacheData == nil {
		log.Warningf("TOPIC %s not exist", topicName)
		err = common.TOPIC_NOT_EXIST
	} else {
		topic_msg_resp := protocol.NewCmdSimple(cmd.GetCmdName())
		topic_msg_resp.AddArg(msg)
		topic_msg_resp.AddArg(topicName)
		topic_msg_resp.AddArg(ClientID)
		topic_msg_resp.AddArg(ClientType)
		if topicCacheData.AliveMemberNumMap[self.msgServer.cfg.LocalIP] > 1 {
			// other members live on this very server: deliver directly
			topic_msg_resp.ChangeCmdName(protocol.IND_SEND_TOPIC_MSG_CMD)
			log.Warningf("topic %s has %d member(s) in this server", topicName, topicCacheData.AliveMemberNumMap[self.msgServer.cfg.LocalIP])
			for _, mID := range topicCacheData.MemberList {
				if mID.ID != ClientID && self.msgServer.sessions[mID.ID] != nil {
					// a failed send to one member is logged; delivery
					// to the remaining members continues
					if serr := self.msgServer.sessions[mID.ID].Send(libnet.Json(topic_msg_resp)); serr != nil {
						log.Error(serr.Error())
					}
				}
			}
		}
		if self.msgServer.channels[protocol.SYSCTRL_SEND] != nil {
			topic_msg_resp.ChangeCmdName(protocol.REQ_SEND_TOPIC_MSG_CMD)
			for ip, num := range topicCacheData.AliveMemberNumMap {
				if num > 0 {
					log.Warningf("topic %s has %d member(s) in ip %s", topicName, num, ip)
					if ip != self.msgServer.cfg.LocalIP {
						// members on another server: one router broadcast
						// covers all remote servers, hence the break
						_, err = self.msgServer.channels[protocol.SYSCTRL_SEND].Channel.Broadcast(libnet.Json(topic_msg_resp))
						if err != nil {
							log.Error(err.Error())
						}
						break
					}
				}
			}
		}
	}
ErrOut:
	if err != nil {
		resp.AddArg(err.Error())
	} else {
		resp.AddArg(protocol.RSP_SUCCESS)
	}
	err = session.Send(libnet.Json(resp))
	if err != nil {
		log.Error(err.Error())
	}
	return err
}
/*
(Internal routing, invisible to client developers.)
Router -> MsgServer
    ROUTE_SEND_TOPIC_MSG_CMD
    arg0: Msg        // message body
    arg1: TopicName  // topic (group) name
    arg2: ClientID   // sender's user ID
    arg3: ClientType // sender's terminal type, client or device
*/
// procRouteTopicMsg fans a router-forwarded topic message out to every
// topic member with a live session on this server.
//
// Fixes:
//   - Send errors were never captured (a stale err was tested) and the
//     failure path called log.Fatalln, which would kill the server over
//     one bad client connection; failures are now logged per member and
//     delivery continues;
//   - err.Error() is no longer dereferenced when the cache lookup
//     returned a nil topic alongside a nil error.
func (self *ProtoProc) procRouteTopicMsg(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procRouteTopicMsg")
	topicName := cmd.GetArgs()[1]
	// check whether the topic exists in this server's cache
	topicCacheData, err := self.msgServer.topicCache.Get(topicName)
	if topicCacheData == nil {
		if err != nil {
			log.Warningf("TOPIC %s not exist: %s", topicName, err.Error())
		} else {
			log.Warningf("TOPIC %s not exist", topicName)
		}
		return common.TOPIC_NOT_EXIST
	}
	cmd.ChangeCmdName(protocol.IND_SEND_TOPIC_MSG_CMD)
	// members on this server receive the message directly
	log.Warningf("topic %s has %d member(s) in this server", topicName, topicCacheData.AliveMemberNumMap[self.msgServer.cfg.LocalIP])
	for _, member := range topicCacheData.MemberList {
		if self.msgServer.sessions[member.ID] == nil {
			continue // member not connected to this server
		}
		if err = self.msgServer.sessions[member.ID].Send(libnet.Json(cmd)); err != nil {
			log.Error(err.Error())
		}
	}
	return nil
}
/*
device/client -> MsgServer
    REQ_GET_TOPIC_LIST_CMD

MsgServer -> device/client
    RSP_GET_TOPIC_LIST_CMD
    arg0: SUCCESS/ERROR
    arg1: TopicNum // number of topics; that many TopicNames follow
    arg2: TopicName1
    arg3: TopicName2
    arg4: TopicName3
*/
// procGetTopicList answers with the topics the calling client belongs
// to, taken from its session-cache entry.
func (self *ProtoProc) procGetTopicList(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procGetTopicList")
	resp := protocol.NewCmdSimple(protocol.RSP_GET_TOPIC_LIST_CMD)
	clientID := session.State.(*base.SessionState).ClientID
	sessionCacheData, err := self.msgServer.sessionCache.Get(clientID)
	if sessionCacheData == nil {
		log.Warningf("Client %s not online", clientID)
		err = common.NOT_ONLINE
	}
	if err == nil {
		resp.AddArg(protocol.RSP_SUCCESS)
		resp.AddArg(strconv.Itoa(len(sessionCacheData.TopicList)))
		for _, topic := range sessionCacheData.TopicList {
			resp.AddArg(topic)
		}
	} else {
		resp.AddArg(err.Error())
	}
	err = session.Send(libnet.Json(resp))
	if err != nil {
		log.Error(err.Error())
	}
	return err
}
/*
device/client -> MsgServer
    REQ_GET_TOPIC_PROFILE_CMD
    arg0: TopicName
    (fails if the caller is not a member of TopicName)

MsgServer -> device/client
    RSP_GET_TOPIC_PROFILE_CMD
    arg0: SUCCESS/FAILED
    arg1: TopicName
    arg2: CreaterID
    then, per member: ID, Name, Type
*/
// procGetTopicProfile answers with a topic's creator and member list;
// only members of the topic may ask.
//
// Fix: the "not the member" warning formatted the string topicName with
// the %d verb, producing garbage like %!d(string=...); it now uses %s.
func (self *ProtoProc) procGetTopicProfile(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procGetTopicProfile")
	var err error
	var topicCacheData *redis_store.TopicCacheData
	topicName := "<N/A>"
	resp := protocol.NewCmdSimple(protocol.RSP_GET_TOPIC_PROFILE_CMD)
	if len(cmd.GetArgs()) != 1 {
		err = common.SYNTAX_ERROR
	} else {
		topicName = cmd.GetArgs()[0]
		clientID := session.State.(*base.SessionState).ClientID
		// the topic must exist and the caller must be one of its members
		topicCacheData, err = self.msgServer.topicCache.Get(topicName)
		if topicCacheData == nil {
			log.Warningf("TOPIC %s not exist", topicName)
			err = common.TOPIC_NOT_EXIST
		} else if topicCacheData.MemberExist(clientID) == false {
			log.Warningf("%s not the member of topic %s", clientID, topicName)
			err = common.DENY_ACCESS
		}
	}
	if err != nil {
		resp.AddArg(err.Error())
		resp.AddArg(topicName)
	} else {
		resp.AddArg(protocol.RSP_SUCCESS)
		resp.AddArg(topicName)
		resp.AddArg(topicCacheData.CreaterID)
		for _, member := range topicCacheData.MemberList {
			resp.AddArg(member.ID)
			resp.AddArg(member.Name)
			resp.AddArg(member.Type)
		}
	}
	err = session.Send(libnet.Json(resp))
	if err != nil {
		log.Error(err.Error())
	}
	return err
}
// not a good idea
// procP2pAck applies a client-reported delivery-status acknowledgement
// (uuid, status, senderID) by forwarding it to procP2PAckStatus.
func (self *ProtoProc) procP2pAck(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procP2pAck")
	args := cmd.GetArgs()
	if len(args) != 3 {
		log.Error("procP2pAck: syntax error")
		return common.SYNTAX_ERROR
	}
	// args: [0] uuid, [1] status, [2] sender's ID
	return self.procP2PAckStatus(args[2], args[0], args[1])
}
<file_sep>/protocol/cmd.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protocol
const (
DEV_TYPE_WATCH = "D"
DEV_TYPE_CLIENT = "C"
)
// status of p2p msg
const (
P2P_ACK_FALSE = "FALSE" // msg server received
P2P_ACK_SENT = "SENT" // sent
P2P_ACK_REACHED = "REACHED" // msg reach the peer(Send2ID)
P2P_ACK_READ = "READ" // receiver read this msg
)
const (
RSP_SUCCESS = "SUCCESS"
RSP_ERROR = "ERROR"
)
const (
//SEND_PING
SEND_PING_CMD = "PING"
REQ_LOGIN_CMD = "REQ_LOGIN"
RSP_LOGIN_CMD = "RSP_LOGIN"
/*
device/client -> gateway
REQ_LOGIN_CMD
arg0: ClientID //用户ID
arg1: ClientType //终端类型"C" or "D",是client还是device
arg2: ClientPwd //nil for Device/password for Client
gateway -> device/client
RSP_LOGIN_CMD
arg0: SUCCESS/ERROR
arg1: uuid
arg2: MsgServerAddr
device/client -> MsgServer
REQ_LOGIN_CMD
arg0: ClientID //用户ID
arg1: uuid
MsgServer -> device/client
RSP_LOGIN_CMD
arg0: SUCCESS/ERROR
*/
REQ_LOGOUT_CMD = "REQ_LOGOUT"
RSP_LOGOUT_CMD = "RSP_LOGOUT"
/*
device/client -> MsgServer
REQ_LOGOUT_CMD
MsgServer -> device/client
RSP_LOGOUT_CMD
arg0: SUCCESS/ERROR
*/
REQ_SEND_P2P_MSG_CMD = "REQ_SEND_P2P_MSG"
IND_SEND_P2P_MSG_CMD = "IND_SEND_P2P_MSG"
RSP_SEND_P2P_MSG_CMD = "RSP_SEND_P2P_MSG"
ROUTE_SEND_P2P_MSG_CMD = "ROUTE_SEND_P2P_MSG"
IND_ACK_P2P_STATUS_CMD = "IND_ACK_P2P_STATUS"
ROUTE_ACK_P2P_STATUS_CMD = "ROUTE_ACK_P2P_STATUS"
/*
device/client -> MsgServer
REQ_SEND_P2P_MSG_CMD
arg0: Sent2ID //接收方用户ID
arg1: Msg //消息内容
IND_ACK_P2P_STATUS_CMD
arg0: uuid // 发送方知道uuid对应的已发送的消息已送达
arg1: SENT/READ // 发送方知道uuid对应的消息状态:已送达/已读
arg2: fromID //发送方用户ID
返回给消息发送者的消息
MsgServer -> device/client
RSP_SEND_P2P_MSG_CMD
arg0: SUCCESS/FAILED
arg1: uuid // MsgServer分配的消息uuid,发送方根据此uuid确定该消息状态
IND_ACK_P2P_STATUS_CMD
arg0: uuid // 发送方知道uuid对应的已发送的消息已送达
arg1: SENT/READ // 发送方知道uuid对应的消息状态:已送达/已读
通过Router转发消息(对终端开发者不可见)
MsgServer -> Router
REQ_SEND_P2P_MSG_CMD
arg0: Sent2ID //接收方用户ID
arg1: Msg //消息内容
arg2: FromID //发送方用户ID
arg3: uuid //MsgServer分配的消息uuid
IND_ACK_P2P_STATUS_CMD
arg0: uuid // 发送方知道uuid对应的已发送的消息已送达
arg1: SENT/READ // 发送方知道uuid对应的消息状态:已送达/已读
arg2: fromID //发送方用户ID
Router -> MsgServer
ROUTE_SEND_P2P_MSG_CMD
arg0: Sent2ID //接收方用户ID
arg1: Msg //消息内容
arg2: FromID //发送方用户ID
arg3: uuid //MsgServer分配的消息uuid
ROUTE_ACK_P2P_STATUS_CMD
arg0: uuid // 发送方知道uuid对应的已发送的消息已送达
arg1: SENT/READ // 发送方知道uuid对应的消息状态:已送达/已读
发送给消息接受者的消息
MsgServer -> device/client
IND_SEND_P2P_MSG_CMD
arg0: Msg //消息内容
arg1: FromID //发送方用户ID
arg2: uuid //MsgServer分配的消息uuid,可选,如果提供了则须IND_ACK_P2P_MSG_CMD(ClientID, uuid)
IND_ACK_P2P_STATUS_CMD
arg0: uuid // 发送方知道uuid对应的已发送的消息已送达
arg1: SENT/READ // 发送方知道uuid对应的消息状态:已送达/已读
*/
REQ_CREATE_TOPIC_CMD = "REQ_CREATE_TOPIC"
RSP_CREATE_TOPIC_CMD = "RSP_CREATE_TOPIC"
/*
client -> MsgServer
REQ_CREATE_TOPIC_CMD
arg0: TopicName //群组名
arg1: ClientName //用户在Topic中的Name, 比如老爸/老妈
MsgServer -> client
RSP_CREATE_TOPIC_CMD
arg0: SUCCESS/ERROR
*/
REQ_ADD_2_TOPIC_CMD = "REQ_ADD_2_TOPIC"
RSP_ADD_2_TOPIC_CMD = "RSP_ADD_2_TOPIC"
/*
client -> MsgServer
REQ_ADD_2_TOPIC_CMD
arg0: TopicName //群组名
arg1: NewClientID //用户ID
arg2: NewClientName //用户在Topic中的Name, 对于device, 可以是儿子/女儿
MsgServer -> client
RSP_ADD_2_TOPIC_CMD
arg0: SUCCESS/ERROR
arg1: TopicName
arg2: ClientID
arg3: ClientType
*/
REQ_KICK_TOPIC_CMD = "REQ_KICK_TOPIC"
RSP_KICK_TOPIC_CMD = "RSP_KICK_TOPIC"
/*
client -> MsgServer
REQ_KICK_TOPIC_CMD
arg0: TopicName //群组名
arg1: NewClientID //待移除的成员用户ID
MsgServer -> client
RSP_KICK_TOPIC_CMD
arg0: SUCCESS/ERROR
*/
REQ_JOIN_TOPIC_CMD = "REQ_JOIN_TOPIC"
RSP_JOIN_TOPIC_CMD = "RSP_JOIN_TOPIC"
/*
client -> MsgServer
REQ_JOIN_TOPIC_CMD
arg0: TopicName //群组名
arg1: ClientName //用户在Topic中的Name, 比如老爸/老妈
MsgServer -> client
RSP_JOIN_TOPIC_CMD
arg0: SUCCESS/ERROR
*/
REQ_QUIT_TOPIC_CMD = "REQ_QUIT_TOPIC"
RSP_QUIT_TOPIC_CMD = "RSP_QUIT_TOPIC"
/*
client -> MsgServer
REQ_QUIT_TOPIC_CMD
arg0: TopicName //群组名
MsgServer -> client
RSP_QUIT_TOPIC_CMD
arg0: SUCCESS/ERROR
*/
REQ_SEND_TOPIC_MSG_CMD = "REQ_SEND_TOPIC_MSG"
RSP_SEND_TOPIC_MSG_CMD = "RSP_SEND_TOPIC_MSG"
ROUTE_SEND_TOPIC_MSG_CMD = "ROUTE_SEND_TOPIC_MSG"
IND_SEND_TOPIC_MSG_CMD = "IND_SEND_TOPIC_MSG"
/*
device/client -> MsgServer -> Router
REQ_SEND_TOPIC_MSG_CMD
arg0: Msg //消息内容
arg1: TopicName //群组名, device无须提供
返回给消息发送者的消息
MsgServer -> device/client
RSP_SEND_TOPIC_MSG_CMD
arg0: SUCCESS/FAILED
通过Router转发消息(对终端开发者不可见)
Router -> MsgServer
ROUTE_SEND_TOPIC_MSG_CMD
arg0: Msg //消息内容
arg1: TopicName //群组名
arg2: ClientID //发送方用户ID
arg3: ClientType //发送方终端类型,是client还是device
发送给消息接受者的消息
MsgServer -> device/client
IND_SEND_TOPIC_MSG_CMD
arg0: Msg //消息内容
arg1: TopicName //群组名
arg2: ClientID //发送方用户ID
arg3: ClientType //发送方终端类型,是client还是device
*/
REQ_GET_TOPIC_LIST_CMD = "REQ_GET_TOPIC_LIST"
RSP_GET_TOPIC_LIST_CMD = "RSP_GET_TOPIC_LIST"
/*
device/client -> MsgServer
REQ_GET_TOPIC_LIST_CMD
MsgServer -> device/client
RSP_GET_TOPIC_LIST_CMD
arg0: SUCCESS/ERROR
arg1: TopicNum // topic数目,后面跟随该数目的TopicName
arg2: TopicName1
arg3: TopicName2
arg4: TopicName3
*/
REQ_GET_TOPIC_PROFILE_CMD = "REQ_GET_TOPIC_PROFILE"
RSP_GET_TOPIC_PROFILE_CMD = "RSP_GET_TOPIC_PROFILE"
/*
device/client -> MsgServer
REQ_GET_TOPIC_MEMBER_CMD
arg0: TopicName
如果ClientID不是TopicName的成员,则返回失败
MsgServer -> device/client
RSP_GET_TOPIC_MEMBER_CMD
arg0: SUCCESS/ERROR
// followed by topic profile
// topicName
// creator
// members[]
*/
)
// Commands exchanged between a client and the gateway: the client asks
// for a msg_server address and the gateway answers with one.
const (
    // client -> gateway: request a msg_server address.
    REQ_MSG_SERVER_CMD = "REQ_MSG_SERVER"
    // gateway -> client: SELECT_MSG_SERVER_FOR_CLIENT msg_server_ip
    SELECT_MSG_SERVER_FOR_CLIENT_CMD = "SELECT_MSG_SERVER_FOR_CLIENT"
)

// Legacy client <-> msg_server and router commands. The "format" notes
// list the positional arguments that follow each command name.
const (
    // client -> msg_server: SEND_CLIENT_ID CLIENT_ID
    SEND_CLIENT_ID_CMD = "SEND_CLIENT_ID"
    // client -> msg_server: SEND_CLIENT_ID_FOR_TOPIC ID
    SEND_CLIENT_ID_FOR_TOPIC_CMD = "SEND_CLIENT_ID_FOR_TOPIC"
    // router/manager/monitor -> msg_server: SUBSCRIBE_CHANNEL channelName
    SUBSCRIBE_CHANNEL_CMD = "SUBSCRIBE_CHANNEL"
    // SEND_MESSAGE_P2P send2ID send2msg
    SEND_MESSAGE_P2P_CMD = "SEND_MESSAGE_P2P"
    // RESP_MESSAGE_P2P msg fromID uuid
    RESP_MESSAGE_P2P_CMD = "RESP_MESSAGE_P2P"
    // router-forwarded point-to-point message.
    ROUTE_MESSAGE_P2P_CMD = "ROUTE_MESSAGE_P2P"
    CREATE_TOPIC_CMD      = "CREATE_TOPIC"
    // JOIN_TOPIC TOPIC_NAME CLIENT_ID
    JOIN_TOPIC_CMD = "JOIN_TOPIC"
    // reply telling the client which msg_server hosts a topic.
    LOCATE_TOPIC_MSG_ADDR_CMD = "LOCATE_TOPIC_MSG_ADDR"
    SEND_MESSAGE_TOPIC_CMD    = "SEND_MESSAGE_TOPIC"
    RESP_MESSAGE_TOPIC_CMD    = "RESP_MESSAGE_TOPIC"
)

// Point-to-point delivery acknowledgement.
const (
    // P2P_ACK clientID uuid
    P2P_ACK_CMD = "P2P_ACK"
)

// Commands instructing the manager to cache data in Redis.
const (
    CACHE_SESSION_CMD = "CACHE_SESSION"
    CACHE_TOPIC_CMD   = "CACHE_TOPIC"
)

// Commands instructing the manager to persist data in MongoDB.
const (
    STORE_SESSION_CMD = "STORE_SESSION"
    STORE_TOPIC_CMD   = "STORE_TOPIC"
)
// Cmd is the common shape of every wire command: a command name, an
// ordered list of string arguments, and an optional attached payload.
type Cmd interface {
    // GetCmdName returns the command name (e.g. "REQ_LOGIN").
    GetCmdName() string
    // ChangeCmdName rewrites the command name in place (used when a
    // request is re-labelled before being routed onward).
    ChangeCmdName(newName string)
    // GetArgs returns the positional arguments.
    GetArgs() []string
    // AddArg appends one positional argument.
    AddArg(arg string)
    // ParseCmd fills the command from a raw message list; see the
    // implementations for the expected layout.
    ParseCmd(msglist []string)
    // GetAnyData returns the attached payload, or nil when absent.
    GetAnyData() interface{}
}
// CmdSimple is the basic wire command: a command name plus positional
// string arguments. It implements Cmd and never carries an attached
// payload (GetAnyData always reports nil).
type CmdSimple struct {
    CmdName string
    Args    []string
}

// NewCmdSimple builds a CmdSimple with the given name and an empty
// (non-nil) argument list.
func NewCmdSimple(cmdName string) *CmdSimple {
    c := new(CmdSimple)
    c.CmdName = cmdName
    c.Args = []string{}
    return c
}

// GetCmdName returns the command name.
func (c *CmdSimple) GetCmdName() string {
    return c.CmdName
}

// ChangeCmdName replaces the command name in place.
func (c *CmdSimple) ChangeCmdName(newName string) {
    c.CmdName = newName
}

// GetArgs returns the argument list (the internal slice, not a copy).
func (c *CmdSimple) GetArgs() []string {
    return c.Args
}

// AddArg appends one positional argument.
func (c *CmdSimple) AddArg(arg string) {
    c.Args = append(c.Args, arg)
}

// ParseCmd fills the command from a raw message list: element 1 is the
// command name and everything after it becomes the arguments.
// NOTE(review): panics when msglist has fewer than 2 elements; callers
// are assumed to pass a well-formed list — confirm upstream validation.
func (c *CmdSimple) ParseCmd(msglist []string) {
    c.CmdName = msglist[1]
    c.Args = msglist[2:]
}

// GetAnyData reports the attached payload; CmdSimple never has one.
func (c *CmdSimple) GetAnyData() interface{} {
    return nil
}
// CmdInternal is a wire command that additionally carries an opaque
// payload (AnyData) for server-internal routing.
//
// NOTE(review): the getter methods use value receivers while the
// mutators use pointer receivers; this mix is preserved exactly so the
// type's method sets (and Cmd interface satisfaction) do not change.
type CmdInternal struct {
    CmdName string
    Args    []string
    AnyData interface{}
}

// NewCmdInternal builds a CmdInternal from its three parts.
func NewCmdInternal(cmdName string, args []string, anyData interface{}) *CmdInternal {
    ci := new(CmdInternal)
    ci.CmdName = cmdName
    ci.Args = args
    ci.AnyData = anyData
    return ci
}

// ParseCmd fills the command from a raw message list: element 1 is the
// command name, the rest are arguments. AnyData is left untouched.
func (ci *CmdInternal) ParseCmd(msglist []string) {
    ci.CmdName = msglist[1]
    ci.Args = msglist[2:]
}

// GetCmdName returns the command name.
func (ci CmdInternal) GetCmdName() string {
    return ci.CmdName
}

// ChangeCmdName replaces the command name in place.
func (ci *CmdInternal) ChangeCmdName(newName string) {
    ci.CmdName = newName
}

// GetArgs returns the argument list (the internal slice, not a copy).
func (ci CmdInternal) GetArgs() []string {
    return ci.Args
}

// AddArg appends one positional argument.
func (ci *CmdInternal) AddArg(arg string) {
    ci.Args = append(ci.Args, arg)
}

// SetAnyData attaches an opaque payload.
func (ci *CmdInternal) SetAnyData(a interface{}) {
    ci.AnyData = a
}

// GetAnyData returns the attached payload (may be nil).
func (ci CmdInternal) GetAnyData() interface{} {
    return ci.AnyData
}
// CmdMonitor is the payload periodically reported to the monitor
// service; it currently carries only the number of live sessions.
type CmdMonitor struct {
    SessionNum uint64
}

// NewCmdMonitor returns a zeroed CmdMonitor.
func NewCmdMonitor() *CmdMonitor {
    return &CmdMonitor{}
}

// ClientIDCmd pairs a command name with a client identifier
// (JSON-serialized on the wire).
type ClientIDCmd struct {
    CmdName  string
    ClientID string
}

// SendMessageP2PCmd describes a point-to-point send: the target client
// ID and the message body.
type SendMessageP2PCmd struct {
    CmdName string
    ID      string
    Msg     string
}
<file_sep>/r.sh
#!/bin/bash
# Control script for the FishChat server cluster: clean/build/start/stop/
# status for gateway, msg_server, router, manager, monitor and client.

# Lower-cased kernel name (e.g. "linux"); anything else is treated as
# Windows below.
OS=`uname|awk '{$1=tolower($1);print $1}'`
#echo "OS=$OS"
# Executable suffix: empty on linux, ".exe" elsewhere.
PREFIX=""
if [ $OS != "linux" ]
then
  PREFIX=".exe"
fi
#echo "PREFIX=$PREFIX"
# Usage text for the "clean" sub-command.
function help_clean {
  echo -e "$0 clean"
  echo -e "  clean exe files of gateway/msg_server/manager/router/monitor/client"
}
# Usage text for the "build" sub-command.
# (fixed: help text said "msg_erver" instead of "msg_server")
function help_build {
  echo -e "$0 build <nil>|server|gateway|msg_server|manager|router|monitor|client"
  echo -e "  <nil>|server: means to build all: gateway/msg_server/manager/router/monitor/client"
}
# Usage text for the "start" sub-command.
function help_start {
  echo -e "$0 start <nil>|server|redis|mongo"
  echo -e "  <nil>|server: means to start all: msg_server/gateway/manager/router/monitor"
}
# Usage text for the "stop" sub-command.
function help_stop {
  echo -e "$0 stop <nil>|server|redis|mongo"
  echo -e "  <nil>|server: means to stop all: msg_server/gateway/manager/router/monitor"
}
# Print the overall usage banner covering every sub-command.
function Usage {
  echo -e "Usage: $0 <cmd> <arg>"
  echo -e "<cmd> : clean|build|start|stop"
  echo -e "<arg> : <nil>|server|redis|mongo|gateway|msg_server|manager|router|monitor|client"
  echo -e "Descriptions:"
  help_clean
  help_build
  help_start
  help_stop
}
# Print help for one sub-command ($1), or the full usage banner when the
# sub-command is unknown or absent.
function proc_help {
  case $1 in
  clean)
    help_clean ;;
  build)
    help_build ;;
  start)
    help_start ;;
  stop)
    help_stop ;;
  *)
    Usage ;;
  esac
}
# Remove the built executable of one component directory ($1).
function clean {
  rm -f $1/$1$PREFIX
}
# Remove the executables of every component, in the same order the
# originals were listed (gateway, monitor, msg_server, router, manager,
# client).
function proc_clean {
  for dir in gateway monitor msg_server router manager client; do
    clean $dir
  done
}
# Build one component with `go build` inside its own directory; silently
# ignores names that are not known components.
function build {
  case "$1" in
  gateway|msg_server|router|manager|monitor|client)
    echo -e "===>building $1..."
    cd $1
    go build -v
    cd ..
    ;;
  esac
}
# Dispatch the "build" sub-command. The "a" prefix trick guards against
# an empty $1 breaking the case patterns: empty or "server" builds
# everything, a known component name builds just that one.
function proc_build {
  case "a$1" in
  agateway|amsg_server|arouter|amanager|amonitor|aclient)
    build $1
    ;;
  aserver|a)
    build gateway
    build msg_server
    build router
    build manager
    build monitor
    build client
    ;;
  *)
    proc_help ;;
  esac
}
# Interactively start one component. $1 selects what to launch:
#   manager|monitor|router|gateway : single process with its json conf
#   msg_server                     : two instances (conf 19000 and 19001)
#   redis|mongo                    : backing stores (OS-dependent command)
# Asks for confirmation first; answering n/N/no aborts the whole script.
function start {
  echo "#======================================="
  read -p "start $1?[y|n]" ANS
  case $ANS in
  n|N|no|NO|No) exit 0 ;;
  y|Y|yes|Yes) ;;
  *) ;;
  esac
  case "x$1" in
  "xmanager"|"xmonitor"|"xrouter"|"xgateway")
    ./$1/$1$PREFIX -conf_file=./$1/$1.json &
    ;;
  "xmsg_server")
    ./$1/$1$PREFIX -conf_file=./$1/$1.19001.json &
    ./$1/$1$PREFIX -conf_file=./$1/$1.19000.json &
    ;;
  "xredis")
    if [ $OS == "linux" ]
    then
      sudo /etc/init.d/redis_6379 start
    else
      net start redis
    fi
    ;;
  "xmongo")
    if [ $OS == "linux" ]
    then
      # fixed: was `$DIR=$HOME/...`, which expands the (empty) variable
      # instead of assigning it, so DIR was never set
      DIR=$HOME/RDAWatchServer
      if [ ! -d $DIR/db ]; then
        # -p also creates $DIR itself when it does not exist yet
        mkdir -p $DIR/db
      fi
      mongod --dbpath=$DIR/db --storageEngine=mmapv1 --logpath=$DIR/mongod.log --logappend --fork &
    else
      net start mongodb
    fi
    ;;
  esac
}
# Dispatch the "start" sub-command: no argument (or "server") starts the
# whole cluster in dependency order (msg_server first, since the others
# subscribe to its channels); "redis"/"mongo" start a backing store.
function proc_start {
  case "x$1" in
  "x"|"xserver")
    start msg_server
    start gateway
    start router
    start manager
    start monitor
    ;;
  "xredis"|"xmongo")
    start $1
    ;;
  *)
    proc_help ;;
  esac
}
# Kill every process whose ps line matches $1.
# `grep -v grep` excludes the grep command itself from the match; the
# original listed grep's own pid too and tried to kill it (harmless if
# grep already exited, wrong if the pid was reused).
function stop {
  pids=`ps -ef | grep $1 | grep -v grep | awk '{print $2}'`
  for item in ${pids[*]}; do
    echo "kill $1:$item"
    kill -9 $item
  done
}
# Dispatch the "stop" sub-command: no argument (or "server") stops the
# whole cluster in reverse start order; a component name stops just that
# one; "redis"/"mongo" stop a backing store with the OS-specific command.
function proc_stop {
  case "x$1" in
  "x"|"xserver")
    stop monitor
    stop manager
    stop router
    stop gateway
    stop msg_server
    ;;
  "xmanager"|"xmonitor"|"xrouter"|"xgateway"|"xmsg_server")
    stop $1
    ;;
  "xredis")
    if [ $OS == "linux" ]
    then
      sudo /etc/init.d/redis_6379 stop
    else
      net stop redis
    fi
    ;;
  "xmongo")
    if [ $OS == "linux" ]
    then
      # NOTE(review): reuses the generic pkill-style stop() on "mongod";
      # this also matches any other process containing "mongod" — confirm.
      stop mongod
    else
      net stop mongodb
    fi
    ;;
  *)
    proc_help ;;
  esac
}
# Print "name:pid" for every process whose ps line matches $1.
# `grep -v grep` keeps the grep command's own pid out of the report
# (the original always listed at least the grep process itself).
function status {
  pids=`ps -ef | grep $1 | grep -v grep | awk '{print $2}'`
  for item in ${pids[*]}; do
    echo "$1:$item"
  done
}
# Dispatch the "status" sub-command: report pids for all server
# components, or one named component. The redis/mongo branches are
# placeholders and intentionally do nothing yet.
function proc_status {
  case "x$1" in
  "x"|"xserver")
    status monitor
    status manager
    status router
    status gateway
    status msg_server
    ;;
  "xmanager"|"xmonitor"|"xrouter"|"xgateway"|"xmsg_server")
    status $1
    ;;
  "xredis")
    #if [ $OS == "linux" ]
    #then
    #else
    #fi
    ;;
  "xmongo")
    #if [ $OS == "linux" ]
    #then
    #else
    #fi
    ;;
  esac
}
# Entry point: route "$1" (command) with "$2" (target) to the matching
# proc_* handler; anything unrecognized prints the usage banner.
case "$1" in
clean)
  proc_clean $2 ;;
build)
  proc_build $2 ;;
start)
  proc_start $2 ;;
stop)
  proc_stop $2 ;;
status)
  proc_status $2 ;;
help)
  proc_help $2 ;;
*)
  proc_help;;
esac
<file_sep>/libnet/protocol.go
package libnet
import (
"encoding/binary"
"io"
)
// Pre-built protocol singletons: one simpleProtocol per supported head
// size (1/2/4/8 bytes) and byte order. PacketN hands these out so
// callers share state-free instances instead of allocating new ones.
var (
    BigEndian    = ByteOrder(binary.BigEndian)
    LittleEndian = ByteOrder(binary.LittleEndian)

    packet1BE = newSimpleProtocol(1, BigEndian)
    packet1LE = newSimpleProtocol(1, LittleEndian)
    packet2BE = newSimpleProtocol(2, BigEndian)
    packet2LE = newSimpleProtocol(2, LittleEndian)
    packet4BE = newSimpleProtocol(4, BigEndian)
    packet4LE = newSimpleProtocol(4, LittleEndian)
    packet8BE = newSimpleProtocol(8, BigEndian)
    packet8LE = newSimpleProtocol(8, LittleEndian)
)

// ByteOrder re-exports encoding/binary's ByteOrder so callers need not
// import encoding/binary themselves.
type ByteOrder binary.ByteOrder
// Protocol is a packet-protocol factory: New creates the per-owner
// protocol state for a Session, Server or Channel.
type Protocol interface {
    // Create protocol state.
    // New(*Session) for session protocol state.
    // New(*Server) for server protocol state.
    // New(*Channel) for channel protocol state.
    New(interface{}) ProtocolState
}

// ProtocolState frames packets on a byte stream: it sizes the outgoing
// buffer, writes one packet, and reads one packet.
type ProtocolState interface {
    // PrepareOutBuffer readies buffer to hold a packet of the given size.
    PrepareOutBuffer(buffer *OutBuffer, size int)
    // Write emits one framed packet to writer.
    Write(writer io.Writer, buffer *OutBuffer) error
    // Read consumes one framed packet from reader into buffer.
    Read(reader io.Reader, buffer *InBuffer) error
}
// PacketN returns a {packet, N} protocol (in the style of Erlang's
// {packet, N} option): each packet is prefixed by an n-byte length head.
// n must be 1, 2, 4 or 8; any other value panics. The returned values
// are shared singletons, so the result must not be mutated.
func PacketN(n int, byteOrder ByteOrder) Protocol {
    switch n {
    case 1:
        switch byteOrder {
        case BigEndian:
            return packet1BE
        case LittleEndian:
            return packet1LE
        }
    case 2:
        switch byteOrder {
        case BigEndian:
            return packet2BE
        case LittleEndian:
            return packet2LE
        }
    case 4:
        switch byteOrder {
        case BigEndian:
            return packet4BE
        case LittleEndian:
            return packet4LE
        }
    case 8:
        switch byteOrder {
        case BigEndian:
            return packet8BE
        case LittleEndian:
            return packet8LE
        }
    }
    // Also reached for a ByteOrder that is neither of the two singletons.
    panic("unsupported packet head size")
}
// simpleProtocol is the packet-splitting protocol like Erlang's
// {packet, N}: each packet carries a fixed-length head holding the body
// length, encoded/decoded by the closures below.
type simpleProtocol struct {
    n             int                // head size in bytes (1, 2, 4 or 8)
    bo            binary.ByteOrder   // byte order used for the head
    encodeHead    func([]byte)       // writes len(buffer)-n into the head
    decodeHead    func([]byte) int   // reads the body length from the head
    MaxPacketSize int                // optional cap; 0 means unlimited
}
// newSimpleProtocol builds a simpleProtocol for head size n, installing
// head encode/decode closures specialized to that size and byte order.
// Panics for any n other than 1, 2, 4 or 8.
func newSimpleProtocol(n int, byteOrder binary.ByteOrder) *simpleProtocol {
    protocol := &simpleProtocol{
        n:  n,
        bo: byteOrder,
    }
    switch n {
    case 1:
        protocol.encodeHead = func(buffer []byte) {
            // NOTE(review): a body longer than 255 bytes silently
            // truncates here; only MaxPacketSize (if set) guards
            // against it — confirm callers set it for 1-byte heads.
            buffer[0] = byte(len(buffer) - n)
        }
        protocol.decodeHead = func(buffer []byte) int {
            return int(buffer[0])
        }
    case 2:
        protocol.encodeHead = func(buffer []byte) {
            byteOrder.PutUint16(buffer, uint16(len(buffer)-n))
        }
        protocol.decodeHead = func(buffer []byte) int {
            return int(byteOrder.Uint16(buffer))
        }
    case 4:
        protocol.encodeHead = func(buffer []byte) {
            byteOrder.PutUint32(buffer, uint32(len(buffer)-n))
        }
        protocol.decodeHead = func(buffer []byte) int {
            return int(byteOrder.Uint32(buffer))
        }
    case 8:
        protocol.encodeHead = func(buffer []byte) {
            byteOrder.PutUint64(buffer, uint64(len(buffer)-n))
        }
        protocol.decodeHead = func(buffer []byte) int {
            return int(byteOrder.Uint64(buffer))
        }
    default:
        panic("unsupported packet head size")
    }
    return protocol
}
// New returns the protocol itself: simpleProtocol keeps no per-owner
// state, so every Session/Server/Channel shares the same instance.
func (p *simpleProtocol) New(v interface{}) ProtocolState {
    return p
}

// PrepareOutBuffer reserves capacity for the packet and truncates the
// buffer to just the head, which Write fills in later.
func (p *simpleProtocol) PrepareOutBuffer(buffer *OutBuffer, size int) {
    buffer.Prepare(size)
    buffer.Data = buffer.Data[:p.n]
}

// Write stamps the length head into the packet and sends it. Returns
// PacketTooLargeError when MaxPacketSize is set and exceeded.
func (p *simpleProtocol) Write(writer io.Writer, packet *OutBuffer) error {
    if p.MaxPacketSize > 0 && len(packet.Data) > p.MaxPacketSize {
        return PacketTooLargeError
    }
    p.encodeHead(packet.Data)
    _, err := writer.Write(packet.Data)
    return err
}
// Read consumes one framed packet: first the n-byte length head, then
// exactly that many body bytes into buffer. Returns
// PacketTooLargeError when MaxPacketSize is set and the announced body
// exceeds it; zero-length bodies return immediately after the head.
func (p *simpleProtocol) Read(reader io.Reader, buffer *InBuffer) error {
    // head
    buffer.Prepare(p.n)
    if _, err := io.ReadFull(reader, buffer.Data); err != nil {
        return err
    }
    size := p.decodeHead(buffer.Data)
    if p.MaxPacketSize > 0 && size > p.MaxPacketSize {
        return PacketTooLargeError
    }
    // body
    buffer.Prepare(size)
    if size == 0 {
        return nil
    }
    if _, err := io.ReadFull(reader, buffer.Data); err != nil {
        return err
    }
    return nil
}
<file_sep>/msg_server/server.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"flag"
"sync"
"time"
"github.com/oikomi/FishChatServer/base"
"github.com/oikomi/FishChatServer/common"
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/protocol"
"github.com/oikomi/FishChatServer/storage/mongo_store"
"github.com/oikomi/FishChatServer/storage/redis_store"
)
// init wires the glog-style flags so log output also goes to stderr.
func init() {
    flag.Set("alsologtostderr", "true")
    flag.Set("log_dir", "false")
}
// MsgServer is the main message server: it owns the client sessions,
// the broadcast channels subscribed by router/manager/monitor, and the
// Redis caches plus Mongo store backing sessions and topics.
type MsgServer struct {
    cfg             *MsgServerConfig              // parsed json configuration
    sessions        base.SessionMap               // live client sessions by ID
    channels        base.ChannelMap               // sysctrl/store broadcast channels
    topics          protocol.TopicMap             // topics hosted on this server
    server          *libnet.Server                // underlying tcp server
    sessionCache    *redis_store.SessionCache     // session state in Redis
    topicCache      *redis_store.TopicCache       // topic state in Redis
    offlineMsgCache *redis_store.OfflineMsgCache  // queued messages for offline peers
    p2pStatusCache  *redis_store.P2pStatusCache   // delivery-ack tracking
    mongoStore      *mongo_store.MongoStore       // persistent store
    scanSessionMutex sync.Mutex                   // guards session Alive flags
    readMutex        sync.Mutex                   // multi client session may ask for REDIS at the same time
}
// NewMsgServer assembles a MsgServer from its configuration and a
// shared Redis connection, creating all caches and the Mongo store.
// The libnet server is allocated empty; listening starts elsewhere.
func NewMsgServer(cfg *MsgServerConfig, rs *redis_store.RedisStore) *MsgServer {
    return &MsgServer{
        cfg:             cfg,
        sessions:        make(base.SessionMap),
        channels:        make(base.ChannelMap),
        topics:          make(protocol.TopicMap),
        server:          new(libnet.Server),
        sessionCache:    redis_store.NewSessionCache(rs),
        topicCache:      redis_store.NewTopicCache(rs),
        offlineMsgCache: redis_store.NewOfflineMsgCache(rs),
        p2pStatusCache:  redis_store.NewP2pStatusCache(rs),
        mongoStore:      mongo_store.NewMongoStore(cfg.Mongo.Addr, cfg.Mongo.Port, cfg.Mongo.User, cfg.Mongo.Password),
    }
}
// createChannels builds one libnet channel per name in base.ChannleList
// (sic — the typo is in the external package and cannot be fixed here)
// so router/manager/monitor can subscribe to msg_server events.
func (self *MsgServer) createChannels() {
    log.Info("createChannels")
    for _, c := range base.ChannleList {
        channel := libnet.NewChannel(self.server.Protocol())
        self.channels[c] = base.NewChannelState(c, channel)
    }
}
// sendMonitorData repeatedly reports the current session count on the
// SYSCTRL_MONITOR channel; pacing is delegated to MonitorBeat (interval
// from cfg.MonitorBeatTime).
// NOTE(review): when the monitor channel exists the inner for loop
// never exits, so this must run in its own goroutine — confirm callers.
func (self *MsgServer) sendMonitorData() error {
    log.Info("sendMonitorData")
    resp := protocol.NewCmdMonitor()

    // resp.SessionNum = (uint64)(len(self.sessions))
    // log.Info(resp)

    mb := NewMonitorBeat("monitor", self.cfg.MonitorBeatTime, 40, 10)

    if self.channels[protocol.SYSCTRL_MONITOR] != nil {
        for {
            // Refresh the count on every beat before broadcasting.
            resp.SessionNum = (uint64)(len(self.sessions))
            //log.Info(resp)
            mb.Beat(self.channels[protocol.SYSCTRL_MONITOR].Channel, resp)
        }
        // _, err := self.channels[protocol.SYSCTRL_MONITOR].Channel.Broadcast(libnet.Json(resp))
        // if err != nil {
        //  glog.Error(err.Error())
        //  return err
        // }
    }
    return nil
}
// scanDeadSession periodically sweeps the session table and expires
// clients that showed no activity since the previous sweep.
//
// Every ScanDeadSessionTimeout seconds each session's Alive flag is
// checked: still-false sessions (nothing arrived since the last sweep
// cleared the flag) are taken offline via procOffline; live ones get
// the flag cleared for the next round. The loop ends after cfg.Expire
// seconds.
func (self *MsgServer) scanDeadSession() {
    log.Info("scanDeadSession")
    timer := time.NewTicker(self.cfg.ScanDeadSessionTimeout * time.Second)
    // Release the ticker when the scan loop ends (the original leaked it).
    defer timer.Stop()
    ttl := time.After(self.cfg.Expire * time.Second)
loop:
    for {
        select {
        case <-timer.C:
            log.Info("scanDeadSession timeout")
            go func() {
                // NOTE(review): self.sessions is ranged here while other
                // goroutines may mutate it; the per-entry mutex does not
                // protect the map iteration itself — confirm intended.
                for id, s := range self.sessions {
                    self.scanSessionMutex.Lock()
                    if (s.State).(*base.SessionState).Alive == false {
                        log.Info("delete" + id)
                        self.procOffline(id)
                    } else {
                        s.State.(*base.SessionState).Alive = false
                    }
                    self.scanSessionMutex.Unlock()
                }
            }()
        case <-ttl:
            // Fixed: a bare `break` only left the select statement and
            // kept the for loop spinning forever; the labeled break
            // actually terminates the scan once the ttl expires.
            break loop
        }
    }
}
// procOnline marks client ID as online: it sets the session-cache
// Alive flag and bumps this server's alive-member counter on every
// topic the client belongs to.
func (self *MsgServer) procOnline(ID string) {
    // load all the topic list of this user
    sessionCacheData, err := self.sessionCache.Get(ID)
    if err != nil {
        log.Errorf("ID(%s) no session cache", ID)
        return
    }
    sessionCacheData.Alive = true
    self.sessionCache.Set(sessionCacheData)
    for _, topicName := range sessionCacheData.TopicList {
        topicCacheData, err := self.topicCache.Get(topicName)
        if err != nil {
            log.Error(err.Error())
            return
        }
        if topicCacheData == nil {
            // Cache miss: rebuild the Redis entry from the Mongo store.
            topicStoreData, err := self.mongoStore.GetTopicFromCid(topicName)
            if err != nil {
                log.Error(err.Error())
                return
            }
            topicCacheData = redis_store.NewTopicCacheData(topicStoreData)
        }
        // update AliveMemberNumMap[server]: one more alive member here
        if v, ok := topicCacheData.AliveMemberNumMap[self.cfg.LocalIP]; ok {
            topicCacheData.AliveMemberNumMap[self.cfg.LocalIP] = v + 1
        } else {
            topicCacheData.AliveMemberNumMap[self.cfg.LocalIP] = 1
        }
        self.topicCache.Set(topicCacheData)
    }
}
// procOffline takes client ID offline: it closes and drops the local
// session, clears the session-cache Alive flag, and decrements this
// server's alive-member counter on every topic the client belongs to
// (clamped at zero). A client with no local session is ignored.
func (self *MsgServer) procOffline(ID string) {
    // load all the topic list of this user
    if self.sessions[ID] != nil {
        self.sessions[ID].Close()
        delete(self.sessions, ID)

        sessionCacheData, err := self.sessionCache.Get(ID)
        if err != nil {
            log.Errorf("ID(%s) no session cache", ID)
            return
        }
        sessionCacheData.Alive = false
        self.sessionCache.Set(sessionCacheData)
        for _, topicName := range sessionCacheData.TopicList {
            topicCacheData, _ := self.topicCache.Get(topicName)
            if topicCacheData != nil {
                // update AliveMemberNumMap[server]: one fewer alive member
                if v, ok := topicCacheData.AliveMemberNumMap[self.cfg.LocalIP]; ok {
                    if v > 0 {
                        topicCacheData.AliveMemberNumMap[self.cfg.LocalIP] = v - 1
                    } else {
                        topicCacheData.AliveMemberNumMap[self.cfg.LocalIP] = 0
                    }
                    self.topicCache.Set(topicCacheData)
                }
            }
        }
    }
}
// procJoinTopic adds member to topicName, updating both the Redis
// caches and the Mongo store for the session and the topic.
//
// Fails with TOPIC_NOT_EXIST / MEMBER_EXIST / NOT_ONLINE / DENY_ACCESS
// (a watch device may belong to at most one topic). On success the
// topic's alive-member counter for the member's msg_server is bumped
// when the member is currently online.
func (self *MsgServer) procJoinTopic(member *mongo_store.Member, topicName string) error {
    log.Info("procJoinTopic")
    var err error
    // check whether the topic exist
    // NOTE(review): the error from topicCache.Get is not inspected; a
    // Redis failure is indistinguishable from "topic absent" here.
    topicCacheData, err := self.topicCache.Get(topicName)
    if topicCacheData == nil {
        log.Warningf("TOPIC %s not exist", topicName)
        return common.TOPIC_NOT_EXIST
    }
    if topicCacheData.MemberExist(member.ID) {
        log.Warningf("ClientID %s exists in topic %s", member.ID, topicName)
        return common.MEMBER_EXIST
    }

    sessionCacheData, err := self.sessionCache.Get(member.ID)
    if sessionCacheData == nil {
        log.Warningf("Client %s not online", member.ID)
        return common.NOT_ONLINE
    }
    // Watch can only be added in ONE topic
    //fmt.Println("len of topic list of %s: %d", member.ID, len(sessionCacheData.TopicList))
    if member.Type == protocol.DEV_TYPE_WATCH && len(sessionCacheData.TopicList) >= 1 {
        log.Warningf("Watch %s is in topic %s", member.ID, sessionCacheData.TopicList[0])
        return common.DENY_ACCESS
    }
    // session cache and store
    sessionCacheData.AddTopic(topicName)
    err = self.sessionCache.Set(sessionCacheData)
    if err != nil {
        log.Error(err.Error())
        return err
    }
    err = self.mongoStore.Set(sessionCacheData.SessionStoreData)
    if err != nil {
        log.Error(err.Error())
        return err
    }

    // topic cache and store
    topicCacheData.AddMember(member)
    // update AliveMemberNumMap[server]: count the new member on the
    // msg_server it is connected to, but only if it is online now.
    if sessionCacheData.Alive {
        if v, ok := topicCacheData.AliveMemberNumMap[sessionCacheData.MsgServerAddr]; ok {
            topicCacheData.AliveMemberNumMap[sessionCacheData.MsgServerAddr] = v + 1
        } else {
            topicCacheData.AliveMemberNumMap[sessionCacheData.MsgServerAddr] = 1
        }
    }
    err = self.topicCache.Set(topicCacheData)
    if err != nil {
        log.Error(err.Error())
        return err
    }
    err = self.mongoStore.Set(topicCacheData.TopicStoreData)
    if err != nil {
        log.Error(err.Error())
        return err
    }
    return nil
}
// procQuitTopic removes clientID from topicName.
//
// The topic cache and store are always updated; the session side is
// updated in cache+store when the client has a session cache entry,
// otherwise only in the Mongo store. An online client also decrements
// the topic's alive-member counter for its msg_server (clamped at 0).
// Fails with TOPIC_NOT_EXIST or NOT_MEMBER.
func (self *MsgServer) procQuitTopic(clientID string, topicName string) error {
    log.Info("procQuitTopic")
    var err error
    var topicCacheData *redis_store.TopicCacheData
    var sessionCacheData *redis_store.SessionCacheData
    var sessionStoreData *mongo_store.SessionStoreData
    // check whether the topic exist
    topicCacheData, err = self.topicCache.Get(topicName)
    if topicCacheData == nil {
        log.Warningf("TOPIC %s not exist", topicName)
        return common.TOPIC_NOT_EXIST
    }
    if !topicCacheData.MemberExist(clientID) {
        log.Warningf("member %s is not in topic %s", clientID, topicName)
        return common.NOT_MEMBER
    }
    // update topic cache and store
    topicCacheData.RemoveMember(clientID)
    err = self.topicCache.Set(topicCacheData)
    if err != nil {
        log.Error(err.Error())
        return err
    }
    log.Infof("member %s removed from topic CACHE %s", clientID, topicName)
    err = self.mongoStore.Set(topicCacheData.TopicStoreData)
    if err != nil {
        log.Error(err.Error())
        return err
    }
    log.Infof("member %s removed from topic STORE %s", clientID, topicName)

    // update session cache and store
    sessionCacheData, err = self.sessionCache.Get(clientID)
    if sessionCacheData != nil {
        log.Infof("remove topic %s from Client CACHE %s", topicName, clientID)
        sessionCacheData.RemoveTopic(topicName)
        err = self.sessionCache.Set(sessionCacheData)
        if err != nil {
            log.Error(err.Error())
            return err
        }
        log.Infof("topic %s removed from Client CACHE %s", topicName, clientID)
        err = self.mongoStore.Set(sessionCacheData.SessionStoreData)
        if err != nil {
            log.Error(err.Error())
            return err
        }
        log.Infof("topic %s removed from Client STORE %s", topicName, clientID)
        if sessionCacheData.Alive {
            // update AliveMemberNumMap[server]: one fewer alive member
            if v, ok := topicCacheData.AliveMemberNumMap[sessionCacheData.MsgServerAddr]; ok {
                if v > 0 {
                    topicCacheData.AliveMemberNumMap[sessionCacheData.MsgServerAddr] = v - 1
                } else {
                    topicCacheData.AliveMemberNumMap[sessionCacheData.MsgServerAddr] = 0
                }
                self.topicCache.Set(topicCacheData)
            }
        }
    } else {
        // No session cache entry: fall back to the persistent store.
        sessionStoreData, err = self.mongoStore.GetSessionFromCid(clientID)
        if sessionStoreData == nil {
            log.Warningf("ID %s not registered in STORE", clientID)
        } else {
            log.Infof("remove topic %s from Client STORE %s", topicName, clientID)
            sessionStoreData.RemoveTopic(topicName)
            err = self.mongoStore.Set(sessionStoreData)
            if err != nil {
                log.Error(err.Error())
                return err
            }
            log.Infof("topic %s removed from Client STORE %s", topicName, clientID)
        }
    }
    return nil
}
// parseProtocol decodes one JSON command from a client connection and
// dispatches it to the matching ProtoProc handler.
//
// Receiving any message marks the session alive for scanDeadSession.
// All handlers run under readMutex because several client sessions may
// hit the Redis caches concurrently. Unknown command names are ignored
// and nil is returned.
//
// Cleanup: the original repeated an identical
// `if err != nil { log; return err }` triplet after all 16 handler
// calls; the cases now only dispatch and the error is logged once.
func (self *MsgServer) parseProtocol(cmd []byte, session *libnet.Session) error {
    var c protocol.CmdSimple

    // Any traffic from the peer counts as a liveness signal.
    if session.State != nil {
        self.scanSessionMutex.Lock()
        session.State.(*base.SessionState).Alive = true
        self.scanSessionMutex.Unlock()
    }

    err := json.Unmarshal(cmd, &c)
    if err != nil {
        log.Error("error:", err)
        return err
    }

    pp := NewProtoProc(self)

    self.readMutex.Lock()
    defer self.readMutex.Unlock()

    log.Infof("[%s]->[%s]", session.Conn().RemoteAddr().String(), self.cfg.LocalIP)
    log.Info(c)

    switch c.GetCmdName() {
    case protocol.SEND_PING_CMD:
        err = pp.procPing(&c, session)
    case protocol.SUBSCRIBE_CHANNEL_CMD:
        // Channel subscription has no error result.
        pp.procSubscribeChannel(&c, session)
    case protocol.REQ_LOGIN_CMD:
        err = pp.procLogin(&c, session)
    case protocol.REQ_LOGOUT_CMD:
        err = pp.procLogout(&c, session)
    case protocol.REQ_SEND_P2P_MSG_CMD:
        err = pp.procSendMessageP2P(&c, session)
    case protocol.ROUTE_SEND_P2P_MSG_CMD:
        err = pp.procRouteMessageP2P(&c, session)
    case protocol.IND_ACK_P2P_STATUS_CMD, protocol.ROUTE_ACK_P2P_STATUS_CMD:
        // Both p2p-ack variants share one handler.
        err = pp.procP2pAck(&c, session)
    case protocol.REQ_SEND_TOPIC_MSG_CMD:
        err = pp.procSendTopicMsg(&c, session)
    case protocol.ROUTE_SEND_TOPIC_MSG_CMD:
        err = pp.procRouteTopicMsg(&c, session)
    case protocol.REQ_CREATE_TOPIC_CMD:
        err = pp.procCreateTopic(&c, session)
    case protocol.REQ_ADD_2_TOPIC_CMD:
        err = pp.procAdd2Topic(&c, session)
    case protocol.REQ_KICK_TOPIC_CMD:
        err = pp.procKickTopic(&c, session)
    case protocol.REQ_JOIN_TOPIC_CMD:
        err = pp.procJoinTopic(&c, session)
    case protocol.REQ_QUIT_TOPIC_CMD:
        err = pp.procQuitTopic(&c, session)
    case protocol.REQ_GET_TOPIC_LIST_CMD:
        err = pp.procGetTopicList(&c, session)
    case protocol.REQ_GET_TOPIC_PROFILE_CMD:
        err = pp.procGetTopicProfile(&c, session)
    }

    if err != nil {
        log.Error("error:", err)
    }
    return err
}
<file_sep>/README.md

FishChatServer
======
FishChat(鱼传 - `鱼传尺素`) - FishChat是一款优秀的即时通讯软件(IM), 它集合了市面上已有产品的优点, 并具备**智能硬件网关管理(代码不是很成熟, 暂不公开)**. 我们的目标是:让智能硬件“聊”起来!
> **NOTE:** 鱼传取自古乐府《饮马长城窟行》`客从远方来, 遗我双鲤鱼. 呼儿烹鲤鱼, 中有尺素书.`
FishChat的访谈 : http://www.oschina.net/question/2306979_236368
FishChat Android客户端见 https://github.com/oikomi/FishChatAndroid (紧张开发中, not usable yet)
**FishChatServer的Java版本同时也在开发中 (https://github.com/oikomi/FishChatServerJava)**
Table of Contents
=================
* [FishChatServer](#fishChatServer)
* [Status](#status)
* [Todo](#todo)
* [FIXME](#FIXME)
* [系统架构](#系统架构)
* [关键服务器详解](#关键服务器详解)
* [gateway](#gateway)
* [msg_server](#msg_server)
* [router](#router)
* [manager](#manager)
* [monitor](#monitor)
* [存储](#存储)
* [依赖](#依赖)
* [部署](#部署)
* [监控](#监控)
* [测试](#测试)
* [技术细节](#技术细节)
* [Copyright & License](#Copyright & License)
Status
======
It is not usable yet and is still under early development.
[Back to TOC](#table-of-contents)
Todo
======
- [x] support p2p communication
- [x] support group communication
- [ ] support message encryption
- [ ] support file transfer
- [ ] support audio
- [ ] support video
[Back to TOC](#table-of-contents)
FIXME
======
- [ ] signal process (SIGPIPE ...)
- [ ] timer support
[Back to TOC](#table-of-contents)
系统架构
======

[Back to TOC](#table-of-contents)
关键服务器详解
======
### gateway
gateway server主要是接受client请求,进行通用的功能设置,目前这块只做了分配msg_server的功能,后面可根据需要进行扩展
在gateway server的配置文件中最重要的是配置msg_server列表
<pre><code>"MsgServerList" : [
"192.168.159.169:19000",
"192.168.159.169:19001"
],
</code></pre>
### msg_server
msg_server是消息的主体,维护着客户端连接和keepalive,同时要注意router、manager和monitor都订阅了msg_server的channel
<pre><code>
SYSCTRL_CLIENT_STATUS = "/sysctrl/client-status"
SYSCTRL_TOPIC_STATUS = "/sysctrl/topic-status"
SYSCTRL_TOPIC_SYNC = "/sysctrl/topic-sync"
SYSCTRL_SEND = "/sysctrl/send"
SYSCTRL_MONITOR = "/sysctrl/monitor"
STORE_CLIENT_INFO = "/store/client-info"
STORE_TOPIC_INFO = "/store/topic-info"
</code></pre>
这些channel保证了msg_server在事件触发时会广播通知后面的router、manager和monitor服务
### router
router顾名思义是做了msg_server之间的消息转发
### manager
manager主要是管理client信息存储、topic信息存储、离线消息存储等等,通过它和redis、mongodb联系
### monitor
monitor主要是收集监控各服务器状态信息,目前monitor是可选项,可按需要启动它
[Back to TOC](#table-of-contents)
存储
======
FishChatServer通过Redis做cache以及使用MongoDB做持久化存储
[Back to TOC](#table-of-contents)
依赖
======
FishChatServer采用了redis和MongoDB分别做cache和持久化存储,需要安装两者的驱动:
FishChatServer的web监控用到了beego框架:
<pre><code>
go get gopkg.in/mgo.v2
go get github.com/garyburd/redigo
go get github.com/astaxie/beego
</code></pre>
[Back to TOC](#table-of-contents)
部署
======
FishChatServer采用分布式可伸缩部署方式(各类服务器角色都可以动态增减)。如果没有多机条件,可以单机部署:
单机测试部署(建议配置)
* gateway一台
* msg_server两台
* router一台
* manager一台
* monitor一台
> **NOTE:** FishChatServer依赖**redis**,请先启动redis,让它监听在默认端口
同时需要注意:
> **NOTE:** 注意FishChatServer的持久化存储采用了MongoDB(http://www.mongodb.org/), 需要启动mongod
<pre><code>
./gateway
./msg_server -conf_file=msg_server.19001.json
./msg_server -conf_file=msg_server.19000.json
./router
./manager
./monitor
</code></pre>
> **NOTE:** 可以通过修改各文件夹下面的json配置文件配置服务器参数
按上面的默认启动方式,查看系统的监听端口如下

> **NOTE:** router、manager和monitor一定要在msg_server启动之后启动,因为他们都订阅了msg_server的channel
[Back to TOC](#table-of-contents)
监控
======
当启动了monitor后,monitor将在30000端口开启一个http监听,提供web化的服务器集群监控
技术方案采用angularJS和bootstrap(暂时没有太多精力做)

[Back to TOC](#table-of-contents)
测试
======
<pre><code>
测试点对点聊天
cd client/client_p2p
go build
测试群聊
cd client/client_topic
go build
</code></pre>
> **NOTE:** FishChatAndroid暂时不可用
[Back to TOC](#table-of-contents)
技术细节
======
FishChatServer的消息系统完全采用Golang开发(https://golang.org/)
音视频方案
---------------------
音视频采用nginx-rtmp架构,借助ffmpeg技术,客户端推送rtmp流,服务器输出hls流
关键协议
---------------------
## 1. client to gateway
##### REQ_MSG_SERVER_CMD
**format:** *REQ_MSG_SERVER*
client use this cmd to get a msg_server addr from gateway
##### SELECT_MSG_SERVER_FOR_CLIENT_CMD
**format:** *SELECT_MSG_SERVER_FOR_CLIENT msg_server_ip*
gateway return msg_server addr to client
## 2. client to msg_server
##### SEND_PING_CMD
**format:** *SEND_PING*
client use this cmd to keep alive status in msg_server.
##### SEND_CLIENT_ID_CMD
**format:** *SEND_CLIENT_ID CLIENT_ID*
client use this cmd to send unique ID to msg_server.
##### LOCATE_TOPIC_MSG_ADDR_CMD
**format:** *LOCATE_TOPIC_MSG_ADDR MsgServerAddr topic_name*
send the msg_server addr that having the topic you want.
##### CREATE_TOPIC_CMD
**format:** *CREATE_TOPIC topic_name*
client use this cmd to CREATE TOPIC.
##### JOIN_TOPIC_CMD
**format:** *JOIN_TOPIC topic_name ID*
client use this cmd to JOIN TOPIC.
## 3. router,manager,monitor to msg_server
##### SUBSCRIBE_CHANNEL_CMD
**format:** *SUBSCRIBE_CHANNEL channelName*
router,manager,monitor use this cmd to SUBSCRIBE channel from msg_server.
##### ROUTE_MESSAGE_P2P_CMD
**format:** *ROUTE_MESSAGE_P2P send2ID send2Msg*
router use this cmd to router message between msg_servers.
[Back to TOC](#table-of-contents)
Copyright & License
===================
Copyright 2014-2015 <NAME> (<EMAIL>). All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
<file_sep>/monitor/proto_proc.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
// "github.com/oikomi/FishChatServer/log"
// "github.com/oikomi/FishChatServer/libnet"
// "github.com/oikomi/FishChatServer/protocol"
// "github.com/oikomi/FishChatServer/common"
)
// init wires the glog-style flags so log output also goes to stderr.
func init() {
    flag.Set("alsologtostderr", "true")
    flag.Set("log_dir", "false")
}
// ProtoProc bundles the protocol handlers of the monitor service and
// holds a back-reference to the Monitor that owns them.
type ProtoProc struct {
    Monitor *Monitor
}

// NewProtoProc wraps the given Monitor in a ProtoProc.
func NewProtoProc(r *Monitor) *ProtoProc {
    return &ProtoProc{Monitor: r}
}
//func (self *ProtoProc)procSendMsgP2P(cmd protocol.Cmd, session *libnet.Session) error {
// log.Info("procSendMsgP2P")
// var err error
// send2ID := cmd.GetArgs()[0]
// send2Msg := cmd.GetArgs()[1]
// log.Info(send2Msg)
// self.Monitor.readMutex.Lock()
// defer self.Monitor.readMutex.Unlock()
// store_session, err := common.GetSessionFromCID(self.Monitor.sessionStore, send2ID)
// if err != nil {
// log.Warningf("no ID : %s", send2ID)
// return err
// }
// log.Info(store_session.MsgServerAddr)
// cmd.ChangeCmdName(protocol.ROUTE_MESSAGE_P2P_CMD)
// err = self.Monitor.msgServerClientMap[store_session.MsgServerAddr].Send(libnet.Json(cmd))
// if err != nil {
// log.Error("error:", err)
// return err
// }
// return nil
//}
<file_sep>/common/err.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"errors"
)
// Sentinel errors shared across the FishChat servers; handlers compare
// against these values directly.
var (
    // NOTE(review): the name and message were presumably meant to be
    // "NOTFOUND"; both are kept as-is because callers match on them.
    NOTFOUNT        = errors.New("NOTFOUNT")
    NOTOPIC         = errors.New("NO TOPIC")
    SYNTAX_ERROR    = errors.New("SYNTAX ERROR")
    NOT_LOGIN       = errors.New("NOT LOGIN")
    DENY_ACCESS     = errors.New("DENY ACCESS")
    TOPIC_EXIST     = errors.New("TOPIC EXIST")
    TOPIC_NOT_EXIST = errors.New("TOPIC NOT EXIST")
    MONGO_ACCESS    = errors.New("MONGO NOT ACCESSABLE")
    NOT_ONLINE      = errors.New("NOT ONLINE")
    NOT_MEMBER      = errors.New("NOT MEMBER")
    MEMBER_EXIST    = errors.New("MEMBER EXIST")
)
<file_sep>/libnet/broadcast.go
package libnet
import "github.com/oikomi/FishChatServer/libnet/syncs"
// Broadcaster encodes a message once and sends it to many sessions,
// which are supplied by the fetcher callback.
type Broadcaster struct {
    protocol ProtocolState              // used to frame the shared buffer
    fetcher  func(func(*Session))       // enumerates the target sessions
}

// BroadcastWork tracks the asynchronous send to one session.
type BroadcastWork struct {
    Session *Session
    AsyncWork
}

// NewBroadcaster creates a broadcaster over the given protocol state
// and session enumerator.
func NewBroadcaster(protocol ProtocolState, fetcher func(func(*Session))) *Broadcaster {
    return &Broadcaster{
        protocol: protocol,
        fetcher:  fetcher,
    }
}
// Broadcast sends one message to every fetched session. The message is
// encoded only once into a shared, reference-counted buffer, so this is
// cheaper than sending to each session individually. Returns one
// BroadcastWork per session so callers can await each async send.
func (b *Broadcaster) Broadcast(encoder Encoder) ([]BroadcastWork, error) {
    buffer := newOutBuffer()
    b.protocol.PrepareOutBuffer(buffer, 1024)
    if err := encoder(buffer); err != nil {
        // Encoding failed: return the buffer to the pool and bail out.
        buffer.free()
        return nil, err
    }
    buffer.isBroadcast = true
    works := make([]BroadcastWork, 0, 10)
    b.fetcher(func(session *Session) {
        // Bump the buffer's reference count once per target session.
        buffer.broadcastUse()
        works = append(works, BroadcastWork{
            session,
            session.asyncSendBuffer(buffer),
        })
    })
    return works, nil
}
// Channel maintains a group of sessions, normally used to classify
// broadcast targets. All access to sessions is guarded by mutex.
type Channel struct {
mutex syncs.RWMutex
sessions map[uint64]channelSession // keyed by Session.Id()
broadcaster *Broadcaster
// channel state
State interface{} // free-form per-channel state owned by the user of the channel
}
// channelSession decorates a member session with the callback to run
// when it is kicked out of the channel (may be nil).
type channelSession struct {
*Session
KickCallback func()
}
// NewChannel builds an empty channel whose broadcaster speaks the
// given protocol and fetches targets from the channel membership.
func NewChannel(protocol Protocol) *Channel {
	c := &Channel{sessions: map[uint64]channelSession{}}
	c.broadcaster = NewBroadcaster(protocol.New(c), c.Fetch)
	return c
}
// Broadcast encodes the message once and sends it to every session in
// the channel; see Broadcaster.Broadcast for the returned work items.
func (channel *Channel) Broadcast(encoder Encoder) ([]BroadcastWork, error) {
	works, err := channel.broadcaster.Broadcast(encoder)
	return works, err
}
// Len reports how many sessions are currently in this channel.
func (channel *Channel) Len() int {
	channel.mutex.RLock()
	n := len(channel.sessions)
	channel.mutex.RUnlock()
	return n
}
// Join adds the session to the channel. kickCallback (may be nil) is
// invoked when the session is later kicked out via Kick.
// A close callback is registered so a closing session automatically
// leaves the channel.
// NOTE(review): assumes AddCloseCallback never invokes the callback
// synchronously — Exit re-locks channel.mutex and would deadlock here
// otherwise; confirm against the Session implementation.
func (channel *Channel) Join(session *Session, kickCallback func()) {
	channel.mutex.Lock()
	defer channel.mutex.Unlock()
	session.AddCloseCallback(channel, func() {
		channel.Exit(session)
	})
	channel.sessions[session.Id()] = channelSession{session, kickCallback}
}
// Exit removes the session from the channel and unregisters the close
// callback that Join installed.
func (channel *Channel) Exit(session *Session) {
	channel.mutex.Lock()
	defer channel.mutex.Unlock()
	delete(channel.sessions, session.Id())
	session.RemoveCloseCallback(channel)
}
// Kick removes the session with the given id from the channel and, if
// the member had one, fires its kick callback (still holding the lock,
// as in the original design).
func (channel *Channel) Kick(sessionId uint64) {
	channel.mutex.Lock()
	defer channel.mutex.Unlock()
	member, ok := channel.sessions[sessionId]
	if !ok {
		return
	}
	delete(channel.sessions, sessionId)
	if member.KickCallback != nil {
		member.KickCallback()
	}
}
// Fetch calls callback for each session in the channel while holding
// the read lock. NOTE: invoking Kick() or Exit() from the callback
// will deadlock.
func (channel *Channel) Fetch(callback func(*Session)) {
	channel.mutex.RLock()
	defer channel.mutex.RUnlock()
	for _, member := range channel.sessions {
		callback(member.Session)
	}
}
<file_sep>/儿童手表协议V2.md
# 儿童手表协议文档 #
## 1. 几个概念 ##
- 1.1. 终端类型
. Device: 儿童手表客户端, IMEI为其唯一标识, 用户名和密码均为IMEI, 另外, IMEI也会在Device上以二维码显示.
. Client: 家长应用客户端, 运行在android或iOS手机上, 可通过扫码添加关注Watch
- 1.2. Topic, 也就是聊天群组
Topic只能由Client创建,创建者Client则为此Topic管理者,只有管理者才有权限增删此topic成员. 考虑到儿童手表客户端无法选择聊天群组,限定: 一个Device最多只能存在一个topic中,一个topic可以有多个device. 这样的话, 手表被家长添加到自己创建的群组中后, 其他任何人想要再关注该手表都会被服务器拒绝, 可有效保护儿童隐私和安全.
为简化客户端和服务器, 限制用户只能登录到一个MsgServer, 哪怕是Topic消息也可router转发, 实际上, 手表项目也没有P2P消息需求, 但是作为一个通用IM, 可暂时保留此功能.
- 1.3. Gateway
Device和Client周知的服务器, 完成注册/登陆和分配MsgServer的功能
Gateway获取MsgServer的负载平衡算法, 可以是:
a) 随机: 用于DEMO版本
b) 最少连接算法: 选择当前连接数最少的服务器
c) 或者更复杂点, 设置一个连接数阈值, 低于该阈值表示低负荷, 高于该阈值为高负荷, 所有服务器均处于高负荷时, 按最少连接算法, 否则在所有低负荷服务器中最高负荷者, 这样可减少消息经router转发概率
- 1.4. MsgServer
MsgServer是消息的主体,维护着客户端连接和keeplive, 可启动多个MsgServer, 由Gateway根据当前负载选择合适的服务器,
- 1.5. Router
当消息接受者跟发送者不在同一个MsgServer上 , 由Router来完成MsgServer之间的消息转发 ,
- 1.6. Manager
Manager主要是管理client信息存储、topic信息存储、离线消息存储等等,通过它和redis、mongodb联系
## 2. 存储 ##
因为限制了一个Client/Device只能登陆一个MsgServer, 考虑到手表项目的特殊性, 与FishChatServer对比, 此处去掉了Topic Storage和Cache, 更加精简
- 2.1. Client Storage on MongoDB
mongodb存储client信息如下:
ClientID, // device是IMEI,client是电话号码
ClientPwd, // device为IMEI或nil, client是密码, 正式版本密码不能是明文, 要使用单向hash算法加密保存
ClientName, // 名字
ClientType, // device是"D", client是"C"
TopicList[], // device或client所在的topic列表, 登录时所在topic也要加载到cache中, 对device来说,topic只有一个.
- 2.2. Client Cache on Redis
redis除上述storage信息外,还cache如下信息
SessionID, // 分配给客户端的session id, 凭此id客户端无须再gateway校验密码登陆直至id失效
ClientAddr, // 客户端地址
MsgServerAddr, // 登录到的MsgServer地址
Alive, // 客户端状态
MaxAge // 信息最长有效时间
- 2.3. Topic Storage on MongoDB
注: Topic只能由client来创建
mongodb存储Topic信息如下:
TopicName, // Topic名称
CreaterID, // 创建者ClientID, 也就是管理员, 同时也是第一个member
MemberList[], // 成员列表, 可以是device或client
- 2.4. Topic Cache on Redis
. redis除上述storage信息外,还cache如下信息
MaxAge // 信息最长有效时间
## 3. 场景描述 ##
- 3.1. Device注册&登陆(Gateway)
Device注册和登录都是提供本机IMEI号,简单化处理,将注册和登录合二为一,统一称之为登录, Gateway收到Device登录请求,首先(TODO: 正式版本需要考虑增加Device合法性检测) 查询mongodb是否有其注册信息,没有则注册[参考注册]。然后分配SessionID, 并随之返回真正的服务器地址.
- 3.2. Device登陆(MsgServer)
成功登陆Gateway后, Device需要登陆到MsgServer, 此时无须再做身份验证, 仅需提供ClientID和SessionID即可, MsgServer实际上是做Online处理, 如加载mongodb Client Storage到redis cache中, 处理OfflineMsg发送
- 3.3. Client注册&登陆(Gateway)
Client必须手机注册, 正常流程该是先验证手机, 获取验证码, 然后随同设置的密码提供给Gateway进行注册, 初始DEMO版本采取"登陆即注册"(同2.1), Client提供(手机号, 密码, 名称)发送登陆请求给Gateway, Gateway查询mongodb该手机号是否有注册, 未注册则直接注册, 否则须验证密码, 成功后分配SessionID并随之返回服务器地址.
- 3.4. Client登陆(MsgServer)
成功登陆Gateway后, Client需要登陆到MsgServer, 此时无须再做密码验证, 仅需提供SessionID和ClientID即可, MsgServer实际上是做Online处理, 如加载mongodb Client Storage到redis cache中, 处理OfflineMsg发送
- 3.5. Client创建Topic(MsgServer)
Client发送请求CreateTopic(ClientID, TopicName), 收到请求后先查询同名topic是否存在(先redis cache, 然后mongodb), 存在则返回错误, 否则创建该topic, 同步更新client和topic的redis cache和mongodb.
- 3.6. Client添加成员到Topic中(MsgServer)
Client发送请求Add2Topic(ClientID, TopicName, member), 收到请求后先查询同名topic是否存在(先redis cache, 然后mongodb), 不存在则返回错误, 否则更新member与topic映射关系,同步更新member和topic的redis cache和mongodb.
正式版本可通过扫二维码方式(实际上获取Device的IMEI号)可将device添加到现有群组中, 或新建群组.
- 3.7. Client申请加入到Topic中(MsgServer)
Client发送请求JoinTopic(ClientID, TopicName), 收到请求后先查询同名topic是否存在(先redis cache, 然后mongodb), 不存在则返回错误, 否则更新client和topic映射关系,同步更新client和topic的redis cache和mongodb.
注: 正式版本必须要加上限制,必须该topic创建者同意才能加入。这要求server须cache此请求,然后发送消息给创建者,等待其返回同意消息,如果创建者不在线,还要cache该消息,等待其上线才发送,注意,cache的消息都有生存期。
- 3.8. Device/Client发送topic消息(MsgServer)
因为device只存在于唯一一个topic中,所以device发送时无须提供topicname, server识别到发送者为device, 则从redis cache中获取其所属topic.
Server从redis cache中获取该Topic的MemberList[], 对每一个member(跳过Sender), 通过其ClientID从redis cache中查询是否在线, 在线则发送消息(如果不在同一个MsgServer还须Router转发), 不在线则缓存到OfflineMsgCache中, 等待其上线重发.
## 4. 协议规范 ##
- 4.1. 协议格式
type CmdSimple struct {
CmdName string
Args []string
}
对于非golang客户端,报文格式为json格式字符串,因此客户端要实现json格式与上述数据结构的转换, 如下所示:
{
"CmdName" :"<CmdName>",
"Args" : [
"<arg0>",
"<arg1>",
"<arg2>",
......
"<argN>",
]
}
- 4.2. LOGIN
device/client -> gateway
REQ_LOGIN_CMD
arg0: ClientID //用户ID
arg1: ClientType //终端类型"C" or "D",是client还是device
arg2: ClientPwd //nil for Device/password for Client
参见[3.1~3.4], gateway验证通过后,会创建对应的session cache, 并为之分配唯一的session ID, 有此ID,server无须再次检验密码,直至其失效。每次收到客户端心跳时,server要更新其在线时间。
gateway -> device/client
RSP_LOGIN_CMD
arg0: LOGIN_SUCCESS/LOGIN_FAILED
arg1: uuid
arg2: MsgServerAddr
登录到从gateway获取到的MsgServerAddr
device/client -> MsgServer
REQ_LOGIN_CMD
arg0: ClientID //用户ID
arg1: uuid
MsgServer -> device/client
RSP_LOGIN_CMD
arg0: LOGIN_SUCCESS/LOGIN_FAILED
更新client session,保存client cache信息到redis(此处client和device通用)
- 4.3. LOGOUT
device/client -> MsgServer
REQ_LOGOUT_CMD
arg0: ClientID //用户ID
MsgServer -> device/client
RSP_LOGOUT_CMD
arg0: SUCCESS/FAILED
释放session cache和相应的session id.
- 4.4. CreateTopic [参见3.5]
client -> MsgServer
REQ_CREATE_TOPIC_CMD
arg0: TopicName //群组名
arg1: ClientName //用户在Topic中的Name, 比如老爸/老妈
MsgServer -> client
RSP_CREATE_TOPIC_CMD
arg0: SUCCESS/FAILED
- 4.5. Add2Topic 添加新成员 [参见3.6]
client -> MsgServer
REQ_ADD_2_TOPIC_CMD
arg0: TopicName //群组名
arg1: NewClientID //用户ID
arg2: NewClientName //用户在Topic中的Name, 对于device, 可以是儿子/女儿
server要依次做如下检查:
. TopicName是否存在
. ClientID是否为该topic管理员
. NewClientID是否存在
. NewClientID如果是device, 要检查是否未被监控中(没加入到任何topic)
通过所有检查, 才能添加新成员
MsgServer -> client
RSP_ADD_2_TOPIC_CMD
arg0: SUCCESS/FAILED
- 4.6. KickTopic 移除topic成员
client -> MsgServer
REQ_KICK_TOPIC_CMD
arg0: TopicName //群组名
arg1: NewClientID //待移除的成员用户ID
MsgServer -> client
RSP_KICK_TOPIC_CMD
arg0: SUCCESS/FAILED
- 4.7. JoinTopic 申请加入topic [参见3.7.]
client -> MsgServer
REQ_JOIN_TOPIC_CMD
arg0: TopicName //群组名
arg1: ClientName //用户在Topic中的Name, 比如老爸/老妈
MsgServer -> client
RSP_JOIN_TOPIC_CMD
arg0: SUCCESS/FAILED
- 4.8. QuitTopic 退出topic
client -> MsgServer
REQ_QUIT_TOPIC_CMD
arg0: TopicName //群组名
MsgServer -> client
RSP_QUIT_TOPIC_CMD
arg0: SUCCESS/FAILED
- 4.9. 发送Topic消息 [参见3.8]
device/client -> MsgServer -> Router
REQ_SEND_TOPIC_MSG_CMD
arg0: Msg //消息内容
arg1: TopicName //群组名, device无须提供
返回给消息发送者的消息
MsgServer -> device/client
RSP_SEND_TOPIC_MSG_CMD
arg0: SUCCESS/FAILED
通过Router转发消息(对终端开发者不可见)
Router -> MsgServer
ROUTE_SEND_TOPIC_MSG_CMD
arg0: Msg //消息内容
arg1: TopicName //群组名
arg2: ClientID //发送方用户ID
arg3: ClientType //发送方终端类型,是client还是device
发送给消息接受者的消息
MsgServer -> device/client
REQ_SEND_TOPIC_MSG_CMD
arg0: Msg //消息内容
arg1: TopicName //群组名
arg2: ClientID //发送方用户ID
arg3: ClientType //发送方终端类型,是client还是device
- 4.10. Client获取Topic列表
device/client -> MsgServer
REQ_GET_TOPIC_LIST_CMD
返回给消息发送者的消息
MsgServer -> device/client
RSP_GET_TOPIC_LIST_CMD
arg0: SUCCESS/FAILED
arg1: TopicNum // topic数目,后面跟随该数目的TopicName
arg2: TopicName1
arg3: TopicName2
arg4: TopicName3
- 4.11. Client获取Topic成员列表
device/client -> MsgServer
REQ_GET_TOPIC_MEMBER_CMD
arg0: TopicName
如果ClientID不是TopicName的成员,则返回失败
返回给消息发送者的消息
MsgServer -> device/client
RSP_GET_TOPIC_MEMBER_CMD
arg0: SUCCESS/FAILED
arg1: MemberNum // topic member数目,后面跟随该数目的member
arg2: Member1ID
arg3: Member1Name
arg4: Member2ID
arg5: Member2Name
- 4.12. 发送P2P消息
device/client -> MsgServer -> Router
REQ_SEND_P2P_MSG_CMD
arg0: Sent2ID //接收方用户ID
arg1: Msg //消息内容
返回给消息发送者的消息
MsgServer -> device/client
RSP_SEND_P2P_MSG_CMD
arg0: SUCCESS/FAILED
arg1: uuid // MsgServer分配的消息uuid,发送方根据此uuid确定该消息状态
IND_ACK_P2P_MSG_CMD
arg0: uuid // 发送方知道uuid对应的已发送的消息已送达
arg1: SENT/READ // 发送方知道uuid对应的消息状态:已送达/已读
通过Router转发消息(对终端开发者不可见)
MsgServer -> Router
REQ_SEND_P2P_MSG_CMD
arg0: Sent2ID //接收方用户ID
arg1: Msg //消息内容
arg2: FromID //发送方用户ID
arg3: uuid //MsgServer分配的消息uuid
Router -> MsgServer
ROUTE_SEND_P2P_MSG_CMD
arg0: Sent2ID //接收方用户ID
arg1: Msg //消息内容
arg2: FromID //发送方用户ID
arg3: uuid //MsgServer分配的消息uuid
发送给消息接受者的消息
MsgServer -> device/client
REQ_SEND_P2P_MSG_CMD
arg0: Msg //消息内容
arg1: FromID //发送方用户ID
arg2: uuid //MsgServer分配的消息uuid,
//可选,如果提供了则须IND_ACK_P2P_MSG_CMD(ClientID, uuid)
- 4.13. 心跳
device/client -> MsgServer
REQ_SEND_PING_CMD
<file_sep>/RDAWatchServer环境搭建.md
# RDAWatchServer环境搭建 #
## git环境搭建 ##
参见文档《HOWTO--git&github.md》
## Golang环境搭建 ##
参见文档《golang环境搭建》
将GOPATH设置为$HOME/RDAWatchServer
## redis安装 ##
FishChatServer通过Redis(http://redis.io)做cache,最新稳定版本为3.2.0
参见文档《redis》
## mongoDB安装 ##
FishChatServer使用MongoDB(http://www.mongodb.org/)做持久化存储,最新稳定版本为3.2.6
参见文档《mongodb》
## RDAWatchServer代码获取 ##
<pre>
mkdir -p RDAWatchServer/src
cd RDAWatchServer/src
mkdir -p gopkg.in/mgo.v2
mkdir -p github.com/astaxie/beego
mkdir -p github.com/garyburd/redigo
mkdir -p github.com/oikomi/FishChatServer
//然后通过go get获取代码
go get gopkg.in/mgo.v2 # MongoDB驱动
go get github.com/astaxie/beego #web监控使用的beego框架
go get github.com/garyburd/redigo #redis驱动
go get github.com/alvin921/FishChatServer #server代码
//但是失败,原因未知,只好使用git clone获取代码
git clone https://gopkg.in/mgo.v2 gopkg.in/mgo.v2
git clone https://github.com/astaxie/beego github.com/astaxie/beego
git clone https://github.com/garyburd/redigo github.com/garyburd/redigo
git clone https://github.com/alvin921/FishChatServer github.com/oikomi/FishChatServer
</pre>
## 编译及安装 ##
写了如下三个脚本(windows7下,请将cmd.exe和git-bash.exe设置为管理员身份运行):
wincmd.bat: 切换到windows cmd命令行窗口
<pre>
@echo off
@title RDAWatchServer
c:\Windows\system32\cmd.exe
</pre>
gitcmd.bat: 切换到git命令行窗口
<pre>
@echo off
@title RDAWatchServer
@for /f %%i in ('cd') do set PWD=%%i
c:\Windows\system32\cmd.exe /c ""C:\Program Files\Git\git-bash.exe" --cd=%PWD%"
</pre>
r.sh:在gitcmd命令行运行,主要用于编译和启动/停止服务
<pre>
Usage:
./r.sh clean|build|start|stop ...
./r.sh clean
clean exe files of gateway/msg_server/manager/router/monitor/client
./r.sh build nil|server|gateway|msg_server|manager|router|monitor|client
nil|server: means to build all: gateway/msg_server/manager/router/monitor/client
./r.sh start nil|server|redis|mongo
nil|server: means to start all: msg_server/gateway/manager/router/monitor
./r.sh stop nil|server|redis|mongo
nil|server: means to stop all: msg_server/gateway/manager/router/monitor
</pre>
## 服务器部署 ##
FishChatServer采用分布式可伸缩部署方式(各类服务器角色都可以动态增减):
* gateway一台
* msg_server两台
* router一台
* manager一台
* monitor一台
如果没有多机条件,可以单机部署。
**NOTE:** 必须先修改各文件夹下面的json配置文件配置服务器参数
**NOTE:** gateway、router、manager和monitor一定要在msg_server之后启动,因为他们都订阅了msg_server的channel
**NOTE:**
## 测试 ##
<pre>
./r.sh build client
client/client
</pre>
<file_sep>/client/client.go
//
// Copyright 2014 <NAME>. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"encoding/json"
"errors"
"flag"
"fmt"
"os"
"strconv"
"strings"
"github.com/oikomi/FishChatServer/common"
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/protocol"
)
// InputConfFile is the -conf_file flag: path of the client JSON config.
var InputConfFile = flag.String("conf_file", "client.json", "input conf file name")

// init mirrors glog output to stderr so interactive users see logs.
func init() {
	flag.Set("alsologtostderr", "true")
	flag.Set("log_dir", "false")
}
// heartBeat runs the client-side keepalive loop on the given session.
// It blocks forever, so run it in its own goroutine.
func heartBeat(cfg *ClientConfig, msgServerClient *libnet.Session) {
	common.NewHeartBeat("client", msgServerClient, cfg.HeartBeatTime, cfg.Expire, 10).Beat()
}
// bExit is set by cmd_exit to break the REPL loop in main.
var bExit bool = false

// Client holds the interactive client state: loaded config, login
// status, the user's identity/credentials, the uuid (session id)
// handed out by the gateway, and the live msg-server session.
type Client struct {
cfg *ClientConfig
bLogin bool // true once logged into the msg server
clientID string
clientType string // protocol.DEV_TYPE_WATCH ("D") or DEV_TYPE_CLIENT ("C")
clientPwd string // empty for watch devices
uuid string // session id issued by the gateway
msAddr string // msg server address returned by the gateway
session *libnet.Session
}
// NewClient loads the config named by -conf_file and returns a
// logged-out client, or nil when the config cannot be loaded.
func NewClient() *Client {
	conf, err := LoadConfig(*InputConfFile)
	if err != nil {
		log.Error(err.Error())
		return nil
	}
	c := new(Client)
	c.cfg = conf
	c.bLogin = false
	return c
}
// HelpInfo describes one REPL command: a one-line summary, a detailed
// usage text, and the handler invoked with the tokenized input line.
type HelpInfo struct {
desc string
detail string
f func(client *Client, args []string) error
}

// help_string maps command name -> help/handler; populated in main.
var help_string map[string]HelpInfo
// login_gateway performs the REQ_LOGIN_CMD handshake with the gateway:
// it sends (clientID, clientType, clientPwd), reads one response, and
// on success stores the issued uuid and msg-server address on self.
// The gateway connection is closed before returning.
// NOTE(review): a failure response is assumed to still carry at least
// one arg; GetArgs()[0] would panic on an empty reply — confirm the
// gateway always answers with a status arg.
func login_gateway(self *Client) error {
	fmt.Println("req GatewayServer...")
	gatewayClient, err := libnet.Dial("tcp", self.cfg.GatewayServer)
	if err != nil {
		panic(err)
	}
	cmd := protocol.NewCmdSimple(protocol.REQ_LOGIN_CMD)
	cmd.AddArg(self.clientID)
	cmd.AddArg(self.clientType)
	cmd.AddArg(self.clientPwd)
	err = gatewayClient.Send(libnet.Json(cmd))
	if err != nil {
		log.Error(err.Error())
	}
	var c protocol.CmdSimple
	// Read exactly one response message and decode it into c.
	err = gatewayClient.ProcessOnce(func(msg *libnet.InBuffer) error {
		log.Info(string(msg.Data))
		err = json.Unmarshal(msg.Data, &c)
		if err != nil {
			log.Error("error:", err)
		}
		return nil
	})
	if err != nil {
		log.Error(err.Error())
	}
	gatewayClient.Close()
	fmt.Println("req GatewayServer end...")
	if c.GetArgs()[0] != protocol.RSP_SUCCESS {
		log.Errorf("login gateway error: %s", c.GetArgs()[0])
		return errors.New(c.GetArgs()[0])
	}
	// Success reply layout: [status, uuid, msgServerAddr].
	self.uuid = c.GetArgs()[1]
	self.msAddr = c.GetArgs()[2]
	return nil
}
// login_server dials the msg server obtained from the gateway and
// logs in with (clientID, uuid) — no password is needed at this stage.
// On success self.session stays open for subsequent commands.
func login_server(self *Client) error {
	var err error
	self.session, err = libnet.Dial("tcp", self.msAddr)
	if err != nil {
		panic(err)
	}
	fmt.Println("req to login msg server...")
	cmd := protocol.NewCmdSimple(protocol.REQ_LOGIN_CMD)
	cmd.AddArg(self.clientID)
	cmd.AddArg(self.uuid)
	err = self.session.Send(libnet.Json(cmd))
	if err != nil {
		log.Error(err.Error())
		return err
	}
	var c protocol.CmdSimple
	// Read exactly one login response.
	err = self.session.ProcessOnce(func(msg *libnet.InBuffer) error {
		log.Info(string(msg.Data))
		err = json.Unmarshal(msg.Data, &c)
		if err != nil {
			log.Error("error:", err)
		}
		return nil
	})
	if err != nil {
		log.Error(err.Error())
	}
	if c.GetArgs()[0] != protocol.RSP_SUCCESS {
		log.Errorf("login msgserver error: %s", c.GetArgs()[0])
		return errors.New(c.GetArgs()[0])
	}
	fmt.Println("login msg server SUCCESS")
	return nil
}
// cmd_logout sends REQ_LOGOUT_CMD when logged in and clears the login
// flag; a no-op when already logged out.
func cmd_logout(self *Client, args []string) error {
	if !self.bLogin {
		return nil
	}
	self.bLogin = false
	req := protocol.NewCmdSimple(protocol.REQ_LOGOUT_CMD)
	return self.session.Send(libnet.Json(req))
}
// cmd_exit closes the msg-server session (if any) and flags the REPL
// loop to terminate.
func cmd_exit(self *Client, args []string) error {
	bExit = true
	if self == nil || self.session == nil {
		return nil
	}
	self.session.Close()
	return nil
}
// cmd_delete would delete a topic; the server side is not implemented
// yet, so this only prints a notice.
func cmd_delete(self *Client, args []string) error {
	switch {
	case !self.bLogin:
		fmt.Println("NOT login yet. Please login first.")
	default:
		fmt.Println("Not implemented yet.")
	}
	return nil
}
// cmd_topic requests the caller's topic list (REQ_GET_TOPIC_LIST_CMD);
// the asynchronous reply is handled by cmd_topic_rsp.
func cmd_topic(self *Client, args []string) error {
	if !self.bLogin {
		fmt.Println("NOT login yet. Please login first.")
		return nil
	}
	req := protocol.NewCmdSimple(protocol.REQ_GET_TOPIC_LIST_CMD)
	if err := self.session.Send(libnet.Json(req)); err != nil {
		log.Error(err.Error())
		return err
	}
	return nil
}
// cmd_topic_rsp prints the topic list carried by a
// RSP_GET_TOPIC_LIST_CMD reply: [status, count, name1, name2, ...].
func cmd_topic_rsp(self *Client, c *protocol.CmdSimple) error {
	fmt.Println(c.GetCmdName() + " returns: " + c.GetArgs()[0])
	if c.GetArgs()[0] != protocol.RSP_SUCCESS {
		return errors.New(c.GetArgs()[0])
	}
	num, err := strconv.Atoi(c.GetArgs()[1])
	if err != nil {
		fmt.Println(err.Error())
		log.Error(err.Error())
		return err
	}
	fmt.Println("GET_TOPIC_LIST returns (" + c.GetArgs()[1] + "): ")
	for i := 0; i < num; i++ {
		fmt.Println(c.GetArgs()[2+i])
	}
	return nil
}
// cmd_topic_profile asks for a topic's profile. Usage: profile <topic>.
func cmd_topic_profile(self *Client, args []string) error {
	if !self.bLogin {
		fmt.Println("NOT login yet. Please login first.")
		return nil
	}
	if len(args) != 2 {
		return common.SYNTAX_ERROR
	}
	req := protocol.NewCmdSimple(protocol.REQ_GET_TOPIC_PROFILE_CMD)
	req.AddArg(args[1])
	return self.session.Send(libnet.Json(req))
}
// cmd_topic_profile_rsp prints a RSP_GET_TOPIC_PROFILE_CMD reply.
// Expected layout: [status, topicName, creatorID,
// member1ID, member1Name, member1Type, member2ID, ...] — i.e. three
// args per member after the first three fixed args.
func cmd_topic_profile_rsp(self *Client, c *protocol.CmdSimple) error {
	var num int
	fmt.Println(c.GetCmdName() + " returns: " + c.GetArgs()[0])
	if c.GetArgs()[0] != protocol.RSP_SUCCESS {
		return errors.New(c.GetArgs()[0])
	}
	fmt.Println("GET_TOPIC_PROFILE returns : ")
	fmt.Println("	topicName: " + c.GetArgs()[1])
	fmt.Println("	creator: " + c.GetArgs()[2])
	fmt.Println("	members[]: ")
	// Each member occupies three consecutive args.
	num = (len(c.GetArgs()) - 3) / 3
	index := 0
	for {
		if index == num {
			break
		} else {
			fmt.Println("		" + c.GetArgs()[3+3*index] + "\t, " + c.GetArgs()[3+3*index+1] + "\t, " + c.GetArgs()[3+3*index+2])
			index++
		}
	}
	return nil
}
// cmd_new creates a topic. Usage: new <topic_name> <alias>.
func cmd_new(self *Client, args []string) error {
	if !self.bLogin {
		fmt.Println("NOT login yet. Please login first.")
		return nil
	}
	if len(args) != 3 {
		return common.SYNTAX_ERROR
	}
	req := protocol.NewCmdSimple(protocol.REQ_CREATE_TOPIC_CMD)
	req.AddArg(args[1])
	req.AddArg(args[2])
	err := self.session.Send(libnet.Json(req))
	if err != nil {
		log.Error(err.Error())
	}
	return err
}
// cmd_new_rsp reports the CREATE_TOPIC result.
func cmd_new_rsp(self *Client, c *protocol.CmdSimple) error {
	status := c.GetArgs()[0]
	fmt.Println(c.GetCmdName() + " returns: " + status)
	if status == protocol.RSP_SUCCESS {
		return nil
	}
	return errors.New(status)
}
// cmd_join asks to join a topic. Usage: join <topic> <alias>.
//
// Fix: the original called fmt.Errorf(err.Error()) on a send failure,
// which builds a new error and discards it — nothing was ever logged.
// Use log.Error like every sibling command handler.
func cmd_join(self *Client, args []string) error {
	if self.bLogin == false {
		fmt.Println("NOT login yet. Please login first.")
		return nil
	}
	if len(args) != 3 {
		return common.SYNTAX_ERROR
	}
	cmd := protocol.NewCmdSimple(protocol.REQ_JOIN_TOPIC_CMD)
	cmd.AddArg(args[1])
	cmd.AddArg(args[2])
	err := self.session.Send(libnet.Json(cmd))
	if err != nil {
		log.Error(err.Error())
	}
	return err
}
// cmd_join_rsp reports the JOIN_TOPIC result.
func cmd_join_rsp(self *Client, c *protocol.CmdSimple) error {
	status := c.GetArgs()[0]
	fmt.Println(c.GetCmdName() + " returns: " + status)
	if status == protocol.RSP_SUCCESS {
		return nil
	}
	return errors.New(status)
}
// cmd_quit leaves a topic. Usage: quit <topic_name>.
func cmd_quit(self *Client, args []string) error {
	if !self.bLogin {
		fmt.Println("NOT login yet. Please login first.")
		return nil
	}
	if len(args) != 2 {
		return common.SYNTAX_ERROR
	}
	req := protocol.NewCmdSimple(protocol.REQ_QUIT_TOPIC_CMD)
	req.AddArg(args[1])
	return self.session.Send(libnet.Json(req))
}
// cmd_quit_rsp reports the QUIT_TOPIC result.
func cmd_quit_rsp(self *Client, c *protocol.CmdSimple) error {
	status := c.GetArgs()[0]
	fmt.Println(c.GetCmdName() + " returns: " + status)
	if status == protocol.RSP_SUCCESS {
		return nil
	}
	return errors.New(status)
}
// cmd_add invites a member into a topic (creator only).
// Usage: add <topic> <id> <alias>.
func cmd_add(self *Client, args []string) error {
	if !self.bLogin {
		fmt.Println("NOT login yet. Please login first.")
		return nil
	}
	if len(args) != 4 {
		return common.SYNTAX_ERROR
	}
	req := protocol.NewCmdSimple(protocol.REQ_ADD_2_TOPIC_CMD)
	for _, a := range args[1:] {
		req.AddArg(a)
	}
	return self.session.Send(libnet.Json(req))
}
// cmd_add_rsp reports the ADD_2_TOPIC result.
func cmd_add_rsp(self *Client, c *protocol.CmdSimple) error {
	status := c.GetArgs()[0]
	fmt.Println(c.GetCmdName() + " returns: " + status)
	if status == protocol.RSP_SUCCESS {
		return nil
	}
	return errors.New(status)
}
// cmd_kick removes a member from a topic (creator only).
// Usage: kick <topic> <id>.
func cmd_kick(self *Client, args []string) error {
	if !self.bLogin {
		fmt.Println("NOT login yet. Please login first.")
		return nil
	}
	if len(args) != 3 {
		return common.SYNTAX_ERROR
	}
	req := protocol.NewCmdSimple(protocol.REQ_KICK_TOPIC_CMD)
	for _, a := range args[1:] {
		req.AddArg(a)
	}
	return self.session.Send(libnet.Json(req))
}
// cmd_kick_rsp reports the KICK_TOPIC result.
func cmd_kick_rsp(self *Client, c *protocol.CmdSimple) error {
	status := c.GetArgs()[0]
	fmt.Println(c.GetCmdName() + " returns: " + status)
	if status == protocol.RSP_SUCCESS {
		return nil
	}
	return errors.New(status)
}
// cmd_sendto sends a P2P message. Usage: sendto <id> <msg>.
func cmd_sendto(self *Client, args []string) error {
	if !self.bLogin {
		fmt.Println("NOT login yet. Please login first.")
		return nil
	}
	if len(args) != 3 {
		return common.SYNTAX_ERROR
	}
	req := protocol.NewCmdSimple(protocol.REQ_SEND_P2P_MSG_CMD)
	for _, a := range args[1:] {
		req.AddArg(a)
	}
	return self.session.Send(libnet.Json(req))
}
// cmd_sendto_rsp reports the P2P send result; on success the server
// also returns the message uuid used to track delivery status.
func cmd_sendto_rsp(self *Client, c *protocol.CmdSimple) error {
	status := c.GetArgs()[0]
	fmt.Println(c.GetCmdName() + " returns: " + status)
	if status == protocol.RSP_SUCCESS {
		fmt.Println("uuid: " + c.GetArgs()[1])
	}
	return nil
}
// incoming_p2p_msg handles an inbound P2P message.
// Args layout: [msg, fromID, uuid] — uuid is optional; when present we
// acknowledge the message as READ back to the sender via the server.
func incoming_p2p_msg(self *Client, c *protocol.CmdSimple) error {
	fmt.Println(c.GetArgs()[1] + " says : " + c.GetArgs()[0])
	if len(c.GetArgs()) >= 3 {
		// Ack: (uuid, READ, senderID)
		cmd := protocol.NewCmdSimple(protocol.IND_ACK_P2P_STATUS_CMD)
		cmd.AddArg(c.GetArgs()[2])
		cmd.AddArg(protocol.P2P_ACK_READ)
		cmd.AddArg(c.GetArgs()[1])
		err := self.session.Send(libnet.Json(cmd))
		if err != nil {
			log.Error(err.Error())
		}
	}
	return nil
}
// cmd_send sends a topic message. Usage: send <msg>[ <topic>].
// App clients ("C") must name the topic; a watch device ("D") belongs
// to exactly one topic, so the server infers it and no topic arg is
// accepted.
func cmd_send(self *Client, args []string) error {
	if !self.bLogin {
		fmt.Println("NOT login yet. Please login first.")
		return nil
	}
	want := 2 // device: just the message
	if self.clientType == protocol.DEV_TYPE_CLIENT {
		want = 3 // client: message + topic name
	}
	if len(args) != want {
		return common.SYNTAX_ERROR
	}
	req := protocol.NewCmdSimple(protocol.REQ_SEND_TOPIC_MSG_CMD)
	for _, a := range args[1:] {
		req.AddArg(a)
	}
	return self.session.Send(libnet.Json(req))
}
// cmd_send_rsp reports the SEND_TOPIC_MSG result.
func cmd_send_rsp(self *Client, c *protocol.CmdSimple) error {
	fmt.Printf("%s returns: %s\n", c.GetCmdName(), c.GetArgs()[0])
	return nil
}
// incoming_topic_msg pretty-prints an inbound topic message.
// Args layout: [msg, topicName, fromID, fromType].
func incoming_topic_msg(self *Client, c *protocol.CmdSimple) error {
	msg := c.GetArgs()[0]
	topicName := c.GetArgs()[1]
	fromID := c.GetArgs()[2]
	fromType := c.GetArgs()[3]
	fmt.Println("Topic message received :")
	fmt.Println("	TopicName :" + topicName)
	fmt.Println("	FromID :" + fromID)
	fmt.Println("	FromType :" + fromType)
	fmt.Println("	Message :" + msg)
	return nil
}
// cmd_login logs in: first to the gateway (which issues uuid and the
// msg-server address), then to the msg server. On success it starts
// the heartbeat goroutine and a receive loop that dispatches server
// messages to the cmd_*_rsp / incoming_* handlers.
// Usage: login <id> <type> [<pwd>] — type "D" (watch, no password) or
// "C" (app client, password required).
// NOTE(review): login failures panic and kill the REPL; the shared
// CmdSimple c is reused across callbacks of the receive goroutine.
func cmd_login(self *Client, args []string) error {
	if self.bLogin {
		fmt.Println("You are already login, pls logout first")
		return nil
	}
	if len(args) != 3 && len(args) != 4 {
		return common.SYNTAX_ERROR
	}
	if args[2] != "D" && args[2] != "d" && args[2] != "C" && args[2] != "c" {
		return common.SYNTAX_ERROR
	}
	self.clientID = args[1]
	if args[2] == "D" || args[2] == "d" {
		self.clientType = protocol.DEV_TYPE_WATCH
	} else if args[2] == "C" || args[2] == "c" {
		self.clientType = protocol.DEV_TYPE_CLIENT
	} else {
	}
	self.clientPwd = ""
	if self.clientType == protocol.DEV_TYPE_CLIENT {
		// App clients must supply a password; watches must not.
		if len(args) != 4 {
			return common.SYNTAX_ERROR
		}
		self.clientPwd = args[3]
	}
	// Load config
	fmt.Println("config file:" + *InputConfFile)
	fmt.Println("config file loaded")
	err := login_gateway(self)
	if err != nil {
		panic(err)
	}
	err = login_server(self)
	if err != nil {
		panic(err)
	}
	self.bLogin = true
	go heartBeat(self.cfg, self.session)
	var c protocol.CmdSimple
	// Receive loop: decode each server message and dispatch by command name.
	go self.session.Process(func(msg *libnet.InBuffer) error {
		fmt.Println(string(msg.Data))
		err = json.Unmarshal(msg.Data, &c)
		if err != nil {
			log.Error("error:", err)
		}
		fmt.Println("msg received is : " + c.GetCmdName())
		switch c.GetCmdName() {
		case protocol.RSP_GET_TOPIC_LIST_CMD:
			cmd_topic_rsp(self, &c)
		case protocol.RSP_GET_TOPIC_PROFILE_CMD:
			cmd_topic_profile_rsp(self, &c)
		case protocol.RSP_CREATE_TOPIC_CMD:
			cmd_new_rsp(self, &c)
		case protocol.RSP_JOIN_TOPIC_CMD:
			cmd_join_rsp(self, &c)
		case protocol.RSP_QUIT_TOPIC_CMD:
			cmd_quit_rsp(self, &c)
		case protocol.RSP_ADD_2_TOPIC_CMD:
			cmd_add_rsp(self, &c)
		case protocol.RSP_KICK_TOPIC_CMD:
			cmd_kick_rsp(self, &c)
		case protocol.RSP_SEND_P2P_MSG_CMD:
			cmd_sendto_rsp(self, &c)
		case protocol.IND_ACK_P2P_STATUS_CMD:
			fmt.Println("msg sent [uuid=" + c.GetArgs()[0] + "] status: " + c.GetArgs()[1])
		case protocol.IND_SEND_P2P_MSG_CMD:
			incoming_p2p_msg(self, &c)
		case protocol.IND_SEND_TOPIC_MSG_CMD:
			incoming_topic_msg(self, &c)
		}
		return nil
	})
	return nil
}
// Help prints the command overview (no args) or the detailed usage of
// a single command (help <cmd>). Unknown commands print nothing.
// NOTE: the overview iterates a map, so command order varies run to run.
func Help(self *Client, args []string) error {
	if len(args) <= 1 {
		help := "RDA Watch Client.\n" +
			"Usage:<cmd> [<arg0> <arg1> ...]\n" +
			"<cmd> can be:\n"
		for k, v := range help_string {
			help += k + "\t----\t" + v.desc
		}
		help += `please type "help <cmd>" to get help for specific command.` + "\n"
		fmt.Print(help)
	} else {
		if v, ok := help_string[args[1]]; ok {
			fmt.Print(v.desc)
			fmt.Print(v.detail)
		}
	}
	return nil
}
// main wires up the command table and runs the REPL: read a line,
// split on spaces, dispatch to the matching HelpInfo handler, and loop
// until cmd_exit sets bExit.
func main() {
	flag.Parse()
	client := NewClient()
	// Command table: name -> (summary, usage, handler).
	help_string = map[string]HelpInfo{
		"help": {
			"RDA Watch Client.\n",
			"",
			Help,
		},
		"login": HelpInfo{
			"Login RDA Watch Server\n",
			"login <id> <type> <pwd>\n" +
				"<type>: \"D\" for watch, \"C\" for app client\n" +
				"NOTE: watch doesn't need to provide <pwd>\n",
			cmd_login,
		},
		"logout": HelpInfo{
			"Logout from RDA Watch Server\n",
			"logout\n",
			cmd_logout,
		},
		"exit": {
			"Close this program and exit\n",
			"exit\n",
			cmd_exit,
		},
		"topic": {
			"Get your own topic list\n",
			"topic\n",
			cmd_topic,
		},
		"new": {
			"create a new topic\n",
			"new <topic_name> <alias>\n",
			cmd_new,
		},
		"delete": {
			"delete a topic, only for topic creator\n",
			"delete <topic_name>\n",
			cmd_delete,
		},
		"profile": {
			"Get specific topic profile. You MUST be a member of the topic.\n",
			"profile <topic_name>\n",
			cmd_topic_profile,
		},
		"add": {
			"Add someone into the topic, only for topic creator\n",
			"add <topic> <id> <alias>\n",
			cmd_add,
		},
		"kick": {
			"Kick someone out of the topic, only for topic creator\n",
			"kick <topic> <id>\n",
			cmd_kick,
		},
		"join": {
			"join a topic\n",
			"join <topic> <alias>\n",
			cmd_join,
		},
		"quit": {
			"Quit from a topic. Usage:\n",
			"quit <topic_name>\n",
			cmd_quit,
		},
		"send": {
			"send topic message\n",
			"send <msg>[ <topic>]\n",
			cmd_send,
		},
		"sendto": {
			"send p2p message\n",
			"sendto <id> <msg>\n",
			cmd_sendto,
		},
	}
	Help(client, nil)
	r := bufio.NewReader(os.Stdin)
	// REPL loop; errors from handlers are printed, never fatal.
	for {
		fmt.Print("Command> ")
		b, _, _ := r.ReadLine()
		line := string(b)
		tokens := strings.Split(line, " ")
		if v, ok := help_string[tokens[0]]; ok {
			ret := v.f(client, tokens)
			if ret == common.SYNTAX_ERROR {
				fmt.Printf("Syntax error, pls type \"help %s\" to get more information\n", tokens[0])
			} else if ret != nil {
				fmt.Println(ret.Error())
			}
			if bExit {
				break
			}
		} else {
			fmt.Println("Unknown command:", tokens[0])
		}
	}
}
<file_sep>/common/heartbeat.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"sync"
"time"
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/protocol"
)
// HeartBeat periodically pings a session (Beat) and tracks receive
// timeouts against a failure threshold (Receive). mu guards fails and
// threshold.
type HeartBeat struct {
name string // label for the peer, e.g. "client"
session *libnet.Session
mu sync.Mutex
timeout time.Duration // beat/receive interval
expire time.Duration // overall TTL (currently unused in Beat)
fails uint64 // consecutive receive failures
threshold uint64 // max tolerated failures
}
// NewHeartBeat builds a heartbeat tracker for session. timeout is the
// beat interval, expire the overall TTL, and limit the number of
// tolerated consecutive failures.
func NewHeartBeat(name string, session *libnet.Session, timeout time.Duration, expire time.Duration, limit uint64) *HeartBeat {
	hb := new(HeartBeat)
	hb.name = name
	hb.session = session
	hb.timeout = timeout
	hb.expire = expire
	hb.threshold = limit
	return hb
}
// ResetFailures zeroes the consecutive-failure counter.
func (self *HeartBeat) ResetFailures() {
	self.mu.Lock()
	self.fails = 0
	self.mu.Unlock()
}
// ChangeThreshold replaces the failure threshold under the lock.
func (self *HeartBeat) ChangeThreshold(thres uint64) {
	self.mu.Lock()
	self.threshold = thres
	self.mu.Unlock()
}
// Beat sends a SEND_PING_CMD to the session every interval, forever.
// Run it in its own goroutine; it never returns.
// NOTE(review): timeout is already a time.Duration, yet it is
// multiplied by time.Second here — this only works if callers store a
// bare count (e.g. 5) in the Duration. Confirm against the config
// loading code before changing.
func (self *HeartBeat) Beat() {
	timer := time.NewTicker(self.timeout * time.Second)
	//ttl := time.After(self.expire * time.Second)
	for {
		select {
		case <-timer.C:
			// Fire-and-forget ping; send errors are only logged.
			go func() {
				cmd := protocol.NewCmdSimple(protocol.SEND_PING_CMD)
				err := self.session.Send(libnet.Json(cmd))
				if err != nil {
					log.Error(err.Error())
				}
			}()
		//case <-ttl:
		//break
		}
	}
}
// Receive counts elapsed heartbeat intervals as failures and returns
// once more than `threshold` consecutive intervals pass. Call
// ResetFailures whenever a heartbeat actually arrives.
//
// Fix: the original created the time.After channel once outside the
// loop, so it fired at most a single time and the select then blocked
// forever — the threshold logic could never trigger. A Ticker fires
// every interval. fails is now also guarded by mu, consistent with
// ResetFailures/ChangeThreshold.
func (self *HeartBeat) Receive() {
	ticker := time.NewTicker(self.timeout)
	defer ticker.Stop()
	for range ticker.C {
		self.mu.Lock()
		self.fails = self.fails + 1
		exceeded := self.fails > self.threshold
		self.mu.Unlock()
		if exceeded {
			return
		}
	}
}
<file_sep>/manager/server.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"time"
"encoding/json"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/base"
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/protocol"
"github.com/oikomi/FishChatServer/storage/redis_store"
"github.com/oikomi/FishChatServer/storage/mongo_store"
)
// Manager persists session/topic state on behalf of the msg servers:
// redis caches for hot data, MongoDB for durable storage.
type Manager struct {
cfg *ManagerConfig
sessionCache *redis_store.SessionCache
topicCache *redis_store.TopicCache
mongoStore *mongo_store.MongoStore
}
// NewManager builds the manager with its two redis caches (sessions,
// topics) and the MongoDB store, all configured from cfg.
// NOTE(review): the redis address is cfg.Redis.Addr + cfg.Redis.Port
// with no ":" inserted — this assumes Port already carries a leading
// colon in the config; confirm.
func NewManager(cfg *ManagerConfig) *Manager {
	return &Manager {
		cfg : cfg,
		sessionCache : redis_store.NewSessionCache(redis_store.NewRedisStore(&redis_store.RedisStoreOptions {
			Network : "tcp",
			Address : cfg.Redis.Addr + cfg.Redis.Port,
			ConnectTimeout : time.Duration(cfg.Redis.ConnectTimeout)*time.Millisecond,
			ReadTimeout : time.Duration(cfg.Redis.ReadTimeout)*time.Millisecond,
			WriteTimeout : time.Duration(cfg.Redis.WriteTimeout)*time.Millisecond,
			Database : 1, // both caches share redis DB 1 under the common key prefix
			KeyPrefix : base.COMM_PREFIX,
		})),
		topicCache : redis_store.NewTopicCache(redis_store.NewRedisStore(&redis_store.RedisStoreOptions {
			Network : "tcp",
			Address : cfg.Redis.Addr + cfg.Redis.Port,
			ConnectTimeout : time.Duration(cfg.Redis.ConnectTimeout)*time.Millisecond,
			ReadTimeout : time.Duration(cfg.Redis.ReadTimeout)*time.Millisecond,
			WriteTimeout : time.Duration(cfg.Redis.WriteTimeout)*time.Millisecond,
			Database : 1,
			KeyPrefix : base.COMM_PREFIX,
		})),
		mongoStore : mongo_store.NewMongoStore(cfg.Mongo.Addr, cfg.Mongo.Port, cfg.Mongo.User, cfg.Mongo.Password),
	}
}
// connectMsgServer dials the msg server at address ms.
//
// Fix: the original logged the dial error and then panicked, which
// made its error return unreachable and took the whole manager down
// for a single unreachable msg server. The error is now logged and
// returned; the caller (subscribeChannels) already checks it.
func (self *Manager)connectMsgServer(ms string) (*libnet.Session, error) {
	client, err := libnet.Dial("tcp", ms)
	if err != nil {
		log.Error(err.Error())
		return nil, err
	}
	return client, nil
}
// parseProtocol decodes one raw message from a msg server and routes
// it to the matching handler: cache updates (session/topic) go to the
// redis-backed procCache*, persistence commands to the mongo-backed
// procStore*. Unknown command names are silently ignored.
func (self *Manager)parseProtocol(cmd []byte, session *libnet.Session) error {
	var err error
	var c protocol.CmdInternal
	//var cType interface{}
	// First pass: decode just far enough to read the command name.
	err = json.Unmarshal(cmd, &c)
	if err != nil {
		log.Error("error:", err)
		return err
	}
	pp := NewProtoProc(self)
	log.Info(c)
	log.Info(c.CmdName)
	// Second pass: re-decode into the command-specific struct.
	switch c.CmdName {
	case protocol.CACHE_SESSION_CMD:
		var scc SessionCacheCmd
		err = json.Unmarshal(cmd, &scc)
		if err != nil {
			log.Error("error:", err)
			return err
		}
		err = pp.procCacheSession(scc, session)
		if err != nil {
			log.Error("error:", err)
			return err
		}
	case protocol.CACHE_TOPIC_CMD:
		var tcc TopicCacheCmd
		err = json.Unmarshal(cmd, &tcc)
		if err != nil {
			log.Error("error:", err)
			return err
		}
		err = pp.procCacheTopic(tcc, session)
		if err != nil {
			log.Error("error:", err)
			return err
		}
	case protocol.STORE_SESSION_CMD:
		var ssd SessionStoreCmd
		err = json.Unmarshal(cmd, &ssd)
		if err != nil {
			log.Error("error:", err)
			return err
		}
		err := pp.procStoreSession(ssd.GetAnyData(), session)
		if err != nil {
			log.Error("error:", err)
			return err
		}
	case protocol.STORE_TOPIC_CMD:
		var tsd TopicStoreCmd
		err = json.Unmarshal(cmd, &tsd)
		if err != nil {
			log.Error("error:", err)
			return err
		}
		err = pp.procStoreTopic(tsd.GetAnyData(), session)
		if err != nil {
			log.Error("error:", err)
			return err
		}
	}
	return err
}
// handleMsgServerClient runs the receive loop for one msg server session,
// parsing every inbound packet. A parse failure is logged but does not
// terminate the session loop (the callback still returns nil).
func (self *Manager) handleMsgServerClient(msc *libnet.Session) {
	msc.Process(func(msg *libnet.InBuffer) error {
		log.Info("msg_server", msc.Conn().RemoteAddr().String(), "say:", string(msg.Data))
		// The original discarded this error entirely; surface it in the log.
		if err := self.parseProtocol(msg.Data, msc); err != nil {
			log.Error("parseProtocol error:", err)
		}
		return nil
	})
}
func (self *Manager)subscribeChannels() error {
log.Info("subscribeChannels")
var msgServerClientList []*libnet.Session
for _, ms := range self.cfg.MsgServerList {
msgServerClient, err := self.connectMsgServer(ms)
if err != nil {
log.Error(err.Error())
return err
}
cmd := protocol.NewCmdSimple(protocol.SUBSCRIBE_CHANNEL_CMD)
cmd.AddArg(protocol.SYSCTRL_CLIENT_STATUS)
cmd.AddArg(self.cfg.UUID)
err = msgServerClient.Send(libnet.Json(cmd))
if err != nil {
log.Error(err.Error())
return err
}
cmd = protocol.NewCmdSimple(protocol.SUBSCRIBE_CHANNEL_CMD)
cmd.AddArg(protocol.SYSCTRL_TOPIC_STATUS)
cmd.AddArg(self.cfg.UUID)
err = msgServerClient.Send(libnet.Json(cmd))
if err != nil {
log.Error(err.Error())
return err
}
cmd = protocol.NewCmdSimple(protocol.SUBSCRIBE_CHANNEL_CMD)
cmd.AddArg(protocol.STORE_CLIENT_INFO)
cmd.AddArg(self.cfg.UUID)
err = msgServerClient.Send(libnet.Json(cmd))
if err != nil {
log.Error(err.Error())
return err
}
cmd = protocol.NewCmdSimple(protocol.SUBSCRIBE_CHANNEL_CMD)
cmd.AddArg(protocol.STORE_TOPIC_INFO)
cmd.AddArg(self.cfg.UUID)
err = msgServerClient.Send(libnet.Json(cmd))
if err != nil {
log.Error(err.Error())
return err
}
msgServerClientList = append(msgServerClientList, msgServerClient)
}
for _, msc := range msgServerClientList {
go self.handleMsgServerClient(msc)
}
return nil
}<file_sep>/msg_server/msg_server_config.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"os"
"encoding/json"
"time"
"github.com/oikomi/FishChatServer/log"
)
// MsgServerConfig holds the JSON-decoded configuration of one msg server
// instance. Exported fields are populated by LoadConfig; the unexported
// configfile records the path the config was loaded from.
type MsgServerConfig struct {
	configfile         string // path of the JSON config file (set by NewMsgServerConfig)
	LocalIP            string
	TransportProtocols string
	Listen             string // listen address for client connections
	LogFile            string
	// NOTE(review): the Duration fields below are decoded straight from JSON
	// numbers, so their unit depends on how callers scale them — confirm
	// against the config files before relying on a specific unit.
	ScanDeadSessionTimeout   time.Duration
	Expire                   time.Duration
	MonitorBeatTime          time.Duration
	SessionManagerServerList []string
	Redis                    struct {
		Addr           string
		Port           string
		ConnectTimeout time.Duration
		ReadTimeout    time.Duration
		WriteTimeout   time.Duration
	}
	Mongo struct {
		Addr     string
		Port     string
		User     string
		Password string
	}
}
// NewMsgServerConfig returns a config bound to the given file path;
// call LoadConfig afterwards to actually read and decode the file.
func NewMsgServerConfig(configfile string) *MsgServerConfig {
	cfg := new(MsgServerConfig)
	cfg.configfile = configfile
	return cfg
}
// LoadConfig opens and JSON-decodes the config file this struct was bound
// to. Both open and decode failures are logged (the original logged only the
// open failure) and returned.
func (self *MsgServerConfig) LoadConfig() error {
	file, err := os.Open(self.configfile)
	if err != nil {
		log.Error(err.Error())
		return err
	}
	defer file.Close()
	dec := json.NewDecoder(file)
	// Decode directly into self; the original decoded into &self
	// (a **MsgServerConfig), which works but is needlessly indirect.
	if err = dec.Decode(self); err != nil {
		log.Error(err.Error())
		return err
	}
	return nil
}
// DumpConfig is a debugging stub; its printout was disabled and the method
// currently does nothing.
func (self *MsgServerConfig)DumpConfig() {
	//fmt.Printf("Mode: %s\nListen: %s\nServer: %s\nLogfile: %s\n",
	//cfg.Mode, cfg.Listen, cfg.Server, cfg.Logfile)
}
<file_sep>/monitor/controllers/monitor_controller.go
//
// Copyright 2014-2015 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controllers
import (
"encoding/json"
"github.com/astaxie/beego"
"github.com/oikomi/FishChatServer/monitor/conf"
)
// MonitorController serves the monitor HTTP API; it embeds beego.Controller
// and dispatches requests via the "action" query parameter.
type MonitorController struct {
	beego.Controller
}
// Post dispatches monitor POST requests by the "action" query parameter.
// Only the login action is currently supported: it decodes the credentials
// from the request body and answers with a LoginStatus JSON document.
// Malformed requests are answered with HTTP 400.
func (this *MonitorController) Post() {
	beego.Info("MonitorController Post")
	action := this.GetString(conf.KEY_ACTION)
	if action == "" {
		beego.Error("[para is null] | action ")
		this.Abort("400")
		return
	}
	ifo := NewInfoOperation()
	switch action {
	case conf.ACTION_LOGIN:
		var ob LoginPostData
		// The original ignored this error; a bad body now yields 400.
		if err := json.Unmarshal(this.Ctx.Input.RequestBody, &ob); err != nil {
			beego.Error(err)
			this.Abort("400")
			return
		}
		ts, err := ifo.login(ob.Username, ob.Password)
		if err != nil {
			beego.Error(err)
			this.Abort("400")
			return
		}
		this.Data["json"] = ts
		this.ServeJSON()
	}
}
// Get dispatches monitor GET requests by the "action" query parameter.
// For the msg-server-data action it now serves the collected data as JSON;
// the original discarded both the result and the error, so the endpoint
// returned nothing.
func (this *MonitorController) Get() {
	beego.Info("MonitorController Get")
	action := this.GetString(conf.KEY_ACTION)
	if action == "" {
		beego.Error("[para is null] | action ")
		this.Abort("400")
		return
	}
	ifo := NewInfoOperation()
	switch action {
	case conf.ACTION_GET_MSG_SERVER_DATA:
		data, err := ifo.getMsgServerData()
		if err != nil {
			beego.Error(err)
			this.Abort("400")
			return
		}
		this.Data["json"] = data
		this.ServeJSON()
	}
}
// InfoOperation groups the monitor's backend operations (login, stats).
type InfoOperation struct {
}

// NewInfoOperation returns a fresh, stateless InfoOperation.
func NewInfoOperation() *InfoOperation {
	return new(InfoOperation)
}
// login checks the supplied credentials and returns a LoginStatus whose
// Status field is "0" on success and "1" on failure.
// SECURITY NOTE(review): credentials are hard-coded and compared in plain
// text; replace with a real credential store before production use.
func (this *InfoOperation) login(username, password string) (*LoginStatus, error) {
	ts := NewLoginStatus()
	// Default to failure; flip to success only on an exact match.
	ts.Status = "1"
	if username == "admin" && password == "<PASSWORD>" {
		ts.Status = "0"
	}
	return &ts, nil
}
// getMsgServerData returns a MsgServerData snapshot; currently only the
// Status field is populated, with the placeholder value "1".
func (this *InfoOperation) getMsgServerData() (*MsgServerData, error) {
	data := NewMsgServerData()
	data.Status = "1"
	return &data, nil
}
<file_sep>/gateway/server.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"flag"
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/protocol"
"github.com/oikomi/FishChatServer/storage/mongo_store"
"github.com/oikomi/FishChatServer/storage/redis_store"
)
// init configures glog-style flags so logs are mirrored to stderr.
// NOTE(review): "log_dir" normally expects a directory path; the value
// "false" looks suspicious — confirm the intended setting.
func init() {
	flag.Set("alsologtostderr", "true")
	flag.Set("log_dir", "false")
}
// Gateway fronts the msg servers: it accepts client connections, tracks the
// session load reported by each msg server, and routes logins accordingly.
type Gateway struct {
	cfg                *GatewayConfig
	msgServerClientMap map[string]*libnet.Session
	// client to each msg server getting their payload
	msgServerNumMap map[string]uint64
	// payload (session count) of each msg server, keyed by remote address
	server       *libnet.Server
	sessionCache *redis_store.SessionCache
	mongoStore   *mongo_store.MongoStore
}
// NewGateway wires a Gateway to its config, a Redis-backed session cache and
// a MongoDB store. The msg server maps start empty and are filled when the
// gateway subscribes to each server.
func NewGateway(cfg *GatewayConfig, rs *redis_store.RedisStore) *Gateway {
	gw := &Gateway{
		cfg:                cfg,
		msgServerClientMap: map[string]*libnet.Session{},
		msgServerNumMap:    map[string]uint64{},
		server:             &libnet.Server{},
		sessionCache:       redis_store.NewSessionCache(rs),
	}
	gw.mongoStore = mongo_store.NewMongoStore(cfg.Mongo.Addr, cfg.Mongo.Port, cfg.Mongo.User, cfg.Mongo.Password)
	return gw
}
// parseProtocol decodes one raw client command and dispatches it.
// Only the login request is recognized at the gateway; every other command
// name is ignored and nil is returned.
func (self *Gateway) parseProtocol(cmd []byte, session *libnet.Session) error {
	var c protocol.CmdSimple
	if err := json.Unmarshal(cmd, &c); err != nil {
		return err
	}
	pp := NewProtoProc(self)
	if c.GetCmdName() == protocol.REQ_LOGIN_CMD {
		if err := pp.procLogin(&c, session); err != nil {
			return err
		}
	}
	return nil
}
// handleMsgServerClient consumes monitor reports from one msg server and
// records its current session count, keyed by the server's remote address.
func (self *Gateway) handleMsgServerClient(msc *libnet.Session) {
	msc.Process(func(msg *libnet.InBuffer) error {
		addr := msc.Conn().RemoteAddr().String()
		log.Info("msg_server", addr, " say: ", string(msg.Data))
		var report protocol.CmdMonitor
		if err := json.Unmarshal(msg.Data, &report); err != nil {
			log.Error("error:", err)
			return err
		}
		self.msgServerNumMap[addr] = report.SessionNum
		return nil
	})
}
// connectMsgServer dials the msg server at ms over TCP.
// BUG FIX: the original logged the error and then panicked, making the error
// return unreachable; the error is now returned so the caller (which already
// checks it) can recover gracefully.
func (self *Gateway) connectMsgServer(ms string) (*libnet.Session, error) {
	client, err := libnet.Dial("tcp", ms)
	if err != nil {
		log.Error(err.Error())
		return nil, err
	}
	return client, nil
}
// subscribeChannels connects to every configured msg server, subscribes this
// gateway (by UUID) to the monitor channel, and then starts one receive
// goroutine per server to track its load.
func (self *Gateway) subscribeChannels() error {
	log.Info("gateway start to subscribeChannels")
	for _, addr := range self.cfg.MsgServerList {
		client, err := self.connectMsgServer(addr)
		if err != nil {
			log.Error(err.Error())
			return err
		}
		sub := protocol.NewCmdSimple(protocol.SUBSCRIBE_CHANNEL_CMD)
		sub.AddArg(protocol.SYSCTRL_MONITOR)
		sub.AddArg(self.cfg.UUID)
		if err = client.Send(libnet.Json(sub)); err != nil {
			log.Error(err.Error())
			return err
		}
		self.msgServerClientMap[addr] = client
	}
	for _, msc := range self.msgServerClientMap {
		go self.handleMsgServerClient(msc)
	}
	return nil
}
<file_sep>/monitor/conf/const.go
//
// Copyright 2015-2099 <NAME>. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package conf
// Host-level settings for the monitor.
const (
	IFI     = "eth0"       // network interface used to resolve the local IP
	BaseDir = "/home/data" // base directory for monitor data
)

// Query parameter keys accepted by the monitor HTTP API.
const (
	KEY_ACTION = "action"
	KEY_PATH   = "path"
	KEY_TOKEN  = "token"
)

// Values of the "action" query parameter.
const (
	ACTION_REBOOT              = "reboot"
	ACTION_LOGIN               = "login"
	ACTION_GET_MSG_SERVER_DATA = "get_msg_server_data"
)
package libnet
import (
"encoding/gob"
"encoding/json"
"encoding/xml"
)
// Bytes builds an Encoder that writes the raw byte slice v.
func Bytes(v []byte) Encoder {
	return func(out *OutBuffer) error {
		out.WriteBytes(v)
		return nil
	}
}
// String builds an Encoder that writes the string v.
func String(v string) Encoder {
	return func(out *OutBuffer) error {
		out.WriteString(v)
		return nil
	}
}
// Json builds an Encoder that JSON-encodes v into the output buffer.
func Json(v interface{}) Encoder {
	return func(out *OutBuffer) error {
		enc := json.NewEncoder(out)
		return enc.Encode(v)
	}
}
// Gob builds an Encoder that gob-encodes v into the output buffer.
func Gob(v interface{}) Encoder {
	return func(out *OutBuffer) error {
		enc := gob.NewEncoder(out)
		return enc.Encode(v)
	}
}
// Xml builds an Encoder that XML-encodes v into the output buffer.
func Xml(v interface{}) Encoder {
	return func(out *OutBuffer) error {
		enc := xml.NewEncoder(out)
		return enc.Encode(v)
	}
}
<file_sep>/router/proto_proc.go
//
// Copyright 2014 <NAME>. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
//"github.com/oikomi/FishChatServer/common"
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/protocol"
//"github.com/oikomi/FishChatServer/storage/mongo_store"
)
// init configures glog-style flags so logs are mirrored to stderr.
// NOTE(review): "log_dir" normally expects a directory path; the value
// "false" looks suspicious — confirm the intended setting.
func init() {
	flag.Set("alsologtostderr", "true")
	flag.Set("log_dir", "false")
}
// ProtoProc implements the router's command handlers; it holds a back
// reference to the owning Router.
type ProtoProc struct {
	Router *Router
}

// NewProtoProc binds a new handler set to router r.
func NewProtoProc(r *Router) *ProtoProc {
	p := new(ProtoProc)
	p.Router = r
	return p
}
/*
MsgServer -> Router
REQ_SEND_P2P_MSG_CMD
arg0: Sent2ID // receiver's client ID
arg1: Msg     // message body
arg2: FromID  // sender's client ID
arg3: uuid    // message uuid assigned by the MsgServer
*/
// procSendMsgP2P routes a peer-to-peer message to the msg server that owns
// the receiver's session. If the receiver is registered (found in Mongo) but
// currently offline, the command is sent back to the originating msg server
// instead — presumably for offline queuing; confirm against the msg server's
// ROUTE_SEND_P2P_MSG_CMD handler.
func (self *ProtoProc) procSendMsgP2P(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procSendMsgP2P")
	var err error
	send2ID := cmd.GetArgs()[0]
	send2Msg := cmd.GetArgs()[1]
	log.Info(send2Msg)
	self.Router.readMutex.Lock()
	defer self.Router.readMutex.Unlock()
	cacheSession, err := self.Router.sessionCache.Get(send2ID)
	if cacheSession == nil || cacheSession.Alive == false {
		// Cache miss or dead session: fall back to the persistent store.
		// NOTE: `err :=` below shadows the outer err; harmless here because
		// every failure path returns inside this branch.
		storeSession, err := self.Router.mongoStore.GetSessionFromCid(send2ID)
		if err != nil {
			log.Warningf("ID %s not registered, msg dropped", send2ID)
			return err
		}
		log.Info(storeSession)
		log.Warningf("ID registered but offline: %s", send2ID)
		cmd.ChangeCmdName(protocol.ROUTE_SEND_P2P_MSG_CMD)
		//ms := self.Router.cfg.MsgServerList[0]
		// Route back to the msg server the request arrived from.
		ms := session.Conn().RemoteAddr().String()
		err = self.Router.msgServerClientMap[ms].Send(libnet.Json(cmd))
		if err != nil {
			log.Error("error:", err)
			return err
		}
	} else {
		// Receiver is online: forward to the msg server holding its session.
		log.Info(cacheSession.MsgServerAddr)
		cmd.ChangeCmdName(protocol.ROUTE_SEND_P2P_MSG_CMD)
		log.Info(cmd)
		err = self.Router.msgServerClientMap[cacheSession.MsgServerAddr].Send(libnet.Json(cmd))
		if err != nil {
			log.Error("error:", err)
			return err
		}
	}
	return nil
}
// procAckP2pStatus routes a P2P delivery-status ack toward its destination
// client (args[2]), using the same online/offline lookup strategy as
// procSendMsgP2P: online receivers get the ack via the msg server holding
// their session; registered-but-offline receivers cause the ack to be sent
// back to the originating msg server.
func (self *ProtoProc) procAckP2pStatus(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procAckP2pStatus")
	var err error
	send2ID := cmd.GetArgs()[2]
	self.Router.readMutex.Lock()
	defer self.Router.readMutex.Unlock()
	cacheSession, err := self.Router.sessionCache.Get(send2ID)
	if cacheSession == nil || cacheSession.Alive == false {
		// Cache miss or dead session: fall back to the persistent store.
		// NOTE: `err :=` below shadows the outer err; harmless here because
		// every failure path returns inside this branch.
		storeSession, err := self.Router.mongoStore.GetSessionFromCid(send2ID)
		if err != nil {
			log.Warningf("ID %s not registered, msg dropped", send2ID)
			return err
		}
		log.Info(storeSession)
		log.Warningf("ID registered but offline: %s", send2ID)
		cmd.ChangeCmdName(protocol.ROUTE_ACK_P2P_STATUS_CMD)
		// Route back to the msg server the ack arrived from.
		ms := session.Conn().RemoteAddr().String()
		err = self.Router.msgServerClientMap[ms].Send(libnet.Json(cmd))
		if err != nil {
			log.Error("error:", err)
			return err
		}
	} else {
		// Receiver is online: forward to the msg server holding its session.
		log.Info(cacheSession.MsgServerAddr)
		cmd.ChangeCmdName(protocol.ROUTE_ACK_P2P_STATUS_CMD)
		log.Info(cmd)
		err = self.Router.msgServerClientMap[cacheSession.MsgServerAddr].Send(libnet.Json(cmd))
		if err != nil {
			log.Error("error:", err)
			return err
		}
	}
	return nil
}
/*
Router -> MsgServer
ROUTE_SEND_TOPIC_MSG_CMD
arg0: ClientID   // sender's client ID
arg1: ClientType // sender's terminal type: client or device
arg2: Msg        // message body
arg3: TopicName  // topic (group) name; devices need not provide it
*/
// procSendMsgTopic fans a topic message out to every msg server hosting at
// least one live member of the topic — except the server the message arrived
// from, which delivers to its local members itself.
func (self *ProtoProc) procSendMsgTopic(cmd protocol.Cmd, session *libnet.Session) error {
	log.Info("procSendMsgTopic")
	var err error
	ms := session.Conn().RemoteAddr().String()
	topicName := cmd.GetArgs()[1]
	// Check whether the topic exists.
	topicCacheData, err := self.Router.topicCache.Get(topicName)
	if err != nil {
		log.Warningf("TOPIC %s lookup failed: %s", topicName, err.Error())
		return err
	}
	if topicCacheData == nil {
		// BUG FIX: the original called err.Error() here even when err was
		// nil (a plain cache miss), panicking with a nil dereference.
		log.Warningf("TOPIC %s not exist", topicName)
		return nil
	}
	cmd.ChangeCmdName(protocol.ROUTE_SEND_TOPIC_MSG_CMD)
	log.Info(protocol.ROUTE_SEND_TOPIC_MSG_CMD)
	log.Info(cmd)
	for ip, num := range topicCacheData.AliveMemberNumMap {
		if num > 0 {
			log.Warningf("topic %s has %d member(s) in ip %s", topicName, num, ip)
			if ip != ms {
				// Members live on another server: route the message there.
				err = self.Router.msgServerClientMap[ip].Send(libnet.Json(cmd))
				if err != nil {
					log.Error("error:", err)
					return err
				}
			}
		}
	}
	return nil
}
<file_sep>/protocol/topic.go
//
// Copyright 2014 <NAME>. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protocol
import (
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/storage/mongo_store"
"github.com/oikomi/FishChatServer/storage/redis_store"
)
// TopicMap indexes topics by topic name.
type TopicMap map[string]*Topic

// Topic is the in-memory representation of a chat group hosted on a msg
// server.
type Topic struct {
	TopicName    string
	MsgAddr      string          // address of the msg server hosting the topic
	Channel      *libnet.Channel // broadcast channel for topic members
	TA           *TopicAttribute // creator information
	ClientIDList []string
	// TSD caches the topic's Redis-backed data.
	// NOTE(review): NewTopic leaves TSD nil and AddMember dereferences it,
	// so callers must assign TSD before adding members — confirm call sites.
	TSD *redis_store.TopicCacheData
}

// NewTopic creates a Topic named topicName hosted at msgAddr, recording the
// creator's ID and session. TSD is left nil for the caller to populate.
func NewTopic(topicName string, msgAddr string, CreaterID string, CreaterSession *libnet.Session) *Topic {
	return &Topic{
		TopicName:    topicName,
		MsgAddr:      msgAddr,
		Channel:      new(libnet.Channel),
		TA:           NewTopicAttribute(CreaterID, CreaterSession),
		ClientIDList: make([]string, 0),
	}
}
// AddMember appends m to the cached member list.
// NOTE(review): panics if TSD has not been assigned yet (it is nil right
// after NewTopic) — verify every caller sets TSD first.
func (self *Topic) AddMember(m *mongo_store.Member) {
	self.TSD.MemberList = append(self.TSD.MemberList, m)
}
// TopicAttribute records who created a topic and over which session.
type TopicAttribute struct {
	CreaterID      string
	CreaterSession *libnet.Session
}

// NewTopicAttribute bundles the creator's ID and session.
func NewTopicAttribute(CreaterID string, CreaterSession *libnet.Session) *TopicAttribute {
	attr := new(TopicAttribute)
	attr.CreaterID = CreaterID
	attr.CreaterSession = CreaterSession
	return attr
}
<file_sep>/storage/mongo_store/session_store.go
//
// Copyright 2014-2015 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mongo_store
// SessionStoreData is the MongoDB document describing a registered client.
type SessionStoreData struct {
	ClientID   string   `bson:"ClientID"`
	ClientPwd  string   `bson:"ClientPwd"`
	ClientName string   `bson:"ClientName"`
	ClientType string   `bson:"ClientType"` // terminal type, e.g. client or device
	TopicList  []string `bson:"TopicList"`  // names of topics the client joined
}
func NewSessionStoreData(clientID string, clientPwd string, clientType string) *SessionStoreData {
return &SessionStoreData{
ClientID: clientID,
ClientPwd: <PASSWORD>,
ClientType: clientType,
TopicList: make([]string, 0),
}
}
// AddTopic appends topic name t to the client's topic list. No deduplication
// is performed; call TopicExist first if uniqueness matters.
func (self *SessionStoreData) AddTopic(t string) {
	self.TopicList = append(self.TopicList, t)
}
// GetTopics returns the client's topic list (the internal slice, not a copy).
func (self *SessionStoreData) GetTopics() []string {
	return self.TopicList
}
// TopicExist reports whether topic name t is in the client's topic list.
func (self *SessionStoreData) TopicExist(t string) bool {
	for i := range self.TopicList {
		if self.TopicList[i] == t {
			return true
		}
	}
	return false
}
// RemoveTopic deletes the first occurrence of topic name t from the list;
// it is a no-op when t is absent.
func (self *SessionStoreData) RemoveTopic(t string) {
	idx := -1
	for i, name := range self.TopicList {
		if name == t {
			idx = i
			break
		}
	}
	if idx >= 0 {
		self.TopicList = append(self.TopicList[:idx], self.TopicList[idx+1:]...)
	}
}
<file_sep>/manager/session_cache.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"github.com/oikomi/FishChatServer/storage/redis_store"
)
// SessionCacheCmd is the manager-side wrapper around a session-cache command
// decoded from JSON: the command name, its string arguments, and the decoded
// session payload.
type SessionCacheCmd struct {
	CmdName string
	Args    []string
	AnyData *redis_store.SessionCacheData
}

// GetCmdName returns the command's name.
func (self SessionCacheCmd)GetCmdName() string {
	return self.CmdName
}

// ChangeCmdName renames the command.
// FIXME: value receiver — this mutates a copy, so the caller never observes
// the change. Likely needs a pointer receiver; verify against the interface
// this type must satisfy before changing.
func (self SessionCacheCmd)ChangeCmdName(newName string) {
	self.CmdName = newName
}

// GetArgs returns the argument list.
func (self SessionCacheCmd)GetArgs() []string {
	return self.Args
}

// AddArg appends an argument.
// FIXME: value receiver — the append is lost when the copied header is
// discarded; the caller's Args stays unchanged.
func (self SessionCacheCmd)AddArg(arg string) {
	self.Args = append(self.Args, arg)
}

// ParseCmd fills the command from a raw message list (msglist[1] is the
// name, the remainder are arguments).
// FIXME: value receiver — these assignments never reach the caller's value.
func (self SessionCacheCmd)ParseCmd(msglist []string) {
	self.CmdName = msglist[1]
	self.Args = msglist[2:]
}

// GetAnyData exposes the decoded session payload.
func (self SessionCacheCmd)GetAnyData() interface{} {
	return self.AnyData
}
package libnet
import (
"errors"
"net"
"sync/atomic"
"github.com/oikomi/FishChatServer/libnet/syncs"
)
// Errors returned by the session send paths.
var (
	SendToClosedError     = errors.New("Send to closed session")
	PacketTooLargeError   = errors.New("Packet too large")
	AsyncSendTimeoutError = errors.New("Async send timeout")
)

// Package-level defaults used by the utility constructors (Listen, Dial).
var (
	DefaultSendChanSize   = 1024 // Default session send chan buffer size.
	DefaultConnBufferSize = 1024 // Default session read buffer size.
	DefaultProtocol       = PacketN(4, BigEndian) // Default protocol for utility APIs.
)
// Listen opens a listener on the given network/address and wraps it in a
// Server that speaks the package's default packet protocol.
func Listen(network, address string) (*Server, error) {
	listener, err := net.Listen(network, address)
	if err != nil {
		return nil, err
	}
	server := NewServer(listener, DefaultProtocol)
	return server, nil
}
// Server accepts connections and tracks the sessions created from them.
type Server struct {
	// About sessions
	maxSessionId uint64              // last session id handed out (atomically incremented)
	sessions     map[uint64]*Session // live sessions keyed by id
	sessionMutex syncs.Mutex         // guards the sessions map
	// About network
	listener    net.Listener
	protocol    Protocol
	broadcaster *Broadcaster
	// About server start and stop
	stopFlag int32           // 0 = running, 1 = stopped (CAS-guarded in Stop)
	stopWait syncs.WaitGroup // one count per live session; drained by Stop
	SendChanSize   int         // Session send chan buffer size.
	ReadBufferSize int         // Session read buffer size.
	State          interface{} // server state.
}
// NewServer wraps an existing listener with the given protocol, using the
// package default buffer sizes, and prepares the shared broadcaster.
func NewServer(listener net.Listener, protocol Protocol) *Server {
	s := &Server{
		listener:       listener,
		protocol:       protocol,
		sessions:       map[uint64]*Session{},
		SendChanSize:   DefaultSendChanSize,
		ReadBufferSize: DefaultConnBufferSize,
	}
	s.broadcaster = NewBroadcaster(protocol.New(s), s.fetchSession)
	return s
}
// Listener returns the underlying net.Listener.
func (server *Server) Listener() net.Listener {
	return server.listener
}
// Protocol returns the wire protocol this server speaks.
func (server *Server) Protocol() Protocol {
	return server.protocol
}
// Broadcast sends one message to all sessions. The message is encoded only
// once, so this is cheaper than sending to each session individually.
func (server *Server) Broadcast(encoder Encoder) ([]BroadcastWork, error) {
	return server.broadcaster.Broadcast(encoder)
}
// Accept waits for one incoming connection and registers it as a new
// session with a unique, atomically allocated id.
func (server *Server) Accept() (*Session, error) {
	conn, err := server.listener.Accept()
	if err != nil {
		return nil, err
	}
	id := atomic.AddUint64(&server.maxSessionId, 1)
	return server.newSession(id, conn), nil
}
// Serve accepts connections forever, invoking handler on its own goroutine
// for each new session. When Accept fails the server is stopped and the
// accept error is returned.
func (server *Server) Serve(handler func(*Session)) error {
	for {
		session, acceptErr := server.Accept()
		if acceptErr != nil {
			server.Stop()
			return acceptErr
		}
		go handler(session)
	}
	return nil
}
// Stop shuts the server down exactly once: the CAS on stopFlag guarantees
// that only the first caller closes the listener, closes every session, and
// then waits for all session bookkeeping to drain.
func (server *Server) Stop() {
	if atomic.CompareAndSwapInt32(&server.stopFlag, 0, 1) {
		server.listener.Close()
		server.closeSessions()
		server.stopWait.Wait()
	}
}
// newSession constructs a Session for conn and registers it in the server's
// session table.
func (server *Server) newSession(id uint64, conn net.Conn) *Session {
	s := NewSession(id, conn, server.protocol, server.SendChanSize, server.ReadBufferSize)
	server.putSession(s)
	return s
}
// putSession registers a session under the lock: it installs a close
// callback that removes the session again, inserts it into the map, and
// bumps the stop wait group so Stop can wait for it. The callback is
// installed before the map insert so a concurrent close cannot be missed.
func (server *Server) putSession(session *Session) {
	server.sessionMutex.Lock()
	defer server.sessionMutex.Unlock()
	session.AddCloseCallback(server, func() {
		server.delSession(session)
	})
	server.sessions[session.id] = session
	server.stopWait.Add(1)
}
// delSession is the inverse of putSession: under the lock it removes the
// close callback, deletes the session from the map, and releases its count
// on the stop wait group.
func (server *Server) delSession(session *Session) {
	server.sessionMutex.Lock()
	defer server.sessionMutex.Unlock()
	session.RemoveCloseCallback(server)
	delete(server.sessions, session.id)
	server.stopWait.Done()
}
// copySessions snapshots the current session list under the lock so callers
// can iterate (and close) sessions without holding sessionMutex.
func (server *Server) copySessions() []*Session {
	server.sessionMutex.Lock()
	defer server.sessionMutex.Unlock()
	snapshot := make([]*Session, 0, len(server.sessions))
	for _, s := range server.sessions {
		snapshot = append(snapshot, s)
	}
	return snapshot
}
// fetchSession invokes callback for every live session while holding
// sessionMutex; callbacks must not call back into methods that take the
// same lock (putSession/delSession) or they will deadlock.
func (server *Server) fetchSession(callback func(*Session)) {
	server.sessionMutex.Lock()
	defer server.sessionMutex.Unlock()
	for _, session := range server.sessions {
		callback(session)
	}
}
// closeSessions closes every live session. It iterates over a snapshot
// because Session.Close fires the close callback, which re-locks the
// session table to remove the entry (closing in place would deadlock).
func (server *Server) closeSessions() {
	for _, s := range server.copySessions() {
		s.Close()
	}
}
<file_sep>/common/util.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"math/rand"
"time"
"github.com/oikomi/FishChatServer/base"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/storage/mongo_store"
"github.com/oikomi/FishChatServer/storage/redis_store"
)
// KeyPrefix namespaces all Redis keys written by this package.
const KeyPrefix string = base.COMM_PREFIX

// Default Redis timeouts, in milliseconds.
var DefaultRedisConnectTimeout uint32 = 2000
var DefaultRedisReadTimeout uint32 = 1000
var DefaultRedisWriteTimeout uint32 = 1000

// DefaultRedisOptions targets a local Redis on the default port (database 1).
var DefaultRedisOptions redis_store.RedisStoreOptions = redis_store.RedisStoreOptions{
	Network:        "tcp",
	Address:        ":6379",
	ConnectTimeout: time.Duration(DefaultRedisConnectTimeout) * time.Millisecond,
	ReadTimeout:    time.Duration(DefaultRedisReadTimeout) * time.Millisecond,
	WriteTimeout:   time.Duration(DefaultRedisWriteTimeout) * time.Millisecond,
	Database:       1,
	KeyPrefix:      base.COMM_PREFIX,
}
// SelectServer picks a random entry from serverList. serverNum is kept for
// interface compatibility but is clamped to len(serverList), so an
// inconsistent count can no longer cause an out-of-range index; an empty
// list (or non-positive count) yields "".
func SelectServer(serverList []string, serverNum int) string {
	if serverNum > len(serverList) {
		serverNum = len(serverList)
	}
	if serverNum <= 0 {
		return ""
	}
	return serverList[rand.Intn(serverNum)]
}
// GetSessionFromCID looks up the session for client ID in either the Redis
// cache or the Mongo store, depending on storeOp's concrete type. It returns
// a *redis_store.SessionCacheData or *mongo_store.SessionStoreData
// respectively, or NOTFOUNT when storeOp is of an unsupported type.
// Rewritten with the idiomatic typed switch; dead commented-out code removed.
func GetSessionFromCID(storeOp interface{}, ID string) (interface{}, error) {
	switch op := storeOp.(type) {
	case *redis_store.SessionCache:
		cacheData, err := op.Get(ID)
		if err != nil {
			log.Warningf("no ID : %s", ID)
			return nil, err
		}
		if cacheData != nil {
			log.Info(cacheData)
		}
		return cacheData, nil
	case *mongo_store.MongoStore:
		storeData, err := op.GetSessionFromCid(ID)
		if err != nil {
			log.Warningf("no ID : %s", ID)
			return nil, err
		}
		if storeData != nil {
			log.Info(storeData)
		}
		return storeData, nil
	}
	return nil, NOTFOUNT
}
// DelSessionFromCID removes the cached session for client ID.
// BUG FIX: the original fell through to `return NOTFOUNT` even after a
// successful delete; it now returns the delete result (nil on success) and
// reserves NOTFOUNT for unsupported storeOp types.
func DelSessionFromCID(storeOp interface{}, ID string) error {
	switch op := storeOp.(type) {
	case *redis_store.SessionCache:
		err := op.Delete(ID)
		if err != nil {
			log.Warningf("no ID : %s", ID)
		}
		return err
	}
	return NOTFOUNT
}
// GetTopicFromTopicName looks up topicName in either the Redis topic cache
// or the Mongo store, depending on storeOp's concrete type. It returns a
// *redis_store.TopicCacheData or *mongo_store.TopicStoreData respectively,
// or NOTFOUNT when storeOp is of an unsupported type.
// Rewritten with the idiomatic typed switch; dead commented-out code removed.
func GetTopicFromTopicName(storeOp interface{}, topicName string) (interface{}, error) {
	switch op := storeOp.(type) {
	case *redis_store.TopicCache:
		cacheData, err := op.Get(topicName)
		if err != nil {
			log.Warningf("no topicName : %s", topicName)
			return nil, err
		}
		if cacheData != nil {
			log.Info(cacheData)
		}
		return cacheData, nil
	case *mongo_store.MongoStore:
		storeData, err := op.GetTopicFromCid(topicName)
		if err != nil {
			log.Warningf("no topicName : %s", topicName)
			return nil, err
		}
		if storeData != nil {
			log.Info(storeData)
		}
		return storeData, nil
	}
	return nil, NOTFOUNT
}
// StoreData persists data into storeOp, choosing the target by data's
// concrete type: session/topic cache entries go to Redis, session/topic
// documents go to Mongo. Unknown types return nil, matching the original.
// BUG FIX: the original wrote `return err` *before* `log.Error`, so failures
// were never logged (unreachable code); the log now happens first.
func StoreData(storeOp interface{}, data interface{}) error {
	switch d := data.(type) {
	case *redis_store.SessionCacheData:
		if err := storeOp.(*redis_store.SessionCache).Set(d); err != nil {
			log.Error("error:", err)
			return err
		}
		log.Info("cache sesion success")
		return nil
	case *redis_store.TopicCacheData:
		if err := storeOp.(*redis_store.TopicCache).Set(d); err != nil {
			log.Error("error:", err)
			return err
		}
		log.Info("cache topic success")
		return nil
	case *mongo_store.SessionStoreData:
		if err := storeOp.(*mongo_store.MongoStore).Set(d); err != nil {
			log.Error("error:", err)
			return err
		}
		log.Info("store session success")
		return nil
	case *mongo_store.TopicStoreData:
		if err := storeOp.(*mongo_store.MongoStore).Set(d); err != nil {
			log.Error("error:", err)
			return err
		}
		log.Info("store topic success")
		return nil
	}
	return nil
}
// GetOfflineMsgFromOwnerName fetches the queued offline messages for
// ownerName from the offline-message cache; it returns NOTFOUNT when
// storeOp is not a *redis_store.OfflineMsgCache.
func GetOfflineMsgFromOwnerName(storeOp interface{}, ownerName string) (*redis_store.OfflineMsgCacheData, error) {
	cache, ok := storeOp.(*redis_store.OfflineMsgCache)
	if !ok {
		return nil, NOTFOUNT
	}
	msgs, err := cache.Get(ownerName)
	if err != nil {
		log.Warningf("no ownerName : %s", ownerName)
		return nil, err
	}
	if msgs != nil {
		log.Info(msgs)
	}
	return msgs, nil
}
<file_sep>/storage/mongo_store/mongo_store.go
//
// Copyright 2014-2015 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mongo_store
import (
"sync"
"time"
"github.com/oikomi/FishChatServer/log"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// MongoStoreOptions is a placeholder for future MongoDB connection options.
type MongoStoreOptions struct {
}

// MongoStore wraps a shared mgo session; rwMutex serializes all operations
// performed through it.
type MongoStore struct {
	opts    *MongoStoreOptions
	session *mgo.Session
	rwMutex sync.Mutex
}
// NewMongoStore dials MongoDB at ip+port (with optional credentials) and
// returns a store bound to the session. It panics when the dial fails,
// matching the original's startup-fatal behavior.
// BUG FIX: the original called session.SetMode *before* checking the dial
// error, dereferencing a nil session on failure; the error is now checked
// first.
func NewMongoStore(ip string, port string, user string, password string) *MongoStore {
	var url string
	if user == "" && password == "" {
		url = ip + port
	} else {
		url = user + ":" + password + "@" + ip + port
	}
	log.Info("connect to mongo : ", url)
	maxWait := time.Duration(5 * time.Second)
	session, err := mgo.DialWithTimeout(url, maxWait)
	if err != nil {
		panic(err)
	}
	session.SetMode(mgo.Monotonic, true)
	return &MongoStore{
		session: session,
	}
}
// Init is a placeholder for collection setup; it currently does nothing.
func (self *MongoStore) Init() {
	//self.session.DB("im").C("client_info")
}
// Set upserts data into the proper MongoDB collection: SessionStoreData is
// keyed by ClientID in the client collection, TopicStoreData by TopicName in
// the topic collection. Values of any other type are silently ignored.
func (self *MongoStore) Set(data interface{}) error {
	log.Info("MongoStore Update")
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	var err error
	switch d := data.(type) {
	case *SessionStoreData:
		coll := self.session.DB(DATA_BASE_NAME).C(CLIENT_INFO_COLLECTION)
		log.Info("cid : ", d.ClientID)
		_, err = coll.Upsert(bson.M{"ClientID": d.ClientID}, d)
		if err != nil {
			log.Error(err.Error())
			return err
		}
	case *TopicStoreData:
		coll := self.session.DB(DATA_BASE_NAME).C(TOPIC_INFO_COLLECTION)
		log.Info("topicName : ", d.TopicName)
		_, err = coll.Upsert(bson.M{"TopicName": d.TopicName}, d)
		if err != nil {
			log.Error(err.Error())
			return err
		}
	}
	return err
}
// GetTopicFromCid loads the topic document whose TopicName equals cid.
// BUG FIX: the original passed a nil *TopicStoreData to Query.One, which
// cannot populate anything; decode into a value and return its address.
func (self *MongoStore) GetTopicFromCid(cid string) (*TopicStoreData, error) {
	log.Info("MongoStore GetTopicFromCid")
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	op := self.session.DB(DATA_BASE_NAME).C(TOPIC_INFO_COLLECTION)
	var result TopicStoreData
	if err := op.Find(bson.M{"TopicName": cid}).One(&result); err != nil {
		log.Error(err.Error())
		return nil, err
	}
	return &result, nil
}
// DeleteTopic removes the topic document whose TopicName equals cid.
func (self *MongoStore) DeleteTopic(cid string) error {
	log.Info("MongoStore DeleteTopic")
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	coll := self.session.DB(DATA_BASE_NAME).C(TOPIC_INFO_COLLECTION)
	err := coll.Remove(bson.M{"TopicName": cid})
	return err
}
// GetSessionFromCid looks up the session record whose ClientID equals cid.
// Returns (nil, err) when the query fails or no document matches.
func (self *MongoStore) GetSessionFromCid(cid string) (*SessionStoreData, error) {
	log.Info("MongoStore GetSessionFromCid")
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	op := self.session.DB(DATA_BASE_NAME).C(CLIENT_INFO_COLLECTION)
	// BUG FIX: decode into an addressable struct. The original passed a
	// nil *SessionStoreData to One(), which has nothing to populate and
	// cannot return valid data.
	var result SessionStoreData
	if err := op.Find(bson.M{"ClientID": cid}).One(&result); err != nil {
		log.Error(err.Error())
		return nil, err
	}
	return &result, nil
}
// DeleteSession removes the session record identified by cid (ClientID).
// Returns mgo's error (mgo.ErrNotFound when no document matched).
func (self *MongoStore) DeleteSession(cid string) error {
	log.Info("MongoStore DeleteSession")
	self.rwMutex.Lock()
	defer self.rwMutex.Unlock()
	collection := self.session.DB(DATA_BASE_NAME).C(CLIENT_INFO_COLLECTION)
	return collection.Remove(bson.M{"ClientID": cid})
}
// Close releases the underlying mgo session; the store is unusable afterwards.
func (self *MongoStore) Close() {
self.session.Close()
}
<file_sep>/monitor/controllers/util.go
//
// Copyright 2014-2015 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controllers
import (
"net"
"fmt"
"bytes"
"errors"
"strings"
"os/exec"
"github.com/astaxie/beego"
)
// RunShellCmd runs s through /bin/sh -c, echoing the command's stdout to
// this process's stdout on success. Failures are logged via beego and the
// error from cmd.Run is returned.
func RunShellCmd(s string) error {
	var stdout bytes.Buffer
	shell := exec.Command("/bin/sh", "-c", s)
	shell.Stdout = &stdout
	if err := shell.Run(); err != nil {
		beego.Error(err)
		return err
	}
	fmt.Printf("%s", stdout.String())
	return nil
}
// GetLocalIP returns the first address bound to interface inter.
// NOTE(review): the returned string is addr.String(), which for most
// interfaces includes the CIDR suffix (e.g. "192.168.1.5/24") — confirm
// callers expect that form. Returns ("", nil) when the interface exists
// but has no addresses.
func GetLocalIP(inter string) (string, error) {
	iface, err := net.InterfaceByName(inter)
	if err != nil {
		beego.Error("GetLocalIP Failed")
		return "", err
	}
	addrs, err := iface.Addrs()
	if err != nil {
		beego.Error("GetLocalIP Failed")
		return "", err
	}
	if len(addrs) == 0 {
		return "", err
	}
	first := addrs[0]
	fmt.Printf("Interface %q, address %v\n", iface.Name, first)
	return first.String(), err
}
// GetLocalMac returns the hardware (MAC) address of interface inter.
func GetLocalMac(inter string) (string, error) {
	iface, err := net.InterfaceByName(inter)
	if err != nil {
		beego.Error("GetLocalMac Failed")
		return "", err
	}
	mac := iface.HardwareAddr
	return mac.String(), nil
}
// GetLocalMask returns the classful default netmask (via IP.DefaultMask)
// for the first address on interface inter.
// NOTE(review): DefaultMask ignores the interface's actual CIDR prefix
// and is only defined for IPv4 — confirm classful semantics are intended.
func GetLocalMask(inter string) (string, error) {
	ifi, err := net.InterfaceByName(inter)
	if err != nil {
		// BUG FIX: these log messages previously said "GetLocalMac Failed"
		// (copy-paste), misattributing failures in the logs.
		beego.Error("GetLocalMask Failed")
		return "", err
	}
	addrs, err := ifi.Addrs()
	if err != nil {
		beego.Error("GetLocalMask Failed")
		return "", err
	}
	for _, a := range addrs {
		fullIp := strings.Split(a.String(), "/")[0]
		ip := net.ParseIP(fullIp)
		if ip == nil {
			beego.Error("ParseIP Failed")
			return "", errors.New("ParseIP Failed")
		}
		mask := ip.DefaultMask()
		if mask == nil {
			// Robustness: DefaultMask returns nil for non-IPv4 addresses;
			// previously this produced the literal string "<nil>".
			return "", errors.New("no IPv4 default mask for " + fullIp)
		}
		return mask.String(), err
	}
	return "", err
}
<file_sep>/monitor/server.go
//
// Copyright 2014 <NAME> (<EMAIL>). All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"sync"
"time"
"encoding/json"
"github.com/oikomi/FishChatServer/log"
"github.com/oikomi/FishChatServer/base"
"github.com/oikomi/FishChatServer/libnet"
"github.com/oikomi/FishChatServer/protocol"
"github.com/oikomi/FishChatServer/storage/redis_store"
)
// Monitor aggregates the monitor service's state: one outbound libnet
// session per configured msg_server, a Redis-backed session cache, and a
// topic -> server routing map.
type Monitor struct {
cfg *MonitorConfig // parsed service configuration
msgServerClientMap map[string]*libnet.Session // msg_server address -> open session
sessionStore *redis_store.SessionCache // shared session cache (Redis DB 1)
topicServerMap map[string]string // topic name -> owning msg_server address
readMutex sync.Mutex // NOTE(review): declared but unused in the methods visible here — confirm
}
// NewMonitor builds a Monitor from cfg, wiring up an empty msg-server
// session map, an empty topic routing map, and a Redis-backed session
// cache (database 1, keys prefixed with base.COMM_PREFIX).
func NewMonitor(cfg *MonitorConfig) *Monitor {
	redisOpts := &redis_store.RedisStoreOptions{
		Network:        "tcp",
		Address:        cfg.Redis.Addr + cfg.Redis.Port,
		ConnectTimeout: time.Duration(cfg.Redis.ConnectTimeout) * time.Millisecond,
		ReadTimeout:    time.Duration(cfg.Redis.ReadTimeout) * time.Millisecond,
		WriteTimeout:   time.Duration(cfg.Redis.WriteTimeout) * time.Millisecond,
		Database:       1,
		KeyPrefix:      base.COMM_PREFIX,
	}
	monitor := &Monitor{
		cfg:                cfg,
		msgServerClientMap: make(map[string]*libnet.Session),
		sessionStore:       redis_store.NewSessionCache(redis_store.NewRedisStore(redisOpts)),
		topicServerMap:     make(map[string]string),
	}
	return monitor
}
// connectMsgServer dials a msg_server over TCP and returns the session.
// BUG FIX: on failure this now returns the error instead of panicking.
// The original logged the error and then panicked, so the error return —
// which subscribeChannels already checks — was unreachable, and a single
// unreachable msg_server killed the whole monitor process.
func (self *Monitor) connectMsgServer(ms string) (*libnet.Session, error) {
	client, err := libnet.Dial("tcp", ms)
	if err != nil {
		log.Error(err.Error())
		return nil, err
	}
	return client, nil
}
// handleMsgServerClient drives the receive loop of one msg_server session,
// decoding each inbound buffer as a protocol.CmdMonitor. Decode failures
// are logged and returned (which terminates the Process loop); decoded
// commands are currently dropped after validation.
func (self *Monitor) handleMsgServerClient(msc *libnet.Session) {
	handler := func(msg *libnet.InBuffer) error {
		//log.Info("msg_server", msc.Conn().RemoteAddr().String()," say: ", string(msg.Data))
		var cmd protocol.CmdMonitor
		if err := json.Unmarshal(msg.Data, &cmd); err != nil {
			log.Error("error:", err)
			return err
		}
		return nil
	}
	msc.Process(handler)
}
// subscribeChannels connects to every configured msg_server, subscribes to
// the SYSCTRL_MONITOR channel on each, and then spawns one receive
// goroutine per established session. It stops and returns at the first
// failure; all subscriptions are in place before any receive loop starts.
func (self *Monitor) subscribeChannels() error {
	log.Info("monitor start to subscribeChannels")
	for _, ms := range self.cfg.MsgServerList {
		msgServerClient, err := self.connectMsgServer(ms)
		if err != nil {
			log.Error(err.Error())
			return err
		}
		cmd := protocol.NewCmdSimple(protocol.SUBSCRIBE_CHANNEL_CMD)
		cmd.AddArg(protocol.SYSCTRL_MONITOR)
		cmd.AddArg(self.cfg.UUID)
		if err = msgServerClient.Send(libnet.Json(cmd)); err != nil {
			log.Error(err.Error())
			return err
		}
		self.msgServerClientMap[ms] = msgServerClient
	}
	for _, msc := range self.msgServerClientMap {
		go self.handleMsgServerClient(msc)
	}
	return nil
}
| f711e2be0e6bd11f5f39470ed13752897ea9a3c2 | [
"JavaScript",
"Go",
"Markdown",
"Shell"
] | 39 | Go | alvin921/FishChatServer | 877dcd51086a1e76b61e3e70f6b95b6ce03d8971 | c2e95e7e22507c07e87bb102d4bae792efba18db |
refs/heads/main | <repo_name>lerouxl/Tiles_ROI<file_sep>/Tiles ROI/utils.py
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from PIL import Image
class MPGrid:
    """Renders an image with a grid overlay on a matplotlib figure and maps
    mouse clicks back to the enclosing grid cell (xmin, ymin, xmax, ymax)."""

    def __init__(self, dpi: int = 120):
        self.dpi = dpi

    def Add(self, image, size=None, intervals=(100, 100), color="black", show_lbls=False):
        """Build a figure showing `image` (a path or an HxWx3 array) with grid
        lines every `intervals` pixels. Returns the figure, or None when a
        path cannot be read."""
        assert all(x > 0 for x in intervals), "intervals values should be > 0"
        if type(image) is str:
            try:
                image = self.read(image, size)
            except:
                print("[ERROR] Could not read the image file:", image)
                return
        self.image = image
        self.shape = shape = image.shape
        self.intervals = intervals
        self.fig = fig = plt.figure(figsize=(shape[1]//self.dpi, shape[0]//self.dpi), dpi=self.dpi)
        ax = fig.add_subplot(111)
        fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
        img_grid = self.add_grid(image, intervals, color)
        ax.imshow(img_grid)
        if show_lbls:
            self._add_gridsquare_labels(ax, intervals)
        ax.format_coord = self.cord_formater
        # BUG FIX: the press callback previously called
        # get_grid_square(shape, intervals), binding the image shape tuple to
        # the x coordinate and the interval tuple to y, which raised a
        # TypeError on every click. Pass the actual event coordinates.
        fig.canvas.mpl_connect(
            "button_press_event",
            lambda event: self.get_grid_square(event.xdata, event.ydata, intervals))
        return fig

    def read(self, src, size=None):
        """Load `src` with PIL (optionally resized to `size`) as a numpy array."""
        img = Image.open(src)
        if size:
            img = img.resize(size)
        return np.array(img)

    def _add_gridsquare_labels(self, ax, interval):
        """Draw a running cell index at the center of every grid square."""
        nx = abs(int((ax.get_xlim()[1]-ax.get_xlim()[0]) / interval[0]))
        ny = abs(int((ax.get_ylim()[1]-ax.get_ylim()[0]) / interval[1]))
        for j in range(ny):
            y = interval[1] / 2 + j * interval[1]
            for i in range(nx):
                x = interval[0] / 2. + i * interval[0]
                ax.text(x, y, '{:d}'.format(i+j*nx), color='w', ha='center', va='center')

    @staticmethod
    def add_grid(img, grid_intervals=(100, 100), color="black"):
        """Return a copy of `img` with grid lines drawn every
        grid_intervals (x, y) pixels in the given color."""
        img = img.copy()
        colors = {
            "black": [0, 0, 0],
            "white": [255, 255, 255]
        }
        assert color in colors, f"color should be one of {tuple(colors.keys())}"
        color = colors[color]
        gx, gy = grid_intervals
        img[:, ::gx, :] = color
        img[::gy, :, :] = color
        return img

    def cord_formater(self, x, y):
        """matplotlib coordinate formatter; also records the last cursor position."""
        self.mouse_x, self.mouse_y = x, y
        return f"{x:.2f} {y:.2f}"

    def get_grid_square(self, x=None, y=None, intervals=None):
        """Return (xmin, ymin, xmax, ymax) of the grid cell containing (x, y),
        clamped to the image bounds, or None when either coordinate is
        missing (e.g. a click outside the axes)."""
        img_shape = self.shape
        xi, yi = self.intervals if intervals is None else intervals
        # Robustness: matplotlib reports xdata/ydata as None for clicks
        # outside the axes; the original guarded only x.
        if x is None or y is None:
            return
        ms_x, ms_y = x, y
        x, y = xi * (ms_x // xi), yi * (ms_y // yi)
        xm, ym = min(x + xi, img_shape[1]), min(y + yi, img_shape[0])
        return tuple(map(int, (x, y, xm, ym)))
def highlight_roi(rois, img_dim):
    """Overlay the selected grid cells as a translucent mask on the current
    axes. `rois` holds (xmin, ymin, xmax, ymax) boxes; `img_dim` is the
    displayed image's shape. Returns the AxesImage created by plt.imshow."""
    mask = np.zeros(img_dim)
    for xmin, ymin, xmax, ymax in rois:
        mask[ymin:ymax, xmin:xmax] = 1.0
    return plt.imshow(mask, alpha=0.3, cmap="jet")
def mpl2tk(plot, parent):
    """Embed a matplotlib figure in a Tk parent widget.
    Falls back to an empty figure when embedding `plot` fails (the error is
    printed). Returns (canvas, tk_widget)."""
    try:
        canvas = FigureCanvasTkAgg(plot, master=parent)
    except Exception as err:
        print(err)
        canvas = FigureCanvasTkAgg(plt.figure(), master=parent)
    canvas.draw()
    return canvas, canvas.get_tk_widget()
class Slider:
    """Cursor over a fixed list of slides with clamped navigation."""

    def __init__(self, slides):
        self.slides = slides
        self.size = len(slides)
        self.ind = 0  # index of the current slide

    def update(self, value):
        """Move the cursor by `value` steps and return the slide there.

        BUG FIX: the original returned None whenever a move ran past either
        end (even though it clamped the index), which made callers lose
        their current slide at the boundaries. The cursor is still clamped
        to [0, size-1], but the slide at the clamped position is returned.
        Returns None only when there are no slides at all.
        """
        self.ind += value
        if self.ind < 0:
            self.ind = 0
        elif self.ind >= self.size:
            self.ind = self.size - 1
        if 0 <= self.ind < self.size:
            return self.slides[self.ind]
        return None
def pack_all(*widgets, **kwargs):
    """Call .pack(**kwargs) on every widget, in order."""
    for w in widgets:
        w.pack(**kwargs)
def get_fname(path, re_dir=False):
    """Return the file name of `path` without its final extension.

    BUG FIX: the original used file.rsplit('.')[0], which splits on every
    dot, so "my.image.jpg" collapsed to "my". Only the last extension is
    stripped now ("my.image.jpg" -> "my.image").

    With re_dir=True, returns (directory, stem) instead of just the stem.
    """
    dir, file = os.path.split(path)
    fname = file.rsplit('.', 1)[0]
    if re_dir:
        return dir, fname
    return fname
import os
import tkinter as tk
import warnings
import json
from tkinter import ttk, messagebox, filedialog
from utils import *
warnings.filterwarnings('ignore')
class LabelWidget(ttk.LabelFrame):
    """A ttk.LabelFrame hosting exactly one child widget, exposed as
    self.widget and stretched to fill the frame."""

    def __init__(self, master, widget, text="", **kwargs):
        super().__init__(master, text=text)
        inner = widget(self, **kwargs)
        inner.pack(expand=True, fill="both")
        self.widget = inner
class LabelPopUp(tk.Toplevel):
    """Small pop-up window that asks for a label and writes the ROI
    parameters (minus the bookkeeping 'path' key) to a JSON file."""

    def __init__(self, master, params, **kwargs):
        super().__init__(master, **kwargs)
        self.params = params  # must contain 'path'; 'label' is added on save
        self.geometry('200x120')
        self.title('Labeler')
        self.lbl = LabelWidget(self, ttk.Entry, text="Enter roi label:")
        btn = ttk.Button(self, text="Save", command=self.save)
        pack_all(self.lbl, btn, expand=True, fill="x", pady=10)

    def save(self):
        """Persist self.params as JSON.

        If a file with the same name already holds a different label, save
        under "<name>_<label>.json" instead; if it holds the same label,
        ask before overwriting.
        """
        path = self.params['path']
        lbl = self.lbl.widget.get()
        self.params['label'] = lbl
        if os.path.exists(path):
            # FIX: read via a context manager (the original leaked the file
            # handle) and tolerate files without a 'label' key instead of
            # raising KeyError.
            with open(path) as fp:
                prv_lbl = json.load(fp).get('label')
            if prv_lbl != lbl:
                print("[INFO] found file with same name but different labels, saving it to a different name!")
                path, fname = get_fname(path, re_dir=True)
                fname = f"{fname}_{lbl}.json"
                path = os.path.join(path, fname)
            elif messagebox.askquestion(message="File Already exists. continue?") == "no":
                self.destroy()
                return
        del self.params["path"]
        with open(path, "w") as fp:
            json.dump(self.params, fp, indent=2)
        messagebox.showinfo(message="Label saved!")
        self.destroy()
class App(tk.Tk):
    """Main window: browse a folder of images, click grid cells to toggle
    regions of interest, and save labelled ROI sets as JSON files."""

    def __init__(self, dpi=150):
        super().__init__()
        self.title('Tiles ROI Segmenter')
        self.geometry("600x600")
        self.init_style()
        self.current_img = None   # path of the image currently displayed
        self.slider = None        # Slider over the selected folder's files
        self.save_dir = "./"      # directory label JSON files are written to
        self.dpi = dpi
        # NOTE(review): this attribute shadows tkinter's Widget.grid
        # method on the App instance — confirm that is acceptable.
        self.grid = MPGrid(dpi)
        self.rois = set()         # selected (xmin, ymin, xmax, ymax) cells
        self.display_opt = {
            "color": "black",
            "intervals": (100, 100),
            "size": None
        }
        self.init_ui()
        self.protocol("WM_DELETE_WINDOW", self.exit)
        self.mainloop()

    def init_style(self):
        """Apply the 'vista' ttk theme when available (Windows only)."""
        self.style = ttk.Style()
        try:
            self.style.theme_use('vista')
        except:
            pass

    def init_ui(self):
        """Build the three screen bands: navigation bar, image area, and
        the bottom options/actions panel."""
        NAV = ttk.Frame(self)
        NAV.place(relheight=0.1, relwidth=1)
        self.main = MAIN = ttk.Frame(self)
        MAIN.place(rely=0.1, relheight=0.8, relwidth=1)
        BTN = ttk.Frame(self)
        BTN.place(rely=0.9, relheight=0.1, relwidth=1)
        fold_btn = ttk.Button(NAV, text="Select an image folder", command=self.select_folder)
        save_dir = ttk.Button(NAV, text="Select a saving directory", command=self.select_savedir)
        back_btn = ttk.Button(NAV, text="Back", command=lambda: self.to(-1))
        next_btn = ttk.Button(NAV, text="Next", command=lambda: self.to(1))
        reset = ttk.Button(NAV, text="Reset", command=lambda: self.display(reset=True))
        pack_all(fold_btn, save_dir, back_btn, next_btn, reset, side="left", expand=True)
        frame_0 = ttk.Frame(BTN)
        frame_1 = ttk.Frame(BTN)
        pack_all(frame_0, frame_1, expand=True, fill="both", side="left")
        grid_inter = LabelWidget(frame_0, ttk.Entry, text="Grid Intervals: (x,y)", width=20)
        grid_cl = LabelWidget(frame_0, ttk.Combobox, text="Grid Color",
                              values=['black', "white"])
        img_resize = LabelWidget(frame_0, ttk.Entry, text="Image Size: (width, height)")
        pack_all(grid_inter, grid_cl, img_resize,
                 expand=True, side="left")

        def set_opt():
            """Read the option widgets, update display_opt, and redraw."""
            def process(x, default):
                # "a, b" -> (a, b); anything unparsable keeps the default.
                try:
                    x = tuple(map(int, [i.strip() for i in x.split(",")]))
                except:
                    x = default
                return x
            intervals = process(grid_inter.widget.get(), self.display_opt['intervals'])
            size = process(img_resize.widget.get(), None)
            cl = grid_cl.widget.get()
            cl = cl if cl else self.display_opt['color']
            opt = {"intervals": intervals, "size": size, "color": cl}
            self.display_opt.update(opt)
            self.display()

        commit_cfg = ttk.Button(frame_1, text="Commit Changes", command=set_opt)
        save = ttk.Button(frame_1, text="Save Label", command=self.label)
        pack_all(save, commit_cfg, expand=True, fill="both")

    def display(self, rois=None, reset=False):
        """Redraw the current image (with grid overlay and, optionally, the
        highlighted ROI set). reset=True also clears the ROI selection."""
        if self.current_img is None:
            return
        for child in self.main.winfo_children():
            child.destroy()
        if reset:
            self.rois = set()
        img = self.grid.Add(self.current_img, **self.display_opt)
        self.canv, img_widget = mpl2tk(img, self.main)
        img_widget.pack(expand=True, fill="both")
        self.canv.mpl_connect('button_press_event', self.extract_roi)
        if rois:
            img.add_subplot(111)
            highlight_roi(rois, self.grid.shape)

    def extract_roi(self, event):
        """Toggle the grid cell under a mouse click in/out of self.rois."""
        roi = self.grid.get_grid_square(event.xdata, event.ydata,
                                        self.display_opt['intervals'])  # xmin, ymin, xmax, ymax
        # BUG FIX: clicks outside the axes report xdata/ydata as None, for
        # which get_grid_square yields None; previously that None was added
        # to self.rois and crashed highlight_roi on the next redraw.
        if roi is None:
            return
        if roi not in self.rois:
            self.rois.add(roi)
        else:
            self.rois.remove(roi)
        self.display(self.rois)

    def exit(self):
        """Leave the mainloop and tear the window down."""
        self.quit()
        self.destroy()

    def select_folder(self):
        """Ask for an image folder and show its first file."""
        dir = filedialog.askdirectory(initialdir="./", title="Select an Image Folder")
        if not dir:
            return
        files = [os.path.join(dir, file) for file in os.listdir(dir)]
        self.slider = Slider(files)
        self.current_img = self.slider.update(0)
        self.display()

    def to(self, value):
        """Step `value` images forward/backward, clearing the ROI selection."""
        if self.slider is not None:
            self.rois = set()
            self.current_img = self.slider.update(value)
            self.display()

    def select_savedir(self):
        """Ask for the directory label JSON files should be written to."""
        self.save_dir = filedialog.askdirectory(initialdir="./",
                                                title="Select a Saving directory")

    def label(self):
        """Open the labelling pop-up for the current image and ROI set."""
        if not self.current_img:
            return
        fname = get_fname(self.current_img) + ".json"
        path = os.path.join(os.path.abspath(self.save_dir), fname)
        params = {
            "image_path": os.path.abspath(self.current_img),
            "path": path, "roi": list(self.rois),
            "image_size": self.grid.shape[:2]
        }
        LabelPopUp(self, params)
# Launch the GUI when run as a script (App.__init__ enters mainloop).
if __name__ == "__main__":
    App()
| dfe59ee407c1bced975b097adf183dc6b6f34c04 | [
"Python"
] | 2 | Python | lerouxl/Tiles_ROI | 7d0d0128075a03e5785787a3da5e9a1815553023 | 75f52a912549f15926438cf271e3b3a8abb95fc4 |
refs/heads/master | <repo_name>marinater/SequenceGA<file_sep>/master.py
from ga import *
import ImageMake
from math import floor
import matplotlib.pyplot as plt
from collections import deque
# Evolve one GA population per image row until every row matches the
# target picture within the acceptance threshold.
convergencePicture = ImageMake.generateMat('convergencePictures/checkerboard.jpg')
# One independent Model (population size 80) per row of target pixel values.
models = [Model(80, row) for row in convergencePicture]
s = [[int(str(x)) for x in a.BEST_SPECIES] for a in models]
ImageMake.makeImage(convergencePicture, show=True)
ImageMake.makeImage(s, name = 'EvolutionSnapShots/Initialized.jpg', save=True)

convergedRows = [False] * len(models)  # per-row "done" flags
mutationRates = [.1] * len(models)     # per-row GA mutation rate
eliteSizes = [40] * len(models)        # per-row elite carry-over count

for generation in range(10000):
    for rowIndex in range(len(models)):
        if not convergedRows[rowIndex]:
            mutationRate = mutationRates[rowIndex]
            model = models[rowIndex]
            model.nextGeneration(eliteSizes[rowIndex], mutationRate)
            error = model.BEST_ERROR
            # \r keeps the progress report on a single console line.
            print('\rGeneration {:3d}: Row {:2d}: {:.2f}'.format(generation, rowIndex, error), end='')
            if error < 800:
                # NOTE(review): 800 appears to be the per-row acceptance
                # threshold for the squared pixel error — confirm.
                convergedRows[rowIndex] = True
    # Snapshot the best candidate of every row once per generation.
    s = [[int(str(x)) for x in a.BEST_SPECIES] for a in models]
    # ImageMake.makeImage(s, name='EvolutionSnapShots/{}.jpg'.format(generation), save=True)
    ImageMake.makeImage(s, name='EvolutionSnapShots/0.jpg', save=True)
    if not False in convergedRows:
        break
<file_sep>/ImageMake.py
import numpy as np
import scipy.misc as smp
from PIL import Image
def makeImage(numberMat, name='Output.jpg', show=False, save=False):
    """Render a 2-D matrix of grayscale values (0-255) as an RGB image,
    upscaled 50x per cell; optionally display and/or save it.

    FIX: scipy.misc.toimage was removed from SciPy (>= 1.2), so the image
    is now built directly with PIL (already imported by this module). The
    per-pixel Python loop is replaced by a numpy broadcast.
    """
    height = len(numberMat)
    width = len(numberMat[0])
    gray = np.asarray(numberMat, dtype=np.uint8)
    data = np.repeat(gray[:, :, np.newaxis], 3, axis=2)  # gray -> RGB
    img = Image.fromarray(data)
    img = img.resize((50 * width, 50 * height))
    if show:
        img.show()  # View in default viewer
    if save:
        img.save(name)
def generateMat(name='creeper.jpg', blocksPerSide=20):
    """Sample `name` into a blocksPerSide x blocksPerSide matrix of
    green-channel values, reading the pixel at the center of each block.

    Generalized: the sampling step is now derived from the actual image
    size. The original hard-coded a 25px step (assuming a 500x500 input)
    and read out of bounds on smaller images; for 500x500 inputs the
    sampled pixels are identical (500 // 20 == 25).
    """
    im = Image.open(name)
    width, height = im.size
    stepX = height // blocksPerSide  # vertical step (rows)
    stepY = width // blocksPerSide   # horizontal step (columns)
    offsetX = stepX // 2
    offsetY = stepY // 2
    mat = []
    for x in range(blocksPerSide):
        row = []
        for y in range(blocksPerSide):
            # getpixel takes (column, row)
            pixel = im.getpixel((offsetY + stepY * y, offsetX + stepX * x))
            # Robustness: grayscale images return a bare int, not a tuple.
            row.append(pixel[1] if isinstance(pixel, tuple) else pixel)
        mat.append(row)
    return mat
<file_sep>/README.md
A Genetic Algorithm for Images
Genetic Algorithms are a form of Machine Learning that use the process of Natural Selection to generate an ideal solution to a problem. The flexibility of Genetic Algorithms make it ideal for situations that are NP-Hard or not representable by other forms of Machine Learning. For a Genetic Algorithm to work, the only necessary components are that solutions must be representable in a way that can be recombined and that there must be a way to quantifiably say how ‘good’ a solution is.
Steps to a generic Genetic Algorithm
1. Initialization
1. A population of possible solutions is generated
2. In most cases, the population is randomly generated, but it is possible to start the population with already known solutions so the algorithm can further optimize them.
2. Fitness Calculation
3. Each solution in the population has a ‘fitness’ score computed. This score is based on how correct the solution is.
1. Note that the correctness of the solution must be on a quantifiable spectrum, not simply yes or no
4. Fitness can be calculated in any number of ways, so long as it is a reliable measure of how ‘good’ the solution is
2. Many algorithms can calculate error using the concept of Mean Squared Error (Calculate error between the solution and the ideal, then square it). Squaring the error allows for greater distinction between ‘good’ and ‘not good’ items for when the algorithm is picking solutions
3. Create a New Population - 3 Steps to creating the new generation
5. Selection:
3. Pick which solutions should be used for breeding. This can be done in many different ways, but usually done by randomly picking individuals with a weight probability. Individuals with more fitness are more likely to be picked for breeding. An ‘elite population’ can also be implemented where the top few solutions are automatically selected for breeding, ensuring that the best solutions are not discarded by chance.
6. Cross Over:
4. The solutions selected for breeding are crossed together in almost the same way that it occurs in Prophase-1 of meiosis. Two ‘parent’ solutions each replace part of their own solution with part of the other parent’s solution. You can visualize this as crossing ‘abcd’ and ‘1234’ to create ‘ab34’. The point where the solutions are split is usually picked randomly.
7. Mutate:
5. To ensure that the population does not settle into local minima, random mutation is applied to solutions. The process of breeding can make it possible that something necessary for convergence to be discarded if was initialized as part of a poor solution. Mutation allows for genetic variation to exist so that the population does not eventually become homogeneous.
4. Replace
8. Discard the previous population and replace it with the new population generated
5. Repeat steps 2 - 4 until convergence
While very abstract, the takeaway is that Genetic Algorithms follow the steps of Natural Selection almost to a T. Granted, the digital representation of the population differs, but the key steps of selection, crossing-over, and mutation make Genetic Algorithms able to solve problems that cannot be optimized by other means of Machine Learning.
My Project
As a challenge, I decided to make my own Genetic Algorithm that starts with a population of randomly generated ‘images’ and converges it towards a final (pre-specified) picture. The videos are compilations of the top rated solution from each generation, over the course of approximately 8,000 generations. Whereas the original image starts out mostly as static, you can see how the algorithm gets closer and closer over time to the checkerboard pattern it was set to converge to. As a bonus, I included a compilation of my algorithm doing the same for a "Creeper" from Minecraft, but the algorithm didn’t run long enough for the image to fully converge.
Hello!
<file_sep>/Convergence Rate Graphs/ConvergenceRateTesting.py
from ga import *
import ImageMake
from math import floor
import matplotlib.pyplot as plt
def generateCurrentImage(toSave=False, toShow=False, imName='output.jpg'):
    """Snapshot the best species of every row-model in `grid` as an image.

    BUG FIX: the defaults were the *strings* 'False', which are truthy, so
    makeImage always showed/saved regardless of the flags. They are real
    booleans now.

    NOTE(review): `grid` is read as a global here, but it is only ever
    created as a local inside runTest — as written this raises NameError
    unless a module-level `grid` is defined elsewhere. Confirm intent.
    """
    a = []
    for row in grid:
        elements = [str(item) for item in row.BEST_SPECIES]
        a.append(elements)
    ImageMake.makeImage(a, show=toShow, save=toSave, name=imName)
def runTest(populationSize, mutationRate, eliteSize, k):
    # Evolve 20 row-models for up to 1000 generations and record the summed
    # best-error per generation; k only tags the console output.
    # NOTE(review): the target "a" * 20 is a string, but Model's error math
    # subtracts target values from integers — confirm string targets are
    # actually supported by Model/SequenceElement.
    print("Initializing population", k)
    grid = [Model(populationSize, "a" * 20) for i in range(20)]
    print("\tPopulation initialized")
    finished = [False] * len(grid)  # rows that reached zero error
    gridRows = len(grid)
    results = []                    # summed best-error per generation
    for i in range(1000):
        generationError = 0
        print('\tGeneration {:<30}'.format(str(i)))
        for index, a in enumerate(grid):
            if finished[index] == False:
                a.nextGeneration(eliteSize, mutationRate)
                error = a.BEST_ERROR
                generationError += error
                if error == 0.0:
                    finished[index] = True
            # \r redraws the text progress bar in place on the same line.
            percentComplete = int((index / gridRows) * 30) + 1
            print('\r\t|{:<30}|'.format('*'*percentComplete), end='')
        print('\r\t {:<28}'.format(str(floor(generationError))), end='\n')
        results.append(generationError)
        if not False in finished:
            # Every row converged: report how many generations it took.
            return i, results
    return 1000, results
testVals = range(0, 50, 10)
data = []
# FIX: the original left the runTest call at module level while the
# surrounding `for y in testVals:` loop was commented out, so `y` was
# undefined and the script crashed with NameError on import. The sweep
# loop (visible in the commented-out original) is restored.
for y in testVals:
    convergenceTime, results = runTest(50, .3, y, y)
    data.append(results)
# dataFrames = [pd.DataFrame(data = {'{}'.format(x * .05) : data[x]}) for x in range(len(testVals))]#
# axis1 = dataFrames[0].plot()
# [x.plot(ax = axis1) for x in dataFrames[1:]]
# plt.show()
<file_sep>/ga.py
import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt
import string
class SequenceElement:
    """A single gene: an integer value with a squared-distance error metric."""

    def __init__(self, val):
        self.value = int(val)

    def error(self, targetVal):
        """Squared absolute distance between this gene and targetVal."""
        diff = targetVal - self.value
        return abs(diff) ** 2

    def __repr__(self):
        return str(self.value)
class Fitness:
    """Lazily computed (and cached) error/fitness for one candidate sequence
    measured against a target sequence. Fitness is the reciprocal of the
    summed per-element error; a perfect match yields infinity."""

    def __init__(self, sequence, targetSeq):
        self.targetSequence = targetSeq
        self.sequence = sequence
        self.error = 0    # cached total error (0 = not computed yet)
        self.fitness = 0  # cached fitness (0 = not computed yet)

    def sequenceError(self):
        """Sum of element.error(target) over the sequence, cached after the
        first call."""
        if self.error == 0:
            total = 0
            for index in range(len(self.sequence)):
                total += self.sequence[index].error(self.targetSequence[index])
            self.error = total
        return self.error

    def sequenceFitness(self):
        """Reciprocal of sequenceError, cached; float('inf') when the
        sequence matches the target exactly."""
        if self.fitness == 0:
            total_error = self.sequenceError()
            if total_error == 0:
                return float("inf")
            self.fitness = 1 / float(total_error)
        return self.fitness
class Model():
    """One genetic-algorithm population evolving toward TARGET, an indexable
    of per-gene target values."""

    def __init__(self, popSize, targetVal):
        """popSize: number of candidate sequences; targetVal: target values,
        one per gene (its length fixes the sequence length)."""
        self.TARGET = targetVal
        self.CURRENT_POPULATION = self.initialPopulation(popSize)
        best = self.rankSequences(self.CURRENT_POPULATION, self.TARGET)[0]
        self.BEST_SPECIES = self.CURRENT_POPULATION[best[0]]

    def initialPopulation(self, popSize):
        """Build popSize random candidate sequences."""
        population = []
        for i in range(0, popSize):
            newElement = self.sequenceListFromValues(self.createSequence(len(self.TARGET)))
            population.append(newElement)
        return population

    def createSequence(self, targetLength):
        """Random gene values in the inclusive range [0, 255].

        FIX: np.random.random_integers was deprecated and later removed
        from NumPy; np.random.randint(0, 256, n) draws from the same
        inclusive [0, 255] range (its high bound is exclusive).
        """
        return np.random.randint(0, 256, targetLength)

    def sequenceListFromValues(self, values):
        """Wrap raw numeric values as SequenceElement genes."""
        returnSequenceList = []
        for number in values:
            returnSequenceList.append(SequenceElement(int(str(number))))
        return returnSequenceList

    def rankSequences(self, population, targetSequence):
        """Return (population_index, fitness) pairs sorted best-first."""
        sequenceList = {}
        for index, element in enumerate(population):
            sequenceList[index] = Fitness(element, targetSequence).sequenceFitness()
        return sorted(sequenceList.items(), key=operator.itemgetter(1), reverse=True)

    def selection(self, popRanked, eliteSize):
        """Fitness-proportionate (roulette-wheel) selection with elitism:
        the top eliteSize indices are kept unconditionally, the rest are
        drawn with probability proportional to fitness."""
        selectionResults = []
        df = pd.DataFrame(np.array(popRanked), columns=["Index", "Fitness"])
        df['cumulativeSum'] = df.Fitness.cumsum()
        df['cumulativePercentages'] = 100 * df.cumulativeSum / df.Fitness.sum()
        for i in range(eliteSize):
            selectionResults.append(popRanked[i][0])
        for i in range(len(popRanked) - eliteSize):
            pick = 100 * random.random()
            # FIX: the inner loop previously reused `i`, shadowing the
            # outer loop variable; renamed to j for clarity (same behavior).
            for j in range(len(popRanked)):
                if pick <= df.iat[j, 3]:
                    selectionResults.append(popRanked[j][0])
                    break
        return selectionResults

    def MatingPool(self, population, selectionResults):
        """Materialize the selected indices as a list of sequences."""
        matingPool = []
        for i in range(len(selectionResults)):
            index = selectionResults[i]
            matingPool.append(population[index])
        return matingPool

    def breed(self, parent1, parent2):
        """Single-point crossover: parent1's prefix joined to parent2's
        suffix at a random splice point."""
        splicePoint = int(random.random() * len(parent1))
        child = parent1[0:splicePoint] + parent2[splicePoint:]
        # Re-wrap via repr(int(...)) to produce fresh SequenceElement genes.
        return self.sequenceListFromValues(child)

    def breedPopulation(self, matingPool, eliteSize):
        """Carry the elite over unchanged and fill the rest of the next
        generation by crossing opposite ends of a shuffled pool."""
        children = []
        length = len(matingPool) - eliteSize
        pool = random.sample(matingPool, len(matingPool))
        for i in range(0, eliteSize):
            children.append(matingPool[i])
        for i in range(0, length):
            child = self.breed(pool[i], pool[len(matingPool) - i - 1])
            children.append(child)
        return children

    def mutate(self, sequence, mutationRate):
        """Swap mutation: each position may swap (in place) with a random
        position, with probability mutationRate."""
        for swapped in range(len(sequence)):
            if random.random() < mutationRate:
                swappedWith = int(random.random() * len(sequence))
                sequence[swapped], sequence[swappedWith] = (
                    sequence[swappedWith], sequence[swapped])
        return sequence

    def mutatePopulation(self, population, mutationRate):
        """Apply mutate() to every sequence in the population."""
        mutatedPop = []
        for ind in range(len(population)):
            mutatedInd = self.mutate(population[ind], mutationRate)
            mutatedPop.append(mutatedInd)
        return mutatedPop

    def nextGeneration(self, eliteSize, mutationRate):
        """Advance one generation (rank -> select -> breed -> mutate);
        updates BEST_SPECIES/BEST_ERROR and returns the new population."""
        popRanked = self.rankSequences(self.CURRENT_POPULATION, self.TARGET)
        selectionResults = self.selection(popRanked, eliteSize)
        matingPool = self.MatingPool(self.CURRENT_POPULATION, selectionResults)
        children = self.breedPopulation(matingPool, eliteSize)
        self.CURRENT_POPULATION = self.mutatePopulation(children, mutationRate)
        best = self.rankSequences(self.CURRENT_POPULATION, self.TARGET)[0]
        self.BEST_SPECIES = self.CURRENT_POPULATION[best[0]]
        # 1/inf == 0.0, so a perfect match reports zero error.
        self.BEST_ERROR = 1 / best[1]
        return self.CURRENT_POPULATION
| 7d521f3f61e7cec45b4bbc20b4aab0f630add7b0 | [
"Markdown",
"Python"
] | 5 | Python | marinater/SequenceGA | b7d500dd5b0ac3ef8f19ac942f05e04e5df76a32 | 98cb4af367a332e0efa91d9c87c220eed5e414b7 |
refs/heads/master | <repo_name>libudong/Shopping-Mall<file_sep>/Shopping Mall/public/frontEnd/demo/截取url字符串.js
// Demo: extract the final path segment (the file name) from a URL.
var url = "http://localhost:3000/frontEnd/search.html";
var str = url.slice(url.lastIndexOf("/") + 1);
console.log(str);
| 7c03cd15ec11d6180ffd115d7cd3749366f7a9b6 | [
"JavaScript"
] | 1 | JavaScript | libudong/Shopping-Mall | c609a807ac3d1d6238bbc40ea3a0845b5e70e522 | 273a5a77f615627a17ebe82892741a14c23014b5 |
refs/heads/main | <file_sep># tailscale-dns-sync
Sync tailscale hosts to public dns domain.
问题:Tailscale的API Key 有90天的有效期,过期后需要人工修改。
替代方案:目前Tailscale的Magic DNS支持不覆盖Local DNS,所以可以使用 Magic DNS功能。
## Develop
```
$ python3 -m venv ./venv/
$ source venv/bin/activate
$ pip install -r requirements.txt
```
## Config
Copy `config.py.example` to `config.py`, then modify it.
## Baidu Cloud CCE
1. 触发器: `定时任务`, `rate(5 minutes)`
2. 基础信息:
- 运行时:`Python 3.6`
- 超时时间: `100` 秒
3. 代码:将`config.py`内容写到文件头,然后将main()写入handler:
```
def handler(event, context):
main()
```
<file_sep>#!/usr/bin/env python3
#coding=utf8
import sys
from collections import defaultdict
from typing import Dict, List, Optional, Tuple

import requests

from config import *
class Device(object):
    """A Tailscale device: a host name plus its tailnet IP address."""

    def __init__(self, name: str, ip: str):
        self.name = name
        self.ip = ip

    def __str__(self):
        return "{}[{}]".format(self.name, self.ip)
def get_tailscale_devices(tailnet: str, api_key: str) -> Tuple[Optional[str], Optional[List[Device]]]:
    """Get tailscale devices.

    API docs: https://github.com/tailscale/tailscale/blob/main/api.md

    Args:
        tailnet: The name of your Tailscale network. You can find it in
            the top left corner of the Admin Panel beside the Tailscale
            logo.
        api_key: An API key generated on the admin panel's Keys page.
            These keys expire after 90 days and must be regenerated.

    Returns:
        (err, devices): err is an error message, or None on success;
        devices is a list of Device objects, or None on failure.
    """
    try:
        # BUG FIX: the request itself must sit inside the try block —
        # connection errors and timeouts are RequestException subclasses
        # and previously escaped the (err, None) error contract entirely.
        r = requests.get(
            f'https://api.tailscale.com/api/v2/tailnet/{tailnet}/devices',
            auth=(api_key, ''),
            timeout=30,
        )
        r.raise_for_status()
        ret = r.json()
    except requests.RequestException as e:
        return f'Call Tailscale API failed: {e}', None
    except ValueError:
        # requests raises a ValueError subclass when the body is not JSON.
        return 'Tailscale API response json decode failed.', None

    devices: List[Device] = []
    # A device's reported name looks like "host.user.gmail.com" for the
    # tailnet "user@gmail.com"; strip that suffix to get the bare host.
    domain = tailnet.replace('@', '.')
    for d in ret.get('devices', []):
        name = d['name'].replace(f'.{domain}', '')
        ip = d['addresses'][0]
        devices.append(Device(name, ip))
    return None, devices
class Dnspod(object):
    """Minimal DNSPod API client used to mirror Tailscale devices as A records."""

    def __init__(self, api_id: str, api_token: str):
        # DNSPod expects "id,token" as a single login_token field.
        self.login_token = f'{api_id},{api_token}'

    def _request(self, path: str, params: Dict) -> Tuple[str, Dict]:
        """POST to https://dnsapi.cn/<path> with the shared auth/format fields.

        See: https://docs.dnspod.cn/api/5f561f9ee75cf42d25bf6720/

        Returns (err, response_json); err is None on success.
        """
        data = dict(
            login_token=self.login_token,
            format='json',
            lang='en',
            error_on_empty='yes'
        )
        data.update(params)
        try:
            # BUG FIX: the POST itself now sits inside the try block —
            # connection errors/timeouts are RequestException subclasses
            # and previously escaped the (err, None) error contract.
            r = requests.post(
                f'https://dnsapi.cn/{path}',
                data=data,
                timeout=30,
            )
            r.raise_for_status()
            ret = r.json()
        except requests.RequestException as e:
            return f'Call Dnspod API failed: {e}', None
        except ValueError:
            return 'Dnspod API response json decode failed.', None
        err_code = ret.get('status', {}).get('code')
        if err_code != '1':
            err_msg = ret.get('status', {}).get('message')
            return f'Dnspod API response status != 1: {err_code}/{err_msg}', None
        return None, ret

    def get_domain_records(self, domain: str, sub_domain: str = None) -> Tuple[str, Dict]:
        """List records for domain (optionally filtered to sub_domain),
        grouped by record name. Returns (err, {name: [record, ...]})."""
        params = dict(domain=domain)
        if sub_domain:
            params['sub_domain'] = sub_domain
        err, r = self._request('Record.List', params=params)
        if err:
            return err, None
        records = defaultdict(list)
        for rec in r.get('records', []):
            records[rec['name']].append(rec)
        return None, records

    def add_record_a(self, ip: str, domain: str, sub_domain: str) -> str:
        """Create an A record sub_domain.domain -> ip on the default line.
        Returns an error message, or None on success."""
        params = dict(
            domain=domain,
            sub_domain=sub_domain,
            record_type='A',
            record_line='默认',
            value=ip,
        )
        err, _ = self._request('Record.Create', params=params)
        if err:
            return err
        return None

    def modify_record_a(self, ip: str, domain: str, sub_domain: str, record_id: str) -> str:
        """Point the existing default-line A record record_id at ip.
        Returns an error message, or None on success."""
        params = dict(
            domain=domain,
            sub_domain=sub_domain,
            record_id=record_id,
            record_type='A',
            record_line='默认',
            value=ip,
        )
        err, _ = self._request('Record.Modify', params=params)
        if err:
            return err
        return None

    def sync_devices_to_domain(self, devices: List[Device], domain: str, sub_domain: str = None) -> str:
        """Ensure every device has a default-line A record
        <device.name>[.<sub_domain>].<domain> pointing at its tailnet IP;
        creates missing records and rewrites stale ones. Returns an error
        message, or None on success."""
        err, records = self.get_domain_records(domain, sub_domain)
        if err:
            return err
        for device in devices:
            # Does any record set already exist under this name?
            record_name = f'{device.name}.{sub_domain}' if sub_domain else device.name
            if record_name not in records:
                print(f'Record {record_name}.{domain} not exist, set to {device.ip}')
                err = self.add_record_a(device.ip, domain, record_name)
                if err:
                    return err
                continue
            # Records exist; look for the default-line entry specifically.
            default_record = [r for r in records[record_name] if r['line'] == '默认']
            if not default_record:
                # BUG FIX: this branch referenced an undefined name `ip`
                # (NameError at runtime); it now uses device.ip like the
                # other branches.
                print(f'Record {record_name}.{domain} default line not exist, set to {device.ip}')
                err = self.add_record_a(device.ip, domain, record_name)
                if err:
                    return err
                continue
            modify_record = default_record[0]
            if modify_record['type'] == 'A' and modify_record['value'] == device.ip:
                print(f'Record {record_name}.{domain} is already point to {device.ip}, do nothing')
            else:
                old_value = modify_record['value']
                print(f'Record {record_name}.{domain} from {old_value} change to {device.ip}')
                err = self.modify_record_a(device.ip, domain, record_name, modify_record['id'])
                if err:
                    return err
        return None
def main():
    """Entry point: fetch the Tailscale device list and mirror it into DNSPod."""
    # Pull the current device list from the Tailscale API.
    err, devices = get_tailscale_devices(TAILSCALE_TAILNET, TAILSCALE_API_KEY)
    if err:
        print(f'Get tailscale devices failed: {err}', file=sys.stderr)
        sys.exit(1)
    # Publish one A record per device under the configured (sub)domain.
    dnspod = Dnspod(DNSPOD_API_ID, DNSPOD_API_TOKEN)
    err = dnspod.sync_devices_to_domain(devices, DNSPOD_DOMAIN, DNSPOD_SUB_DOMAIN)
    if err:
        print(f'Dnspod sync records failed: {err}', file=sys.stderr)
        sys.exit(1)
# Run a single sync pass when executed as a script.
if __name__ == "__main__":
    main()
| d01064755f923e659eafe8c4e3dc780f1807fe27 | [
"Markdown",
"Python"
] | 2 | Markdown | ninehills/tailscale-dns-sync | 5a117c808e5b8df67a28ed14140ca087d998cbbe | 76e929aaf85e782769db74aa345bafb5bbb876d5 |
refs/heads/master | <repo_name>VivekKumar1913/Project4--FlightSuretyDApp<file_sep>/test/flightSurety.js
var Test = require('../config/testConfig.js');
var BigNumber = require('bignumber.js');
contract('Flight Surety Tests', async (accounts) => {
  var config;
  // Deploy fresh data/app contracts and authorize the app contract to call
  // into the data contract before any test runs.
  before('setup contract', async () => {
    config = await Test.Config(accounts);
    await config.flightSuretyData.authorizeCaller(config.flightSuretyApp.address);
  });
  // Named test actors; accounts[0] is also the first registered airline.
  const owner = accounts[0];
  const secondAirline = accounts[1];
  const thirdAirline = accounts[2];
  const fourthAirline = accounts[3];
  const fifthAirline = accounts[4];
  const passenger = accounts[5];
  /****************************************************************************************/
  /* Operations and Settings */
  /****************************************************************************************/
it(`(multiparty) has correct initial isOperational() value`, async function () {
// Get operating status
let status = await config.flightSuretyData.isOperational.call();
assert.equal(status, true, "Incorrect initial operating status value");
assert.equal(await config.flightSuretyApp.isOperational(), true, "Incorrect initial operating status value for flightSuretyApp");
});
it("check to see if app can call the isOperational() function of the data contract", async() => {
let status = await config.flightSuretyApp.isOperational.call();
assert.equal(status, true, "Incorrect initial operating status value");
});
it(`(multiparty) can block access to setOperatingStatus() for non-Contract Owner account`, async function () {
// Ensure that access is denied for non-Contract Owner account
let accessDenied = false;
try
{
await config.flightSuretyData.setOperatingStatus(false, { from: config.testAddresses[2] });
}
catch(e) {
accessDenied = true;
}
assert.equal(accessDenied, true, "Access not restricted to Contract Owner");
});
it(`(multiparty) can allow access to setOperatingStatus() for Contract Owner account`, async function () {
// Ensure that access is allowed for Contract Owner account
let accessDenied = false;
try
{
await config.flightSuretyData.setOperatingStatus(false);
}
catch(e) {
accessDenied = true;
}
assert.equal(accessDenied, false, "Access not restricted to Contract Owner");
});
it(`(multiparty) can block access to functions using requireIsOperational when operating status is false`, async function () {
await config.flightSuretyData.setOperatingStatus(false);
let reverted = false;
try
{
await config.flightSurety.setTestingMode(true);
}
catch(e) {
reverted = true;
}
assert.equal(reverted, true, "Access not blocked for requireIsOperational");
// Set it back for other tests to work
await config.flightSuretyData.setOperatingStatus(true);
});
it('(airline) cannot register an Airline using registerAirline() if it is not funded', async () => {
// ARRANGE
let newAirline = accounts[2];
// ACT
try {
await config.flightSuretyApp.registerAirline(newAirline, {from: config.firstAirline});
}
catch(e) {
}
let result = await config.flightSuretyData.isAirline.call(newAirline);
// ASSERT
assert.equal(result, false, "Airline should not be able to register another airline if it hasn't provided funding");
});
/****************************************************************************************/
/* Airlines */
/****************************************************************************************/
it("Contract owner is created as the first airline", async() => {
//await FlightSuretyData.deployed();
let isAirline = await config.flightSuretyData.isAirline.call(owner);
let numAirlines = await config.flightSuretyApp.AirlineCount.call();
//console.log(numAirlines)
assert.equal(isAirline, true, "No airline registerd by contract owner");
assert.equal(numAirlines, 1, "There should be only 1 airline after the contract deployment");
});
it("deploys with initial contract balance 0", async() => {
let contractBalance = await config.flightSuretyApp.getContractBalance.call();
assert.equal(contractBalance, 0, "Contract balance after deployment should be equal to 0");
});
it("checks if the first airline can send funds to the contract and change its 'isFunded' state", async() => {
let airlineFee = await web3.utils.toWei("10", "ether");
let airlineBalanceBefore = await web3.eth.getBalance(owner);
await config.flightSuretyApp.fundAirline({from: owner, value: airlineFee});
let airlineBalanceAfter = await web3.eth.getBalance(owner);
assert.isAbove(Number(airlineBalanceBefore) - Number(airlineBalanceAfter), Number(airlineFee));
let airline = await config.flightSuretyApp.getAirlineDetails.call(owner);
let isFunded = airline[3];
assert.equal(isFunded, true);
});
it('4 Airlines can apply for registration without multiparty consensus', async function () {
let isAirline = await config.flightSuretyData.isAirline.call(owner);
assert.equal(isAirline, true, "No airline registerd by contract owner");
await config.flightSuretyApp.registerAirline(secondAirline, "Airline 2", {from:owner});
//assert.equal(isAirline2, true, "No airline registerd by contract owner");
airlinec = await config.flightSuretyData.AirlineCount()
assert.equal(2, airlinec)
await config.flightSuretyData.registerAirline(thirdAirline, "Airline 3", {from:owner});
await config.flightSuretyApp.registerAirline(fourthAirline, "Airline 4", { from: owner });
let numAirlines = await config.flightSuretyApp.AirlineCount.call();
assert.equal(numAirlines, 4);
let airline3details = await config.flightSuretyApp.getAirlineDetails(thirdAirline);
assert.equal(airline3details[0], thirdAirline);
assert.equal(airline3details[1], "Airline 3");
assert.equal(airline3details[2], true);
assert.equal(airline3details[3], false);
});
it('Unfunded airlines cannot register another airline', async function () {
let isAirline = await config.flightSuretyData.isAirline.call(owner);
assert.equal(isAirline, true, "No airline registerd by contract owner");
let error = false;
try {
await config.flightSuretyApp.registerAirline(fifthAirline, "Airline 5", {from:secondAirline});
}
catch(err) {
error = true;
}
assert.equal(error, true, "Non-Funded airline should not be able to register an airline");
});
it('5th Airline should not be registered without multiparty consensus', async function () {
await config.flightSuretyApp.registerAirline(fifthAirline, "Airline 5", { from: owner });
let isFifthAirline = await config.flightSuretyData.isAirline.call(fifthAirline);
assert.equal(isFifthAirline, false, "Airline cannot be registered without consensus");
let numAirlines = await config.flightSuretyApp.AirlineCount.call();
assert.equal(Number(numAirlines), 5);
});
it("Checking if multiparty consensus works", async() => {
let airlineFee = await web3.utils.toWei("10", "ether");
await config.flightSuretyApp.fundAirline({from: secondAirline, value: airlineFee});
await config.flightSuretyApp.fundAirline({from: thirdAirline, value: airlineFee});
await config.flightSuretyApp.fundAirline({from: fourthAirline, value: airlineFee});
let airline = await config.flightSuretyApp.getAirlineDetails.call(secondAirline);
let isFunded = airline[3];
assert.equal(isFunded, true);
let numAirlines = await config.flightSuretyApp.AirlineCount.call();
// there are 5 airlines in the list
assert.equal(Number(numAirlines), 5);
let airline5details = await config.flightSuretyApp.getAirlineDetails.call(fifthAirline);
await config.flightSuretyApp.castVote(fifthAirline, {from:owner});
airline5details = await config.flightSuretyApp.getAirlineDetails.call(fifthAirline);
assert.equal(fifthAirline[2], false);
let numVotes = await config.flightSuretyApp.numVotesCasted.call(fifthAirline);
assert.equal(numVotes, 1)
await config.flightSuretyApp.castVote(fifthAirline, {from:secondAirline});
airline5details = await config.flightSuretyApp.getAirlineDetails.call(fifthAirline);
assert.equal(airline5details[2], false);
numVotes = await config.flightSuretyApp.numVotesCasted.call(fifthAirline);
assert.equal(numVotes, 2)
await config.flightSuretyApp.castVote(fifthAirline, {from:thirdAirline});
airline5details = await config.flightSuretyApp.getAirlineDetails(fifthAirline);
// after 3 out of 4 votes the 5th airline gets registered
assert.equal(airline5details[2], true);
numVotes = await config.flightSuretyApp.numVotesCasted.call(fifthAirline);
assert.equal(numVotes, 3)
});
it("checks that a non-airline user cannot register another airline", async() => {
let user2 = accounts[7];
let numAirlines = await config.flightSuretyApp.AirlineCount.call();
assert.equal(numAirlines, 5, "INVALID NUMBER OF AIRLINES");
let error;
try {
await config.flightSuretyApp.registerAirline(user2, "Fraudulant airlines", {from:user2});
} catch(err) {
error = true;
}
assert.equal(error, true, "Non-airline user should not be able to register an airline");
});
/****************************************************************************************/
/* Flights */
/****************************************************************************************/
it("Check if a funded airline can register a flight", async() => {
let airline1 = owner;
let airline1Details = await config.flightSuretyApp.getAirlineDetails.call(airline1);
// the first airline should be funded
assert.equal(airline1Details[3], true);
let dateString = "2020-05-28T14:45:00Z"
let departureDate = new Date(dateString).getTime();
await config.flightSuretyApp.registerFlight("ABC123", "NYC", "DEL", departureDate, {from:airline1});
let numFlights = await config.flightSuretyApp.getFlightCount.call();
assert.equal(numFlights, 1);
let flightHash = await config.flightSuretyApp.getFlightKey.call(airline1, "ABC123", departureDate);
let flightInfo = await config.flightSuretyApp.getFlightDetails.call(flightHash);
assert.equal(flightInfo[0], "ABC123");
assert.equal(flightInfo[3], true);
assert.equal(flightInfo[4], false);
assert.equal(flightInfo[6], departureDate);
assert.equal(flightInfo[7], airline1);
});
it("Check if a non-funded airline can register a flight", async() => {
let airline2 = accounts[10];
let airline2Details = await config.flightSuretyApp.getAirlineDetails.call(airline2);
// the airline should not be funded
assert.equal(airline2Details[3], false, "This is a funded airline");
let dateString = "2020-06-28T14:45:00Z"
let departureDate = new Date(dateString).getTime();
let accessDenied = false;
try
{
await config.flightSuretyApp.registerFlight("ABCD123", "NYC", "DEL", departureDate, {from:airline2});
}
catch(e) {
accessDenied = true;
}
assert.equal(accessDenied, true, "Access not blocked for unfunded airline");
});
it("check if a registered a flight can be insured", async() => {
let airline1 = owner;
let airline1Details = await config.flightSuretyApp.getAirlineDetails.call(airline1);
// the first airline should be funded
assert.equal(airline1Details[3], true);
// get the registered flight
let dateString = "2020-05-28T14:45:00Z"
let departureDate = new Date(dateString).getTime();
let flightHash = await config.flightSuretyApp.getFlightKey.call(airline1, "ABC123", departureDate);
let flightInfo = await config.flightSuretyApp.getFlightDetails(flightHash);
assert.equal(flightInfo[3], true);
assert.equal(flightInfo[4], false);
assert.equal(flightInfo[6], departureDate);
assert.equal(flightInfo[7], airline1);
await config.flightSuretyApp.insureFlight("ABC123", departureDate);
flightInfo = await config.flightSuretyApp.getFlightDetails(flightHash);
assert.equal(flightInfo[4], true);
});
/****************************************************************************************/
/* Passenger Insurance */
/****************************************************************************************/
it("checks that a passenger is able to buy insurance for a flight", async() => {
let passenger1 = passenger;
let airline1 = owner;
let dateString = "2020-05-28T14:45:00Z"
let departureDate = new Date(dateString).getTime();
let flightHash = await config.flightSuretyApp.getFlightKey.call(airline1, "ABC123", departureDate);
let flightInfo = await config.flightSuretyApp.getFlightDetails(flightHash);
let insuranceFee = await web3.utils.toWei("0.5", "ether");
await config.flightSuretyApp.buyInsurance(airline1, departureDate, "ABC123", {from: passenger1, value: insuranceFee});
let isInsured = await config.flightSuretyApp.isInsured(airline1, passenger1, "ABC123", departureDate);
assert.equal(isInsured, true);
let insuredBalance = await config.flightSuretyApp.getInsuranceBalance.call(passenger1, flightHash);
assert.equal(insuranceFee, insuredBalance);
});
it("checks that if the oracles' decision is 'LATE_AIRLINE' users' balance is multiplied by 1.5", async() => {
let passenger1 = passenger;
let airline1 = owner;
let dateString = "2020-05-28T14:45:00Z"
let departureDate = new Date(dateString).getTime();
let flightHash = await config.flightSuretyApp.getFlightKey.call(airline1, "ABC123", departureDate);
// the user is insured
let isInsured = await config.flightSuretyApp.isInsured(airline1, passenger1, "ABC123", departureDate);
assert.equal(isInsured, true);
// check the previous balance
let prevBalance = await config.flightSuretyApp.getInsuranceBalance.call(passenger1, flightHash);
// send and process the oracles' decision
await config.flightSuretyApp.processFlightStatus(airline1, "ABC123", departureDate, 20);
let flightInfo = await config.flightSuretyApp.getFlightDetails(flightHash);
assert.equal(flightInfo[5], 20);
let afterBalance = await config.flightSuretyApp.getInsuranceBalance.call(passenger1, flightHash);
assert.equal(afterBalance, prevBalance * 1.5);
});
it("check whether a user can withdraw their balance", async() => {
let passenger1 = passenger;
let airline1 = owner;
let dateString = "2020-05-28T14:45:00Z"
let departureDate = new Date(dateString).getTime();
let flightHash = await config.flightSuretyApp.getFlightKey.call(airline1, "ABC123", departureDate);
let passengerBalanceBefore = await web3.eth.getBalance(passenger1);
let amountToWithdraw = await config.flightSuretyApp.getInsuranceBalance.call(passenger1, flightHash);
await config.flightSuretyApp.payOut(flightHash, amountToWithdraw, {from:passenger1});
let passengerBalanceAfter = await web3.eth.getBalance(passenger1);
console.log(passengerBalanceBefore, " Before");
console.log(passengerBalanceAfter, " After");
assert.isAbove(Number(passengerBalanceAfter - passengerBalanceBefore), Number(web3.utils.toWei("0.5", "ether")));
});
});
<file_sep>/src/dapp/flightData.js
/**
 * Static lookup table mapping a human-readable airport/city description to
 * its three-letter IATA code, used to populate the DApp's flight pickers.
 * Pure data — no logic. Entries are a hand-curated subset, not a complete
 * IATA list.
 */
export const flightCodes = {
  "Alesund, Norway" : "AES",
  "Alexander Bay - Kortdoorn, South Africa" : "ALJ",
  "Alexandria - Borg el Arab Airport, Egypt" : "HBH",
  "Alexandria - El Nhouza Airport, Egypt" : "ALY",
  "Alexandria - Esler Field, USA (LA)" : "ESF",
  "Alfujairah (Fujairah), United Arab Emirates" : "FJR",
  "Alghero Sassari, Italy" : "AHO",
  "Algiers, Houari Boumediene Airport, Algeria" : "ALG",
  "Al Hoceima, Morocco" : "AHU",
  "Alicante, Spain" : "ALC",
  "Alice Springs, Australia" : "ASP",
  "Alldays, South Africa" : "ADY",
  "Allentown (PA), USA" : "ABE",
  "Almaty (Alma Ata) - Almaty International Airport, Kazakhstan" : "ALA",
  "Almeria, Spain" : "LEI",
  "Alta, Norway" : "ALF",
  "Altay, PR China" : "AAT",
  "Altenrhein, Switzerland" : "ACH",
  "Altoona (PA), USA" : "AOO",
  "Altus, USA" : "AXS",
  "Amami, Japan" : "ASJ",
  "Amarillo (TX), USA" : "AMA",
  "Amazon Bay, Papua New Guinea" : "AZB",
  "Amman - Queen Alia International Airport, Jordan" : "AMM",
  "Amman - Amman-Marka International Airport, Jordan" : "ADJ",
  "Amritsar, India" : "ATQ",
  "Amsterdam - Amsterdam Airport Schiphol, Netherlands" : "AMS",
  "Anand, India" : "QNB",
  "Anchorage (AK) - Ted Stevens Anchorage International, USA" : "ANC",
  "Ancona - Ancona Falconara Airport, Italy" : "AOI",
  "Andorra La Vella - Heliport, Andorra" : "ALV",
  "Anguilla, Anguilla" : "AXA",
  "Anjouan - Anjouan Airport, Comoros (Comores)" : "AJN",
  "Ankara, Turkey" : "ANK",
  "Ankara - Esenboğa International Airport, Turkey" : "ESB",
  "Annaba, Algeria" : "AAE",
  "Ann Arbor (MI), USA" : "ARB",
  "Annecy, France" : "NCY",
  "Anniston (AL), USA" : "ANB",
  "Antalya, Turkey" : "AYT",
  "Antananarivo (Tanannarive) - Ivato International Airport, Madagascar" : "TNR",
  "Antigua, V.C. Bird International, Antigua and Barbuda" : "ANU",
  "Antwerp, Belgium" : "ANR",
  "Aomori, Japan" : "AOJ",
  "Apia - Faleolo International Airport, Samoa" : "APW",
  "Appelton/Neenah/Menasha (WI), USA" : "ATW",
  "Aqaba, Jordan" : "AQJ",
  "Aracaju, Brazil" : "AJU",
  "Arkhangelsk, Russia" : "ARH",
  "Arusha, Tanzania" : "ARK",
  "Araxos, Greece" : "GPA",
  "Arlit, Niger" : "RLT",
  "Arrecife/Lanzarote, Spain" : "ACE",
  "Aruba - Reina Beatrix International, Oranjestad, Aruba" : "AUA",
  "Asheville (NC), USA" : "AVL",
  "Ashgabat - Saparmurat Turkmenbashy Int. Airport, Turkmenistan" : "ASB",
  "Asmara - Asmara International, Eritrea" : "ASM",
  "Aspen, (CO) - Aspen-Pitkin County Airport, USA" : "ASE",
  "Assiut, Egypt" : "ATZ",
  "Astana - Astana International Airport, Kazakhstan" : "TSE",
  "Asuncion - Asunción International Airport, Paraguay" : "ASU",
  "Aswan - Daraw Airport, Egypt" : "ASW",
  "Athens - Elefthérios Venizélos International Airport, Greece" : "ATH",
  "Athens, Hellinikon Airport, Greece" : "HEW",
  "Athens (GA), USA" : "AHN",
  "Athens (OH), USA" : "ATO",
  "Atlanta (GA) - Hartsfield Atlanta International Airport, USA" : "ATL",
  "Atlantic City (NJ) - Atlantic City International, USA" : "ACY",
  "Attawapiskat, NT, Canada" : "YAT",
  "Auckland - Auckland International Airport, New Zealand" : "AKL",
  "Augsburg - Augsbur gAirport, Germany" : "AGB",
  "Augusta (GA), USA" : "AGS",
  "Augusta (ME) - Augusta State Airport, USA" : "AUG",
  "Aurillac, France" : "AUR",
  "Austin (TX) - Austin-Bergstrom Airport, USA" : "AUS",
  "Ayawasi, Indonesia" : "AYW",
  "Ayers Rock - Connellan, Australia" : "AYQ",
  "Ayr, Australia" : "AYR",
  "Badajoz, Spain" : "BJZ",
  "Bagdad - Baghdad International Airport, Iraq" : "BGW",
  "Bagdogra, India" : "IXB",
  "Bahamas - Lynden Pindling International Airport, The Bahamas" : "NAS",
  "Bahawalpur, Pakistan" : "BHV",
  "Bahrain - Bahrain International Airport, Bahrain" : "BAH",
  "Bakersfield (CA), USA" : "BFL",
  "Baku - Heydar Aliyev International Airport, Azerbaijan" : "BAK",
  "Ballina, Australia" : "BNK",
  "Baltimore (MD) - Washington International Airport, USA" : "BWI",
  "Bamaga, Australia" : "ABM",
  "Bamako - Bamako-Sénou International Airport, Mali" : "BKO",
  "Bambari, Central African Republic" : "BBY",
  "Bandar Seri Begawan - Brunei International Airport, Brunei" : "BWN",
  "Bandung - Husein Sastranegara International Airport, Indonesia" : "BDO",
  "Bangalore, India" : "BLR",
  "Bangassou, Central African Republic" : "BGU",
  "Bangkok, Don Muang, Thailand" : "DMK",
  "Bangkok, Suvarnabhumi International, Thailand" : "BKK",
  "Bangor (ME), USA" : "BGR",
  "Bangui - M'Poko International Airport, Central African Republic" : "BGF",
  "Banjul - Banjul International Airport (Yundum International), Gambia" : "BJL",
  "Bannu, Pakistan" : "BNP",
  "Barcelona, Spain" : "BCN",
  "Barcelona, Venezuela" : "BLA",
  "Bardufoss, Norway" : "BDU",
  "Bari, Italy" : "BRI",
  "Barisal, Bangladesh" : "BZL",
  "Baroda, India" : "BDQ",
  "Barra (the famous tidal beach landing), United Kingdom" : "BRR",
  "Barranquilla, Colombia" : "BAQ",
  "Basel, Switzerland" : "BSL",
  "Basel/Mulhouse, Switzerland/France" : "EAP",
  "Basra, Basrah, Iraq" : "BSR",
  "Basse-Terre - Pointe-à-Pitre International Airport, Guadeloupe" : "PTP",
  "Basseterre - Robert L. Bradshaw International Airport, Saint Kitts and Nevis" : "SKB",
  "Bastia, France" : "BIA",
  "Baton Rouge (LA) - Baton Rouge Metropolitan Airport, USA" : "BTR",
  "Bayreuth - Bindlacher-Berg, Germany" : "BYU",
  "Beaumont/Pt. Arthur (TX), USA" : "BPT",
  "Beckley (WV), USA" : "BKW",
  "Beef Island - Terrance B. Lettsome, Virgin Islands (British)" : "EIS",
  "Beijing, China" : "PEK",
  "Beijing - Nanyuan Airport, China" : "NAY",
  "Beira, Mozambique" : "BEW",
  "Beirut - Beirut Rafic Hariri International Airport, Lebanon" : "BEY",
  "Belem - Val de Cans International Airport, Brazil" : "BEL",
  "Belfast - George Best Belfast City Airport, United Kingdom" : "BHD",
  "Belfast - Belfast International Airport, United Kingdom" : "BFS",
  "Belgaum, India" : "IXG",
  "Belgrad (Beograd) - Belgrade Nikola Tesla International, Serbia" : "BEG",
  "Belize City - Philip S.W.Goldson International, Belize" : "BZE",
  "Bellingham (WA), USA" : "BLI",
  "Belo Horizonte - Tancredo Neves International Airport, Brazil" : "CNF",
  "Bemidji (MN), USA" : "BJI",
  "Benbecula, United Kingdom" : "BEB",
  "Benghazi (Bengasi), Libya" : "BEN",
  "Benguela, Angola" : "BUG",
  "Benton Harbour (MI), USA" : "BEH",
  "Berberati, Central African Republic" : "BBT",
  "Bergamo/Milan - Orio Al Serio, Italy" : "BGY",
  "Bergen, Norway" : "BGO",
  "Bergerac - Roumanieres, France" : "EGC",
  "Berlin, Germany" : "BER",
  "Berlin, Schoenefeld, Germany" : "SXF",
  "Berlin, Tegel, Germany" : "TXL",
  "Berlin, Tempelhof (ceased operating in 2008), Germany" : "THF",
  "Bermuda - L.F. Wade International Airport, Bermuda" : "BDA",
  "Berne, Bern-Belp, Switzerland" : "BRN",
  "Berne, Railway Service, Switzerland" : "ZDJ",
  "Bethel (AK), USA" : "BET",
  "Bhopal, India" : "BHO",
  "Bhubaneswar, India" : "BBI",
  "Biarritz, France" : "BIQ",
  "Bilbao, Spain" : "BIO",
  "Billings (MT), USA" : "BIL",
  "Billund, Denmark" : "BLL",
  "Bintulu, Malaysia" : "BTU",
  "Biraro, Central African Republic" : "IRO",
  "Birmingham - Birmingham International Airport, United Kingdom" : "BHX",
  "Birmingham (AL), USA" : "BHM",
  "Bishkek - Manas International Airport, Kyrgyzstan" : "FRU",
  "Bismarck (ND) - Bismarck Municipal Airport, USA" : "BIS",
  "Bissau - Osvaldo Vieiro International Airport, Guinea-Bissau" : "BXO",
  "Blackpool, United Kingdom" : "BLK",
  "Blackwater, Australia" : "BLT",
  "Blantyre (Mandala) - Chileka International Airport, Malawi" : "BLZ",
  "Blenheim, New Zealand" : "BHE",
  "Bloemfontein - Bloemfontein Airport, South Africa" : "BFN",
  "Bloomington (IL), USA" : "BMI",
  "Bloomington (IN), USA" : "BMG",
  "Bluefield (WV), USA" : "BLF",
  "Boa Vista, Brazil" : "BVB",
  "Bobo/Dioulasso, Burkina Faso" : "BOY",
  "Bodo, Norway" : "BOO",
  "Bodrum - Milas Airport, Turkey" : "BJV",
  "Bogota - El Nuevo Dorado International Airport, Colombia" : "BOG",
  "Boise (ID) - Boise Air Terminal, USA" : "BOI",
  "Bologna, Italy" : "BLQ",
  "Bombay (Mumbai) - Chhatrapati Shivaji International, India" : "BOM",
  "Bonaire, Netherlands Antilles" : "BON",
  "Bonaventure, PQ, Canada" : "YVB",
  "Bora Bora, French Polynesia" : "BOB",
  "Bordeaux - Bordeaux Airport, France" : "BOD",
  "Borrego Springs (CA), USA" : "BXS",
  "Boston (MA) - General Edward Lawrence Logan, USA" : "BOS",
  "Bouake, Cote d'Ivoire" : "BYK",
  "Bourgas/Burgas, Bulgaria" : "BOJ",
  "Bournemouth, United Kingdom" : "BOH",
  "Bowen, Australia" : "ZBO",
  "Bozeman (MT), USA" : "BZN",
  "Bradford/Warren (PA) /Olean (NY), USA" : "BFD",
  "Brainerd (MN), USA" : "BRD",
  "Brampton Island, Australia" : "BMP",
  "Brasilia - President <NAME> International, Brazil" : "BSB",
  "Bratislava - M. R. Štefánik Airport, Slovakia" : "BTS",
  "Brazzaville - Maya-Maya Airport, Congo (ROC)" : "BZV",
  "Bremen - Bremen Airport (Flughafen Bremen), Germany" : "BRE",
  "Brescia, Montichiari, Italy" : "VBS",
  "Brest, France" : "BES",
  "Bria, Central African Republic" : "BIV",
  "Bridgeport (CT), USA" : "BDR",
  "Bridgetown - Grantley Adams International, Barbados" : "BGI",
  "Brindisi, Italy" : "BDS",
  "Brisbane, Australia" : "BNE",
  "Bristol, United Kingdom" : "BRS",
  "Broennoeysund, Norway" : "BNN",
  "Broken Hill, Australia" : "BHQ",
  "Brookings (SD), USA" : "BKX",
  "Broome, Australia" : "BME",
  "Brunswick (GA), USA" : "BQK",
  "Brussels - Brussels Airport, Belgium" : "BRU",
  "Bucaramanga, Colombia" : "BGA",
  "Bucharest, Romania" : "BUH",
  "Bucharest - Henri Coandă International Airport, Romania" : "OTP",
  "Budapest - Budapest Ferihegy International Airport, Hungary" : "BUD",
  "Buenos Aires, Argentina" : "BUE",
  "Buenos Aires, Ezeiza International Airport, Argentina" : "EZE",
  "Buenos Aires, Jorge Newbery, Argentina" : "AEP",
  "Buffalo Range, Zimbabwe" : "BFO",
  "Buffalo/Niagara Falls (NY), USA" : "BUF",
  "Bujumbura - Bujumbura International Airport, Burundi" : "BJM",
  "Bulawayo, Zimbabwe" : "BUQ",
  "Bullhead City (NV), USA" : "BHC",
  "Bundaberg, Australia" : "BDB",
  "Burbank (CA), USA" : "BUR",
  "Burlington IA, USA" : "BRL",
  "Burlington (VT), USA" : "BTV",
  "Burnie (Wynyard), Australia" : "BWT",
  "Butte (MT), USA" : "BTM",
  "Cabinda, Angola" : "CAB",
  "Cagliari, Italy" : "CAG",
  "Cairns, Australia" : "CNS",
  "Cairo - Cairo International Airport, Egypt" : "CAI",
  "Calama - El Loa, Chile" : "CJC",
  "Calcutta (Kolkata) - Netaji Subhas Chandra, India" : "CCU",
  "Calgary - Calgary International Airport, Canada" : "YYC",
  "Cali, Colombia" : "CLO",
  "Calicut, India" : "CCJ",
  "Calvi, France" : "CLY",
  "Cambridge Bay, Canada" : "YCB",
  "Cambrigde, United Kingdom" : "CBG",
  "Campbeltown, United Kingdom" : "CAL",
  "Campo Grande, Brazil" : "CGR",
  "Canberra - Canberra Airport, Australia" : "CBR",
  "Cancun, Mexico" : "CUN",
  "Cannes – Mandelieu Airport, France" : "CEQ",
  "Canouan (island) - Canouan Airport, Saint Vincent & the Grenadines" : "CIW",
  "Cape Town - Cape Town International Airport, South Africa" : "CPT",
  "Caracas - Simón Bolívar International Airport, Venezuela" : "CCS",
  "Cardiff - Cardiff Airport, United Kingdom" : "CWL",
  "Carlsbad (CA), USA" : "CLD",
  "Carnarvon, Australia" : "CVQ",
  "Carnot, Central African Republic" : "CRF",
  "Carson City (NV), USA" : "CSN",
  "Casablanca, Morocco" : "CAS",
  "Casablanca, Mohamed V, Morocco" : "CMN",
  "Casa de Campo - La Romana International Airport, Dominican Republic" : "LRM",
  "Casino, Australia" : "CSI",
  "Casper (WY), USA" : "CPR",
  "Castaway, Fiji" : "CST",
  "Cartagena - Rafael Núñez International Airport, Colombia" : "CTG",
  "Castries - George F. L. Charles Airport, Saint Lucia" : "SLU",
  "Catania, Italy" : "CTA",
  "Cayenne - Cayenne-Rochambeau Airport, French Guiana" : "CAY",
  "Cottbus - Cottbus-Drewitz Airport, Germany" : "CBU",
  "Cebu City - Mactan-Cebu International, Philippines" : "CEB",
  "Cedar City (UT), USA" : "CDC",
  "Cedar Rapids IA, USA" : "CID",
  "Ceduna, Australia" : "CED",
  "Cessnock, Australia" : "CES",
  "Chabarovsk (Khabarovsk), Russia" : "KHV",
  "Chambery, France" : "CMF",
  "Champaign (IL), USA" : "CMI",
  "Chandigarh - Chandigarh International Airport, India" : "IXC",
  "Changchun, Jilin, PR China" : "CGQ",
  "Chania, Greece" : "CHQ",
  "Port Macquarie, Australia" : "PQQ",
  "Port Menier, PQ, Canada" : "YPN",
  "Port Moresby - Jackson Field, Papua New Guinea" : "POM",
  "Porto, Portugal" : "OPO",
  "Porto Alegre - Salgado Filho International Airport, Brazil" : "POA",
  "Port of Spain - Piarco International, Trinidad and Tobago" : "POS",
  "Port Said, Egypt" : "PSD",
  "Porto Santo, Portugal" : "PXO",
  "Porto Velho, Brazil" : "PVH",
  "Port Vila, Vanuatu" : "VLI",
  "Poughkeepsie (NY), USA" : "POU",
  "Poznan, Lawica, Poland" : "POZ",
  "Prague - Václav Havel Airport (formerly Ruzyne), Czech Republic" : "PRG",
  "Praia - Nelson Mandela International Airport, Cape Verde" : "RAI",
  "Presque Island (ME), USA" : "PQI",
  "Pretoria - Wonderboom Apt., South Africa" : "PRY",
  "Preveza/Lefkas, Greece" : "PVK",
  "Prince George, Canada" : "YXS",
  "Prince Rupert/Digby Island, Canada" : "YPR",
  "Pristina, Serbia" : "PRN",
  "Prosperpine, Australia" : "PPP",
  "Providence (RI), USA" : "PVD",
  "Prudhoe Bay (AK), USA" : "SCC",
  "Puebla, Mexico" : "PBC",
  "Pueblo (CO), USA" : "PUB",
  "Puerto Escondido, Mexico" : "PXM",
  "Puerto Ordaz, Venezuela" : "PZO",
  "Puerto Plata, Dominican Republic" : "POP",
  "Puerto Vallarta, Mexico" : "PVR",
  "Pukatawagan, Canada" : "XPK",
  "Pula, Croatia (Hrvatska)" : "PUY",
  "Pullman (WA), USA" : "PUW",
  "Simferopol, Ukraine" : "SIP",
  "Sindhri, Pakistan" : "MPD",
  "Singapore - Changi, Singapore" : "SIN",
  "Singapore - Paya Lebar, Singapore" : "QPG",
  "Singapore - Seletar, Singapore" : "XSP",
  "Zurich (Zürich) - Kloten, Switzerland" : "ZRH"
};
| 7af4562139e9748dfa295522710c7ac219e5f824 | [
"JavaScript"
] | 2 | JavaScript | VivekKumar1913/Project4--FlightSuretyDApp | 4373c86eb5efbf160259561a7670adc342802a52 | d5fa29e1d5838c87328eddb132d8808a8ef9aaac |
refs/heads/master | <file_sep># This file should contain all the record creation needed to seed the database with its default values.
# The data can then be loaded with the rails db:seed command (or created alongside the database with db:setup).
#
# Examples:
#
# movies = Movie.create([{ name: 'Star Wars' }, { name: 'Lord of the Rings' }])
# Character.create(name: 'Luke', movie: movies.first)
# Wipe existing rows so reseeding is idempotent.
# NOTE(review): UserSong join rows are not destroyed here — confirm they are
# removed via association callbacks, otherwise orphaned rows accumulate.
User.destroy_all
Song.destroy_all
Genre.destroy_all
# Base genres referenced by the songs below.
Genre.create([
  {name: 'Rock'},
  {name: 'Jazz'},
  {name: 'Indie'},
  {name: 'Pop'},
  {name: 'Fiesta'},
  {name: 'Experimental'},
  {name: 'Latin'},
  {name: 'Country'}
])
# Sample users; role defaults to 'owner' unless 'admin' is given explicitly.
User.create([
  {name: 'Pancracia', email: '<EMAIL>', password: '<PASSWORD>', role: 'admin'},
  {name: 'Ruperta', email: '<EMAIL>', password: '<PASSWORD>'},
  {name: 'Filomena', email: '<EMAIL>', password: '<PASSWORD>'},
  {name: 'Heriberto', email: '<EMAIL>', password: '<PASSWORD>'},
  {name: 'Robertina', email: '<EMAIL>', password: '<PASSWORD>'},
  {name: '<NAME>', email: '<EMAIL>', password: '<PASSWORD>', role: 'admin'}
])
# Songs look up their genre by name (genres must exist first).
Song.create([
  {name: 'Lotus flower', genre_id:Genre.find_by(name:'Experimental').id, duration: 3},
  {name: 'Stairway to heaven',genre_id:Genre.find_by(name:'Rock').id, duration: 2},
  {name: 'Rabiosa', genre_id:Genre.find_by(name:'Latin').id, duration: 5},
  {name: 'Georgia', genre_id:Genre.find_by(name:'Jazz').id, duration: 6},
  {name: 'I met a girl', genre_id:Genre.find_by(name:'Country').id, duration:4},
  {name: 'Sorry', genre_id:Genre.find_by(name:'Pop').id, duration: 3},
  {name: 'Hotline bling', genre_id:Genre.find_by(name:'Pop').id, duration: 2},
  {name: 'Burbujas de amor', genre_id:Genre.find_by(name:'Latin').id, duration: 5},
  {name: 'The greatest', genre_id:Genre.find_by(name:'Indie').id, duration: 4}
])
# Initial song collections: first and last user each get a few songs.
UserSong.create([
  {song:Song.find_by(name:'Georgia'), user: User.first},
  {song:Song.find_by(name:'Rabiosa'), user: User.last},
  {song:Song.find_by(name:'Stairway to heaven'), user: User.first},
  {song:Song.find_by(name:'I met a girl'), user: User.first},
  {song:Song.find_by(name:'Sorry'), user: User.last},
  {song:Song.find_by(name:'Lotus flower'), user: User.first},
])
<file_sep>class UserSongsController < ApplicationController
before_action :authenticate_user!
load_and_authorize_resource
def create
@song = Song.find(params[:song_id])
current_user.songs << @song
redirect_to songs_path
end
def destroy
@user_song_id = UserSong.find(params[:id])
@user_song_id.destroy!
redirect_to users_path(@user_id), :notice => "Your song was deleted"
end
end<file_sep># README
Mini Spotify
- Heroku url: https://minispotifypj.herokuapp.com
- Name: <NAME>
<file_sep>class User < ApplicationRecord
# Include default devise modules. Others available are:
# :confirmable, :lockable, :timeoutable and :omniauthable
devise :database_authenticatable, :registerable,
:recoverable, :rememberable, :trackable, :validatable
has_many :songs, dependent: :destroy
has_many :user_songs
has_many :songs, through: :user_songs
enum role: [:owner, :admin]
before_create :default_role
private
def default_role
self.role = 'owner' if self.role.nil?
end
end
| 306ba4b7ee70eb7cb52ffb4e5410ba599022ed2b | [
"Markdown",
"Ruby"
] | 4 | Ruby | Javieratapiab/Spotifytest | ed5bb6aec65e80975ef95538704d2e669724f4d2 | 3fc707029326b1a7cfe2854bdf73607a97001676 |
refs/heads/master | <repo_name>enesusanmaz/AdoNetEFCoreBenchmark<file_sep>/AdoNetEFCoreBenchmark/AdoNetEFCoreBenchmark/DataAccess/ITestSignature.cs
using System;
using System.Collections.Generic;
using System.Text;
namespace AdoNetEFCoreBenchmark.DataAccess
{
public interface ITestSignature
{
long GetPlayerByID(int id);
long GetPlayersForTeam(int teamID);
long GetTeamsForSport(int sportID);
}
}
<file_sep>/AdoNetEFCoreBenchmark/AdoNetEFCoreBenchmark/Constants.cs
namespace AdoNetEFCoreBenchmark
{
public static class Constants
{
public static readonly string ConnectionString = "Data Source=.;Initial Catalog=Sports;Integrated Security=True;";
}
}
| 624c127f71d9e598eadc6f33d2ac77d5305cb9df | [
"C#"
] | 2 | C# | enesusanmaz/AdoNetEFCoreBenchmark | 9db42fc22125e588438d8435bd1f768a2f1f53be | f896fa77a2d13908a40ae0770608d9d2be66e04d |
refs/heads/master | <repo_name>smallyellowdog/oms-center<file_sep>/oms-api/src/main/java/com/xy/api/service/OrderService.java
package com.xy.api.service;
import com.xy.api.request.OrderDetailQueryRequest;
import com.xy.common.response.BaseResponse;
import com.xy.pojo.order.Order;
public interface OrderService {
BaseResponse<Order> getOrderDetail(OrderDetailQueryRequest orderDetailQueryRequest);
}
| 69c800be3cce61377a03f055eaee8a39baa0eab9 | [
"Java"
] | 1 | Java | smallyellowdog/oms-center | 937994217d9bd11a477decec40522957abccbfa2 | d0e0c3b38f3cfc102c993c0a1c9225682be42e26 |
refs/heads/main | <file_sep>from django.shortcuts import render
from django.http import Http404
from .models import Pet
# Create your views here.
def home(request):
pets = Pet.objects.all()
context = {"pets": pets}
return render(request, "adoptions/home.html", context)
def pet_detail(request, pet_id):
try:
pet = Pet.objects.get(id=pet_id)
except Pet.DoesNotExist:
raise Http404("No pet found with that ID")
return render(request, "adoptions/pet_detail.html", {"pet": pet})
<file_sep>"# pet_management_system"
<file_sep>from . import views
from django.urls import path
app_name = "adoptions"
<file_sep>from django.db import models
class Pet(models.Model):
SEX_CHOICES = [('M', 'Male'), ('F', 'Female')]
name = models.CharField(max_length=100)
submitter = models.CharField(max_length=50)
species = models.CharField(max_length=50)
breed = models.CharField(max_length=50, blank=True)
description = models.TextField()
sex = models.CharField(max_length=1, choices=SEX_CHOICES, blank=True)
submission_date = models.DateTimeField()
age = models.IntegerField(
null=True) # i used null instead of blank because null results to zero in the db which is indistinguishable from setting the value as zero
vaccinations = models.ManyToManyField('Vaccine', blank=True) # creates a many to many relationship with vaccine
class Vaccine(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
<file_sep>from django.contrib import admin
from .models import Pet
@admin.register(
Pet) # the model admin has an attribute called list_display which we can use to list that we want to display
class PetAdmin(admin.ModelAdmin):
list_display = ['name', 'species', 'breed', 'age', 'sex']
| 1e7e3a83237a54d4558f70af1d68c50dc4b5b29e | [
"Markdown",
"Python"
] | 5 | Python | Toyin96/pet_management_system | b8d5ec3626ab83c3570b2ff3557e339ae6f23990 | 506a4d530a5388960552c79488c4bf07c8d620a6 |
refs/heads/master | <file_sep>package com.dao;
import com.beans.User;
public interface UserProfileDAO {
public void createUserProfile(User user);
public void deleteUserProfile(User user);
public void getUserProfile(String userLoginName);
public void updateUserProfile(User user);
public boolean validateUser(User user);
}
<file_sep># dodocs_v1.1
<file_sep>package com.controllers;
import javax.validation.Valid;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.validation.BindingResult;
import org.springframework.web.bind.annotation.ModelAttribute;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.servlet.config.annotation.ViewControllerRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;
import com.beans.User;
import com.dao.UserProfileDAO;
import com.impl.UserProfileImpl;
@Controller
public class UserController extends WebMvcConfigurerAdapter{
@Override
public void addViewControllers(ViewControllerRegistry registry) {
registry.addViewController("/registerSuccess").setViewName("registerSuccess");
}
@RequestMapping(value="/addUser", method=RequestMethod.GET)
public String showForm(User user) {
return "Registration";
}
@RequestMapping(value="/saveUser", method=RequestMethod.POST)
public String checkPersonInfo(@Valid User person, BindingResult bindingResult) {
if (bindingResult.hasErrors()) {
return "Registration";
}
UserProfileDAO userDAO = new UserProfileImpl();
userDAO.createUserProfile(person);
return "registerSuccess";
}
@RequestMapping(value="/profile",method = RequestMethod.GET)
public String getUser(@ModelAttribute String userName,Model model)
{
UserProfileDAO user =new UserProfileImpl();
user.getUserProfile(userName);
model.addAttribute("userName", userName);
return "Profile"+"?id="+userName;
}
@RequestMapping(value="/searchProfile",method = RequestMethod.GET)
public String searchUser(Model model)
{
model.addAttribute("user",new User());
return "Profile";
}
@RequestMapping(value="/login",method=RequestMethod.POST)
public String login(User user, Model model)
{
UserProfileDAO userDao = new UserProfileImpl();
// User usr = user;
boolean isUserExists = userDao.validateUser(user);
if(isUserExists)
{
model.addAttribute("user",user.getEmailId());
}
else
{
model.addAttribute("user", "User "+user.getEmailId()+" doesn't exists");
}
return "loginResult";
}
@RequestMapping(value="/login",method=RequestMethod.GET)
public String login(Model model)
{
model.addAttribute(new User());
return "Login";
}
}
| 7638e23a20ccb379092878e818cbcc18068f7d1c | [
"Markdown",
"Java"
] | 3 | Java | pandeyh/dodocs_v1.1 | 1a6851ec0b89c24a4110ee52f40fc64e8b9b50ce | 01f1bc6a921ee41870caa8dd326092571bb597cc |
refs/heads/dev | <repo_name>matter-labs/bellman<file_sep>/src/plonk/better_better_cs/redshift/prover.rs
use crate::pairing::{Engine};
use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use crate::worker::Worker;
use crate::plonk::commitments::transparent::utils::log2_floor;
use super::*;
use super::tree_hash::BinaryTreeHasher;
use super::binary_tree::{BinaryTree, BinaryTreeParams};
use crate::plonk::polynomials::*;
use super::multioracle::Multioracle;
use super::super::cs_old::*;
use crate::SynthesisError;
use super::setup::*;
use crate::plonk::better_cs::utils::*;
use crate::plonk::domains::*;
use crate::plonk::fft::cooley_tukey_ntt::*;
use simple_fri::*;
use crate::plonk::commitments::transcript::Prng;
/// Returns the values of the identity polynomial X evaluated on the coset
/// `g * H` of a domain of `domain_size` elements, in bitreversed enumeration.
///
/// Used by the quotient construction to form terms like `A + beta * X + gamma`
/// directly in the evaluation representation.
pub(crate) fn get_precomputed_x_lde<E: Engine>(
    domain_size: usize,
    worker: &Worker
) -> Result<Polynomial<E::Fr, Values>, SynthesisError> {
    // Start from the constant polynomial equal to the coset shift g ...
    let coset_factor = E::Fr::multiplicative_generator();
    let mut x_poly = Polynomial::from_values(vec![
        coset_factor;
        domain_size
    ])?;
    // ... multiply entry i by omega^i, producing g * omega^i = X on the coset ...
    x_poly.distribute_powers(&worker, x_poly.omega);
    // ... and reorder into bitreversed enumeration to match the LDE layout.
    x_poly.bitreverse_enumeration(&worker);

    Ok(x_poly)
}
/// Returns `1 / Z_H(X)` — the inverse of the vanishing polynomial of the
/// `vanishing_size`-element domain — evaluated on the coset of a (larger)
/// `domain_size`-element domain, in bitreversed enumeration.
///
/// Multiplying the quotient numerator by this gives the division by the
/// vanishing polynomial in the evaluation representation.
pub(crate) fn get_precomputed_inverse_divisor<E: Engine>(
    vanishing_size: usize,
    domain_size: usize,
    worker: &Worker
) -> Result<Polynomial<E::Fr, Values>, SynthesisError> {
    let coset_factor = E::Fr::multiplicative_generator();
    let mut vanishing_poly_inverse_bitreversed =
        evaluate_vanishing_polynomial_of_degree_on_domain_size::<E::Fr>(
            vanishing_size as u64,
            &coset_factor,
            domain_size as u64,
            &worker,
        )?;
    // Batch inversion: one field inversion plus O(n) multiplications.
    vanishing_poly_inverse_bitreversed.batch_inversion(&worker)?;
    vanishing_poly_inverse_bitreversed.bitreverse_enumeration(&worker);

    Ok(vanishing_poly_inverse_bitreversed)
}
/// Prover state retained after round 1 (witness commitment), consumed by round 2.
pub(crate) struct FirstPartialProverState<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    // Evaluation domain size (n + 1; asserted to be a power of two in `first_step`).
    required_domain_size: usize,
    // Non-residues k_i defining the disjoint cosets of the permutation argument.
    non_residues: Vec<E::Fr>,
    // Copy of the public input assignments.
    input_values: Vec<E::Fr>,
    // Bitreversed coset LDEs of the witness (state) columns.
    witness_polys_ldes: Vec<Polynomial<E::Fr, Values>>,
    // The same witness columns as unpadded values in natural enumeration.
    witness_polys_unpadded_values: Vec<Polynomial<E::Fr, Values>>,
    // Multioracle tree built over `witness_polys_ldes`.
    witness_multioracle_tree: BinaryTree<E, H>,
}

/// First prover message: circuit sizes, public inputs and the witness oracle root.
pub(crate) struct FirstProverMessage<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    pub(crate) n: usize,
    pub(crate) num_inputs: usize,
    pub(crate) input_values: Vec<E::Fr>,
    pub(crate) witness_multioracle_commitment: H::Output,
}

/// Verifier challenges issued after round 1: the permutation-argument
/// challenges `beta` and `gamma`.
pub(crate) struct FirstVerifierMessage<E: Engine> {
    pub(crate) beta: E::Fr,
    pub(crate) gamma: E::Fr,
}
/// Prover state retained after round 2 (grand-product commitment), consumed by round 3.
pub(crate) struct SecondPartialProverState<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    required_domain_size: usize,
    non_residues: Vec<E::Fr>,
    input_values: Vec<E::Fr>,
    witness_polys_ldes: Vec<Polynomial<E::Fr, Values>>,
    // Witness columns interpolated into monomial (coefficient) form.
    witness_polys_in_monomial_form: Vec<Polynomial<E::Fr, Coefficients>>,
    witness_multioracle_tree: BinaryTree<E, H>,
    // Single-element vector holding the bitreversed coset LDE of Z(X).
    grand_product_polynomial_lde: Vec<Polynomial<E::Fr, Values>>,
    grand_product_polynomial_in_monomial_form: Vec<Polynomial<E::Fr, Coefficients>>,
    grand_product_polynomial_multioracle_tree: BinaryTree<E, H>,
}

/// Second prover message: root of the grand-product oracle.
pub(crate) struct SecondProverMessage<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    pub(crate) grand_product_oracle_commitment: H::Output,
    _marker: std::marker::PhantomData<E>
}

/// Verifier challenges after round 2: `alpha` separates the aggregated
/// quotient terms; `beta`/`gamma` are carried over from round 1.
pub(crate) struct SecondVerifierMessage<E: Engine> {
    pub(crate) alpha: E::Fr,
    pub(crate) beta: E::Fr,
    pub(crate) gamma: E::Fr,
}
/// Prover state retained after round 3 (quotient commitment), consumed by round 4.
pub(crate) struct ThirdPartialProverState<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    required_domain_size: usize,
    non_residues: Vec<E::Fr>,
    input_values: Vec<E::Fr>,
    witness_polys_ldes: Vec<Polynomial<E::Fr, Values>>,
    witness_polys_in_monomial_form: Vec<Polynomial<E::Fr, Coefficients>>,
    witness_multioracle_tree: BinaryTree<E, H>,
    grand_product_polynomial_lde: Vec<Polynomial<E::Fr, Values>>,
    grand_product_polynomial_in_monomial_form: Vec<Polynomial<E::Fr, Coefficients>>,
    grand_product_polynomial_multioracle_tree: BinaryTree<E, H>,
    // Quotient polynomial T(X) broken into degree-(domain size) chunks:
    // coset LDEs, monomial forms, and the multioracle over the LDEs.
    t_poly_parts_ldes: Vec<Polynomial<E::Fr, Values>>,
    t_poly_parts: Vec<Polynomial<E::Fr, Coefficients>>,
    t_poly_parts_multioracle_tree: BinaryTree<E, H>,
}

/// Third prover message: root of the quotient-polynomial oracle.
pub(crate) struct ThirdProverMessage<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    pub(crate) quotient_poly_oracle_commitment: H::Output,
    _marker: std::marker::PhantomData<E>
}

/// Verifier challenges after round 3: adds the evaluation point `z`.
pub(crate) struct ThirdVerifierMessage<E: Engine> {
    pub(crate) alpha: E::Fr,
    pub(crate) beta: E::Fr,
    pub(crate) gamma: E::Fr,
    pub(crate) z: E::Fr,
}
/// Prover state retained after round 4 (evaluations at `z` / `z * omega`),
/// consumed by the final opening/FRI round.
pub(crate) struct FourthPartialProverState<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    required_domain_size: usize,
    non_residues: Vec<E::Fr>,
    input_values: Vec<E::Fr>,
    witness_polys_ldes: Vec<Polynomial<E::Fr, Values>>,
    witness_polys_in_monomial_form: Vec<Polynomial<E::Fr, Coefficients>>,
    witness_multioracle_tree: BinaryTree<E, H>,
    grand_product_polynomial_lde: Vec<Polynomial<E::Fr, Values>>,
    grand_product_polynomial_in_monomial_form: Vec<Polynomial<E::Fr, Coefficients>>,
    grand_product_polynomial_multioracle_tree: BinaryTree<E, H>,
    t_poly_parts_ldes: Vec<Polynomial<E::Fr, Values>>,
    t_poly_parts: Vec<Polynomial<E::Fr, Coefficients>>,
    t_poly_parts_multioracle_tree: BinaryTree<E, H>,
    // Evaluations keyed by (polynomial index, value) at z and z * omega.
    wire_values_at_z: Vec<(usize, E::Fr)>,
    wire_values_at_z_omega: Vec<(usize, E::Fr)>,
    setup_values_at_z: Vec<E::Fr>,
    permutation_polynomials_at_z: Vec<E::Fr>,
    gate_selector_polynomials_at_z: Vec<E::Fr>,
    grand_product_at_z: E::Fr,
    grand_product_at_z_omega: E::Fr,
    quotient_polynomial_parts_at_z: Vec<E::Fr>,
}

/// Fourth prover message: all claimed polynomial evaluations at `z`
/// (and at `z * omega` for the shifted openings).
pub(crate) struct FourthProverMessage<E: Engine> {
    pub(crate) wire_values_at_z: Vec<(usize, E::Fr)>,
    pub(crate) wire_values_at_z_omega: Vec<(usize, E::Fr)>,
    pub(crate) setup_values_at_z: Vec<E::Fr>,
    pub(crate) permutation_polynomials_at_z: Vec<E::Fr>,
    pub(crate) gate_selector_polynomials_at_z: Vec<E::Fr>,
    pub(crate) grand_product_at_z: E::Fr,
    pub(crate) grand_product_at_z_omega: E::Fr,
    pub(crate) quotient_polynomial_parts_at_z: Vec<E::Fr>,
}

/// Verifier challenges after round 4: adds the opening aggregation challenge `v`.
pub(crate) struct FourthVerifierMessage<E: Engine> {
    pub(crate) alpha: E::Fr,
    pub(crate) beta: E::Fr,
    pub(crate) gamma: E::Fr,
    pub(crate) z: E::Fr,
    pub(crate) v: E::Fr,
}
/// Final prover message: the intermediate oracle roots produced by each FRI
/// folding step and the coefficients of the fully-folded polynomial.
pub(crate) struct FifthProverMessage<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    pub(crate) fri_intermediate_roots: Vec<H::Output>,
    pub(crate) final_coefficients: Vec<E::Fr>,
}

/// Request to open a set of witness-type polynomials at one point with the
/// claimed values.
struct WitnessOpeningRequest<'a, F: PrimeField> {
    polynomials: Vec<&'a Polynomial<F, Values>>,
    opening_point: F,
    opening_values: Vec<F>
}

/// Request to open setup polynomials. In addition to the opening at the
/// challenge point these carry a second (point, values) pair.
// NOTE(review): `setup_point`/`setup_values` presumably refer to the
// preprocessing-time evaluation used by the verifier — confirm at the use site.
struct SetupOpeningRequest<'a, F: PrimeField> {
    polynomials: Vec<&'a Polynomial<F, Values>>,
    setup_point: F,
    setup_values: Vec<F>,
    opening_point: F,
    opening_values: Vec<F>
}
/// Prover for the transparent ("redshift") PLONK variant: polynomial oracles
/// are binary Merkle trees over coset LDEs (opened via FRI) rather than a
/// pairing-based polynomial commitment.
pub struct RedshiftProver<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    // Circuit gates in the canonical order fixed by the assembly.
    sorted_gates: Vec<Box<dyn GateEquationInternal>>,
    // Per-gate constant coefficients, parallel to `sorted_gates`.
    gate_constants: Vec<Vec<E::Fr>>,
    // Precomputed twiddle factors for bitreversed forward / inverse NTTs.
    precomputed_omegas: BitReversedOmegas<E::Fr>,
    precomputed_omegas_inv: OmegasInvBitreversed<E::Fr>,
    tree_hasher: H,
    // Number of state (witness) columns, equal to P::STATE_WIDTH.
    state_width: usize,
}
impl<E: Engine, H: BinaryTreeHasher<E::Fr>> RedshiftProver<E, H> {
    /// Round 1: consumes a finalized assembly, constructs the prover itself
    /// (gates, constants, NTT twiddles) and commits to the witness columns.
    ///
    /// Returns `(prover, state_for_round_2, first_message)`; the message
    /// carries `n`, the public inputs and the root of the multioracle built
    /// over the bitreversed coset LDEs (oversampling `LDE_FACTOR`) of all
    /// witness polynomials.
    pub(crate) fn first_step<P: PlonkConstraintSystemParams<E>, MG: MainGateEquation>(
        assembly: TrivialAssembly<E, P, MG>,
        tree_hasher: H,
        worker: &Worker,
    ) -> Result<(
        Self,
        FirstPartialProverState<E, H>,
        FirstProverMessage<E, H>
    ), SynthesisError>
    {
        // The assembly must be finalized so that n + 1 is a power of two.
        assert!(assembly.is_finalized);
        let input_values = assembly.input_assingments.clone();
        let n = assembly.n();
        let size = n.next_power_of_two();
        let num_inputs = assembly.num_inputs;
        let required_domain_size = n + 1;
        assert!(required_domain_size.is_power_of_two());
        // One non-residue per extra state column; these define the disjoint
        // cosets used by the permutation argument.
        let non_residues = make_non_residues::<E::Fr>(
            P::STATE_WIDTH - 1
        );
        let (full_assignments, _) = assembly.make_state_and_witness_polynomials(&worker)?;
        let gate_constants = assembly.sorted_gate_constants;
        let sorted_gates = assembly.sorted_gates;
        // Commit wire polynomials
        let omegas_bitreversed = BitReversedOmegas::<E::Fr>::new_for_domain_size(size.next_power_of_two());
        let omegas_inv_bitreversed = <OmegasInvBitreversed::<E::Fr> as CTPrecomputations::<E::Fr>>::new_for_domain_size(size.next_power_of_two());
        // Commitment field is a placeholder until the tree is built below.
        let mut first_message = FirstProverMessage::<E, H> {
            n: n,
            num_inputs: num_inputs,
            input_values: input_values.clone(),
            witness_multioracle_commitment: H::placeholder_output()
        };
        let s = Self {
            sorted_gates,
            gate_constants,
            precomputed_omegas: omegas_bitreversed,
            precomputed_omegas_inv: omegas_inv_bitreversed,
            tree_hasher: tree_hasher.clone(),
            state_width: P::STATE_WIDTH,
        };
        let coset_factor = E::Fr::multiplicative_generator();
        // Interpolate each wire column and re-evaluate it on the coset of the
        // LDE_FACTOR-times larger domain; values stay in bitreversed order.
        let mut witness_polys_ldes = vec![];
        for wire_poly in full_assignments.iter() {
            let lde = Polynomial::from_values(wire_poly.clone())?
                .ifft_using_bitreversed_ntt(
                    &worker,
                    &s.precomputed_omegas_inv,
                    &E::Fr::one()
                )?
                .bitreversed_lde_using_bitreversed_ntt(
                    &worker,
                    LDE_FACTOR,
                    &s.precomputed_omegas,
                    &coset_factor
                )?;
            witness_polys_ldes.push(lde);
        }
        // now transform assignments in the polynomials
        let mut witness_polys_unpadded_values = vec![];
        for p in full_assignments.into_iter() {
            let p = Polynomial::from_values_unpadded(p)?;
            witness_polys_unpadded_values.push(p);
        }
        // A single multioracle (one tree) commits to all witness LDEs at once.
        let multioracle = Multioracle::new_from_polynomials(
            &witness_polys_ldes,
            tree_hasher,
            FRI_VALUES_PER_LEAF,
            &worker
        );
        let tree = multioracle.tree;
        first_message.witness_multioracle_commitment = tree.get_commitment();
        let first_state = FirstPartialProverState::<E, H> {
            required_domain_size: n+1,
            non_residues: non_residues,
            input_values: input_values.clone(),
            witness_polys_ldes: witness_polys_ldes,
            witness_polys_unpadded_values: witness_polys_unpadded_values,
            witness_multioracle_tree: tree
        };
        Ok((s, first_state, first_message))
    }
    /// Round 2: builds the permutation grand product
    /// `Z(X) = prod_i prod_cols (w + beta * k * x_i + gamma) / (w + beta * sigma(x_i) + gamma)`
    /// from the verifier's `beta`/`gamma`, commits to its LDE, and interpolates
    /// the witness columns into monomial form for later rounds.
    pub(crate) fn second_step_from_first_step(
        &self,
        first_state: FirstPartialProverState<E, H>,
        first_verifier_message: FirstVerifierMessage<E>,
        permutation_polynomials: &Vec<Polynomial<E::Fr, Values>>,
        worker: &Worker
    ) -> Result<(
        SecondPartialProverState<E, H>,
        SecondProverMessage<E, H>
    ), SynthesisError>
    {
        let FirstVerifierMessage { beta, gamma, ..} = first_verifier_message;

        assert_eq!(first_state.witness_polys_unpadded_values.len(), self.state_width, "first state must containt assignment poly values");

        let mut grand_products_protos_with_gamma = first_state.witness_polys_unpadded_values.clone();
        // add gamma here to save computations later
        for p in grand_products_protos_with_gamma.iter_mut() {
            p.add_constant(&worker, &gamma);
        }
        let required_domain_size = first_state.required_domain_size;
        let domain = Domain::new_for_size(required_domain_size as u64)?;
        let mut domain_elements = materialize_domain_elements_with_natural_enumeration(
            &domain,
            &worker
        );
        // Drop the last element: the products below are taken over n points.
        domain_elements.pop().expect("must pop last element for omega^i");
        let mut domain_elements_poly_by_beta = Polynomial::from_values_unpadded(domain_elements)?;
        domain_elements_poly_by_beta.scale(&worker, beta);
        // we take A, B, C, ... values and form (A + beta * X * non_residue + gamma), etc and calculate their grand product
        let mut z_num = {
            let mut grand_products_proto_it = grand_products_protos_with_gamma.iter().cloned();
            // First column uses the base coset (non-residue = 1).
            let mut z_1 = grand_products_proto_it.next().unwrap();
            z_1.add_assign(&worker, &domain_elements_poly_by_beta);
            for (mut p, non_res) in grand_products_proto_it.zip(first_state.non_residues.iter()) {
                p.add_assign_scaled(&worker, &domain_elements_poly_by_beta, non_res);
                z_1.mul_assign(&worker, &p);
            }
            z_1
        };
        // we take A, B, C, ... values and form (A + beta * perm_a + gamma), etc and calculate their grand product
        let z_den = {
            assert_eq!(
                permutation_polynomials.len(),
                grand_products_protos_with_gamma.len()
            );
            let mut grand_products_proto_it = grand_products_protos_with_gamma.into_iter();
            let mut permutation_polys_it = permutation_polynomials.iter();
            let mut z_2 = grand_products_proto_it.next().unwrap();
            z_2.add_assign_scaled(&worker, permutation_polys_it.next().unwrap(), &beta);
            for (mut p, perm) in grand_products_proto_it
                                .zip(permutation_polys_it) {
                // permutation polynomials
                p.add_assign_scaled(&worker, perm, &beta);
                z_2.mul_assign(&worker, &p);
            }
            // Invert pointwise so the ratio becomes a multiplication.
            z_2.batch_inversion(&worker)?;
            z_2
        };
        z_num.mul_assign(&worker, &z_den);
        drop(z_den);
        // Shifted running product: Z(omega^0) = 1 and, for a valid witness,
        // the product over the full domain closes back to 1.
        let z = z_num.calculate_shifted_grand_product(&worker)?;
        assert!(z.size().is_power_of_two());
        assert!(z.as_ref()[0] == E::Fr::one());
        assert!(z.as_ref().last().expect("must exist") == &E::Fr::one());
        // interpolate on the main domain
        let grand_product_in_monomial_form = z.ifft_using_bitreversed_ntt(&worker, &self.precomputed_omegas_inv, &E::Fr::one())?;
        let coset_factor = E::Fr::multiplicative_generator();
        let grand_product_lde = grand_product_in_monomial_form.clone().bitreversed_lde_using_bitreversed_ntt(
            &worker,
            LDE_FACTOR,
            &self.precomputed_omegas,
            &coset_factor
        )?;
        let grand_product_ldes = vec![grand_product_lde];
        let multioracle = Multioracle::new_from_polynomials(
            &grand_product_ldes,
            self.tree_hasher.clone(),
            FRI_VALUES_PER_LEAF,
            &worker
        );
        // Interpolate the witness columns too; round 3+ works with monomials.
        let mut witness_polys_in_monomial_form = vec![];
        for mut p in first_state.witness_polys_unpadded_values.into_iter() {
            p.pad_to_domain()?;
            let p = p.ifft_using_bitreversed_ntt(&worker, &self.precomputed_omegas_inv, &E::Fr::one())?;
            witness_polys_in_monomial_form.push(p);
        }
        let tree = multioracle.tree;
        let commitment = tree.get_commitment();
        let state = SecondPartialProverState::<E, H> {
            required_domain_size,
            non_residues: first_state.non_residues,
            input_values: first_state.input_values,
            witness_polys_ldes: first_state.witness_polys_ldes,
            witness_polys_in_monomial_form: witness_polys_in_monomial_form,
            witness_multioracle_tree: first_state.witness_multioracle_tree,
            grand_product_polynomial_lde: grand_product_ldes,
            grand_product_polynomial_in_monomial_form: vec![grand_product_in_monomial_form],
            grand_product_polynomial_multioracle_tree: tree,
        };
        let message = SecondProverMessage::<E, H> {
            grand_product_oracle_commitment: commitment,
            _marker: std::marker::PhantomData
        };
        Ok((state, message))
    }
pub(crate) fn third_step_from_second_step(
&self,
second_state: SecondPartialProverState<E, H>,
second_verifier_message: SecondVerifierMessage<E>,
setup: &SetupMultioracle<E, H>,
worker: &Worker
) -> Result<(
ThirdPartialProverState<E, H>,
ThirdProverMessage<E, H>
), SynthesisError>
{
use std::sync::Arc;
let mut quotient_linearization_challenge = E::Fr::one();
let SecondVerifierMessage { alpha, beta, gamma, .. } = second_verifier_message;
let coset_factor = E::Fr::multiplicative_generator();
let required_domain_size = second_state.required_domain_size;
println!("Domain size = {}", required_domain_size);
let mut max_degree = self.state_width + 1;
let assume_gate_selectors = self.sorted_gates.len() > 1;
let mut extra_precomputations_storage_by_time_dilation = std::collections::HashMap::new();
for gate in self.sorted_gates.iter() {
let mut degree = gate.degree();
if assume_gate_selectors {
degree += 1;
}
if degree > max_degree {
max_degree = degree;
}
}
assert!(max_degree <= 5, "we do not support very high order constraints yet");
let quotient_degree_factor = (max_degree - 1).next_power_of_two();
// let quotient_degree_factor = 8;
let quotient_degree = quotient_degree_factor * required_domain_size;
assert!(quotient_degree.is_power_of_two());
let one = E::Fr::one();
let mut minus_one = one;
minus_one.negate();
// we also assume bitreverse
let mut t = Polynomial::<E::Fr, Values>::new_for_size(quotient_degree)?;
let mut global_scratch_space = t.clone();
let mut public_inputs_processed = false;
let partition_factor = LDE_FACTOR / quotient_degree_factor;
let use_gate_selectors = self.sorted_gates.len() > 1;
let selectors_indexes = setup.gate_selectors_indexes.clone();
println!("Have {} gate selector indexes", selectors_indexes.len());
let mut selectors_range_it = selectors_indexes.into_iter();
let mut local_scratch_space = if use_gate_selectors {
Some(global_scratch_space.clone())
} else {
None
};
for (i, (gate, constants)) in self.sorted_gates.iter().zip(self.gate_constants.iter()).enumerate() {
let mut constants_iter = constants.iter();
if i == 0 {
assert!(gate.can_include_public_inputs());
}
let constant_term_index = gate.put_public_inputs_into_selector_id();
for constraint in gate.get_constraints().iter() {
for term in constraint.0.iter() {
let mut must_refill_scratch = true;
let mut base = match term.0 {
Coefficient::PlusOne => one,
Coefficient::MinusOne => minus_one,
Coefficient::Other => *constants_iter.next().unwrap()
};
for poly in term.1.iter() {
match poly {
PolynomialInConstraint::VariablesPolynomial(
poly_num, TimeDilation(0)
) => {
let poly_lde_ref = &second_state.witness_polys_ldes[*poly_num];
let poly_lde_part = poly_lde_ref.clone_subset_assuming_bitreversed(
partition_factor
)?;
let scratch_space = if use_gate_selectors {
local_scratch_space.as_mut().unwrap()
} else {
&mut global_scratch_space
};
if must_refill_scratch {
must_refill_scratch = false;
scratch_space.reuse_allocation(&poly_lde_part);
} else {
scratch_space.mul_assign(&worker, &poly_lde_part);
}
},
PolynomialInConstraint::VariablesPolynomial(
poly_num, TimeDilation(dilation)
) => {
let dilated_poly = if let Some(dilated_poly) = extra_precomputations_storage_by_time_dilation.get(&(*poly_num, TimeDilation(*dilation))) {
dilated_poly
} else {
let poly_lde_ref = &second_state.witness_polys_ldes[*poly_num];
let poly_lde_part = poly_lde_ref.clone_subset_assuming_bitreversed(
partition_factor
)?;
let shift = quotient_degree_factor * (*dilation);
let dilated_poly = poly_lde_part.clone_shifted_assuming_bitreversed(shift, &worker)?;
extra_precomputations_storage_by_time_dilation.insert((*poly_num, TimeDilation(*dilation)), dilated_poly);
extra_precomputations_storage_by_time_dilation.get(&(*poly_num, TimeDilation(*dilation))).unwrap()
};
let scratch_space = if use_gate_selectors {
local_scratch_space.as_mut().unwrap()
} else {
&mut global_scratch_space
};
if must_refill_scratch {
must_refill_scratch = false;
scratch_space.reuse_allocation(dilated_poly);
} else {
scratch_space.mul_assign(&worker, dilated_poly);
}
},
PolynomialInConstraint::SetupPolynomial(
str_id, poly_num, TimeDilation(0)
) => {
let id = PolyIdentifier::SetupPolynomial(str_id, *poly_num);
let idx = setup.setup_ids.iter().position(|el| el == &id).unwrap();
let poly_lde_ref = &setup.polynomial_ldes[idx];
let poly_lde_part = poly_lde_ref.clone_subset_assuming_bitreversed(
partition_factor
)?;
let process_public_inputs = if constant_term_index.is_some() {
public_inputs_processed == false && constant_term_index.unwrap() == *poly_num
} else {
false
};
let scratch_space = if use_gate_selectors {
local_scratch_space.as_mut().unwrap()
} else {
&mut global_scratch_space
};
if process_public_inputs {
public_inputs_processed = true;
// processing public inputs
assert!(base == one); // base coefficient for here should be one
let mut inputs_poly = Polynomial::<E::Fr, Values>::new_for_size(required_domain_size)?;
for (idx, &input) in second_state.input_values.iter().enumerate() {
inputs_poly.as_mut()[idx] = input;
}
// go into monomial form
let inputs_poly = inputs_poly.ifft_using_bitreversed_ntt(
&worker,
&self.precomputed_omegas_inv,
&E::Fr::one()
)?;
let mut inputs_poly = inputs_poly.bitreversed_lde_using_bitreversed_ntt(
&worker,
LDE_FACTOR / partition_factor,
&self.precomputed_omegas,
&coset_factor
)?;
// add constants selectors vector
inputs_poly.add_assign(&worker, &poly_lde_part);
if must_refill_scratch {
must_refill_scratch = false;
scratch_space.reuse_allocation(&inputs_poly);
} else {
scratch_space.mul_assign(&worker, &inputs_poly);
}
} else {
if must_refill_scratch {
must_refill_scratch = false;
scratch_space.reuse_allocation(&poly_lde_part);
} else {
scratch_space.mul_assign(&worker, &poly_lde_part);
}
}
},
_ => {
unimplemented!()
}
}
}
base.mul_assign("ient_linearization_challenge);
if use_gate_selectors {
let scratch_space = local_scratch_space.as_ref().unwrap();
global_scratch_space.add_assign_scaled(&worker, &scratch_space, &base);
} else {
// add into T poly directly
t.add_assign_scaled(&worker, &global_scratch_space, &base);
}
}
// if use_gate_selectors {
// let selector_idx = (&mut selectors_range_it).next().expect(&format!("must get gate selector for gate {}", i));
// let poly_lde_ref = &setup.polynomial_ldes[selector_idx];
// let poly_lde_part = poly_lde_ref.clone_subset_assuming_bitreversed(
// partition_factor
// )?;
// global_scratch_space.mul_assign(&worker, &poly_lde_part);
// t.add_assign(&worker, &global_scratch_space);
// }
quotient_linearization_challenge.mul_assign(&alpha);
}
if use_gate_selectors {
let selector_idx = (&mut selectors_range_it).next().expect(&format!("must get gate selector for gate {}", i));
let poly_lde_ref = &setup.polynomial_ldes[selector_idx];
let poly_lde_part = poly_lde_ref.clone_subset_assuming_bitreversed(
partition_factor
)?;
global_scratch_space.mul_assign(&worker, &poly_lde_part);
t.add_assign(&worker, &global_scratch_space);
}
}
let mut scratch_space = global_scratch_space;
let grand_product_lde_bitreversed = second_state.grand_product_polynomial_lde[0].clone_subset_assuming_bitreversed(
partition_factor
)?;
let shift = quotient_degree_factor;
let grand_product_shifted_lde_bitreversed = grand_product_lde_bitreversed.clone_shifted_assuming_bitreversed(shift, &worker)?;
let non_residues = second_state.non_residues.clone();
// For both Z_1 and Z_2 we first check for grand products
// z*(X)(A + beta*X + gamma)(B + beta*k_1*X + gamma)(C + beta*K_2*X + gamma) -
// - (A + beta*perm_a(X) + gamma)(B + beta*perm_b(X) + gamma)(C + beta*perm_c(X) + gamma)*Z(X*Omega)== 0
// we use evaluations of the polynomial X and K_i * X on a large domain's coset
{
let mut contrib_z = grand_product_lde_bitreversed.clone();
// A + beta*X + gamma
let poly_lde_ref = &second_state.witness_polys_ldes[0];
let poly_lde_part = poly_lde_ref.clone_subset_assuming_bitreversed(
partition_factor
)?;
scratch_space.reuse_allocation(&poly_lde_part);
drop(poly_lde_ref);
scratch_space.add_constant(&worker, &gamma);
let x_precomp = get_precomputed_x_lde::<E>(
quotient_degree,
&worker
)?;
scratch_space.add_assign_scaled(&worker, &x_precomp, &beta);
contrib_z.mul_assign(&worker, &scratch_space);
assert_eq!(non_residues.len() + 1, self.state_width);
for (w, non_res) in second_state.witness_polys_ldes[1..].iter().zip(non_residues.iter()) {
let mut factor = beta;
factor.mul_assign(&non_res);
let poly_lde_part = w.clone_subset_assuming_bitreversed(
partition_factor
)?;
scratch_space.reuse_allocation(&poly_lde_part);
scratch_space.add_constant(&worker, &gamma);
scratch_space.add_assign_scaled(&worker, &x_precomp, &factor);
contrib_z.mul_assign(&worker, &scratch_space);
}
t.add_assign_scaled(&worker, &contrib_z, "ient_linearization_challenge);
drop(contrib_z);
let mut contrib_z = grand_product_shifted_lde_bitreversed;
// A + beta*perm_a + gamma
for (w, perm_idx) in second_state.witness_polys_ldes.iter().zip(setup.permutations_ranges[0].clone()) {
let poly_lde_part = w.clone_subset_assuming_bitreversed(
partition_factor
)?;
let perm_part = setup.polynomial_ldes[perm_idx].clone_subset_assuming_bitreversed(
partition_factor
)?;
scratch_space.reuse_allocation(&poly_lde_part);
scratch_space.add_constant(&worker, &gamma);
scratch_space.add_assign_scaled(&worker, &perm_part, &beta);
contrib_z.mul_assign(&worker, &scratch_space);
}
t.sub_assign_scaled(&worker, &contrib_z, "ient_linearization_challenge);
drop(contrib_z);
}
quotient_linearization_challenge.mul_assign(&alpha);
// z(omega^0) - 1 == 0
let l_0 = calculate_lagrange_poly::<E::Fr>(&worker, required_domain_size.next_power_of_two(), 0)?;
{
let mut z_minus_one_by_l_0 = grand_product_lde_bitreversed;
z_minus_one_by_l_0.sub_constant(&worker, &E::Fr::one());
let l_coset_lde_bitreversed = l_0.bitreversed_lde_using_bitreversed_ntt(
&worker,
LDE_FACTOR / partition_factor,
&self.precomputed_omegas,
&coset_factor
)?;
z_minus_one_by_l_0.mul_assign(&worker, &l_coset_lde_bitreversed);
t.add_assign_scaled(&worker, &z_minus_one_by_l_0, "ient_linearization_challenge);
drop(z_minus_one_by_l_0);
}
drop(scratch_space);
let divisor_inversed = get_precomputed_inverse_divisor::<E>(
required_domain_size,
quotient_degree,
&worker
)?;
t.mul_assign(&worker, &divisor_inversed);
t.bitreverse_enumeration(&worker);
let t_poly_in_monomial_form = t.icoset_fft_for_generator(
&worker,
&E::Fr::multiplicative_generator()
);
fn get_degree<F: PrimeField>(poly: &Polynomial<F, Coefficients>) -> usize {
let mut degree = poly.as_ref().len() - 1;
for c in poly.as_ref().iter().rev() {
if c.is_zero() {
degree -= 1;
} else {
break;
}
}
degree
}
println!("T degree = {}", get_degree::<E::Fr>(&t_poly_in_monomial_form));
let t_poly_parts = t_poly_in_monomial_form.break_into_multiples(required_domain_size)?;
let mut t_poly_parts_ldes = vec![];
for p in t_poly_parts.iter() {
let lde = p.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
LDE_FACTOR,
&self.precomputed_omegas,
&coset_factor
)?;
t_poly_parts_ldes.push(lde);
}
let multioracle = Multioracle::new_from_polynomials(
&t_poly_parts_ldes,
self.tree_hasher.clone(),
FRI_VALUES_PER_LEAF,
&worker
);
let tree = multioracle.tree;
let commitment = tree.get_commitment();
let state = ThirdPartialProverState::<E, H> {
required_domain_size,
non_residues: second_state.non_residues,
input_values: second_state.input_values,
witness_polys_ldes: second_state.witness_polys_ldes,
witness_polys_in_monomial_form: second_state.witness_polys_in_monomial_form,
witness_multioracle_tree: second_state.witness_multioracle_tree,
grand_product_polynomial_lde: second_state.grand_product_polynomial_lde,
grand_product_polynomial_in_monomial_form: second_state.grand_product_polynomial_in_monomial_form,
grand_product_polynomial_multioracle_tree: second_state.grand_product_polynomial_multioracle_tree,
t_poly_parts_ldes: t_poly_parts_ldes,
t_poly_parts: t_poly_parts,
t_poly_parts_multioracle_tree: tree,
};
let message = ThirdProverMessage::<E, H> {
quotient_poly_oracle_commitment: commitment,
_marker: std::marker::PhantomData
};
Ok((state, message))
}
/// Fourth prover round of the protocol.
///
/// Takes the verifier's evaluation challenge `z` (from `third_verifier_message`)
/// and evaluates the committed polynomials at the points the verifier will later
/// check: witness polynomials at `z` (and, for constraint terms carrying a
/// one-step time dilation, at `z * omega`), setup / permutation / gate-selector
/// polynomials and the quotient parts at `z`, and the grand-product polynomial
/// at both `z` and `z * omega`. The evaluations are stored into the returned
/// state and echoed to the verifier via the `FourthProverMessage`.
pub(crate) fn fourth_step_from_third_step(
&self,
third_state: ThirdPartialProverState<E, H>,
third_verifier_message: ThirdVerifierMessage<E>,
setup: &SetupMultioracle<E, H>,
worker: &Worker
) -> Result<(
FourthPartialProverState<E, H>,
FourthProverMessage<E>
), SynthesisError>
{
let ThirdVerifierMessage { z, .. } = third_verifier_message;
let required_domain_size = third_state.required_domain_size;
let domain = Domain::new_for_size(required_domain_size as u64)?;
// Carry everything from the third state forward; the `*_at_z` /
// `*_at_z_omega` fields start empty (or zeroed) and are filled in below.
let mut state = FourthPartialProverState::<E, H> {
required_domain_size,
non_residues: third_state.non_residues,
input_values: third_state.input_values,
witness_polys_ldes: third_state.witness_polys_ldes,
witness_polys_in_monomial_form: third_state.witness_polys_in_monomial_form,
witness_multioracle_tree: third_state.witness_multioracle_tree,
grand_product_polynomial_lde: third_state.grand_product_polynomial_lde,
grand_product_polynomial_in_monomial_form: third_state.grand_product_polynomial_in_monomial_form,
grand_product_polynomial_multioracle_tree: third_state.grand_product_polynomial_multioracle_tree,
t_poly_parts_ldes: third_state.t_poly_parts_ldes,
t_poly_parts: third_state.t_poly_parts,
t_poly_parts_multioracle_tree: third_state.t_poly_parts_multioracle_tree,
wire_values_at_z: vec![],
wire_values_at_z_omega: vec![],
setup_values_at_z: vec![E::Fr::zero(); setup.setup_ids.len()],
permutation_polynomials_at_z: vec![],
gate_selector_polynomials_at_z: vec![],
grand_product_at_z: E::Fr::zero(),
grand_product_at_z_omega: E::Fr::zero(),
quotient_polynomial_parts_at_z: vec![],
};
// Second opening point: the challenge shifted by one domain step.
let mut z_by_omega = z;
z_by_omega.mul_assign(&domain.generator);
// now need to go over constraints and decide what polynomials to open where
// Map: time dilation -> distinct polynomial identifiers referenced by some
// constraint term with that dilation. Dilation 0 is opened at `z`,
// dilation 1 at `z * omega`.
let mut dilation_maps = std::collections::HashMap::new();
for gate in self.sorted_gates.iter() {
for constraint in gate.get_constraints().iter() {
for term in constraint.0.iter() {
for poly in term.1.iter() {
match poly {
PolynomialInConstraint::VariablesPolynomial(
poly_num, TimeDilation(dilation)
) => {
let poly_id = PolyIdentifier::VariablesPolynomial(*poly_num);
let key = TimeDilation(*dilation);
let entry = dilation_maps.entry(key).or_insert(vec![]);
if !entry.contains(&poly_id) {
entry.push(poly_id);
}
},
PolynomialInConstraint::SetupPolynomial(
str_id, poly_num, TimeDilation(0)
) => {
// Setup polynomials are only supported without dilation.
let poly_id = PolyIdentifier::SetupPolynomial(str_id, *poly_num);
let key = TimeDilation(0);
let entry = dilation_maps.entry(key).or_insert(vec![]);
if !entry.contains(&poly_id) {
entry.push(poly_id);
}
},
_ => {
unimplemented!()
}
}
}
}
}
}
// Process dilations in ascending order so that index `i` below selects the
// matching opening point from `points_set`.
let mut keys: Vec<_> = dilation_maps.keys().map(|el| el.clone()).collect();
keys.sort_by(|a, b| a.0.cmp(&b.0));
assert!(keys.len() <= 2, "large time dilations are not supported");
let points_set = vec![z, z_by_omega];
for (i, key) in keys.into_iter().enumerate() {
let poly_ids = (&mut dilation_maps).remove(&key).unwrap();
for id in poly_ids.into_iter() {
if let Some(setup_poly_idx) = setup.setup_ids.iter().position(|el| el == &id) {
// Setup polynomials may only appear with dilation 0 (opened at `z`).
assert!(i == 0, "don't support setup polys with dilation yet");
let poly_ref = &setup.polynomials_in_monomial_form[setup_poly_idx];
let evaluate_at = points_set[i];
let value = poly_ref.evaluate_at(&worker, evaluate_at);
state.setup_values_at_z[setup_poly_idx] = value;
} else {
if let PolyIdentifier::VariablesPolynomial(state_idx) = id {
// Witness polynomial: evaluate at `z` or `z * omega` depending
// on the dilation, and remember which wire index it came from.
let poly_ref = &state.witness_polys_in_monomial_form[state_idx];
let evaluate_at = points_set[i];
let value = poly_ref.evaluate_at(&worker, evaluate_at);
if i == 0 {
state.wire_values_at_z.push((state_idx, value));
} else if i == 1 {
state.wire_values_at_z_omega.push((state_idx, value));
}
} else {
unimplemented!()
}
}
}
if i == 0 {
// also open permutation polys and quotient
for setup_poly_idx in setup.permutations_ranges[0].clone() {
let poly_ref = &setup.polynomials_in_monomial_form[setup_poly_idx];
let value = poly_ref.evaluate_at(&worker, z);
state.permutation_polynomials_at_z.push(value);
}
for selector_poly_idx in setup.gate_selectors_indexes.iter() {
let poly_ref = &setup.polynomials_in_monomial_form[*selector_poly_idx];
let value = poly_ref.evaluate_at(&worker, z);
state.gate_selector_polynomials_at_z.push(value);
}
// let mut quotient_at_z = E::Fr::zero();
// let mut current = E::Fr::one();
// let mut z_in_domain_size = z.pow(&[required_domain_size as u64]);
// Each part of the split quotient polynomial t(X) is opened at `z`.
for poly_ref in state.t_poly_parts.iter() {
let value_of_part = poly_ref.evaluate_at(&worker, z);
state.quotient_polynomial_parts_at_z.push(value_of_part);
}
}
}
// The grand-product polynomial is opened at both points unconditionally.
state.grand_product_at_z = state.grand_product_polynomial_in_monomial_form[0].evaluate_at(&worker, z);
state.grand_product_at_z_omega = state.grand_product_polynomial_in_monomial_form[0].evaluate_at(&worker, z_by_omega);
let message = FourthProverMessage::<E> {
wire_values_at_z: state.wire_values_at_z.clone(),
wire_values_at_z_omega: state.wire_values_at_z_omega.clone(),
setup_values_at_z: state.setup_values_at_z.clone(),
permutation_polynomials_at_z: state.permutation_polynomials_at_z.clone(),
gate_selector_polynomials_at_z: state.gate_selector_polynomials_at_z.clone(),
grand_product_at_z: state.grand_product_at_z,
grand_product_at_z_omega: state.grand_product_at_z_omega,
quotient_polynomial_parts_at_z: state.quotient_polynomial_parts_at_z.clone(),
};
Ok((state, message))
}
/// Aggregates every opening claim into a single quotient polynomial and runs
/// FRI on the result.
///
/// Each witness request contributes terms of the form (p(x) - p(z)) / (x - z);
/// each setup request contributes (p(x) - l(x)) / ((x - setup_x)(x - open_x)),
/// where `l` is the degree-1 interpolant through (setup_x, setup_y) and
/// (open_x, open_y). Successive terms are weighted by consecutive powers of
/// `aggregation_challenge`. All arithmetic is done over the bit-reversed
/// multiplicative coset of the LDE domain.
fn perform_fri<P: Prng<E::Fr, Input = H::Output>>
(
&self,
aggregation_challenge: E::Fr,
witness_opening_requests: Vec<WitnessOpeningRequest<E::Fr>>,
setup_opening_requests: Vec<SetupOpeningRequest<E::Fr>>,
worker: &Worker,
prng: &mut P
) -> Result<FriOraclesSet<E, H>, SynthesisError> {
// Every participating polynomial must live on one and the same LDE domain.
let mut len = 0;
for r in witness_opening_requests.iter() {
for p in r.polynomials.iter() {
if len == 0 {
len = p.size();
} else {
assert_eq!(p.size(), len, "poly lengths are different!");
}
}
}
for r in setup_opening_requests.iter() {
for p in r.polynomials.iter() {
if len == 0 {
len = p.size();
} else {
assert_eq!(p.size(), len, "poly lengths are different!");
}
}
}
assert!(len != 0);
let required_divisor_size = len;
let mut final_aggregate = Polynomial::from_values(vec![E::Fr::zero(); required_divisor_size])?;
// Evaluations of X itself over the bit-reversed coset (generator * omega^i):
// the shared building block for every divisor below.
let mut precomputed_bitreversed_coset_divisor = Polynomial::from_values(vec![E::Fr::one(); required_divisor_size])?;
precomputed_bitreversed_coset_divisor.distribute_powers(&worker, precomputed_bitreversed_coset_divisor.omega);
precomputed_bitreversed_coset_divisor.scale(&worker, E::Fr::multiplicative_generator());
precomputed_bitreversed_coset_divisor.bitreverse_enumeration(&worker);
let mut scratch_space_numerator = final_aggregate.clone();
let mut scratch_space_denominator = final_aggregate.clone();
// Running power of the aggregation challenge.
let mut alpha = E::Fr::one();
for witness_request in witness_opening_requests.iter() {
let z = witness_request.opening_point;
let mut minus_z = z;
minus_z.negate();
// denominator := 1 / (x - z), via batch inversion over the coset.
scratch_space_denominator.reuse_allocation(&precomputed_bitreversed_coset_divisor);
scratch_space_denominator.add_constant(&worker, &minus_z);
scratch_space_denominator.batch_inversion(&worker)?;
for (poly, value) in witness_request.polynomials.iter().zip(witness_request.opening_values.iter()) {
// numerator := p(x) - p(z)
scratch_space_numerator.reuse_allocation(&poly);
let mut v = *value;
v.negate();
scratch_space_numerator.add_constant(&worker, &v);
scratch_space_numerator.mul_assign(&worker, &scratch_space_denominator);
// Scaling is skipped when the challenge is one: every power of it
// is one as well, so the term is already correctly weighted.
if aggregation_challenge != E::Fr::one() {
scratch_space_numerator.scale(&worker, alpha);
}
final_aggregate.add_assign(&worker, &scratch_space_numerator);
alpha.mul_assign(&aggregation_challenge);
}
}
for setup_request in setup_opening_requests.iter() {
// for now assume a single setup point per poly and setup point is the same for all polys
// (omega - y)(omega - z) = omega^2 - (z+y)*omega + zy
// Expanded divisor coefficients: t0 = -(y + z), t1 = y * z.
let setup_point = setup_request.setup_point;
let opening_point = setup_request.opening_point;
let mut t0 = setup_point;
t0.add_assign(&opening_point);
t0.negate();
let mut t1 = setup_point;
t1.mul_assign(&opening_point);
// denominator := 1 / ((x - setup_point)(x - opening_point)),
// evaluated pointwise over the coset, then batch-inverted.
scratch_space_denominator.reuse_allocation(&precomputed_bitreversed_coset_divisor);
worker.scope(scratch_space_denominator.as_ref().len(), |scope, chunk| {
for den in scratch_space_denominator.as_mut().chunks_mut(chunk) {
scope.spawn(move |_| {
for d in den.iter_mut() {
let mut result = *d;
result.square();
result.add_assign(&t1);
let mut tmp = t0;
tmp.mul_assign(&d);
result.add_assign(&tmp);
*d = result;
}
});
}
});
scratch_space_denominator.batch_inversion(&worker)?;
// each numerator must have a value removed of the polynomial that interpolates the following points:
// (setup_x, setup_y)
// (opening_x, opening_y)
// such polynomial is linear and has a form e.g setup_y + (X - setup_x) * (witness_y - setup_y) / (witness_x - setup_x)
for ((poly, value), setup_value) in setup_request.polynomials.iter().zip(setup_request.opening_values.iter()).zip(setup_request.setup_values.iter()) {
scratch_space_numerator.reuse_allocation(&poly);
let intercept = setup_value;
let mut t0 = opening_point;
t0.sub_assign(&setup_point);
let mut slope = t0.inverse().expect("must exist");
let mut t1 = *value;
t1.sub_assign(&setup_value);
slope.mul_assign(&t1);
// numerator := p(x) - (intercept + slope * (x - setup_point)).
worker.scope(scratch_space_numerator.as_ref().len(), |scope, chunk| {
for (num, omega) in scratch_space_numerator.as_mut().chunks_mut(chunk).
zip(precomputed_bitreversed_coset_divisor.as_ref().chunks(chunk)) {
scope.spawn(move |_| {
for (n, omega) in num.iter_mut().zip(omega.iter()) {
let mut result = *omega;
result.sub_assign(&setup_point);
result.mul_assign(&slope);
result.add_assign(&intercept);
n.sub_assign(&result);
}
});
}
});
scratch_space_numerator.mul_assign(&worker, &scratch_space_denominator);
if aggregation_challenge != E::Fr::one() {
scratch_space_numerator.scale(&worker, alpha);
}
final_aggregate.add_assign(&worker, &scratch_space_numerator);
alpha.mul_assign(&aggregation_challenge);
}
}
let fri_combiner = FriCombiner::initialize_for_domain_size(
required_divisor_size,
LDE_FACTOR,
1,
E::Fr::multiplicative_generator(),
FRI_VALUES_PER_LEAF,
self.tree_hasher.clone(),
);
println!("Start making FRI oracles");
// The aggregate is already in bit-reversed enumeration, as FRI expects.
let oracles = fri_combiner.perform_fri_assuming_bitreversed(
&final_aggregate.as_ref(),
prng,
&worker
)?;
Ok(oracles)
}
pub(crate) fn fifth_step_from_fourth_step<P: Prng<E::Fr, Input = H::Output>>(
&self,
fourth_state: FourthPartialProverState<E, H>,
fourth_verifier_message: FourthVerifierMessage<E>,
setup: &SetupMultioracle<E, H>,
prng: &mut P,
worker: &Worker
) -> Result<FifthProverMessage<E, H>, SynthesisError>
{
let FourthVerifierMessage { z, v, .. } = fourth_verifier_message;
let required_domain_size = fourth_state.required_domain_size;
let domain = Domain::new_for_size(required_domain_size as u64)?;
let mut z_by_omega = z;
z_by_omega.mul_assign(&domain.generator);
// now we need to sort polynomials and gates by
// - first filter setup polynomials
// - each setup is opened separately at reference point and required point
// - then filter witness polys
// - open them at every required point
println!("Start making setup opening requests");
let mut setup_opening_requests = vec![];
// TODO: do better
{
let mut setup_values = vec![];
let mut setup_poly_refs = vec![];
for (i, _) in setup.setup_ids.iter().enumerate() {
setup_values.push(setup.setup_poly_values[i]);
setup_poly_refs.push(&setup.polynomial_ldes[i]);
}
let range_of_permutation_polys = setup.permutations_ranges[0].clone();
for (value, perm_ref) in setup.setup_poly_values[range_of_permutation_polys.clone()].iter()
.zip(setup.polynomial_ldes[range_of_permutation_polys].iter())
{
setup_values.push(*value);
setup_poly_refs.push(perm_ref);
}
for selector_poly_idx in setup.gate_selectors_indexes.iter() {
let poly_ref = &setup.polynomial_ldes[*selector_poly_idx];
let value = setup.setup_poly_values[*selector_poly_idx];
setup_values.push(value);
setup_poly_refs.push(poly_ref);
}
let mut opening_values = vec![];
opening_values.extend_from_slice(&fourth_state.setup_values_at_z[..]);
opening_values.extend_from_slice(&fourth_state.permutation_polynomials_at_z[..]);
opening_values.extend_from_slice(&fourth_state.gate_selector_polynomials_at_z[..]);
assert_eq!(setup_values.len(), opening_values.len(), "number of setup values is not equal to number of opening values");
let request = SetupOpeningRequest {
polynomials: setup_poly_refs,
setup_point: setup.setup_point,
setup_values: setup_values,
opening_point: z,
opening_values: opening_values
};
setup_opening_requests.push(request);
}
println!("Start making witness opening assignments");
let mut witness_opening_requests = vec![];
let opening_points = vec![z, z_by_omega];
let storages = vec![&fourth_state.wire_values_at_z, &fourth_state.wire_values_at_z_omega];
for dilation in 0usize..=1usize {
let mut per_dilation_set = vec![];
for gate in self.sorted_gates.iter() {
for constraint in gate.get_constraints().iter() {
for term in constraint.0.iter() {
for poly in term.1.iter() {
match poly {
PolynomialInConstraint::VariablesPolynomial(
poly_num, TimeDilation(dil)
) => {
if dil == &dilation {
if !per_dilation_set.contains(poly_num) {
per_dilation_set.push(*poly_num)
}
}
},
_ => {}
}
}
}
}
}
let mut opening_values = vec![];
let mut opening_refs = vec![];
let open_at = opening_points[dilation];
let storage = storages[dilation];
for id in per_dilation_set.into_iter() {
let poly_ref = &fourth_state.witness_polys_ldes[id];
let mut tmp: Vec<_> = storage.iter().filter(
|el| el.0 == id
).collect();
assert_eq!(tmp.len(), 1);
let value = tmp.pop().unwrap().1;
opening_values.push(value);
opening_refs.push(poly_ref);
}
if dilation == 0 {
opening_values.push(fourth_state.grand_product_at_z);
opening_refs.push(&fourth_state.grand_product_polynomial_lde[0]);
} else if dilation == 1 {
opening_values.push(fourth_state.grand_product_at_z_omega);
opening_refs.push(&fourth_state.grand_product_polynomial_lde[0]);
}
let request = WitnessOpeningRequest {
polynomials: opening_refs,
opening_point: open_at,
opening_values: opening_values
};
witness_opening_requests.push(request);
}
let fri_oracles_set = self.perform_fri(
v,
witness_opening_requests,
setup_opening_requests,
&worker,
prng
)?;
let commitments = fri_oracles_set.intermediate_roots.clone();
let coeffs = fri_oracles_set.final_coefficients.clone();
let message = FifthProverMessage {
fri_intermediate_roots: commitments,
final_coefficients: coeffs,
};
let num_queries = 32;
use super::multioracle::Multioracle;
{
let idx_start = 0;
let indexes: Vec<usize> = (idx_start..(idx_start+FRI_VALUES_PER_LEAF)).collect();
let setup_tree_params = setup.tree.params.clone();
let witness_tree_params = fourth_state.witness_multioracle_tree.params.clone();
let grand_product_tree_params = fourth_state.grand_product_polynomial_multioracle_tree.params.clone();
let t_poly_tree_params = fourth_state.t_poly_parts_multioracle_tree.params.clone();
let mut fri_subtrees_params = vec![];
for s in fri_oracles_set.intermediate_oracles.iter() {
fri_subtrees_params.push(s.params.clone());
}
let setup_query = setup.tree.produce_multiquery(
indexes.clone(),
setup.polynomial_ldes.len(),
&Multioracle::<E, H>::combine_leafs(
&setup.polynomial_ldes,
FRI_VALUES_PER_LEAF,
&worker,
)
);
let witness_query = fourth_state.witness_multioracle_tree.produce_multiquery(
indexes.clone(),
fourth_state.witness_polys_ldes.len(),
&Multioracle::<E, H>::combine_leafs(
&fourth_state.witness_polys_ldes,
FRI_VALUES_PER_LEAF,
&worker,
)
);
let grand_product_query = fourth_state.grand_product_polynomial_multioracle_tree.produce_multiquery(
indexes.clone(),
fourth_state.grand_product_polynomial_lde.len(),
&Multioracle::<E, H>::combine_leafs(
&fourth_state.grand_product_polynomial_lde,
FRI_VALUES_PER_LEAF,
&worker,
)
);
let quotient_query = fourth_state.t_poly_parts_multioracle_tree.produce_multiquery(
indexes.clone(),
fourth_state.t_poly_parts_ldes.len(),
&Multioracle::<E, H>::combine_leafs(
&fourth_state.t_poly_parts_ldes,
FRI_VALUES_PER_LEAF,
&worker,
)
);
let mut fri_queries = vec![];
for ((vals, tree), params) in fri_oracles_set.intermediate_leaf_values.iter()
.zip(fri_oracles_set.intermediate_oracles.iter())
.zip(fri_subtrees_params.iter())
{
let idx_start = 0;
let indexes: Vec<usize> = (idx_start..(idx_start + params.values_per_leaf)).collect();
let query = tree.produce_query(
indexes,
&vals
);
fri_queries.push(query);
}
let hasher = setup.tree.tree_hasher.clone();
let _ = BinaryTree::verify_multiquery(
&setup.tree.get_commitment(),
&setup_query,
&setup_tree_params,
&hasher
);
let _ = BinaryTree::verify_multiquery(
&setup.tree.get_commitment(),
&witness_query,
&witness_tree_params,
&hasher
);
let _ = BinaryTree::verify_multiquery(
&setup.tree.get_commitment(),
&grand_product_query,
&grand_product_tree_params,
&hasher
);
let _ = BinaryTree::verify_multiquery(
&setup.tree.get_commitment(),
"ient_query,
&t_poly_tree_params,
&hasher
);
for (query, params) in fri_queries.into_iter()
.zip(fri_subtrees_params.iter()) {
let _ = BinaryTree::verify_query(
&setup.tree.get_commitment(),
&query,
¶ms,
&hasher
);
}
}
Ok(message)
}
}
<file_sep>/run_diff_windows_nightly.sh
#!/bin/sh
# Run the (normally `#[ignore]`d) multiexp window-size comparison test in
# release mode on nightly Rust, with native-CPU codegen and the BMI2/ADX/
# SSE4.1 target features needed by the "asm" feature enabled.
RUSTFLAGS="-C target-cpu=native -C target_feature=+bmi2,+adx,+sse4.1" cargo +nightly test --release --features "asm" -- --ignored --nocapture test_large_data_different_windows_multiexp
<file_sep>/src/gm17/mod.rs
use crate::pairing::{
Engine,
CurveAffine,
EncodedPoint
};
use crate::{
SynthesisError
};
use crate::source::SourceBuilder;
use std::io::{self, Read, Write};
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
#[cfg(test)]
mod tests;
mod generator;
// mod prover;
// mod verifier;
pub use self::generator::*;
// pub use self::prover::*;
// pub use self::verifier::*;
/// A GM17 proof: two G1 elements and one G2 element.
#[derive(Debug, Clone)]
pub struct Proof<E: Engine> {
// Proof element A (serialized first by `write`).
pub a: E::G1Affine,
// Proof element B.
pub b: E::G2Affine,
// Proof element C.
pub c: E::G1Affine
}
impl<E: Engine> PartialEq for Proof<E> {
    /// Two proofs are equal exactly when all three group elements coincide.
    fn eq(&self, other: &Self) -> bool {
        (&self.a, &self.b, &self.c) == (&other.a, &other.b, &other.c)
    }
}
impl<E: Engine> Proof<E> {
/// Serializes the proof as the compressed encodings of `a`, `b`, `c`,
/// in that order.
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
writer.write_all(self.a.into_compressed().as_ref())?;
writer.write_all(self.b.into_compressed().as_ref())?;
writer.write_all(self.c.into_compressed().as_ref())?;
Ok(())
}
/// Deserializes a proof written by `write`.
///
/// Each element is rejected with `InvalidData` if it fails to decode or
/// decodes to the point at infinity.
pub fn read<R: Read>(
mut reader: R
) -> io::Result<Self>
{
let mut g1_repr = <E::G1Affine as CurveAffine>::Compressed::empty();
let mut g2_repr = <E::G2Affine as CurveAffine>::Compressed::empty();
reader.read_exact(g1_repr.as_mut())?;
let a = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
reader.read_exact(g2_repr.as_mut())?;
let b = g2_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
// `g1_repr` is reused for the third element.
reader.read_exact(g1_repr.as_mut())?;
let c = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
Ok(Proof {
a: a,
b: b,
c: c
})
}
}
/// GM17 verifying key.
#[derive(Clone)]
pub struct VerifyingKey<E: Engine> {
// h in g2 for verifying.
pub h_g2: E::G2Affine,
// alpha in g1 for verifying and for creating A/C elements of
// proof. Never the point at infinity.
pub alpha_g1: E::G1Affine,
// beta in g2 for verifying. Never the point at infinity.
pub beta_g2: E::G2Affine,
// gamma in g1 for verifying. Never the point at infinity.
pub gamma_g1: E::G1Affine,
// gamma in g2 for verifying. Never the point at infinity.
pub gamma_g2: E::G2Affine,
// Elements of the form G^{gamma * A_i(t) + (alpha + beta) * A_i(t)}
// for all public inputs. Because all public inputs have a dummy constraint,
// this is the same size as the number of inputs, and never contains points
// at infinity.
pub ic: Vec<E::G1Affine>
}
impl<E: Engine> PartialEq for VerifyingKey<E> {
    /// Field-wise equality over every component of the key.
    fn eq(&self, other: &Self) -> bool {
        (&self.h_g2, &self.alpha_g1, &self.beta_g2, &self.gamma_g1, &self.gamma_g2, &self.ic)
            == (&other.h_g2, &other.alpha_g1, &other.beta_g2, &other.gamma_g1, &other.gamma_g2, &other.ic)
    }
}
impl<E: Engine> VerifyingKey<E> {
/// Serializes the key: the five fixed group elements in uncompressed form,
/// then a big-endian u32 length prefix followed by the `ic` elements.
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
writer.write_all(self.h_g2.into_uncompressed().as_ref())?;
writer.write_all(self.alpha_g1.into_uncompressed().as_ref())?;
writer.write_all(self.beta_g2.into_uncompressed().as_ref())?;
writer.write_all(self.gamma_g1.into_uncompressed().as_ref())?;
writer.write_all(self.gamma_g2.into_uncompressed().as_ref())?;
writer.write_u32::<BigEndian>(self.ic.len() as u32)?;
for ic in &self.ic {
writer.write_all(ic.into_uncompressed().as_ref())?;
}
Ok(())
}
/// Deserializes a key written by `write`.
///
/// Only the `ic` elements are rejected when they decode to the point at
/// infinity; the five fixed elements are accepted as-is.
/// NOTE(review): the field comments above say the fixed elements are
/// "never the point at infinity", but this reader does not enforce that —
/// confirm whether a check should be added here.
pub fn read<R: Read>(
mut reader: R
) -> io::Result<Self>
{
let mut g1_repr = <E::G1Affine as CurveAffine>::Uncompressed::empty();
let mut g2_repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();
reader.read_exact(g2_repr.as_mut())?;
// local is named `h_h2` (sic) but holds the `h_g2` field
let h_h2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g1_repr.as_mut())?;
let alpha_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let beta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g1_repr.as_mut())?;
let gamma_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let gamma_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
let ic_len = reader.read_u32::<BigEndian>()? as usize;
let mut ic = vec![];
for _ in 0..ic_len {
reader.read_exact(g1_repr.as_mut())?;
let g1 = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
ic.push(g1);
}
Ok(VerifyingKey {
h_g2: h_h2,
alpha_g1: alpha_g1,
beta_g2: beta_g2,
gamma_g1: gamma_g1,
gamma_g2: gamma_g2,
ic: ic
})
}
}
/// GM17 prover parameters: the verifying key together with the CRS elements
/// used during proof creation.
/// NOTE(review): the semantics of the individual gamma/z CRS elements are not
/// documented at this definition site; the names appear to follow the GM17
/// CRS layout — confirm against the generator before relying on them.
#[derive(Clone)]
pub struct Parameters<E: Engine> {
pub vk: VerifyingKey<E>,
pub a_g1: Arc<Vec<E::G1Affine>>,
pub a_g2: Arc<Vec<E::G2Affine>>,
pub c_1_g1: Arc<Vec<E::G1Affine>>,
pub c_2_g1: Arc<Vec<E::G1Affine>>,
pub gamma_z: E::G1Affine,
pub gamma_z_g2: E::G2Affine,
pub ab_gamma_z_g1: E::G1Affine,
pub gamma2_z2_g1: E::G1Affine,
pub gamma2_z_t: Arc<Vec<E::G1Affine>>,
}
impl<E: Engine> PartialEq for Parameters<E> {
    /// Field-wise equality over the verifying key and every CRS component.
    fn eq(&self, other: &Self) -> bool {
        (
            &self.vk,
            &self.a_g1,
            &self.a_g2,
            &self.c_1_g1,
            &self.c_2_g1,
            &self.gamma_z,
            &self.gamma_z_g2,
            &self.ab_gamma_z_g1,
            &self.gamma2_z2_g1,
            &self.gamma2_z_t,
        ) == (
            &other.vk,
            &other.a_g1,
            &other.a_g2,
            &other.c_1_g1,
            &other.c_2_g1,
            &other.gamma_z,
            &other.gamma_z_g2,
            &other.ab_gamma_z_g1,
            &other.gamma2_z2_g1,
            &other.gamma2_z_t,
        )
    }
}
// impl<E: Engine> Parameters<E> {
// pub fn write<W: Write>(
// &self,
// mut writer: W
// ) -> io::Result<()>
// {
// self.vk.write(&mut writer)?;
// writer.write_u32::<BigEndian>(self.h.len() as u32)?;
// for g in &self.h[..] {
// writer.write_all(g.into_uncompressed().as_ref())?;
// }
// writer.write_u32::<BigEndian>(self.l.len() as u32)?;
// for g in &self.l[..] {
// writer.write_all(g.into_uncompressed().as_ref())?;
// }
// writer.write_u32::<BigEndian>(self.a.len() as u32)?;
// for g in &self.a[..] {
// writer.write_all(g.into_uncompressed().as_ref())?;
// }
// writer.write_u32::<BigEndian>(self.b_g1.len() as u32)?;
// for g in &self.b_g1[..] {
// writer.write_all(g.into_uncompressed().as_ref())?;
// }
// writer.write_u32::<BigEndian>(self.b_g2.len() as u32)?;
// for g in &self.b_g2[..] {
// writer.write_all(g.into_uncompressed().as_ref())?;
// }
// Ok(())
// }
// pub fn read<R: Read>(
// mut reader: R,
// checked: bool
// ) -> io::Result<Self>
// {
// let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
// let mut repr = <E::G1Affine as CurveAffine>::Uncompressed::empty();
// reader.read_exact(repr.as_mut())?;
// if checked {
// repr
// .into_affine()
// } else {
// repr
// .into_affine_unchecked()
// }
// .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
// .and_then(|e| if e.is_zero() {
// Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
// } else {
// Ok(e)
// })
// };
// let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
// let mut repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();
// reader.read_exact(repr.as_mut())?;
// if checked {
// repr
// .into_affine()
// } else {
// repr
// .into_affine_unchecked()
// }
// .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
// .and_then(|e| if e.is_zero() {
// Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
// } else {
// Ok(e)
// })
// };
// let vk = VerifyingKey::<E>::read(&mut reader)?;
// let mut h = vec![];
// let mut l = vec![];
// let mut a = vec![];
// let mut b_g1 = vec![];
// let mut b_g2 = vec![];
// {
// let len = reader.read_u32::<BigEndian>()? as usize;
// for _ in 0..len {
// h.push(read_g1(&mut reader)?);
// }
// }
// {
// let len = reader.read_u32::<BigEndian>()? as usize;
// for _ in 0..len {
// l.push(read_g1(&mut reader)?);
// }
// }
// {
// let len = reader.read_u32::<BigEndian>()? as usize;
// for _ in 0..len {
// a.push(read_g1(&mut reader)?);
// }
// }
// {
// let len = reader.read_u32::<BigEndian>()? as usize;
// for _ in 0..len {
// b_g1.push(read_g1(&mut reader)?);
// }
// }
// {
// let len = reader.read_u32::<BigEndian>()? as usize;
// for _ in 0..len {
// b_g2.push(read_g2(&mut reader)?);
// }
// }
// Ok(Parameters {
// vk: vk,
// h: Arc::new(h),
// l: Arc::new(l),
// a: Arc::new(a),
// b_g1: Arc::new(b_g1),
// b_g2: Arc::new(b_g2)
// })
// }
// }
// pub struct PreparedVerifyingKey<E: Engine> {
// /// Pairing result of alpha*beta
// alpha_g1_beta_g2: E::Fqk,
// /// -gamma in G2
// neg_gamma_g2: <E::G2Affine as CurveAffine>::Prepared,
// /// -delta in G2
// neg_delta_g2: <E::G2Affine as CurveAffine>::Prepared,
// /// Copy of IC from `VerifiyingKey`.
// ic: Vec<E::G1Affine>
// }
// pub trait ParameterSource<E: Engine> {
// type G1Builder: SourceBuilder<E::G1Affine>;
// type G2Builder: SourceBuilder<E::G2Affine>;
// fn get_vk(
// &mut self,
// num_ic: usize
// ) -> Result<VerifyingKey<E>, SynthesisError>;
// fn get_h(
// &mut self,
// num_h: usize
// ) -> Result<Self::G1Builder, SynthesisError>;
// fn get_l(
// &mut self,
// num_l: usize
// ) -> Result<Self::G1Builder, SynthesisError>;
// fn get_a(
// &mut self,
// num_inputs: usize,
// num_aux: usize
// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
// fn get_b_g1(
// &mut self,
// num_inputs: usize,
// num_aux: usize
// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
// fn get_b_g2(
// &mut self,
// num_inputs: usize,
// num_aux: usize
// ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>;
// }
// impl<'a, E: Engine> ParameterSource<E> for &'a Parameters<E> {
// type G1Builder = (Arc<Vec<E::G1Affine>>, usize);
// type G2Builder = (Arc<Vec<E::G2Affine>>, usize);
// fn get_vk(
// &mut self,
// _: usize
// ) -> Result<VerifyingKey<E>, SynthesisError>
// {
// Ok(self.vk.clone())
// }
// fn get_h(
// &mut self,
// _: usize
// ) -> Result<Self::G1Builder, SynthesisError>
// {
// Ok((self.h.clone(), 0))
// }
// fn get_l(
// &mut self,
// _: usize
// ) -> Result<Self::G1Builder, SynthesisError>
// {
// Ok((self.l.clone(), 0))
// }
// fn get_a(
// &mut self,
// num_inputs: usize,
// _: usize
// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>
// {
// Ok(((self.a.clone(), 0), (self.a.clone(), num_inputs)))
// }
// fn get_b_g1(
// &mut self,
// num_inputs: usize,
// _: usize
// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>
// {
// Ok(((self.b_g1.clone(), 0), (self.b_g1.clone(), num_inputs)))
// }
// fn get_b_g2(
// &mut self,
// num_inputs: usize,
// _: usize
// ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>
// {
// Ok(((self.b_g2.clone(), 0), (self.b_g2.clone(), num_inputs)))
// }
// }
// #[cfg(test)]
// mod test_with_bls12_381 {
// use super::*;
// use {Circuit, SynthesisError, ConstraintSystem};
// use rand::{Rand, thread_rng};
// use crate::pairing::ff::{Field};
// use crate::pairing::bls12_381::{Bls12, Fr};
// #[test]
// fn serialization() {
// struct MySillyCircuit<E: Engine> {
// a: Option<E::Fr>,
// b: Option<E::Fr>
// }
// impl<E: Engine> Circuit<E> for MySillyCircuit<E> {
// fn synthesize<CS: ConstraintSystem<E>>(
// self,
// cs: &mut CS
// ) -> Result<(), SynthesisError>
// {
// let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
// let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?;
// let c = cs.alloc_input(|| "c", || {
// let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?;
// let b = self.b.ok_or(SynthesisError::AssignmentMissing)?;
// a.mul_assign(&b);
// Ok(a)
// })?;
// cs.enforce(
// || "a*b=c",
// |lc| lc + a,
// |lc| lc + b,
// |lc| lc + c
// );
// Ok(())
// }
// }
// let rng = &mut thread_rng();
// let params = generate_random_parameters::<Bls12, _, _>(
// MySillyCircuit { a: None, b: None },
// rng
// ).unwrap();
// {
// let mut v = vec![];
// params.write(&mut v).unwrap();
// assert_eq!(v.len(), 2136);
// let de_params = Parameters::read(&v[..], true).unwrap();
// assert!(params == de_params);
// let de_params = Parameters::read(&v[..], false).unwrap();
// assert!(params == de_params);
// }
// let pvk = prepare_verifying_key::<Bls12>(&params.vk);
// for _ in 0..100 {
// let a = Fr::rand(rng);
// let b = Fr::rand(rng);
// let mut c = a;
// c.mul_assign(&b);
// let proof = create_random_proof(
// MySillyCircuit {
// a: Some(a),
// b: Some(b)
// },
// &params,
// rng
// ).unwrap();
// let mut v = vec![];
// proof.write(&mut v).unwrap();
// assert_eq!(v.len(), 192);
// let de_proof = Proof::read(&v[..]).unwrap();
// assert!(proof == de_proof);
// assert!(verify_proof(&pvk, &proof, &[c]).unwrap());
// assert!(!verify_proof(&pvk, &proof, &[a]).unwrap());
// }
// }
// }<file_sep>/src/sonic/helped/helper.rs
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::Parameters;
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::cs::{Circuit, Variable, Coeff};
use crate::sonic::srs::SRS;
use crate::sonic::sonic::CountNandQ;
/// An aggregated opening for a batch of `(Proof, SxyAdvice)` pairs, produced
/// by the `create_aggregate*` functions.
#[derive(Clone)]
pub struct Aggregate<E: Engine> {
// Commitment to s(z, Y)
pub c: E::G1Affine,
// We have to open each of the S commitments to a random point `z`
pub s_opening: E::G1Affine,
// We have to open C to each constituent `y`
pub c_openings: Vec<(E::G1Affine, E::Fr)>,
// Then we have to finally open C
pub opening: E::G1Affine,
// The challenge point `z` at which s(z, Y) was formed.
pub z: E::Fr,
// The challenge point `w` at which C is finally opened.
pub w: E::Fr,
}
/// Creates an aggregate using the circuit sizes (`n`, `q`) and the SRS that
/// are stored inside the prover `Parameters`.
pub fn create_aggregate<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    inputs: &[(Proof<E>, SxyAdvice<E>)],
    params: &Parameters<E>,
) -> Aggregate<E>
{
    create_aggregate_on_srs_using_information::<E, C, S>(
        circuit,
        inputs,
        &params.srs,
        params.vk.n,
        params.vk.q,
    )
}
/// Creates an aggregate directly from an SRS, first measuring the circuit by
/// running a counting synthesis pass to obtain `n` and `q`.
pub fn create_aggregate_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    inputs: &[(Proof<E>, SxyAdvice<E>)],
    srs: &SRS<E>,
) -> Aggregate<E>
{
    // TODO: precompute this?
    let mut counter = CountNandQ::<S>::new();
    S::synthesize(&mut counter, circuit).unwrap(); // TODO
    create_aggregate_on_srs_using_information::<E, C, S>(circuit, inputs, srs, counter.n, counter.q)
}
/// Build an `Aggregate` for a batch of proofs over the same circuit, given the
/// SRS and precomputed circuit sizes `n`/`q`.
///
/// Outline:
/// 1. re-derive each proof's `y` challenge and absorb each advice commitment
///    into a shared transcript, then draw the common evaluation point `z`;
/// 2. commit to s(z, X) as `c` and open it at a fresh challenge `w`;
/// 3. open `c` at every constituent `y`;
/// 4. open a random linear combination of the per-`y` S polynomials at `z`.
pub fn create_aggregate_on_srs_using_information<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    inputs: &[(Proof<E>, SxyAdvice<E>)],
    srs: &SRS<E>,
    n: usize,
    q: usize,
) -> Aggregate<E>
{
    let mut transcript = Transcript::new(&[]);
    let mut y_values: Vec<E::Fr> = Vec::with_capacity(inputs.len());
    for &(ref proof, ref sxyadvice) in inputs {
        {
            // Each proof's `y` comes from its own fresh transcript seeded with
            // that proof's `r` commitment, mirroring the prover's derivation.
            let mut transcript = Transcript::new(&[]);
            transcript.commit_point(&proof.r);
            y_values.push(transcript.get_challenge_scalar());
        }

        transcript.commit_point(&sxyadvice.s);
    }

    let z: E::Fr = transcript.get_challenge_scalar();

    // Compute s(z, Y)
    let (s_poly_negative, s_poly_positive) = {
        let mut tmp = SyEval::new(z, n, q);
        S::synthesize(&mut tmp, circuit).unwrap(); // TODO: propagate synthesis errors

        tmp.poly()
    };

    // Compute C = g^{s(z, x)}
    let c = multiexp(
        srs.g_positive_x_alpha[0..(n + q)]
            .iter()
            .chain_ext(srs.g_negative_x_alpha[0..n].iter()),
        s_poly_positive.iter().chain_ext(s_poly_negative.iter())
    ).into_affine();

    transcript.commit_point(&c);

    // Open C at w
    let w: E::Fr = transcript.get_challenge_scalar();

    let value = compute_value::<E>(&w, &s_poly_positive, &s_poly_negative);

    let opening = {
        // The opening polynomial carries the negated value as its constant term.
        let mut value = value;
        value.negate();

        polynomial_commitment_opening(
            n,
            0,
            s_poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(s_poly_positive.iter()),
            w,
            &srs
        )
    };

    // Let's open up C to every y.
    // Evaluates sum_i poly_positive[i] * y^{i+1} + sum_i poly_negative[i] * y^{-(i+1)}.
    fn compute_value<E: Engine>(y: &E::Fr, poly_positive: &[E::Fr], poly_negative: &[E::Fr]) -> E::Fr {
        let mut value = E::Fr::zero();
        let yinv = y.inverse().unwrap(); // TODO: handle y == 0 instead of unwrapping

        let positive_powers_contrib = evaluate_at_consequitive_powers(poly_positive, *y, *y);
        let negative_powers_contrib = evaluate_at_consequitive_powers(poly_negative, yinv, yinv);
        value.add_assign(&positive_powers_contrib);
        value.add_assign(&negative_powers_contrib);

        value
    }

    use std::time::Instant;
    let start = Instant::now();

    let mut c_openings = vec![];
    for y in &y_values {
        let value = compute_value::<E>(y, &s_poly_positive, &s_poly_negative);

        let opening = {
            let mut value = value;
            value.negate();

            polynomial_commitment_opening(
                n,
                0,
                s_poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(s_poly_positive.iter()),
                *y,
                &srs
            )
        };

        c_openings.push((opening, value));
    }

    println!("Evaluation of s(z, Y) taken {:?}", start.elapsed());

    // Okay, great. Now we need to open up each S at the same point z to the same value.
    // Since we're opening up all the S's at the same point, we create a bunch of random
    // challenges instead and open up a random linear combination.

    let mut poly_negative = vec![E::Fr::zero(); n];
    let mut poly_positive = vec![E::Fr::zero(); 2*n];
    let mut expected_value = E::Fr::zero();

    // TODO: this part can be further parallelized due to synthesis of S(X, y) being singlethreaded
    let start = Instant::now();

    for (y, c_opening) in y_values.iter().zip(c_openings.iter()) {
        // Compute s(X, y_i)
        let (s_poly_negative, s_poly_positive) = {
            let mut tmp = SxEval::new(*y, n);
            S::synthesize(&mut tmp, circuit).unwrap(); // TODO: propagate synthesis errors

            tmp.poly()
        };

        // Accumulate r_i * value_i and r_i * s(X, y_i) into the linear combination.
        let mut value = c_opening.1;
        let r: E::Fr = transcript.get_challenge_scalar();
        value.mul_assign(&r);
        expected_value.add_assign(&value);

        mul_add_polynomials(& mut poly_negative[..], &s_poly_negative[..], r);
        mul_add_polynomials(& mut poly_positive[..], &s_poly_positive[..], r);
    }

    println!("Re-evaluation of {} S polynomials taken {:?}", y_values.len(), start.elapsed());

    let s_opening = {
        let mut value = expected_value;
        value.negate();

        polynomial_commitment_opening(
            n,
            0,
            poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(poly_positive.iter()),
            z,
            &srs
        )
    };

    Aggregate {
        // Commitment to s(z, Y)
        c,
        // We have to open each of the S commitments to a random point `z`
        s_opening,
        // We have to open C to each constituent `y`
        c_openings,
        // Then we have to finally open C
        opening,
        z: z,
        w: w
    }
}
use crate::{
SynthesisError
};
/// Accessors for optional witness assignments.
pub trait Assignment<T> {
    /// Borrow the assigned value, or fail with `AssignmentMissing`.
    fn get(&self) -> Result<&T, SynthesisError>;
    /// Take the assigned value by move, or fail with `AssignmentMissing`.
    fn grab(self) -> Result<T, SynthesisError>;
}
impl<T: Clone> Assignment<T> for Option<T> {
fn get(&self) -> Result<&T, SynthesisError> {
match self {
Some(v) => Ok(v),
None => Err(SynthesisError::AssignmentMissing)
}
}
fn grab(self) -> Result<T, SynthesisError> {
match self {
Some(v) => Ok(v),
None => Err(SynthesisError::AssignmentMissing)
}
}
}<file_sep>/src/sonic/mod.rs
pub use crate::{SynthesisError};
pub mod sonic;
pub mod srs;
pub mod util;
pub mod helped;
pub mod cs;
pub mod unhelped;
mod transcript;
#[cfg(test)]
mod tests;
<file_sep>/src/plonk/plonk/generator.rs
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::{SynthesisError};
use std::marker::PhantomData;
use crate::plonk::cs::gates::*;
use crate::plonk::cs::*;
use crate::worker::*;
use crate::plonk::polynomials::*;
use crate::plonk::domains::*;
use crate::plonk::commitments::*;
use crate::plonk::utils::*;
use super::prover::ProvingAssembly;
/// Setup-time ("generator") assembly: records the circuit's gate structure
/// only — no witness values are kept, allocation closures are ignored.
#[derive(Debug)]
pub struct GeneratorAssembly<E: Engine> {
    // presumably a multiplication-gate counter — never updated in this file; TODO confirm
    pub(crate) m: usize,
    // number of aux gates added so far (input gates are counted separately)
    pub(crate) n: usize,
    // one constant-enforcing gate per declared public input
    pub(crate) input_gates: Vec<Gate<E::Fr>>,
    // all other gates
    pub(crate) aux_gates: Vec<Gate<E::Fr>>,

    pub(crate) num_inputs: usize,
    pub(crate) num_aux: usize,

    pub(crate) inputs_map: Vec<usize>,

    // set by `finalize()` once the total gate count is padded to 2^k - 1
    pub(crate) is_finalized: bool,
}
impl<E: Engine> ConstraintSystem<E> for GeneratorAssembly<E> {
    // const ZERO: Variable = Variable(Index::Aux(1));
    // const ONE: Variable = Variable(Index::Aux(2));

    // allocate a variable; the value closure is ignored at setup time
    fn alloc<F>(&mut self, _value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        self.num_aux += 1;
        let index = self.num_aux;

        Ok(Variable(Index::Aux(index)))
    }

    // allocate an input variable; also records a constant-enforcing gate for it
    // (placeholder value zero — the real input is only known at proving time)
    fn alloc_input<F>(&mut self, _value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        self.num_inputs += 1;
        let index = self.num_inputs;
        let input_var = Variable(Index::Input(index));

        let gate = Gate::<E::Fr>::new_enforce_constant_gate(input_var, Some(E::Fr::zero()), self.dummy_variable());
        self.input_gates.push(gate);

        Ok(input_var)
    }

    // enforce variable as boolean
    fn enforce_boolean(&mut self, variable: Variable) -> Result<(), SynthesisError> {
        let gate = Gate::<E::Fr>::new_enforce_boolean_gate(variable, self.dummy_variable());
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate an abstract gate with explicit selector coefficients
    fn new_gate(&mut self, variables: (Variable, Variable, Variable),
        coeffs:(E::Fr, E::Fr, E::Fr, E::Fr, E::Fr)) -> Result<(), SynthesisError>
    {
        let gate = Gate::<E::Fr>::new_gate(variables, coeffs);
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate a constant
    fn enforce_constant(&mut self, variable: Variable, constant: E::Fr) -> Result<(), SynthesisError>
    {
        let gate = Gate::<E::Fr>::new_enforce_constant_gate(variable, Some(constant), self.dummy_variable());
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate a multiplication gate over two variables
    fn enforce_mul_2(&mut self, variables: (Variable, Variable)) -> Result<(), SynthesisError> {
        // q_l, q_r, q_o, q_c = 0, q_m = 1
        let (v_0, v_1) = variables;
        let zero = E::Fr::zero();
        let one = E::Fr::one();

        let gate = Gate::<E::Fr>::new_gate((v_0, v_1, self.dummy_variable()), (zero, zero, zero, one, zero));
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate a multiplication gate over three variables (a * b = c)
    fn enforce_mul_3(&mut self, variables: (Variable, Variable, Variable)) -> Result<(), SynthesisError> {
        let gate = Gate::<E::Fr>::new_multiplication_gate(variables);
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate a linear combination gate over two variables
    fn enforce_zero_2(&mut self, variables: (Variable, Variable), coeffs:(E::Fr, E::Fr)) -> Result<(), SynthesisError>
    {
        let (v_0, v_1) = variables;
        let (c_0, c_1) = coeffs;
        let zero = E::Fr::zero();

        let gate = Gate::<E::Fr>::new_gate((v_0, v_1, self.dummy_variable()), (c_0, c_1, zero, zero, zero));
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate a linear combination gate over three variables
    fn enforce_zero_3(&mut self, variables: (Variable, Variable, Variable), coeffs:(E::Fr, E::Fr, E::Fr)) -> Result<(), SynthesisError>
    {
        let gate = Gate::<E::Fr>::new_enforce_zero_gate(variables, coeffs);
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    fn get_dummy_variable(&self) -> Variable {
        self.dummy_variable()
    }
}
impl<E: Engine> GeneratorAssembly<E> {
    // Reserve a slot for a gate to be filled in later via `set_gate`;
    // returns its 1-based index.
    fn new_empty_gate(&mut self) -> usize {
        self.n += 1;
        let index = self.n;

        self.aux_gates.push(Gate::<E::Fr>::empty());

        index
    }

    // Overwrite the gate at a 1-based index previously returned by `new_empty_gate`.
    fn set_gate(&mut self, gate: Gate<E::Fr>, index: usize) {
        self.aux_gates[index-1] = gate;
    }

    /// Create an empty assembly with the dummy "zero" variable preallocated
    /// at Aux(1) and constrained to the constant zero.
    pub fn new() -> Self {
        let mut tmp = Self {
            n: 0,
            m: 0,
            input_gates: vec![],
            aux_gates: vec![],

            num_inputs: 0,
            num_aux: 0,

            inputs_map: vec![],

            is_finalized: false,
        };

        let zero = tmp.alloc(|| Ok(E::Fr::zero())).expect("should have no issues");
        tmp.enforce_constant(zero, E::Fr::zero()).expect("should have no issues");

        // let one = tmp.alloc(|| Ok(E::Fr::one())).expect("should have no issues");
        // tmp.enforce_constant(one, E::Fr::one()).expect("should have no issues");

        // match (zero, <Self as ConstraintSystem<E>>::ZERO) {
        //     (Variable(Index::Aux(1)), Variable(Index::Aux(1))) => {},
        //     _ => panic!("zero variable is incorrect")
        // }

        // match (one, <Self as ConstraintSystem<E>>::ONE) {
        //     (Variable(Index::Aux(2)), Variable(Index::Aux(2))) => {},
        //     _ => panic!("one variable is incorrect")
        // }

        // sanity check: the dummy variable must coincide with the allocated zero
        match (tmp.dummy_variable(), zero) {
            (Variable(Index::Aux(1)), Variable(Index::Aux(1))) => {},
            _ => panic!("zero variable is incorrect")
        }

        assert_eq!(tmp.num_inputs, 0);
        assert_eq!(tmp.num_aux, 1);

        tmp
    }

    // return variable that is not in a constraint formally, but has some value
    fn dummy_variable(&self) -> Variable {
        // <Self as ConstraintSystem<E>>::ZERO
        Variable(Index::Aux(1))
    }

    /// Build the five selector polynomials (q_l, q_r, q_o, q_m, q_c) in value
    /// form over all gates, inputs first then aux. The aux portion is filled
    /// in parallel via the worker.
    pub(crate) fn make_circuit_description_polynomials(&self, worker: &Worker) -> Result<(
        Polynomial::<E::Fr, Values>, Polynomial::<E::Fr, Values>, Polynomial::<E::Fr, Values>,
        Polynomial::<E::Fr, Values>, Polynomial::<E::Fr, Values>
    ), SynthesisError> {
        assert!(self.is_finalized);

        let total_num_gates = self.input_gates.len() + self.aux_gates.len();
        let mut q_l = vec![E::Fr::zero(); total_num_gates];
        let mut q_r = vec![E::Fr::zero(); total_num_gates];
        let mut q_o = vec![E::Fr::zero(); total_num_gates];
        let mut q_m = vec![E::Fr::zero(); total_num_gates];
        let mut q_c = vec![E::Fr::zero(); total_num_gates];

        // convert the symbolic coefficient into a concrete field element
        fn coeff_into_field_element<F: PrimeField>(coeff: &Coeff<F>) -> F {
            match coeff {
                Coeff::Zero => {
                    F::zero()
                },
                Coeff::One => {
                    F::one()
                },
                Coeff::NegativeOne => {
                    let mut tmp = F::one();
                    tmp.negate();

                    tmp
                },
                Coeff::Full(c) => {
                    *c
                },
            }
        }

        // expect a small number of inputs, so handle them serially
        for (((((gate, q_l), q_r), q_o), q_m), q_c) in self.input_gates.iter()
                                            .zip(q_l.iter_mut())
                                            .zip(q_r.iter_mut())
                                            .zip(q_o.iter_mut())
                                            .zip(q_m.iter_mut())
                                            .zip(q_c.iter_mut())
        {
            *q_l = coeff_into_field_element::<E::Fr>(&gate.q_l);
            *q_r = coeff_into_field_element::<E::Fr>(&gate.q_r);
            *q_o = coeff_into_field_element::<E::Fr>(&gate.q_o);
            *q_m = coeff_into_field_element::<E::Fr>(&gate.q_m);
            *q_c = coeff_into_field_element::<E::Fr>(&gate.q_c);
        }

        let num_input_gates = self.input_gates.len();
        // aux gates fill the remaining tail of each selector vector
        let q_l_aux = &mut q_l[num_input_gates..];
        let q_r_aux = &mut q_r[num_input_gates..];
        let q_o_aux = &mut q_o[num_input_gates..];
        let q_m_aux = &mut q_m[num_input_gates..];
        let q_c_aux = &mut q_c[num_input_gates..];

        debug_assert!(self.aux_gates.len() == q_l_aux.len());

        worker.scope(self.aux_gates.len(), |scope, chunk| {
            for (((((gate, q_l), q_r), q_o), q_m), q_c) in self.aux_gates.chunks(chunk)
                                            .zip(q_l_aux.chunks_mut(chunk))
                                            .zip(q_r_aux.chunks_mut(chunk))
                                            .zip(q_o_aux.chunks_mut(chunk))
                                            .zip(q_m_aux.chunks_mut(chunk))
                                            .zip(q_c_aux.chunks_mut(chunk))
            {
                scope.spawn(move |_| {
                    for (((((gate, q_l), q_r), q_o), q_m), q_c) in gate.iter()
                                            .zip(q_l.iter_mut())
                                            .zip(q_r.iter_mut())
                                            .zip(q_o.iter_mut())
                                            .zip(q_m.iter_mut())
                                            .zip(q_c.iter_mut())
                    {
                        *q_l = coeff_into_field_element(&gate.q_l);
                        *q_r = coeff_into_field_element(&gate.q_r);
                        *q_o = coeff_into_field_element(&gate.q_o);
                        *q_m = coeff_into_field_element(&gate.q_m);
                        *q_c = coeff_into_field_element(&gate.q_c);
                    }
                });
            }
        });

        let q_l = Polynomial::from_values(q_l)?;
        let q_r = Polynomial::from_values(q_r)?;
        let q_o = Polynomial::from_values(q_o)?;
        let q_m = Polynomial::from_values(q_m)?;
        let q_c = Polynomial::from_values(q_c)?;

        Ok((q_l, q_r, q_o, q_m, q_c))
    }

    /// Compute the three wire copy-permutations (sigma_1/2/3) as in the PLONK
    /// paper: wires using the same variable form one partition, and each
    /// partition is turned into a cycle by rotating its member positions.
    /// Positions are 1-based: a-wires take 1..=num_gates, b-wires the next
    /// num_gates, c-wires the next.
    pub(crate) fn calculate_permutations_as_in_a_paper(&self) -> (Vec<usize>, Vec<usize>, Vec<usize>) {
        assert!(self.is_finalized);

        let num_gates = self.input_gates.len() + self.aux_gates.len();
        let num_partitions = self.num_inputs + self.num_aux;
        let num_inputs = self.num_inputs;
        // in the partition number i there is a set of indexes in V = (a, b, c) such that V_j = i
        let mut partitions = vec![vec![]; num_partitions + 1];

        for (j, gate) in self.input_gates.iter().chain(&self.aux_gates).enumerate()
        {
            match gate.a_wire() {
                Variable(Index::Input(index)) => {
                    let i = *index;
                    partitions[i].push(j+1);
                },
                Variable(Index::Aux(index)) => {
                    if *index != 0 {
                        let i = index + num_inputs;
                        partitions[i].push(j+1);
                    }
                },
            }

            match gate.b_wire() {
                Variable(Index::Input(index)) => {
                    let i = *index;
                    partitions[i].push(j + 1 + num_gates);
                },
                Variable(Index::Aux(index)) => {
                    if *index != 0 {
                        let i = index + num_inputs;
                        partitions[i].push(j + 1 + num_gates);
                    }
                },
            }

            match gate.c_wire() {
                Variable(Index::Input(index)) => {
                    let i = *index;
                    partitions[i].push(j + 1 + 2*num_gates);
                },
                Variable(Index::Aux(index)) => {
                    if *index != 0 {
                        let i = index + num_inputs;
                        partitions[i].push(j + 1 + 2*num_gates);
                    }
                },
            }
        }

        // start from the identity permutations and rewire cycle members below
        let mut sigma_1: Vec<_> = (1..=num_gates).collect();
        let mut sigma_2: Vec<_> = ((num_gates+1)..=(2*num_gates)).collect();
        let mut sigma_3: Vec<_> = ((2*num_gates + 1)..=(3*num_gates)).collect();

        let mut permutations = vec![vec![]; num_partitions + 1];

        // move the first element to the back: a cyclic left rotation by one
        fn rotate(mut vec: Vec<usize>) -> Vec<usize> {
            if vec.len() > 0 {
                let els: Vec<_> = vec.drain(0..1).collect();
                vec.push(els[0]);
            }

            vec
        }

        for (i, partition) in partitions.into_iter().enumerate().skip(1) {
            // copy-permutation should have a cycle around the partition
            let permutation = rotate(partition.clone());
            permutations[i] = permutation.clone();

            for (original, new) in partition.into_iter()
                                    .zip(permutation.into_iter())
            {
                // dispatch the rewiring into the sigma that owns this position
                if original <= num_gates {
                    debug_assert!(sigma_1[original - 1] == original);
                    sigma_1[original - 1] = new;
                } else if original <= 2*num_gates {
                    debug_assert!(sigma_2[original - num_gates - 1] == original);
                    sigma_2[original - num_gates - 1] = new;
                } else {
                    debug_assert!(sigma_3[original - 2*num_gates - 1] == original);
                    sigma_3[original - 2*num_gates - 1] = new;
                }
            }
        }

        (sigma_1, sigma_2, sigma_3)
    }

    // identity permutation 1..=total number of gates
    fn make_s_id(&self) -> Vec<usize> {
        assert!(self.is_finalized);

        let size = self.input_gates.len() + self.aux_gates.len();
        let result: Vec<_> = (1..=size).collect();

        result
    }

    /// Produce all setup polynomials in coefficient (monomial) form:
    /// selectors, the identity permutation and the three copy-permutations.
    pub(crate) fn output_setup_polynomials(&self, worker: &Worker) -> Result<
        (
            Polynomial::<E::Fr, Coefficients>, // q_l
            Polynomial::<E::Fr, Coefficients>, // q_r
            Polynomial::<E::Fr, Coefficients>, // q_o
            Polynomial::<E::Fr, Coefficients>, // q_m
            Polynomial::<E::Fr, Coefficients>, // q_c
            Polynomial::<E::Fr, Coefficients>, // s_id
            Polynomial::<E::Fr, Coefficients>, // sigma_1
            Polynomial::<E::Fr, Coefficients>, // sigma_2
            Polynomial::<E::Fr, Coefficients>, // sigma_3
        ), SynthesisError>
    {
        assert!(self.is_finalized);

        let s_id = self.make_s_id();
        let (sigma_1, sigma_2, sigma_3) = self.calculate_permutations_as_in_a_paper();

        // lift usize indices into field elements
        let s_id = convert_to_field_elements::<E::Fr>(&s_id, &worker);
        let sigma_1 = convert_to_field_elements::<E::Fr>(&sigma_1, &worker);
        let sigma_2 = convert_to_field_elements::<E::Fr>(&sigma_2, &worker);
        let sigma_3 = convert_to_field_elements::<E::Fr>(&sigma_3, &worker);

        let s_id = Polynomial::from_values(s_id)?;
        let sigma_1 = Polynomial::from_values(sigma_1)?;
        let sigma_2 = Polynomial::from_values(sigma_2)?;
        let sigma_3 = Polynomial::from_values(sigma_3)?;

        let (q_l, q_r, q_o, q_m, q_c) = self.make_circuit_description_polynomials(&worker)?;

        // interpolate: value form -> coefficient form
        let s_id = s_id.ifft(&worker);
        let sigma_1 = sigma_1.ifft(&worker);
        let sigma_2 = sigma_2.ifft(&worker);
        let sigma_3 = sigma_3.ifft(&worker);

        let q_l = q_l.ifft(&worker);
        let q_r = q_r.ifft(&worker);
        let q_o = q_o.ifft(&worker);
        let q_m = q_m.ifft(&worker);
        let q_c = q_c.ifft(&worker);

        Ok((q_l, q_r, q_o, q_m, q_c, s_id, sigma_1, sigma_2, sigma_3))
    }

    pub fn num_gates(&self) -> usize {
        self.input_gates.len() + self.aux_gates.len()
    }

    /// Pad the aux gates with empty gates so that (total gates + 1) is a
    /// power of two; idempotent.
    pub fn finalize(&mut self) {
        if self.is_finalized {
            return;
        }

        let n = self.input_gates.len() + self.aux_gates.len();
        if (n+1).is_power_of_two() {
            self.is_finalized = true;
            return;
        }

        let empty_gate = Gate::<E::Fr>::new_empty_gate(self.dummy_variable());

        let new_aux_len = (n+1).next_power_of_two() - 1 - self.input_gates.len();

        self.aux_gates.resize(new_aux_len, empty_gate);

        let n = self.input_gates.len() + self.aux_gates.len();
        assert!((n+1).is_power_of_two());

        self.is_finalized = true;
    }
}
use super::prover::*;
use crate::plonk::fft::cooley_tukey_ntt::CTPrecomputations;
use crate::pairing::CurveAffine;
/// Generate a PLONK setup — commitments to the selector and permutation
/// polynomials over `bases` — plus a precomputation holding a 4x bitreversed
/// coset LDE of each of the same nine polynomials for the prover.
pub fn setup_with_precomputations<E: Engine, C: Circuit<E>, CP: CTPrecomputations<E::Fr>>(
    circuit: &C,
    omegas_bitreversed: &CP,
    bases: &[E::G1Affine]
) -> Result<(PlonkSetup<E>, PlonkSetupPrecomputation<E>), SynthesisError>
{
    // synthesize and pad the circuit before extracting setup polynomials
    let mut assembly = GeneratorAssembly::<E>::new();
    circuit.synthesize(&mut assembly)?;
    assembly.finalize();

    let n = assembly.num_gates();

    let worker = Worker::new();

    let (q_l, q_r, q_o, q_m, q_c, s_id, sigma_1, sigma_2, sigma_3) = assembly.output_setup_polynomials(&worker)?;

    // commit to every setup polynomial individually
    let q_l_commitment_data = ProvingAssembly::<E>::commit_single_poly(&q_l, &bases, &worker)?;
    let q_r_commitment_data = ProvingAssembly::<E>::commit_single_poly(&q_r, &bases, &worker)?;
    let q_o_commitment_data = ProvingAssembly::<E>::commit_single_poly(&q_o, &bases, &worker)?;
    let q_m_commitment_data = ProvingAssembly::<E>::commit_single_poly(&q_m, &bases, &worker)?;
    let q_c_commitment_data = ProvingAssembly::<E>::commit_single_poly(&q_c, &bases, &worker)?;
    let s_id_commitment_data = ProvingAssembly::<E>::commit_single_poly(&s_id, &bases, &worker)?;
    let sigma_1_commitment_data = ProvingAssembly::<E>::commit_single_poly(&sigma_1, &bases, &worker)?;
    let sigma_2_commitment_data = ProvingAssembly::<E>::commit_single_poly(&sigma_2, &bases, &worker)?;
    let sigma_3_commitment_data = ProvingAssembly::<E>::commit_single_poly(&sigma_3, &bases, &worker)?;

    let setup = PlonkSetup::<E> {
        n: n,
        q_l: q_l_commitment_data,
        q_r: q_r_commitment_data,
        q_o: q_o_commitment_data,
        q_m: q_m_commitment_data,
        q_c: q_c_commitment_data,
        s_id: s_id_commitment_data,
        sigma_1: sigma_1_commitment_data,
        sigma_2: sigma_2_commitment_data,
        sigma_3: sigma_3_commitment_data,
    };

    let coset_generator = E::Fr::multiplicative_generator();

    // 4x low-degree extensions on a coset, in bitreversed order, for the prover
    let q_l_lde = q_l.bitreversed_lde_using_bitreversed_ntt(&worker, 4, omegas_bitreversed, &coset_generator)?;
    let q_r_lde = q_r.bitreversed_lde_using_bitreversed_ntt(&worker, 4, omegas_bitreversed, &coset_generator)?;
    let q_o_lde = q_o.bitreversed_lde_using_bitreversed_ntt(&worker, 4, omegas_bitreversed, &coset_generator)?;
    let q_m_lde = q_m.bitreversed_lde_using_bitreversed_ntt(&worker, 4, omegas_bitreversed, &coset_generator)?;
    let q_c_lde = q_c.bitreversed_lde_using_bitreversed_ntt(&worker, 4, omegas_bitreversed, &coset_generator)?;
    let s_id_lde = s_id.bitreversed_lde_using_bitreversed_ntt(&worker, 4, omegas_bitreversed, &coset_generator)?;
    let sigma_1_lde = sigma_1.bitreversed_lde_using_bitreversed_ntt(&worker, 4, omegas_bitreversed, &coset_generator)?;
    let sigma_2_lde = sigma_2.bitreversed_lde_using_bitreversed_ntt(&worker, 4, omegas_bitreversed, &coset_generator)?;
    let sigma_3_lde = sigma_3.bitreversed_lde_using_bitreversed_ntt(&worker, 4, omegas_bitreversed, &coset_generator)?;

    let precomputation = PlonkSetupPrecomputation::<E> {
        q_l_aux: q_l_lde,
        q_r_aux: q_r_lde,
        q_o_aux: q_o_lde,
        q_m_aux: q_m_lde,
        q_c_aux: q_c_lde,
        s_id_aux: s_id_lde,
        sigma_1_aux: sigma_1_lde,
        sigma_2_aux: sigma_2_lde,
        sigma_3_aux: sigma_3_lde
    };

    Ok((setup, precomputation))
}
use crate::log::Stopwatch;
use rand::Rng;
use std::sync::Arc;
use std::collections::HashMap;
use crate::pairing::{
Engine,
Wnaf,
CurveProjective,
CurveAffine
};
use crate::pairing::ff::{
PrimeField,
Field
};
use super::{
IndexedSetup,
};
use crate::{
SynthesisError,
Circuit,
ConstraintSystem,
LinearCombination,
Variable,
Index
};
use crate::domain::{
EvaluationDomain,
Scalar
};
use crate::worker::{
Worker
};
use crate::plonk::polynomials::*;
/// This is our assembly structure that we'll use to synthesize the
/// circuit into to perform indexing
struct KeypairAssembly<E: Engine> {
    num_inputs: usize,
    num_aux: usize,
    num_constraints: usize,
    // running counts of non-zero coefficients in each matrix
    num_non_zero_in_a: usize,
    num_non_zero_in_b: usize,
    num_non_zero_in_c: usize,
    // one linear combination per constraint (row); each holds (variable, coeff) terms
    a_rows: Vec<LinearCombination<E>>,
    b_rows: Vec<LinearCombination<E>>,
    c_rows: Vec<LinearCombination<E>>,
    // scratch map reused across `enforce` calls to merge duplicate variables in an LC
    deduplication_scratch: HashMap<Variable, E::Fr>,
}
impl<E: Engine> KeypairAssembly<E> {
    /// Pad the system so that #variables == #constraints (square matrices).
    fn pad_to_square(&mut self) -> Result<(), SynthesisError> {
        let size = if self.num_inputs + self.num_aux >= self.num_constraints {
            self.num_inputs + self.num_aux
        } else {
            self.num_constraints
        };

        self.pad_square_to_size(size)
    }

    /// Grow both the variable set (filler allocations of `one`) and the
    /// constraint rows (empty linear combinations) up to `size`.
    fn pad_square_to_size(&mut self, size: usize) -> Result<(), SynthesisError> {
        for _ in (self.num_inputs + self.num_aux)..size {
            self.alloc(|| "", || {
                Ok(E::Fr::one())
            })?;
        }

        self.a_rows.resize(size, LinearCombination::zero());
        self.b_rows.resize(size, LinearCombination::zero());
        self.c_rows.resize(size, LinearCombination::zero());

        self.num_constraints = size;

        Ok(())
    }

    /// Consume the assembly and return indexer input: domain H size (next
    /// power of two of #variables), domain K size (next power of two of the
    /// maximum per-matrix non-zero count), and the A/B/C matrices as sparse
    /// rows of (flattened column index, coefficient).
    fn into_indexer_input(self, _worker: &Worker) ->
        Result<(usize, usize, (Vec<Vec<(usize, E::Fr)>>, Vec<Vec<(usize, E::Fr)>> , Vec<Vec<(usize, E::Fr)>>)), SynthesisError>
    {
        let domain_h_size = self.num_inputs + self.num_aux;
        let domain_h_size = domain_h_size.next_power_of_two();

        let domain_k_size = *[self.num_non_zero_in_a, self.num_non_zero_in_b, self.num_non_zero_in_c].iter().max().expect("must exist");
        let domain_k_size = domain_k_size.next_power_of_two();

        // flatten variables into a single index space: inputs first, then aux
        fn into_sparse_matrix<E: Engine>(
            constraints: Vec<LinearCombination<E>>,
            num_inputs: usize)
        -> Vec<Vec<(usize, E::Fr)>> {
            let mut result = Vec::with_capacity(constraints.len());
            for row in constraints.into_iter() {
                let mut new = Vec::with_capacity(row.0.len());
                for (var, coeff) in row.0.into_iter() {
                    match var {
                        Variable(Index::Input(i)) => {
                            new.push((i, coeff));
                        },
                        Variable(Index::Aux(i)) => {
                            new.push((i+num_inputs, coeff));
                        }
                    }
                }

                result.push(new);
            }

            result
        }

        let num_inputs = self.num_inputs;

        let a_matrix = into_sparse_matrix(self.a_rows, num_inputs);
        let b_matrix = into_sparse_matrix(self.b_rows, num_inputs);
        let c_matrix = into_sparse_matrix(self.c_rows, num_inputs);

        Ok((
            domain_h_size,
            domain_k_size,
            (a_matrix, b_matrix, c_matrix)
        ))
    }
}
use crate::plonk::domains::Domain;
/// Materialize all elements of `domain` in natural order: values[i] = g^i
/// where g is the domain generator. Work is chunked across worker threads;
/// each chunk seeds itself with g^(i*chunk) and then multiplies up serially.
pub(crate) fn materialize_domain_elements<F: PrimeField>(domain: &Domain<F>, worker: &Worker) -> Vec<F> {
    let mut values = vec![F::zero(); domain.size as usize];
    let generator = domain.generator;

    worker.scope(values.len(), |scope, chunk| {
        for (i, values) in values.chunks_mut(chunk).enumerate()
        {
            scope.spawn(move |_| {
                // seed this chunk with the generator raised to its starting offset
                let mut current_power = generator.pow(&[(i*chunk) as u64]);

                for p in values {
                    *p = current_power;
                    current_power.mul_assign(&generator);
                }
            });
        }
    });

    values
}
/// Evaluate the unnormalized bivariate Lagrange polynomial on the diagonal
/// x == y over `evaluate_on_domain`: each output element is
/// size * x^{size - 1}, with size = `vanishing_degree`.
/// (The "diaginal" spelling is a historic typo kept for caller compatibility.)
pub(crate) fn eval_unnormalized_bivariate_lagrange_poly_over_diaginal<F: PrimeField>(
    vanishing_degree: u64,
    evaluate_on_domain: &Domain<F>,
    worker: &Worker
) -> Vec<F> {
    let mut values = vec![F::zero(); evaluate_on_domain.size as usize];

    // lift the vanishing degree into a field element
    let mut repr = F::Repr::default();
    repr.as_mut()[0] = vanishing_degree;
    let size_as_fe = F::from_repr(repr).expect("must convert domain size into field element");

    // we need to calculate elements like X^{S - 1} where S is a domain size
    // so for omega^0 we have omega ^ 0 = 1
    // for omega^1 we have omega^{S-1}
    // for omega^2 we have (omega^2)^{S-1} = (omega^{S-1}) ^ 2
    // and continue to distribute powers this way
    let generator_in_size_minus_one = evaluate_on_domain.generator.pow(&[vanishing_degree - 1u64]);

    // each element is size * X ^ {size - 1}, so we just distribute powers of `generator_in_size_minus_one`
    worker.scope(values.len(), |scope, chunk| {
        for (i, values) in values.chunks_mut(chunk).enumerate()
        {
            scope.spawn(move |_| {
                let mut current_power = generator_in_size_minus_one.pow(&[(i*chunk) as u64]);
                // fold in the multiplicative factor `size` once per chunk seed
                current_power.mul_assign(&size_as_fe);

                for p in values {
                    *p = current_power;
                    current_power.mul_assign(&generator_in_size_minus_one);
                }
            });
        }
    });

    values
}
/// Evaluate the unnormalized bivariate Lagrange polynomial at a point `alpha`
/// off the domain, for every element of `evaluate_on_domain`:
/// vanishing(alpha) / (alpha - x) for each domain element x, computed with a
/// single batch inversion.
pub(crate) fn eval_unnormalized_bivariate_lagrange_poly_over_different_inputs<F: PrimeField>(
    alpha: F,
    vanishing_poly_size: u64,
    evaluate_on_domain: &Domain<F>,
    worker: &Worker
) -> Vec<F> {
    // (vanishing(X) - vanishing(alpha)) / (x - alpha)
    // we evaluate it on the domain where vanishing(X) == 0
    // and make it as
    // vanishing(alpha) / (alpha - x)
    let vanishing_at_alpha = evaluate_vanishing_for_size(&alpha, vanishing_poly_size);
    let inv_vanishing_at_alpha = vanishing_at_alpha.inverse().ok_or(SynthesisError::DivisionByZero).expect("should not vanish on random x");
    let inverses = materialize_domain_elements(evaluate_on_domain, &worker);
    let mut inverses = Polynomial::from_values(inverses).expect("must fit into the domain");

    // map each x to (alpha - x) / vanishing(alpha); inverting afterwards
    // yields vanishing(alpha) / (alpha - x)
    inverses.map(&worker, |element| {
        let mut tmp = alpha;
        tmp.sub_assign(&*element);
        tmp.mul_assign(&inv_vanishing_at_alpha);

        *element = tmp;
    });

    inverses.batch_inversion(&worker).expect("must inverse as there are no zeroes");

    inverses.into_coeffs()
}
/// Map an element index from the smaller `domain_0` into the larger
/// `domain_1`, both in natural ordering.
pub(crate) fn reindex_from_one_domain_to_another_assuming_natural_ordering<F: PrimeField>(
    domain_0: &Domain<F>,
    domain_1: &Domain<F>,
    index: usize
) -> usize {
    assert!(domain_0.size <= domain_1.size);

    // In natural ordering, enlarging the domain by `stride` sends element i
    // to position i * stride.
    let stride = (domain_1.size / domain_0.size) as usize;

    index * stride
}
/// Map an element index from the smaller `domain_0` into the larger
/// `domain_1`, both in bitreversed ordering — the index is unchanged.
fn reindex_from_one_domain_to_another_assuming_bitreversed_ordering<F: PrimeField>(
    domain_0: &Domain<F>,
    domain_1: &Domain<F>,
    index: usize
) -> usize {
    assert!(domain_0.size <= domain_1.size);

    // in bitreversed ordering an element of index i will always be in the beginning with unchanged index
    index
}
impl<E: Engine> ConstraintSystem<E> for KeypairAssembly<E> {
    type Root = Self;

    // allocate an aux variable (0-based index); values are ignored at indexing time
    fn alloc<F, A, AR>(
        &mut self,
        _: A,
        _: F
    ) -> Result<Variable, SynthesisError>
        where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
    {
        // There is no assignment, so we don't even invoke the
        // function for obtaining one.

        let index = self.num_aux;
        self.num_aux += 1;

        Ok(Variable(Index::Aux(index)))
    }

    // allocate an input variable (0-based index); values are ignored at indexing time
    fn alloc_input<F, A, AR>(
        &mut self,
        _: A,
        _: F
    ) -> Result<Variable, SynthesisError>
        where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
    {
        // There is no assignment, so we don't even invoke the
        // function for obtaining one.

        let index = self.num_inputs;
        self.num_inputs += 1;

        Ok(Variable(Index::Input(index)))
    }

    // record one R1CS constraint a * b = c; each LC is deduplicated
    // (coefficients of repeated variables summed, zeros dropped) and sorted
    // inputs-before-aux before being stored
    fn enforce<A, AR, LA, LB, LC>(
        &mut self,
        _: A,
        a: LA,
        b: LB,
        c: LC
    )
        where A: FnOnce() -> AR, AR: Into<String>,
            LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
            LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
            LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
    {
        // total order on variables: inputs first (by index), then aux (by index)
        fn sort_vars(v_0: &Variable, v_1: &Variable) -> std::cmp::Ordering {
            match (v_0, v_1) {
                (Variable(Index::Input(v_0_value)), Variable(Index::Input(v_1_value))) => {
                    v_0_value.cmp(v_1_value)
                },
                (Variable(Index::Input(_)), Variable(Index::Aux(_))) => {
                    std::cmp::Ordering::Less
                },
                (Variable(Index::Aux(_)), Variable(Index::Input(_))) => {
                    std::cmp::Ordering::Greater
                },
                (Variable(Index::Aux(v_0_value)), Variable(Index::Aux(v_1_value))) => {
                    v_0_value.cmp(v_1_value)
                }
            }
        }

        // merge duplicate variables via the scratch map, drop zero coefficients,
        // and return the LC in sorted order; leaves `scratch` empty again
        fn deduplicate_with_sort<E: Engine>(
            lc: LinearCombination<E>,
            scratch: &mut HashMap<Variable, E::Fr>
        ) -> LinearCombination<E> {
            assert!(scratch.is_empty());

            if lc.as_ref().len() == 0 {
                return lc;
            }

            for (var, coeff) in lc.0.into_iter() {
                if let Some(existing_index) = scratch.get_mut(&var) {
                    existing_index.add_assign(&coeff);
                } else {
                    scratch.insert(var, coeff);
                }
            }

            let mut deduped_vec: Vec<(Variable, E::Fr)> = scratch.drain().filter(|(_var, coeff)| !coeff.is_zero()).collect();

            deduped_vec.sort_by(|(a, _), (b, _)| sort_vars(a, b));

            // redundant after `drain`, kept as a belt-and-braces invariant
            scratch.clear();

            LinearCombination(deduped_vec)
        }

        let a = deduplicate_with_sort(a(LinearCombination::zero()), &mut self.deduplication_scratch);
        let b = deduplicate_with_sort(b(LinearCombination::zero()), &mut self.deduplication_scratch);
        let c = deduplicate_with_sort(c(LinearCombination::zero()), &mut self.deduplication_scratch);

        // keep track on number of non-zero entries in a/b/c
        let num_non_zero_into_a = a.as_ref().len();
        let num_non_zero_into_b = b.as_ref().len();
        let num_non_zero_into_c = c.as_ref().len();

        let (a, b) = {
            self.num_non_zero_in_a += num_non_zero_into_a;
            self.num_non_zero_in_b += num_non_zero_into_b;

            (a, b)
        };

        // // keep track on A/B densities and swap if necessary
        // let (a, b) = if self.num_non_zero_in_a >= self.num_non_zero_in_b {
        //     // there are more in a than in b
        //     if num_non_zero_into_a >= num_non_zero_into_b {
        //         // swap a/b
        //         self.num_non_zero_in_a += num_non_zero_into_b;
        //         self.num_non_zero_in_b += num_non_zero_into_a;

        //         (b, a)
        //     } else {
        //         // don't swap
        //         self.num_non_zero_in_a += num_non_zero_into_a;
        //         self.num_non_zero_in_b += num_non_zero_into_b;

        //         (a, b)
        //     }
        // } else {
        //     if num_non_zero_into_b >= num_non_zero_into_a {
        //         // swap a/b
        //         self.num_non_zero_in_a += num_non_zero_into_b;
        //         self.num_non_zero_in_b += num_non_zero_into_a;

        //         (b, a)
        //     } else {
        //         // don't swap
        //         self.num_non_zero_in_a += num_non_zero_into_a;
        //         self.num_non_zero_in_b += num_non_zero_into_b;

        //         (a, b)
        //     }
        // };

        self.num_non_zero_in_c += num_non_zero_into_c;

        self.a_rows.push(a);
        self.b_rows.push(b);
        self.c_rows.push(c);

        self.num_constraints += 1;
    }

    fn push_namespace<NR, N>(&mut self, _: N)
        where NR: Into<String>, N: FnOnce() -> NR
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn pop_namespace(&mut self)
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }
}
/// Create parameters for a circuit, given some toxic waste.
///
/// Synthesizes the circuit into a `KeypairAssembly`, pads the R1CS system to
/// a square, and interpolates each of the A/B/C sparse matrices into
/// (row, col, val) polynomials over domain K plus their raw index vectors,
/// returning the full `IndexedSetup`.
pub fn generate_parameters<E, C>(
    circuit: C,
) -> Result<IndexedSetup<E>, SynthesisError>
    where E: Engine, C: Circuit<E>
{
    let mut assembly = KeypairAssembly {
        num_inputs: 0,
        num_aux: 0,
        num_constraints: 0,
        num_non_zero_in_a: 0,
        num_non_zero_in_b: 0,
        num_non_zero_in_c: 0,
        a_rows: vec![],
        b_rows: vec![],
        c_rows: vec![],
        deduplication_scratch: HashMap::with_capacity((E::Fr::NUM_BITS * 2) as usize),
    };

    // Allocate the "one" input variable
    assembly.alloc_input(|| "CS::ONE", || Ok(E::Fr::one()))?;

    // Synthesize the circuit.
    circuit.synthesize(&mut assembly)?;

    // // Input constraints to ensure full density of IC query
    // // x * 0 = 0
    // for i in 0..assembly.num_inputs {
    //     assembly.enforce(|| "",
    //         |lc| lc + Variable(Index::Input(i)),
    //         |lc| lc,
    //         |lc| lc,
    //     );
    // }

    assembly.pad_to_square()?;

    let worker = Worker::new();

    let (domain_h_size, domain_k_size, (a_matrix, b_matrix, c_matrix)) = assembly.into_indexer_input(&worker)?;

    let domain_h = Domain::new_for_size(domain_h_size as u64)?;
    let domain_k = Domain::new_for_size(domain_k_size as u64)?;

    // todo: move materialized domain elements out

    // Interpolate one sparse matrix into its row/col/val polynomials over K.
    // Returns (#non-zero entries, [row, col, val] in coefficient form,
    // [row, col] raw indexes into H).
    fn interpolate_matrix<F: PrimeField>(
        matrix: Vec<Vec<(usize, F)>>,
        domain_h: &Domain<F>,
        domain_k: &Domain<F>,
        worker: &Worker
    ) -> Result<
            (
                usize,
                [Polynomial<F, Coefficients>; 3],
                [Vec<usize>; 2]
            ), SynthesisError> {
        let mut row_vec = Vec::with_capacity(domain_k.size as usize);
        let mut col_vec = Vec::with_capacity(domain_k.size as usize);
        let mut val_vec = Vec::with_capacity(domain_k.size as usize);

        let mut inverses_for_lagrange_polys = Vec::with_capacity(domain_k.size as usize);

        let domain_h_elements = materialize_domain_elements(domain_h, worker);
        let unnormalized_largrange_values_over_k = eval_unnormalized_bivariate_lagrange_poly_over_diaginal(
            domain_h.size,
            domain_k,
            &worker
        );

        let mut row_indexes = Vec::with_capacity(domain_k.size as usize);
        let mut col_indexes = Vec::with_capacity(domain_k.size as usize);

        for (row_index, row) in matrix.into_iter().enumerate() {
            for (col_index, coeff) in row {
                let row_val = domain_h_elements[row_index];
                row_indexes.push(row_index);

                let col_val = domain_h_elements[col_index]; // TODO: do something with inputs?
                col_indexes.push(col_index);

                row_vec.push(row_val);
                col_vec.push(col_val);
                val_vec.push(coeff);

                // row and column indexes are over H, but we can quickly pull their values from evaluations
                // over K
                let idx_row_into_larger_domain = reindex_from_one_domain_to_another_assuming_natural_ordering(
                    domain_h,
                    domain_k,
                    row_index);

                let idx_col_into_larger_domain = reindex_from_one_domain_to_another_assuming_natural_ordering(
                    domain_h,
                    domain_k,
                    col_index);

                let mut lagrange_eval_value = unnormalized_largrange_values_over_k[idx_row_into_larger_domain];
                lagrange_eval_value.mul_assign(&unnormalized_largrange_values_over_k[idx_col_into_larger_domain]);
                inverses_for_lagrange_polys.push(lagrange_eval_value);
            }
        }

        let num_non_zero = row_vec.len();

        // normalize val by the product of the unnormalized Lagrange values
        let mut inverses_for_lagrange = Polynomial::from_values_unpadded(inverses_for_lagrange_polys)?;
        inverses_for_lagrange.batch_inversion(&worker)?;

        let mut val_values = Polynomial::from_values_unpadded(val_vec)?;

        val_values.mul_assign(&worker, &inverses_for_lagrange);

        // now pad to the domain size with zeroes
        val_values.pad_to_size(domain_k.size as usize)?;

        // val_values.pad_to_domain()?;

        assert!(val_values.size() == domain_k.size as usize);

        assert!(row_vec.len() <= domain_k.size as usize);
        assert!(col_vec.len() <= domain_k.size as usize);

        // pad row/col with ones (a valid domain element) up to domain K size
        row_vec.resize(val_values.size(), F::one());
        col_vec.resize(val_values.size(), F::one());

        let row_values = Polynomial::from_values(row_vec)?;
        let col_values = Polynomial::from_values(col_vec)?;

        let val_poly = val_values.ifft(&worker);
        let row_poly = row_values.ifft(&worker);
        let col_poly = col_values.ifft(&worker);

        // row_indexes.resize(domain_k.size as usize, 0);

        Ok((num_non_zero, [row_poly, col_poly, val_poly], [row_indexes, col_indexes]))
    }

    let (a_num_non_zero, [row_a, col_a, val_a], [row_a_indexes, col_a_indexes]) = interpolate_matrix(a_matrix, &domain_h, &domain_k, &worker)?;
    let (b_num_non_zero, [row_b, col_b, val_b], [row_b_indexes, col_b_indexes]) = interpolate_matrix(b_matrix, &domain_h, &domain_k, &worker)?;
    let (c_num_non_zero, [row_c, col_c, val_c], [row_c_indexes, col_c_indexes]) = interpolate_matrix(c_matrix, &domain_h, &domain_k, &worker)?;

    Ok(IndexedSetup {
        a_num_non_zero,
        b_num_non_zero,
        c_num_non_zero,
        domain_h_size,
        domain_k_size,
        a_matrix_poly: val_a,
        b_matrix_poly: val_b,
        c_matrix_poly: val_c,
        a_row_poly: row_a,
        b_row_poly: row_b,
        c_row_poly: row_c,
        a_col_poly: col_a,
        b_col_poly: col_b,
        c_col_poly: col_c,
        a_row_indexes: row_a_indexes,
        b_row_indexes: row_b_indexes,
        c_row_indexes: row_c_indexes,
        a_col_indexes: col_a_indexes,
        b_col_indexes: col_b_indexes,
        c_col_indexes: col_c_indexes,
    })
}
/// Evaluates the unnormalized bivariate Lagrange kernel (Z(x) - Z(y)) / (x - y),
/// where Z(t) = t^n - 1 is the vanishing polynomial of a domain of size
/// `vanishing_domain_size`. The diagonal x == y is delegated to the L'Hopital
/// form implemented by `evaluate_bivariate_lagrange_at_diagonal_point`.
pub fn evaluate_bivariate_lagrange_at_point<F: PrimeField>(
    x: F,
    y: F,
    vanishing_domain_size: u64
) -> Result<F, SynthesisError> {
    // The generic formula divides by (x - y), so the diagonal needs its own path.
    if x == y {
        return evaluate_bivariate_lagrange_at_diagonal_point(x, vanishing_domain_size);
    }
    // Numerator: Z(x) - Z(y).
    let mut numerator = evaluate_vanishing_for_size(&x, vanishing_domain_size);
    let y_vanishing = evaluate_vanishing_for_size(&y, vanishing_domain_size);
    numerator.sub_assign(&y_vanishing);
    // Denominator: (x - y); non-zero here because the diagonal was handled above.
    let mut difference = x;
    difference.sub_assign(&y);
    let inverse = difference.inverse().ok_or(SynthesisError::DivisionByZero)?;
    numerator.mul_assign(&inverse);
    Ok(numerator)
}
/// Diagonal (x == y) specialization of the bivariate Lagrange kernel:
/// lim_{y->x} (Z(x) - Z(y)) / (x - y) = n * x^(n-1) for Z(t) = t^n - 1.
pub fn evaluate_bivariate_lagrange_at_diagonal_point<F: PrimeField>(x: F, vanishing_domain_size: u64) -> Result<F, SynthesisError> {
    // Lift the domain size n into the field through its lowest representation limb;
    // assumes n fits into one limb and is below the field modulus.
    let size_as_fe = {
        let mut repr = F::Repr::default();
        repr.as_mut()[0] = vanishing_domain_size;
        F::from_repr(repr).expect("must convert domain size into field element")
    };
    // n * x^(n-1)
    let mut value = x.pow(&[vanishing_domain_size - 1]);
    value.mul_assign(&size_as_fe);
    Ok(value)
}
// Same kernel as `evaluate_bivariate_lagrange_at_point`, but specialized to a `y`
// taken from the vanishing domain itself: then Z(y) = y^n - 1 = 0 and the
// numerator collapses to Z(x) alone, saving one vanishing evaluation.
fn evaluate_bivariate_lagrange_at_point_for_vanishing_y<F: PrimeField>(x: F, y: F, vanishing_domain_size: u64) -> Result<F, SynthesisError> {
    if x == y {
        return evaluate_bivariate_lagrange_at_diagonal_point(x, vanishing_domain_size);
    }
    // Numerator: Z(x) = x^n - 1.
    let mut numerator = x.pow(&[vanishing_domain_size]);
    numerator.sub_assign(&F::one());
    // Divide by (x - y); x != y is guaranteed by the early return above.
    let mut difference = x;
    difference.sub_assign(&y);
    let inverse = difference.inverse().ok_or(SynthesisError::DivisionByZero)?;
    numerator.mul_assign(&inverse);
    Ok(numerator)
}
/// Z(point) = point^n - 1: the vanishing polynomial of a multiplicative
/// subgroup of size `vanishing_domain_size`, evaluated at `point`.
pub(crate) fn evaluate_vanishing_for_size<F: PrimeField>(point: &F, vanishing_domain_size: u64) -> F {
    let mut value = point.pow(&[vanishing_domain_size]);
    value.sub_assign(&F::one());
    value
}
// Minimal circuit used to exercise the indexer tests below: two optional witness
// values wired through a handful of constraints (see the `Circuit` impl).
#[derive(Clone)]
pub(crate) struct IndexerTester<E: Engine> {
// Witness constrained to zero, and also re-allocated as the public input `c`.
pub(crate) a: Option<E::Fr>,
// Witness constrained to one.
pub(crate) b: Option<E::Fr>,
}
impl<E: Engine> Circuit<E> for IndexerTester<E> {
    /// Lays down a small set of representative constraints so the indexer has
    /// something non-trivial to process: a zero check, a one check, a public
    /// input equality, and two wide linear combinations (one deliberately
    /// unsatisfiable when a != 0 or b != 1).
    fn synthesize<CS: ConstraintSystem<E>>(
        self,
        cs: &mut CS
    ) -> Result<(), SynthesisError>
    {
        // Witness `a`; the assignment may legitimately be absent during setup.
        let var_a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
        // a * 1 = 0
        cs.enforce(
            || "a is zero",
            |lc| lc + var_a,
            |lc| lc + CS::one(),
            |lc| lc
        );
        // Witness `b`.
        let var_b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?;
        // b * 1 = 1
        cs.enforce(
            || "b is one",
            |lc| lc + var_b,
            |lc| lc + CS::one(),
            |lc| lc + CS::one()
        );
        // Public input `c`, deliberately fed from the same assignment as `a`.
        let var_c = cs.alloc_input(|| "c", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
        // a * 1 = c
        cs.enforce(
            || "a is equal to c",
            |lc| lc + var_a,
            |lc| lc + CS::one(),
            |lc| lc + var_c
        );
        // (a + b + c) * 1 = (a + b + c) — always satisfiable.
        cs.enforce(
            || "large linear combinations (valid)",
            |lc| lc + var_a + var_b + var_c,
            |lc| lc + CS::one(),
            |lc| lc + var_a + var_b + var_c
        );
        // (a + b + c)^2 = 0 — only satisfiable when the sum vanishes.
        cs.enforce(
            || "large linear combinations (invalid)",
            |lc| lc + var_a + var_b + var_c,
            |lc| lc + var_a + var_b + var_c,
            |lc| lc
        );
        Ok(())
    }
}
#[cfg(test)]
mod test {
use crate::tests::XORDemo;
use crate::plonk::domains::*;
use crate::worker::Worker;
use super::*;
use std::marker::PhantomData;
// Runs the indexer over `circuit` and re-derives the A matrix entry-by-entry from
// the committed row/col/val polynomials, using the bivariate Lagrange kernel.
// Currently only prints the reconstruction for manual inspection; the hard
// assertions are limited to the "row/col evaluations lie in H" checks inside.
fn test_over_engine_and_circuit<E: Engine, C: Circuit<E>>(
circuit: C
) {
let params = generate_parameters(circuit).unwrap();
let worker = Worker::new();
println!("Params domain H size = {}", params.domain_h_size);
println!("Params domain K size = {}", params.domain_k_size);
let domain_h = Domain::<E::Fr>::new_for_size(params.domain_h_size as u64).unwrap();
let domain_k = Domain::<E::Fr>::new_for_size(params.domain_k_size as u64).unwrap();
let generator_h = domain_h.generator;
let generator_k = domain_k.generator;
let mut a_matrix_values = vec![];
// Reconstruct A[i][j] = sum over K of val(k) * L(x, row(k)) * L(y, col(k)),
// where x, y run over H. This is O(|H|^2 * |K|) — fine for tiny test circuits.
for i in 0..params.domain_h_size {
let x = generator_h.pow(&[i as u64]);
let mut row = vec![];
for j in 0..params.domain_h_size {
let y = generator_h.pow(&[j as u64]);
let mut sum = E::Fr::zero();
// sum
for k in 0..params.domain_k_size {
// Shadowing: `k` becomes the K-domain element for index k.
let k = generator_k.pow(&[k as u64]);
let col_value = params.a_col_poly.evaluate_at(&worker, k);
let row_value = params.a_row_poly.evaluate_at(&worker, k);
// Both row and col evaluations must be elements of H (Z_H vanishes on them).
let vanishing_at_col_value = evaluate_vanishing_for_size(&col_value, params.domain_h_size as u64);
assert!(vanishing_at_col_value.is_zero());
let vanishing_at_row_value = evaluate_vanishing_for_size(&row_value, params.domain_h_size as u64);
assert!(vanishing_at_row_value.is_zero());
let lag_x = evaluate_bivariate_lagrange_at_point_for_vanishing_y(x, row_value, params.domain_h_size as u64).unwrap();
let lag_y = evaluate_bivariate_lagrange_at_point_for_vanishing_y(y, col_value, params.domain_h_size as u64).unwrap();
let val = params.a_matrix_poly.evaluate_at(&worker, k);
let mut result = lag_y;
result.mul_assign(&lag_x);
result.mul_assign(&val);
sum.add_assign(&result);
}
row.push(sum);
}
a_matrix_values.push(row);
}
println!("Indexed A matrix values are {:?}", a_matrix_values);
println!("A row indexes are {:?}", params.a_row_indexes);
println!("A column indexes are {:?}", params.a_col_indexes);
}
#[test]
fn test_interpolation_poly_1() {
use crate::pairing::bn256::{Bn256};
let c = XORDemo::<Bn256> {
a: None,
b: None,
_marker: PhantomData
};
test_over_engine_and_circuit(c);
}
#[test]
fn test_interpolation_poly_2() {
// NOTE(review): `Fr` is imported but unused here.
use crate::pairing::bn256::{Bn256, Fr};
let c = IndexerTester::<Bn256> {
a: None,
b: None,
};
test_over_engine_and_circuit(c);
}
}
use crate::pairing::ff::PrimeField;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::polynomials::*;
use crate::plonk::domains::*;
use crate::worker::*;
use crate::SynthesisError;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::commitments::transparent::utils::log2_floor;
use super::naive_fri::*;
use super::super::*;
impl<F: PrimeField, I: IOP<F>> NaiveFriIop<F, I> {
/// Re-executes every FRI folding round of a prototype proof against the full
/// `leaf_values` oracle (no Merkle queries), starting from one natural index,
/// and checks that the final folded value matches the evaluation of the
/// supplied final coefficients. Returns Ok(false) on any inconsistency that is
/// a soundness failure rather than a malformed input.
pub fn verify_prototype(
proof: & FRIProofPrototype<F, I>,
leaf_values: & Polynomial<F, Values>,
natural_element_index: usize
) -> Result<bool, SynthesisError> {
// Precompute 1/2: each folding step averages the even/odd parts below.
let mut two = F::one();
two.double();
let two_inv = two.inverse().ok_or(
SynthesisError::DivisionByZero
)?;
// start from the bottom: we need to get a "pair" and calculate FRI step
let domain = Domain::<F>::new_for_size((proof.initial_degree_plus_one * proof.lde_factor) as u64)?;
let domain_element = domain.generator.pow([natural_element_index as u64]);
// Sanity: the addressed element must actually lie in the domain.
let el = domain_element.pow([domain.size]);
if el != F::one() {
return Err(SynthesisError::UnexpectedIdentity);
}
let mut omega = domain.generator;
let mut omega_inv = omega.inverse().ok_or(
SynthesisError::DivisionByZero
)?;
debug_assert_eq!(F::one(), omega_inv.pow([domain.size]));
// Value predicted by the previous round's fold; None before the first round.
let mut expected_value: Option<F> = None;
let mut domain_size = domain.size as usize;
let mut domain_idx = natural_element_index;
// Round 0 reads from `leaf_values`, later rounds from the intermediate oracles.
for (iop_values, iop_challenge) in Some(leaf_values).into_iter().chain(&proof.intermediate_values)
.zip(proof.challenges.iter()) {
// The coset is the pair {x, -x} addressed by natural index; sorted ascending.
let coset_values = <I::Combiner as CosetCombiner<F>>::get_coset_for_natural_index(domain_idx, domain_size);
assert!(coset_values.len() == 2);
assert!(coset_values[0] < coset_values[1]);
let f_at_omega = I::get_for_natural_index(iop_values.as_ref(), coset_values[0]);
// Cross-round consistency: the value folded in the previous round must equal
// what this round's oracle stores at the carried index.
if let Some(value) = expected_value {
if !coset_values.contains(&domain_idx) {
return Ok(false);
}
let supplied_value = *I::get_for_natural_index(iop_values.as_ref(), domain_idx);
// check consistency
if supplied_value != value {
return Ok(false);
}
}
let f_at_minus_omega = I::get_for_natural_index(iop_values.as_ref(), coset_values[1]);
let divisor = omega_inv.pow([coset_values[0] as u64]);
// f(x) + f(-x) = 2 * f_even(x^2); f(x) - f(-x) = 2 * x * f_odd(x^2).
let mut v_even_coeffs = *f_at_omega;
v_even_coeffs.add_assign(&f_at_minus_omega);
let mut v_odd_coeffs = *f_at_omega;
v_odd_coeffs.sub_assign(&f_at_minus_omega);
v_odd_coeffs.mul_assign(&divisor);
// those can be treated as (doubled) evaluations of polynomials that
// are themselves made only from even or odd coefficients of original poly
// (with reduction of degree by 2) on a domain of the size twice smaller
// with an extra factor of "omega" in odd coefficients
// to do assemble FRI step we just need to add them with a random challenge
let mut tmp = v_odd_coeffs;
tmp.mul_assign(&iop_challenge);
tmp.add_assign(&v_even_coeffs);
tmp.mul_assign(&two_inv);
expected_value = Some(tmp);
// we have jumped in a coset and can keep it ordered using the smaller index out of two
// domain_idx = coset_values[0];
// debug_assert!(domain_idx < domain_size / 2);
let (next_idx, next_size) = Domain::<F>::index_and_size_for_next_domain(domain_idx, domain_size);
domain_idx = next_idx;
domain_size = next_size;
// Halving the domain squares its generator.
omega.square();
omega_inv.square();
}
// finally we need to get expected value from coefficients
// Horner-free evaluation of the final (low-degree) polynomial at the carried point.
let mut expected_value_from_coefficients = F::zero();
let mut power = F::one();
let evaluation_point = omega.pow([domain_idx as u64]);
for c in proof.final_coefficients.iter() {
let mut tmp = power;
tmp.mul_assign(c);
expected_value_from_coefficients.add_assign(&tmp);
power.mul_assign(&evaluation_point);
}
let expected_value = expected_value.expect("is some");
let valid = expected_value_from_coefficients == expected_value;
Ok(valid)
}
// pub fn verify_proof_queries<P: Prng<F, Input = < < I::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F> >::HashOutput> >(
// proof: &FRIProof<F, I>,
// natural_element_indexes: Vec<usize>,
// degree: usize,
// expected_value_from_oracle: F,
// prng: &mut P
// ) -> Result<bool, SynthesisError> {
// }
/// Verifies a full FRI proof from Merkle-authenticated queries: for every query
/// position it checks Merkle membership against the round roots, re-runs the
/// folding recursion over the opened coset values, enforces cross-round
/// consistency, and compares the final fold against the final coefficients.
/// `degree` is the coset size per round; `expected_values_from_oracle` pins the
/// round-0 openings to externally known values.
pub fn verify_proof_queries(
proof: &FRIProof<F, I>,
natural_element_indexes: Vec<usize>,
degree: usize,
expected_values_from_oracle: &[F],
fri_challenges: &[F]
) -> Result<bool, SynthesisError> {
// 1/2 for the averaging in each fold, as in `verify_prototype`.
let mut two = F::one();
two.double();
let two_inv = two.inverse().ok_or(
SynthesisError::DivisionByZero
)?;
let domain = Domain::<F>::new_for_size((proof.initial_degree_plus_one * proof.lde_factor) as u64)?;
let omega = domain.generator;
let omega_inv = omega.inverse().ok_or(
SynthesisError::DivisionByZero
)?;
// One challenge per committed round; one query bundle per opened index.
assert!(fri_challenges.len() == proof.roots.len());
assert!(natural_element_indexes.len() == proof.queries.len());
for ((query, natural_element_index), expected_value_from_oracle) in proof.queries.iter()
.zip(natural_element_indexes.into_iter())
.zip(expected_values_from_oracle.iter())
{
let domain_element = domain.generator.pow([natural_element_index as u64]);
// Sanity: the index must address a genuine domain element.
let el = domain_element.pow([domain.size]);
if el != F::one() {
return Err(SynthesisError::UnexpectedIdentity);
}
// let next_domain_size = domain.size / 2;
// let el = domain_element.pow([next_domain_size]);
// if el == F::one() {
// return Err(SynthesisError::UnexpectedIdentity);
// }
let mut expected_value: Option<F> = None;
let mut domain_size = domain.size as usize;
let mut domain_idx = natural_element_index;
// Per-query copies: omega/omega_inv are squared once per round below.
let mut omega = omega;
let mut omega_inv = omega_inv;
// The flat query list must split evenly into per-round coset chunks.
if query.len() % degree != 0 {
return Err(SynthesisError::PolynomialDegreeTooLarge);
}
for (round, ((root, queries), iop_challenge)) in proof.roots.iter()
.zip(query.chunks_exact(degree))
.zip(fri_challenges.iter())
.enumerate()
{
let coset_values = <I::Combiner as CosetCombiner<F>>::get_coset_for_natural_index(domain_idx, domain_size);
if coset_values.len() != <I::Combiner as CosetCombiner<F>>::COSET_SIZE {
return Err(SynthesisError::PolynomialDegreeTooLarge);
}
// Every opened query must belong to the expected coset.
for q in queries.iter() {
if !coset_values.contains(&q.natural_index()) {
println!("Coset values do not contain query index {}", q.natural_index());
return Ok(false);
}
}
// Round 0: pin the opening at the original index to the external oracle value.
if round == 0 {
for q in queries.iter() {
if q.natural_index() == natural_element_index && q.value() != *expected_value_from_oracle {
println!("Expected {}, got {} from query", expected_value_from_oracle, q.value());
return Ok(false);
}
}
}
// Queries must come sorted in coset order with matching tree indexes.
for (c, q) in coset_values.iter().zip(queries.iter()) {
let tree_index = <I::Combiner as CosetCombiner<F>>::natural_index_into_tree_index(*c);
if q.tree_index() != tree_index {
return Err(SynthesisError::PolynomialDegreeTooLarge);
}
assert!(q.natural_index() == *c, "coset values and produced queries are expected to be sorted!");
}
// Merkle authentication of every opening against this round's root.
for q in queries.iter() {
if !I::verify_query(&q, &root) {
println!("Query is not in the root");
return Ok(false);
}
}
let f_at_omega = (&queries[0]).value();
// Cross-round consistency with the value folded in the previous round.
if let Some(value) = expected_value {
if !coset_values.contains(&domain_idx) {
println!("Coset values {:?} do not containt required index {}", coset_values, domain_idx);
return Ok(false);
}
let q: Vec<_> = queries.iter().filter(|el| el.natural_index() == domain_idx).collect();
if q.len() != 1 {
println!("Queries containt duplicate opening for required index {}", domain_idx);
return Ok(false)
}
let supplied_value = q[0].value();
// check in the next domain
if supplied_value != value {
println!("Query value {} is not equal to the expected value {} for round {}", supplied_value, value, round);
return Ok(false);
}
}
let f_at_minus_omega = (&queries[1]).value();
let divisor = omega_inv.pow([coset_values[0] as u64]);
// Split into even/odd parts: f(x) ± f(-x), odd part normalized by x^{-1}.
let mut v_even_coeffs = f_at_omega;
v_even_coeffs.add_assign(&f_at_minus_omega);
let mut v_odd_coeffs = f_at_omega;
v_odd_coeffs.sub_assign(&f_at_minus_omega);
v_odd_coeffs.mul_assign(&divisor);
// those can be treated as (doubled) evaluations of polynomials that
// are themselves made only from even or odd coefficients of original poly
// (with reduction of degree by 2) on a domain of the size twice smaller
// with an extra factor of "omega" in odd coefficients
// to do assemble FRI step we just need to add them with a random challenge
let mut tmp = v_odd_coeffs;
tmp.mul_assign(&iop_challenge);
tmp.add_assign(&v_even_coeffs);
tmp.mul_assign(&two_inv);
expected_value = Some(tmp);
// we have jumped in a coset and can keep it ordered using the smaller index out of two
// domain_idx = coset_values[0];
let (next_idx, next_size) = Domain::<F>::index_and_size_for_next_domain(domain_idx, domain_size);
domain_idx = next_idx;
domain_size = next_size;
omega.square();
omega_inv.square();
}
// finally we need to get expected value from coefficients
let mut expected_value_from_coefficients = F::zero();
let mut power = F::one();
let evaluation_point = omega.pow([domain_idx as u64]);
for c in proof.final_coefficients.iter() {
let mut tmp = power;
tmp.mul_assign(c);
expected_value_from_coefficients.add_assign(&tmp);
power.mul_assign(&evaluation_point);
}
let expected_value = expected_value.expect("is some");
let valid = expected_value_from_coefficients == expected_value;
if !valid {
println!("Value from supplied coefficients {} is not equal to the value from queries {} for natural index {}", expected_value_from_coefficients, expected_value, natural_element_index);
println!("Final coefficients = {:?}", proof.final_coefficients);
return Ok(false);
}
}
Ok(true)
}
}
<file_sep>/src/plonk/commitments/transparent/fri/coset_combining_fri/query_producer.rs
use crate::pairing::ff::PrimeField;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::polynomials::*;
use crate::plonk::domains::*;
use crate::worker::*;
use crate::SynthesisError;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::commitments::transparent::utils::log2_floor;
use super::fri::*;
use super::super::*;
impl<F: PrimeField, I: IOP<F>> FRIProofPrototype<F, I> {
/// Finalizes a FRI prototype into a transmittable proof: collects the Merkle
/// roots of every committed round and, for each requested query position,
/// produces authenticated openings for the whole coset at every round.
/// Consumes `self` (roots and final coefficients move into the proof).
pub fn produce_proof(
self,
iop_values: &Polynomial<F, Values>,
natural_first_element_indexes: Vec<usize>,
) -> Result<FRIProof<F, I>, SynthesisError> {
let domain_size = self.initial_degree_plus_one * self.lde_factor;
let mut roots = vec![];
// Wrap the round-0 commitment so it can be chained with the intermediate ones.
let l0_commitment = Some(self.l0_commitment);
for iop in l0_commitment.iter().chain(&self.intermediate_commitments) {
roots.push(iop.get_root());
}
let mut rounds = vec![];
// One flat query vector per requested starting index; the verifier re-splits
// it into per-round coset chunks, so ordering here must stay coset-by-coset.
for natural_first_element_index in natural_first_element_indexes.into_iter() {
let mut queries = vec![];
let mut domain_idx = natural_first_element_index;
let mut domain_size = domain_size;
// Walk the rounds: commitment k authenticates the values oracle of round k.
for (iop, leaf_values) in l0_commitment.iter().chain(&self.intermediate_commitments)
.zip(Some(iop_values).into_iter().chain(&self.intermediate_values)) {
let coset_values = <I::Combiner as CosetCombiner<F>>::get_coset_for_natural_index(domain_idx, domain_size);
if coset_values.len() != <I::Combiner as CosetCombiner<F>>::COSET_SIZE {
return Err(SynthesisError::PolynomialDegreeTooLarge);
}
// Open every member of the coset, preserving the combiner's ordering.
for idx in coset_values.into_iter() {
let query = iop.query(idx, leaf_values.as_ref());
queries.push(query);
}
// Descend into the half-size domain for the next round.
let (next_idx, next_size) = Domain::<F>::index_and_size_for_next_domain(domain_idx, domain_size);
domain_idx = next_idx;
domain_size = next_size;
}
rounds.push(queries);
}
let proof = FRIProof::<F, I> {
queries: rounds,
roots,
final_coefficients: self.final_coefficients,
initial_degree_plus_one: self.initial_degree_plus_one,
output_coeffs_at_degree_plus_one: self.output_coeffs_at_degree_plus_one,
lde_factor: self.lde_factor,
};
Ok(proof)
}
}
# Description
Initial SONIC proof system integration using the code from the [original implementation](https://github.com/zknuckles/sonic.git). It's here for experimental reasons and evaluation of the following properties:
- How applicable the "helped" procedure is in the context of Ethereum
- What is a final verification cost for "helped" and "unhelped" procedures
- Prover efficiency in both cases
- Implementation of a memory constrained prover and helper
- Smart-contract implementation of verifiers
- Code cleanup
- Migration for smart-contract compatible transcripts
## Current state
Beta - fast and valid, but breaking API changes are expected
## Completed
- Basic proof modes (helped/unhelped)
- Succinct `S` polynomial evaluation using permutation argument
- High-level API for non-succinct mode that can produce "large enough" SRS from a "global" SRS
- Proving/verifying keys that have additional information about the circuit such as number of gates, linear constraints and public inputs
- Implement non-assigning backends for faster estimation of circuit parameters in un-cached cases
## TODO Plan
- [ ] Add caching of proving/verifying keys for the succinct mode
- [ ] Fix high-level API for both modes
- [ ] Re-structure the package itself<file_sep>/src/sonic/sonic/adaptor.rs
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine, CurveProjective};
// this one is for all external interfaces
// use crate::{LinearCombination, ConstraintSystem, Circuit, Variable};
use crate::SynthesisError;
use crate::sonic::srs::SRS;
use crate::sonic::cs::LinearCombination as SonicLinearCombination;
use crate::sonic::cs::Circuit as SonicCircuit;
use crate::sonic::cs::ConstraintSystem as SonicConstraintSystem;
use crate::sonic::cs::Variable as SonicVariable;
use crate::sonic::cs::Coeff;
use std::marker::PhantomData;
// Shim that exposes a sonic `ConstraintSystem` through the crate's bellman-style
// `ConstraintSystem` trait, so existing circuits can be synthesized into sonic.
pub struct Adaptor<'a, E: Engine, CS: SonicConstraintSystem<E> + 'a> {
// The wrapped sonic constraint system all calls are forwarded to.
cs: &'a mut CS,
_marker: PhantomData<E>,
}
impl<'a, E: Engine, CS: SonicConstraintSystem<E> + 'a> crate::ConstraintSystem<E>
for Adaptor<'a, E, CS>
{
type Root = Self;
// this is an important change
// The constant "one" is mapped onto input wire 1 rather than a dedicated variable.
fn one() -> crate::Variable {
crate::Variable::new_unchecked(crate::Index::Input(1))
}
// Allocates an auxiliary variable in the sonic CS and maps the returned sonic
// variable back into the crate's Input/Aux index space (A -> Input, B -> Aux).
// Any other sonic variable kind is unexpected here — hence the unreachable!().
fn alloc<F, A, AR>(&mut self, _: A, f: F) -> Result<crate::Variable, crate::SynthesisError>
where
F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
// All error detail is collapsed into AssignmentMissing on both layers.
let var = self.cs.alloc(|| {
f().map_err(|_| crate::SynthesisError::AssignmentMissing)
}).map_err(|_| crate::SynthesisError::AssignmentMissing)?;
Ok(match var {
SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)),
SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)),
_ => unreachable!(),
})
}
// Same as `alloc`, but allocates a public input in the sonic CS.
fn alloc_input<F, A, AR>(
&mut self,
_: A,
f: F,
) -> Result<crate::Variable, crate::SynthesisError>
where
F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
let var = self.cs.alloc_input(|| {
f().map_err(|_| crate::SynthesisError::AssignmentMissing)
}).map_err(|_| crate::SynthesisError::AssignmentMissing)?;
Ok(match var {
SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)),
SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)),
_ => unreachable!(),
})
}
// Translates an R1CS constraint a * b = c into sonic form: one multiplication
// gate fed with the evaluated a/b/c values, plus three linear constraints tying
// the gate wires back to the original linear combinations.
fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
LB: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
LC: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
{
// Maps a crate-level linear combination into the sonic representation,
// mirroring the Input -> A / Aux -> B index mapping used by `alloc`.
fn convert<E: Engine>(lc: crate::LinearCombination<E>) -> SonicLinearCombination<E> {
let mut ret = SonicLinearCombination::zero();
for &(v, coeff) in lc.as_ref().iter() {
let var = match v.get_unchecked() {
crate::Index::Input(i) => SonicVariable::A(i),
crate::Index::Aux(i) => SonicVariable::B(i),
};
ret = ret + (Coeff::Full(coeff), var);
}
ret
}
// Evaluates a sonic linear combination against the current assignment;
// None when any referenced variable has no value (e.g. during setup).
fn eval<E: Engine, CS: SonicConstraintSystem<E>>(
lc: &SonicLinearCombination<E>,
cs: &CS,
) -> Option<E::Fr> {
let mut ret = E::Fr::zero();
for &(v, coeff) in lc.as_ref().iter() {
let mut tmp = match cs.get_value(v) {
Ok(tmp) => tmp,
Err(_) => return None,
};
coeff.multiply(&mut tmp);
ret.add_assign(&tmp);
}
Some(ret)
}
let a_lc = convert(a(crate::LinearCombination::zero()));
let a_value = eval(&a_lc, &*self.cs);
let b_lc = convert(b(crate::LinearCombination::zero()));
let b_value = eval(&b_lc, &*self.cs);
let c_lc = convert(c(crate::LinearCombination::zero()));
let c_value = eval(&c_lc, &*self.cs);
// NOTE(review): unwrap() panics when values are absent — assumes `multiply`
// is only driven with a full assignment; confirm against callers.
let (a, b, c) = self
.cs
.multiply(|| Ok((a_value.unwrap(), b_value.unwrap(), c_value.unwrap())))
.unwrap();
// Tie each gate wire to its linear combination: lc - wire = 0.
self.cs.enforce_zero(a_lc - a);
self.cs.enforce_zero(b_lc - b);
self.cs.enforce_zero(c_lc - c);
}
fn push_namespace<NR, N>(&mut self, _: N)
where
NR: Into<String>,
N: FnOnce() -> NR,
{
// Do nothing; we don't care about namespaces in this context.
}
fn pop_namespace(&mut self) {
// Do nothing; we don't care about namespaces in this context.
}
fn get_root(&mut self) -> &mut Self::Root {
self
}
}
// Newtype wrapper turning a bellman-style circuit into a sonic circuit (see impl below).
#[derive(Clone)]
pub struct AdaptorCircuit<T>(pub T);
impl<'a, E: Engine, C: crate::Circuit<E> + Clone> SonicCircuit<E> for AdaptorCircuit<C> {
fn synthesize<CS: SonicConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
let mut adaptor = Adaptor {
cs: cs,
_marker: PhantomData,
};
match self.0.clone().synthesize(&mut adaptor) {
Err(_) => return Err(SynthesisError::AssignmentMissing),
Ok(_) => {}
};
Ok(())
}
}<file_sep>/src/sonic/unhelped/permutation_argument.rs
/// Permutation argument allows to prove that a commitment to a vector A is
/// actually a commitment to a vector of values that are equal to `(s^{perm})_i * y^{perm(i)}`
/// for some fixed permutation `perm`
use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr, ScalarEngine};
use crate::pairing::{Engine, CurveProjective, CurveAffine};
use std::marker::PhantomData;
use crate::sonic::srs::SRS;
use crate::sonic::util::*;
use super::wellformed_argument::{WellformednessArgument, WellformednessProof};
use super::grand_product_argument::{GrandProductArgument, GrandProductSignature};
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
// Circuit-specific commitments derived from the global SRS by
// `PermutationArgument::make_specialized_srs` (see that method for construction).
#[derive(Clone)]
pub struct SpecializedSRS<E: Engine> {
// Commitment to the vector of all-ones over g^{alpha x^i} (powers of x).
pub p_1: E::G1Affine,
// Per-permutation commitments to the non-permuted coefficient vectors.
pub p_2: Vec<E::G1Affine>,
// Commitment to the index values 1..=n lifted into the field.
pub p_3: E::G1Affine,
// Per-permutation commitments to the permutation index vectors.
pub p_4: Vec<E::G1Affine>,
// Common length of all committed vectors.
n: usize
}
// Prover state for the permutation argument. The `*_at_y_coefficients` caches are
// empty until `commit` is called with the challenge `y`.
#[derive(Clone)]
pub struct PermutationArgument<E: Engine> {
// Original coefficient vectors, one per permutation.
non_permuted_coefficients: Vec<Vec<E::Fr>>,
// Coefficients scaled by consecutive powers of y (filled by `commit`).
non_permuted_at_y_coefficients: Vec<Vec<E::Fr>>,
// Unused by the visible code paths; populated nowhere in view.
permuted_at_y_coefficients: Vec<Vec<E::Fr>>,
// Inverse-permuted, y-scaled coefficients (filled by `commit`).
inverse_permuted_at_y_coefficients: Vec<Vec<E::Fr>>,
// 1-based permutation index vectors, aligned with the coefficient vectors.
permutations: Vec<Vec<usize>>,
// Common vector length.
n: usize
}
// Opening proof produced by `open_commitments_to_s_prime`: the combined value at
// yz plus two commitment openings (at z' and at yz respectively).
#[derive(Clone)]
pub struct PermutationProof<E: Engine> {
pub v_zy: E::Fr,
pub e_opening: E::G1Affine,
pub f_opening: E::G1Affine,
}
// Result of `make_argument`: the number of permutations `j`, the opening of the
// combined S polynomial at z, and its claimed value.
#[derive(Clone)]
pub struct PermutationArgumentProof<E: Engine> {
pub j: usize,
pub s_opening: E::G1Affine,
pub s_zy: E::Fr
}
// Aggregate of everything the verifier needs for the unhelped permutation check:
// the S / S' commitments and the three sub-proofs.
#[derive(Clone)]
pub struct SignatureOfCorrectComputation<E: Engine> {
pub s_commitments: Vec<E::G1Affine>,
pub s_prime_commitments: Vec<E::G1Affine>,
pub perm_argument_proof: PermutationArgumentProof<E>,
pub perm_proof: PermutationProof<E>,
pub grand_product_signature: GrandProductSignature<E>
}
// fn permute<F: Field>(coeffs: &[F], permutation: & [usize]) -> Vec<F>{
// assert_eq!(coeffs.len(), permutation.len());
// let mut result: Vec<F> = vec![F::zero(); coeffs.len()];
// for (i, j) in permutation.iter().enumerate() {
// // if *j < 1 {
// // // if permutation information is missing coefficient itself must be zero!
// // assert!(coeffs[i].is_zero());
// // continue;
// // }
// result[*j - 1] = coeffs[i];
// }
// result
// }
// Applies the inverse of `permutation` to `permuted_coeffs`:
// output[i] = permuted_coeffs[permutation[i] - 1] (indices are 1-based).
// Panics if the slices differ in length or an index is 0 / out of range.
fn permute_inverse<F: Field>(permuted_coeffs: &[F], permutation: &[usize]) -> Vec<F> {
    assert_eq!(permuted_coeffs.len(), permutation.len());
    permutation
        .iter()
        .map(|j| permuted_coeffs[*j - 1])
        .collect()
}
impl<E: Engine> PermutationArgument<E> {
/// Creates a fresh argument over `coefficients` and their 1-based `permutations`.
/// Panics unless there is at least one vector and every coefficient vector and
/// permutation share one common length `n`. The at-y caches start empty and are
/// only populated by `commit`.
pub fn new(coefficients: Vec<Vec<E::Fr>>, permutations: Vec<Vec<usize>>) -> Self {
    assert!(!coefficients.is_empty());
    assert_eq!(coefficients.len(), permutations.len());
    let n = coefficients[0].len();
    // Every polynomial and its permutation must have the same length `n`.
    for (coeffs, perm) in coefficients.iter().zip(permutations.iter()) {
        assert_eq!(perm.len(), n);
        assert_eq!(coeffs.len(), n);
    }
    PermutationArgument {
        non_permuted_coefficients: coefficients,
        non_permuted_at_y_coefficients: vec![],
        // permuted_coefficients: vec![],
        permuted_at_y_coefficients: vec![],
        inverse_permuted_at_y_coefficients: vec![],
        permutations,
        n,
    }
}
/// Builds the circuit-specific `SpecializedSRS` commitments from the global SRS:
/// p_1 commits to all-ones (i.e. the bare powers of x), p_2 to each coefficient
/// vector, p_3 to the index values 1..=n, and p_4 to each permutation's indexes.
pub fn make_specialized_srs(non_permuted_coefficients: &Vec<Vec<E::Fr>>, permutations: &Vec<Vec<usize>>, srs: &SRS<E>) -> SpecializedSRS<E> {
assert!(non_permuted_coefficients.len() > 0);
assert_eq!(non_permuted_coefficients.len(), permutations.len());
let n = non_permuted_coefficients[0].len();
// p1 is just a commitment to the powers of x. It's indexed from 0 cause there is no g^0
let p_1 = multiexp(srs.g_positive_x_alpha[0..n].iter(), vec![E::Fr::one(); n].iter()).into_affine();
let mut p_2 = vec![];
// p3 commits to the values 1..=n, each lifted into the field via its lowest limb.
let p_3 = {
let values: Vec<E::Fr> = (1..=n).map(|el| {
let mut repr = <<E as ScalarEngine>::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = el as u64;
let fe = E::Fr::from_repr(repr).unwrap();
fe
}).collect();
multiexp(srs.g_positive_x_alpha[0..n].iter(), values.iter()).into_affine()
};
let mut p_4 = vec![];
for (c, p) in non_permuted_coefficients.iter().zip(permutations.iter()) {
assert!(c.len() == p.len());
assert!(c.len() == n);
// p2 is a commitment to the s^{perm}_i * x^i
{
let p2 = multiexp(srs.g_positive_x_alpha[0..n].iter(), c.iter()).into_affine();
p_2.push(p2);
}
// p4 commits to the permutation's index values themselves, lifted into the field.
{
let values: Vec<E::Fr> = p.iter().map(|el| {
let mut repr = <<E as ScalarEngine>::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = *el as u64;
let fe = E::Fr::from_repr(repr).unwrap();
fe
}).collect();
let p4 = multiexp(srs.g_positive_x_alpha[0..n].iter(), values.iter()).into_affine();
p_4.push(p4);
}
}
SpecializedSRS {
p_1: p_1,
p_2: p_2,
p_3: p_3,
p_4: p_4,
n: n
}
}
// Commits to S and S' at the challenge `y`. Mutates the state: fills the
// `non_permuted_at_y_coefficients` and `inverse_permuted_at_y_coefficients`
// caches, so it must be called exactly once per instance (asserted below).
// Returns one (S, S') commitment pair per permutation.
pub fn commit(&mut self, y: E::Fr, srs: &SRS<E>) -> Vec<(E::G1Affine, E::G1Affine)> {
// Guard against double-commit: the cache must still be empty.
assert!(self.inverse_permuted_at_y_coefficients.len() == 0);
let mut result = vec![];
let n = self.non_permuted_coefficients[0].len();
let mut non_permuted_at_y_coefficients = vec![];
// let mut permuted_coefficients = vec![];
// let mut permuted_at_y_coefficients = vec![];
let mut inverse_permuted_at_y_coefficients = vec![];
// naive algorithms
// for every permutation poly
// -- go throught all variable_idx
// - take coeff from non-permuted coeffs[permutation[variable_idx]]
// - mul by Y^{permutation[variable_idx]}
// - mul by X^{variable_idx + 1}
for (c, p) in self.non_permuted_coefficients.iter().zip(self.permutations.iter()) {
// S'_i = coeff_i * y^{i+1}: scale by consecutive powers of y, then commit.
let mut non_permuted_at_y = c.clone();
mut_distribute_consequitive_powers(&mut non_permuted_at_y[..], y, y);
let s_prime = multiexp(srs.g_positive_x_alpha[0..n].iter(), non_permuted_at_y.iter()).into_affine();
// if we pretend that non_permuted_at_y[sigma[i]] = coeffs[sigma[i]] * Y^sigma[i],
// then inverse_permuted_at_y[i] = coeffs[sigma[i]] * Y^sigma[i]
let inverse_permuted_at_y = permute_inverse(&non_permuted_at_y[..], &p[..]);
// let mut t = vec![E::Fr::zero(); inverse_permuted_at_y.len()];
// for i in 0..t.len() {
// let coeff = c[i];
// let sigma_i = p[i];
// let y_sigma_i = y.pow([sigma_i as u64]);
// t[i] = coeff;
// t[i].mul_assign(&y_sigma_i);
// }
// and commit to S
let s = multiexp(srs.g_positive_x_alpha[0..n].iter(), inverse_permuted_at_y.iter()).into_affine();
// let s = multiexp(srs.g_positive_x_alpha[0..n].iter(), t.iter()).into_affine();
result.push((s, s_prime));
non_permuted_at_y_coefficients.push(non_permuted_at_y);
// permuted_coefficients.push(permuted);
// permuted_at_y_coefficients.push(t);
// permuted_at_y_coefficients.push(permuted_at_y);
inverse_permuted_at_y_coefficients.push(inverse_permuted_at_y);
}
// Persist the caches for the later opening / argument phases.
self.non_permuted_at_y_coefficients = non_permuted_at_y_coefficients;
// self.permuted_coefficients = permuted_coefficients;
// self.permuted_at_y_coefficients = permuted_at_y_coefficients;
self.inverse_permuted_at_y_coefficients = inverse_permuted_at_y_coefficients;
result
}
/// Opens a random linear combination (by `challenges`) of the S' commitments at
/// two related points: `f_opening` opens the un-scaled combination at yz', and
/// `e_opening` opens the y-scaled combination at z'. Both openings share the
/// claimed value `v_zy` because scaling coefficients by powers of y and
/// evaluating at z' equals evaluating the original at yz'.
pub fn open_commitments_to_s_prime(
&self,
challenges: &Vec<E::Fr>,
y: E::Fr,
z_prime: E::Fr,
srs: &SRS<E>
) -> PermutationProof<E> {
let n = self.non_permuted_coefficients[0].len();
let mut yz = y;
yz.mul_assign(&z_prime);
// Fold all coefficient vectors into one polynomial weighted by the challenges.
let mut polynomial: Option<Vec<E::Fr>> = None;
for (p, r) in self.non_permuted_coefficients.iter()
.zip(challenges.iter()) {
if polynomial.is_some() {
if let Some(poly) = polynomial.as_mut() {
mul_add_polynomials(&mut poly[..], &p[..], *r);
}
} else {
let mut poly = p.clone();
mul_polynomial_by_scalar(&mut poly[..], *r);
polynomial = Some(poly);
}
}
let mut polynomial = polynomial.unwrap();
// Claimed value of the combination at yz'.
let v = evaluate_at_consequitive_powers(&polynomial[..], yz, yz);
let mut v_neg = v;
v_neg.negate();
// Opening at yz' (constant term -v prepended so the quotient is exact).
let f = polynomial_commitment_opening(
0,
n,
Some(v_neg).iter().chain_ext(polynomial.iter()),
yz,
&srs
);
// Scale the coefficients by powers of y, then open the scaled poly at z'.
mut_distribute_consequitive_powers(&mut polynomial[..], y, y);
let e = polynomial_commitment_opening(
0,
n,
Some(v_neg).iter().chain_ext(polynomial.iter()),
z_prime,
&srs
);
PermutationProof {
v_zy: v,
e_opening: e,
f_opening: f
}
}
// Makes a permutation argument. The current implementation consumes `self`
// because the grand-product step takes ownership of the cached polynomials.
pub fn make_argument(self,
beta: E::Fr,
gamma: E::Fr,
grand_product_challenges: & Vec<E::Fr>,
wellformed_challenges: & Vec<E::Fr>,
y: E::Fr,
z: E::Fr,
_specialized_srs: &SpecializedSRS<E>,
srs: &SRS<E>
) -> PermutationArgumentProof<E> {
// The product of the coefficients of S_j * (P4_j)^beta * (P1_j)^gamma must
// equal the product of the coefficients of S'_j * (P3_j)^beta * (P1_j)^gamma.
// Additionally open s = \sum_j s_j(X, y) at z.
let n = self.n;
let j = self.non_permuted_coefficients.len();
// One grand-product challenge per permutation, two wellformedness
// challenges per permutation (one for each grand-product half).
assert_eq!(j, grand_product_challenges.len());
assert_eq!(2*j, wellformed_challenges.len());
// s(X, y) = sum over j of the inverse-permuted-at-y polynomials; the first
// term seeds the accumulator.
let mut s_polynomial: Option<Vec<E::Fr>> = None;
for c in self.inverse_permuted_at_y_coefficients.iter()
{
if s_polynomial.is_some() {
if let Some(poly) = s_polynomial.as_mut() {
add_polynomials(&mut poly[..], & c[..]);
}
} else {
s_polynomial = Some(c.clone());
}
}
let s_polynomial = s_polynomial.unwrap();
// evaluate at z
let s_zy = evaluate_at_consequitive_powers(& s_polynomial[..], z, z);
let mut s_zy_neg = s_zy;
s_zy_neg.negate();
// Opening of s(X, y) at z against the value s_zy.
let s_zy_opening = polynomial_commitment_opening(
0,
n,
Some(s_zy_neg).iter().chain_ext(s_polynomial.iter()),
z,
&srs
);
// P1 is the all-ones vector; P3 is (1, 2, ..., n) as field elements.
let p_1_values = vec![E::Fr::one(); n];
let p_3_values: Vec<E::Fr> = (1..=n).map(|el| {
let mut repr = <<E as ScalarEngine>::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = el as u64;
let fe = E::Fr::from_repr(repr).unwrap();
fe
}).collect();
let mut grand_products = vec![];
for ((non_permuted, inv_permuted), permutation) in self.non_permuted_at_y_coefficients.into_iter()
.zip(self.inverse_permuted_at_y_coefficients.into_iter())
.zip(self.permutations.into_iter())
{
// in S combination at the place i there should be term coeff[sigma(i)] * Y^sigma(i), that we can take
// from non-permuted by inverse_permuting it
// let mut s_combination = permute_inverse(&non_permuted[..], &permutation);
let mut s_combination = inv_permuted;
{
// P4 holds the permutation values sigma(i) lifted to field elements.
let p_4_values: Vec<E::Fr> = permutation.into_iter().map(|el| {
let mut repr = <<E as ScalarEngine>::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = el as u64;
let fe = E::Fr::from_repr(repr).unwrap();
fe
}).collect();
// s_combination[i] += beta * sigma(i) + gamma
mul_add_polynomials(&mut s_combination[..], & p_4_values[..], beta);
mul_add_polynomials(&mut s_combination[..], & p_1_values[..], gamma);
}
// combination of coeff[i]*Y^i + beta * i + gamma
let mut s_prime_combination = non_permuted.clone();
{
mul_add_polynomials(&mut s_prime_combination[..], & p_3_values[..], beta);
mul_add_polynomials(&mut s_prime_combination[..], & p_1_values[..], gamma);
}
// Sanity check: both sides of the permutation identity must have the same
// product of coefficients.
let s_prime_product = s_prime_combination.iter().fold(E::Fr::one(), |mut sum, x|
{
sum.mul_assign(&x);
sum
});
let s_product = s_combination.iter().fold(E::Fr::one(), |mut sum, x|
{
sum.mul_assign(&x);
sum
});
assert_eq!(s_product, s_prime_product, "product of coefficients must be the same");
grand_products.push((s_combination, s_prime_combination));
}
// Commit to each (a, b) pair of grand-product inputs.
let mut a_commitments = vec![];
let mut b_commitments = vec![];
for (a, b) in grand_products.iter() {
let (c_a, c_b) = GrandProductArgument::commit_for_individual_products(& a[..], & b[..], &srs);
a_commitments.push(c_a);
b_commitments.push(c_b);
}
// Prove (and self-check) that every committed polynomial is well-formed.
{
let mut all_polys = vec![];
for p in grand_products.iter() {
let (a, b) = p;
all_polys.push(a.clone());
all_polys.push(b.clone());
}
let wellformed_argument = WellformednessArgument::new(all_polys);
let commitments = wellformed_argument.commit(&srs);
let proof = wellformed_argument.make_argument(wellformed_challenges.clone(), &srs);
let valid = WellformednessArgument::verify(n, &wellformed_challenges, &commitments, &proof, &srs);
assert!(valid, "wellformedness argument must be valid");
}
// Run the grand-product argument over the accumulated pairs.
let mut grand_product_argument = GrandProductArgument::new(grand_products);
let c_commitments = grand_product_argument.commit_to_individual_c_polynomials(&srs);
let t_commitment = grand_product_argument.commit_to_t_polynomial(&grand_product_challenges, y, &srs);
let grand_product_openings = grand_product_argument.open_commitments_for_grand_product(y, z, &srs);
let a_zy: Vec<E::Fr> = grand_product_openings.iter().map(|el| el.0.clone()).collect();
let proof = grand_product_argument.make_argument(&a_zy, &grand_product_challenges, y, z, &srs);
// Self-check block: verify the grand-product argument with fresh randomness.
// NOTE(review): this uses a fixed RNG seed, so the "randomness" is
// deterministic — acceptable only for an internal sanity check.
{
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let randomness = (0..j).map(|_| E::Fr::rand(rng)).collect::<Vec<_>>();
let valid = GrandProductArgument::verify_ab_commitment(n,
& randomness,
& a_commitments,
& b_commitments,
&grand_product_openings,
y,
z,
&srs);
assert!(valid, "ab part of grand product argument must be valid");
let randomness = (0..3).map(|_| E::Fr::rand(rng)).collect::<Vec<_>>();
let valid = GrandProductArgument::verify(n,
&randomness,
&a_zy,
&grand_product_challenges,
t_commitment,
&c_commitments,
&proof,
y,
z,
&srs);
assert!(valid, "grand product argument must be valid");
}
PermutationArgumentProof {
j: j,
s_opening: s_zy_opening,
s_zy: s_zy
}
}
/// Verifies the batched opening of the `s'` commitments produced by
/// [`open_commitments_to_s_prime`] using a single pairing product check.
///
/// `randomness` must contain exactly two scalars: one weighting the `e`
/// opening (at `z'`), one weighting the `f` opening (at `y * z'`).
pub fn verify_s_prime_commitment(
_n: usize,
randomness: & Vec<E::Fr>,
challenges: & Vec<E::Fr>,
commitments: &Vec<E::G1Affine>,
proof: &PermutationProof<E>,
y: E::Fr,
z_prime: E::Fr,
specialized_srs: &SpecializedSRS<E>,
srs: &SRS<E>
) -> bool {
assert_eq!(randomness.len(), 2);
assert_eq!(challenges.len(), commitments.len());
// The two checks being batched:
// e(E, h^{alpha x}) * e(E^{-z'}, h^{alpha})   = e(prod_j S'_j^{r_j}, h) * e(g^{-v}, h^{alpha})
// e(F, h^{alpha x}) * e(F^{-y z'}, h^{alpha}) = e(prod_j P2_j^{r_j}, h) * e(g^{-v}, h^{alpha})
let g = srs.g_positive_x[0];
let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare();
// Negate h so the right-hand side terms can be moved into the same
// Miller loop and compared against the identity in Fqk.
let mut h_prep = srs.h_positive_x[0];
h_prep.negate();
let h_prep = h_prep.prepare();
// value = v * (r_0 + r_1): the shared opening value scaled by the batch weights.
let mut value = E::Fr::zero();
for r in randomness.iter() {
value.add_assign(&r);
}
value.mul_assign(&proof.v_zy);
// Scalars for shifting each opening to its evaluation point.
let mut minus_yz = z_prime;
minus_yz.mul_assign(&y);
minus_yz.negate();
let mut minus_z_prime = z_prime;
minus_z_prime.negate();
let f_yz = proof.f_opening.mul(minus_yz.into_repr());
let e_z = proof.e_opening.mul(minus_z_prime.into_repr());
// Terms paired with h^{alpha}: r_0 * E^{-z'} + r_1 * F^{-y z'} + g^{v*(r_0+r_1)}.
let mut h_alpha_term = multiexp(
vec![e_z.into_affine(), f_yz.into_affine()].iter(),
randomness.iter(),
);
let g_v = g.mul(value.into_repr());
h_alpha_term.add_assign(&g_v);
// Terms paired with h^{alpha x}: r_0 * E + r_1 * F.
let h_alpha_x_term = multiexp(
Some(proof.e_opening).iter()
.chain_ext(Some(proof.f_opening).iter()),
randomness.iter(),
).into_affine();
// Challenge-weighted combinations of the s' commitments and of P2.
let s_r = multiexp(
commitments.iter(),
challenges.iter()
).into_affine();
let p2_r = multiexp(
specialized_srs.p_2.iter(),
challenges.iter()
).into_affine();
let h_term = multiexp(
Some(s_r).iter()
.chain_ext(Some(p2_r).iter()),
randomness.iter()
).into_affine();
// Full pairing product must collapse to the identity.
E::final_exponentiation(&E::miller_loop(&[
(&h_alpha_x_term.prepare(), &h_alpha_x_precomp),
(&h_alpha_term.into_affine().prepare(), &h_alpha_precomp),
(&h_term.prepare(), &h_prep),
])).unwrap() == E::Fqk::one()
}
/// Verifies the opening of `s = sum_j S_j` at the point `z` with a single
/// pairing product check:
/// e(W, h^{alpha x}) * e(W^{-z} * g^{s(z,y)}, h^{alpha}) * e(S, h^{-1}) = 1,
/// where `W` is the opening proof and `S` is the sum of the `S_j` commitments.
pub fn verify(
s_commitments: &Vec<E::G1Affine>,
proof: &PermutationArgumentProof<E>,
z: E::Fr,
srs: &SRS<E>
) -> bool {
let g = srs.g_positive_x[0];
let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare();
// Negate h so the commitment side lands on the identity check.
let mut h_prep = srs.h_positive_x[0];
h_prep.negate();
let h_prep = h_prep.prepare();
let mut minus_z = z;
minus_z.negate();
// Term paired with h^{alpha}: W^{-z} + g^{s_zy}.
let opening_z = proof.s_opening.mul(minus_z.into_repr());
let mut h_alpha_term = opening_z;
let g_s = g.mul(proof.s_zy.into_repr());
h_alpha_term.add_assign(&g_s);
let h_alpha_x_term = proof.s_opening;
// S = sum of all S_j commitments (mixed addition: affine into projective).
let mut s = E::G1::zero();
for p in s_commitments {
s.add_assign_mixed(&p);
}
let h_term = s.into_affine();
E::final_exponentiation(&E::miller_loop(&[
(&h_alpha_x_term.prepare(), &h_alpha_x_precomp),
(&h_alpha_term.into_affine().prepare(), &h_alpha_precomp),
(&h_term.prepare(), &h_prep),
])).unwrap() == E::Fqk::one()
}
/// Produces a full non-interactive signature of correct computation for the
/// given coefficient vectors and permutations.
///
/// The Fiat-Shamir transcript absorbs every (S, S') commitment pair first,
/// then yields one batching challenge per pair followed by the evaluation
/// point `z'`; the same transcript then drives the permutation argument.
pub fn make_signature(
    coefficients: Vec<Vec<E::Fr>>,
    permutations: Vec<Vec<usize>>,
    y: E::Fr,
    z: E::Fr,
    srs: &SRS<E>,
) -> SignatureOfCorrectComputation<E> {
    let mut argument = PermutationArgument::new(coefficients, permutations);
    let commitments = argument.commit(y, &srs);
    let num_commitments = commitments.len();

    // Absorb all commitment pairs into the transcript before drawing
    // any challenge, splitting the pairs into their own vectors.
    let mut transcript = Transcript::new(&[]);
    let mut s_commitments = Vec::with_capacity(num_commitments);
    let mut s_prime_commitments = Vec::with_capacity(num_commitments);
    for (s, s_prime) in commitments.into_iter() {
        transcript.commit_point(&s);
        transcript.commit_point(&s_prime);
        s_commitments.push(s);
        s_prime_commitments.push(s_prime);
    }

    // One batching challenge per commitment pair, then the point z'.
    let challenges: Vec<E::Fr> = (0..num_commitments)
        .map(|_| transcript.get_challenge_scalar())
        .collect();
    let z_prime = transcript.get_challenge_scalar();

    let s_prime_commitments_opening =
        argument.open_commitments_to_s_prime(&challenges, y, z_prime, &srs);

    // The argument consumes `argument` and continues on the same transcript.
    let (proof, grand_product_signature) =
        argument.make_argument_with_transcript(&mut transcript, y, z, &srs);

    SignatureOfCorrectComputation {
        s_commitments,
        s_prime_commitments,
        perm_argument_proof: proof,
        perm_proof: s_prime_commitments_opening,
        grand_product_signature,
    }
}
// Makes a permutation argument driven by a Fiat-Shamir transcript. The current
// implementation consumes `self` because the grand-product step takes
// ownership of the cached polynomials.
pub fn make_argument_with_transcript(self,
transcript: &mut Transcript,
y: E::Fr,
z: E::Fr,
srs: &SRS<E>
) -> (PermutationArgumentProof<E>, GrandProductSignature<E>) {
// Draw a separate (beta, gamma) pair from the transcript for every
// individual permutation argument.
let mut betas = vec![];
let mut gammas = vec![];
for _ in 0..self.permutations.len() {
let beta: E::Fr = transcript.get_challenge_scalar();
let gamma: E::Fr = transcript.get_challenge_scalar();
betas.push(beta);
gammas.push(gamma);
}
// The product of the coefficients of S_j * (P4_j)^beta * (P1_j)^gamma must
// equal the product of the coefficients of S'_j * (P3_j)^beta * (P1_j)^gamma.
// Additionally open s = \sum_j s_j(X, y) at z.
let n = self.n;
let j = self.non_permuted_coefficients.len();
// s(X, y) = sum over j of the inverse-permuted-at-y polynomials; the first
// term seeds the accumulator.
let mut s_polynomial: Option<Vec<E::Fr>> = None;
for c in self.inverse_permuted_at_y_coefficients.iter()
{
if s_polynomial.is_some() {
if let Some(poly) = s_polynomial.as_mut() {
add_polynomials(&mut poly[..], & c[..]);
}
} else {
s_polynomial = Some(c.clone());
}
}
let s_polynomial = s_polynomial.unwrap();
// evaluate at z
let s_zy = evaluate_at_consequitive_powers(& s_polynomial[..], z, z);
let mut s_zy_neg = s_zy;
s_zy_neg.negate();
// Opening of s(X, y) at z against the value s_zy.
let s_zy_opening = polynomial_commitment_opening(
0,
n,
Some(s_zy_neg).iter().chain_ext(s_polynomial.iter()),
z,
&srs
);
// P1 is the all-ones vector; P3 is (1, 2, ..., n) as field elements.
let p_1_values = vec![E::Fr::one(); n];
let p_3_values: Vec<E::Fr> = (1..=n).map(|el| {
let mut repr = <<E as ScalarEngine>::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = el as u64;
let fe = E::Fr::from_repr(repr).unwrap();
fe
}).collect();
let mut grand_products = vec![];
for ((((non_permuted, inv_permuted), permutation), beta), gamma) in
self.non_permuted_at_y_coefficients.into_iter()
.zip(self.inverse_permuted_at_y_coefficients.into_iter())
.zip(self.permutations.into_iter())
.zip(betas.into_iter())
.zip(gammas.into_iter())
{
// in S combination at the place i there should be term coeff[sigma(i)] * Y^sigma(i), that we can take
// from non-permuted by inverse_permuting it
// let mut s_combination = permute_inverse(&non_permuted[..], &permutation);
let mut s_combination = inv_permuted;
{
// P4 holds the permutation values sigma(i) lifted to field elements.
let p_4_values: Vec<E::Fr> = permutation.into_iter().map(|el| {
let mut repr = <<E as ScalarEngine>::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = el as u64;
let fe = E::Fr::from_repr(repr).unwrap();
fe
}).collect();
// s_combination[i] += beta * sigma(i) + gamma
mul_add_polynomials(&mut s_combination[..], & p_4_values[..], beta);
mul_add_polynomials(&mut s_combination[..], & p_1_values[..], gamma);
}
// combination of coeff[i]*Y^i + beta * i + gamma
let mut s_prime_combination = non_permuted.clone();
{
mul_add_polynomials(&mut s_prime_combination[..], & p_3_values[..], beta);
mul_add_polynomials(&mut s_prime_combination[..], & p_1_values[..], gamma);
}
// Sanity check: both sides of the permutation identity must have the same
// (non-zero) product of coefficients.
let s_prime_product = s_prime_combination.iter().fold(E::Fr::one(), |mut sum, x|
{
sum.mul_assign(&x);
sum
});
let s_product = s_combination.iter().fold(E::Fr::one(), |mut sum, x|
{
sum.mul_assign(&x);
sum
});
assert_eq!(s_product, s_prime_product, "product of coefficients must be the same");
assert!(!s_product.is_zero(), "grand products must not be zero");
grand_products.push((s_combination, s_prime_combination));
}
// Delegate the grand-product proof to its own transcript-driven signature.
let grand_product_signature = GrandProductArgument::create_signature(
transcript,
grand_products,
y,
z,
&srs
);
let proof = PermutationArgumentProof {
j: j,
s_opening: s_zy_opening,
s_zy: s_zy
};
(proof, grand_product_signature)
}
}
// End-to-end test of the interactive permutation argument: commits, opens
// s' at a random z', then makes and verifies the full argument. Uses a
// fixed RNG seed for reproducibility.
#[test]
fn test_permutation_argument() {
use crate::pairing::bls12_381::{Fr, G1Affine, G1, Bls12};
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
use crate::sonic::srs::SRS;
let srs_x = Fr::from_str("23923").unwrap();
let srs_alpha = Fr::from_str("23728792").unwrap();
// let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
let srs = SRS::<Bls12>::new(128, srs_x, srs_alpha);
let n: usize = 1 << 4;
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut coeffs = (0..n).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
coeffs[2] = Fr::zero(); // edge case: a zero coefficient must still verify
// A random permutation of (1..=n); permutations are 1-indexed.
let mut permutation = (1..=n).collect::<Vec<_>>();
rng.shuffle(&mut permutation);
let coeffs = vec![coeffs];
let permutations = vec![permutation];
let specialized_srs = PermutationArgument::make_specialized_srs(&coeffs, &permutations, &srs);
let mut argument = PermutationArgument::new(coeffs, permutations);
let y : Fr = rng.gen();
// One challenge per (single) commitment pair.
let challenges = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let commitments = argument.commit(y, &srs);
let mut s_commitments = vec![];
let mut s_prime_commitments = vec![];
for (s, s_prime) in commitments.into_iter() {
s_commitments.push(s);
s_prime_commitments.push(s_prime);
}
let z_prime : Fr = rng.gen();
let opening = argument.open_commitments_to_s_prime(&challenges, y, z_prime, &srs);
// verify_s_prime_commitment requires exactly two batching scalars.
let randomness = (0..2).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let valid = PermutationArgument::verify_s_prime_commitment(n,
&randomness,
&challenges,
&s_prime_commitments,
&opening,
y,
z_prime,
&specialized_srs,
&srs);
assert!(valid, "s' commitment must be valid");
let beta : Fr = rng.gen();
let gamma : Fr = rng.gen();
// j = 1 grand-product challenge, 2*j = 2 wellformedness challenges.
let grand_product_challenges = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let wellformed_challenges = (0..2).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let z : Fr = rng.gen();
let proof = argument.make_argument(
beta,
gamma,
& grand_product_challenges,
& wellformed_challenges,
y,
z,
&specialized_srs, &srs);
let valid = PermutationArgument::verify(&s_commitments, &proof, z, &srs);
assert!(valid, "permutation argument must be valid");
}
<file_sep>/src/sonic/helped/batch.rs
//! Our protocol allows the verification of multiple proofs and even
//! of individual proofs to batch the pairing operations such that
//! only a smaller, fixed number of pairings must occur for an entire
//! batch of proofs. This is possible because G2 elements are fixed
//! in our protocol and never appear in proofs; everything can be
//! combined probabilistically.
//!
//! This submodule contains the `Batch` abstraction for creating a
//! context for batch verification.
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveAffine, CurveProjective};
use crate::SynthesisError;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::cs::{Circuit};
use super::parameters::VerifyingKey;
use crate::sonic::srs::SRS;
use crate::sonic::util::multiexp;
use std::marker::PhantomData;
// One of the primary functions of the `Batch` abstraction is handling
// Kate commitment openings:
//
// e(P', [\alpha(x - z)] H) = e(P, H) e([-v] G, [\alpha] H)
// ==> e(P', [\alpha x] H) e([-z] P', [\alpha] H) = e(P, H) e([-v] G, [\alpha] H)
//
// Many of these can be opened simultaneously by sampling random `r` and
// accumulating...
//
// e([r] P', [\alpha x] H)
// e([-rz] P', [\alpha] H)
// e([r] P, -H)
// e([rv] G, [\alpha] H)
//
// ... and checking that the result is the identity in the target group.
/// Accumulator for batch verification of Kate commitment openings.
///
/// Each vector pairs G1 points with the random scalars they will be
/// multiplied by; a single multiexp per bucket then feeds one pairing each.
pub struct Batch<E: Engine> {
// Points paired with h^{alpha * x}.
alpha_x: Vec<(E::G1Affine, E::Fr)>,
alpha_x_precomp: <E::G2Affine as CurveAffine>::Prepared,
// Points paired with h^{alpha}.
alpha: Vec<(E::G1Affine, E::Fr)>,
alpha_precomp: <E::G2Affine as CurveAffine>::Prepared,
// Points paired with -h.
neg_h: Vec<(E::G1Affine, E::Fr)>,
neg_h_precomp: <E::G2Affine as CurveAffine>::Prepared,
// Points paired with -h^{x^{d - n}} (the degree-bound check).
neg_x_n_minus_d: Vec<(E::G1Affine, E::Fr)>,
neg_x_n_minus_d_precomp: <E::G2Affine as CurveAffine>::Prepared,
// The value paired with [\alpha] H, accumulated in the field
// to save group operations.
value: E::Fr,
// Generator used to lift `value` into G1 at check time.
g: E::G1Affine,
}
impl<E: Engine> Batch<E> {
/// Creates an empty batch context directly from an SRS, preparing the four
/// fixed G2 points; `n` fixes the degree bound used by `neg_x_n_minus_d`.
pub fn new(srs: &SRS<E>, n: usize) -> Self {
Batch {
alpha_x: vec![],
alpha_x_precomp: srs.h_positive_x_alpha[1].prepare(),
alpha: vec![],
alpha_precomp: srs.h_positive_x_alpha[0].prepare(),
neg_h: vec![],
neg_h_precomp: {
// Negated so all terms can sit in one Miller loop compared to 1.
let mut tmp = srs.h_negative_x[0];
tmp.negate();
tmp.prepare()
},
neg_x_n_minus_d: vec![],
neg_x_n_minus_d_precomp: {
let mut tmp = srs.h_negative_x[srs.d - n];
tmp.negate();
tmp.prepare()
},
value: E::Fr::zero(),
g: srs.g_positive_x[0],
}
}
/// Creates an empty batch context from a verifying key whose G2 points are
/// already negated/fixed; uses the canonical G1 generator for `g`.
pub fn new_from_key(vk: &VerifyingKey<E>) -> Self {
Batch {
alpha_x: vec![],
alpha_x_precomp: vk.alpha_x.prepare(),
alpha: vec![],
alpha_precomp: vk.alpha.prepare(),
neg_h: vec![],
neg_h_precomp: vk.neg_h.prepare(),
neg_x_n_minus_d: vec![],
neg_x_n_minus_d_precomp: vk.neg_x_n_minus_d.prepare(),
value: E::Fr::zero(),
g: E::G1Affine::one(),
}
}
/// Accumulates a Kate opening proof `P` at evaluation point `point` with
/// batching weight `r`: `r * P` joins the `h^(alpha * x)` bucket and
/// `-(r * point) * P` joins the `h^(alpha)` bucket.
pub fn add_opening(&mut self, p: E::G1Affine, mut r: E::Fr, point: E::Fr) {
    self.alpha_x.push((p, r));
    // (-r) * point == -(r * point) in the scalar field.
    r.negate();
    r.mul_assign(&point);
    self.alpha.push((p, r));
}
/// Adds `r * P` to the bucket paired with `-h` (commitment consistency).
pub fn add_commitment(&mut self, p: E::G1Affine, r: E::Fr) {
self.neg_h.push((p, r));
}
/// Adds `r * P` to the bucket paired with `-h^(x^(d-n))` (degree-bound check).
pub fn add_commitment_max_n(&mut self, p: E::G1Affine, r: E::Fr) {
self.neg_x_n_minus_d.push((p, r));
}
/// Folds `r * point` into the field accumulator that is lifted onto `g` and
/// paired with `h^(alpha)` at check time (cheaper than a group op per call).
pub fn add_opening_value(&mut self, mut r: E::Fr, point: E::Fr) {
r.mul_assign(&point);
self.value.add_assign(&r);
}
/// Consumes the batch and performs the single pairing product check.
///
/// Collapses each bucket with one multiexp, lifts the accumulated field
/// `value` onto `g` into the `alpha` bucket, and accepts iff the product of
/// the four pairings is the identity in the target group.
pub fn check_all(mut self) -> bool {
// Fold the scalar accumulator in as one more alpha-bucket term.
self.alpha.push((self.g, self.value));
let alpha_x = multiexp(
self.alpha_x.iter().map(|x| &x.0),
self.alpha_x.iter().map(|x| &x.1),
).into_affine();
let alpha_x = alpha_x.prepare();
let alpha = multiexp(
self.alpha.iter().map(|x| &x.0),
self.alpha.iter().map(|x| &x.1),
).into_affine();
let alpha = alpha.prepare();
let neg_h = multiexp(
self.neg_h.iter().map(|x| &x.0),
self.neg_h.iter().map(|x| &x.1),
).into_affine();
let neg_h = neg_h.prepare();
let neg_x_n_minus_d = multiexp(
self.neg_x_n_minus_d.iter().map(|x| &x.0),
self.neg_x_n_minus_d.iter().map(|x| &x.1),
).into_affine();
let neg_x_n_minus_d = neg_x_n_minus_d.prepare();
// One Miller loop over all four (G1, G2) pairs, then a single final
// exponentiation; the G2 sides were prepared at construction.
E::final_exponentiation(&E::miller_loop(&[
(&alpha_x, &self.alpha_x_precomp),
(&alpha, &self.alpha_precomp),
(&neg_h, &self.neg_h_precomp),
(&neg_x_n_minus_d, &self.neg_x_n_minus_d_precomp),
])).unwrap() == E::Fqk::one()
}
}<file_sep>/src/plonk/commitments/transparent/fri/coset_combining_fri/mod.rs
pub mod fri;
// pub mod query_producer;
// pub mod verifier;
pub mod precomputation;
use crate::SynthesisError;
use crate::worker::Worker;
use crate::ff::PrimeField;
use crate::plonk::commitments::transparent::iop_compiler::*;
use crate::plonk::polynomials::*;
use crate::plonk::commitments::transcript::Prng;
/// Intermediate (prover-side) form of a FRI proof, before queries are drawn.
pub trait FriProofPrototype<F: PrimeField, I: IopInstance<F>> {
/// Oracle commitments (one per folding round).
fn get_roots(&self) -> Vec<I::Commitment>;
/// Commitment of the last folded oracle.
fn get_final_root(&self) -> I::Commitment;
/// Coefficients of the final low-degree polynomial, sent in the clear.
fn get_final_coefficients(&self) -> Vec<F>;
}
/// Finalized FRI proof: final polynomial plus per-round oracle queries.
pub trait FriProof<F: PrimeField, I: IopInstance<F>> {
fn get_final_coefficients(&self) -> &[F];
/// Queries grouped per folding round.
fn get_queries(&self) -> &Vec<Vec<I::Query>>;
}
/// Domain-size-specific precomputations reused across FRI proving runs.
pub trait FriPrecomputations<F: PrimeField> {
fn new_for_domain_size(size: usize) -> Self;
/// Inverse roots of unity in bit-reversed order, as used by the folding step.
fn omegas_inv_bitreversed(&self) -> &[F];
fn domain_size(&self) -> usize;
}
/// The FRI IOP interface: proving over an LDE, extracting challenges, and
/// verifying query openings against those challenges.
pub trait FriIop<F: PrimeField> {
/// Folding degree per round (e.g. 2 for the classic halving FRI).
const DEGREE: usize;
type IopType: IopInstance<F>;
type ProofPrototype: FriProofPrototype<F, Self::IopType>;
type Proof: FriProof<F, Self::IopType>;
type Params: Clone + std::fmt::Debug;
/// Runs the commit phase over LDE values, producing a proof prototype
/// (oracles committed, final coefficients fixed, no queries yet).
fn proof_from_lde<P: Prng<F, Input = <Self::IopType as IopInstance<F>>::Commitment>,
C: FriPrecomputations<F>
>(
lde_values: &Polynomial<F, Values>,
lde_factor: usize,
output_coeffs_at_degree_plus_one: usize,
precomputations: &C,
worker: &Worker,
prng: &mut P,
params: &Self::Params
) -> Result<Self::ProofPrototype, SynthesisError>;
/// Turns a prototype into a full proof by opening the oracles at the
/// given query positions (natural, i.e. non-bit-reversed, indexes).
fn prototype_into_proof(
prototype: Self::ProofPrototype,
iop_values: &Polynomial<F, Values>,
natural_first_element_indexes: Vec<usize>,
params: &Self::Params
) -> Result<Self::Proof, SynthesisError>;
/// Replays the transcript over the proof's commitments to recover the
/// per-round folding challenges.
fn get_fri_challenges<P: Prng<F, Input = <Self::IopType as IopInstance<F>>::Commitment>>(
proof: &Self::Proof,
prng: &mut P,
params: &Self::Params
) -> Vec<F>;
/// Checks the proof's query openings against the expected values and the
/// recovered folding challenges.
fn verify_proof_with_challenges(
proof: &Self::Proof,
natural_element_indexes: Vec<usize>,
expected_value: &[F],
fri_challenges: &[F],
params: &Self::Params
) -> Result<bool, SynthesisError>;
}
use super::*;
use super::cs::*;
use crate::smallvec::SmallVec;
use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use crate::pairing::{Engine, CurveAffine, CurveProjective};
use crate::bit_vec::BitVec;
use crate::{SynthesisError};
use std::marker::PhantomData;
use crate::worker::Worker;
use crate::plonk::domains::*;
use crate::plonk::polynomials::*;
use crate::plonk::cs::variable::*;
use crate::plonk::better_cs::utils::*;
use crate::plonk::fft::cooley_tukey_ntt::*;
pub mod selector_optimized_with_d_next;
pub mod main_gate_with_d_next;<file_sep>/src/sonic/sonic/backends.rs
use crate::pairing::{Engine};
use crate::sonic::cs::Backend;
use std::marker::PhantomData;
use crate::SynthesisError;
use crate::sonic::cs::SynthesisDriver;
use crate::sonic::cs::{Circuit, ConstraintSystem, Variable, LinearCombination};
use crate::pairing::ff::Field;
/// Counting backend: records circuit shape (k-powers used by inputs, number
/// of multiplication gates `n`, number of linear constraints `q`) without
/// storing any witness values.
pub struct Preprocess<E: Engine> {
pub k_map: Vec<usize>,
pub n: usize,
pub q: usize,
_marker: PhantomData<E>
}
// Backend that only counts; all value-related callbacks keep their default
// no-op implementations.
impl<'a, E: Engine> Backend<E> for &'a mut Preprocess<E> {
type LinearConstraintIndex = ();
fn get_for_q(&self, _q: usize) -> Self::LinearConstraintIndex { () }
// Record which power of k each public input consumes.
fn new_k_power(&mut self, index: usize) {
self.k_map.push(index);
}
fn new_multiplication_gate(&mut self) {
self.n += 1;
}
// Return type is the unit LinearConstraintIndex, hence the trailing ().
fn new_linear_constraint(&mut self) {
self.q += 1;
()
}
}
impl<E: Engine> Preprocess<E> {
    /// Creates an empty preprocessing counter with all tallies at zero.
    pub fn new() -> Self {
        Preprocess {
            k_map: Vec::new(),
            n: 0,
            q: 0,
            _marker: PhantomData,
        }
    }
}
/// Witness-collecting backend: one value per multiplication-gate wire, split
/// into the three columns a, b, c.
pub struct Wires<E: Engine> {
pub a: Vec<E::Fr>,
pub b: Vec<E::Fr>,
pub c: Vec<E::Fr>
}
// Backend that stores and serves wire assignments. Variable indexes are
// 1-based, hence the `index - 1` throughout.
impl<'a, E: Engine> Backend<E> for &'a mut Wires<E> {
type LinearConstraintIndex = ();
fn new_linear_constraint(&mut self) -> Self::LinearConstraintIndex { () }
fn get_for_q(&self, _q: usize) -> Self::LinearConstraintIndex { () }
// Each new gate appends a zero-initialized slot to all three columns.
fn new_multiplication_gate(&mut self) {
self.a.push(E::Fr::zero());
self.b.push(E::Fr::zero());
self.c.push(E::Fr::zero());
}
fn get_var(&self, variable: Variable) -> Option<E::Fr> {
Some(match variable {
Variable::A(index) => {
self.a[index - 1]
},
Variable::B(index) => {
self.b[index - 1]
},
Variable::C(index) => {
self.c[index - 1]
}
})
}
// Lazily evaluates `value` and stores it in the column selected by
// `variable`; propagates any synthesis error from the closure.
fn set_var<F>(&mut self, variable: Variable, value: F) -> Result<(), SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>
{
let value = value()?;
match variable {
Variable::A(index) => {
self.a[index - 1] = value;
},
Variable::B(index) => {
self.b[index - 1] = value;
},
Variable::C(index) => {
self.c[index - 1] = value;
}
}
Ok(())
}
}
impl<E: Engine> Wires<E> {
    /// Creates an empty wire-assignment store with all three columns empty.
    pub fn new() -> Self {
        Wires {
            a: Vec::new(),
            b: Vec::new(),
            c: Vec::new(),
        }
    }
}
/// Counts multiplication gates (`n`) and linear constraints (`q`) for a
/// given synthesis driver, storing no values.
pub struct CountNandQ<S: SynthesisDriver> {
pub n: usize,
pub q: usize,
_marker: std::marker::PhantomData<S>
}
// Pure counting backend; all other Backend callbacks keep their defaults.
impl<'a, E: Engine, S: SynthesisDriver> Backend<E> for &'a mut CountNandQ<S> {
type LinearConstraintIndex = ();
fn get_for_q(&self, _q: usize) -> Self::LinearConstraintIndex { () }
fn new_multiplication_gate(&mut self) {
self.n += 1;
}
fn new_linear_constraint(&mut self) -> Self::LinearConstraintIndex {
self.q += 1;
()
}
}
impl<S: SynthesisDriver> CountNandQ<S> {
pub fn new() -> Self {
Self {
n: 0,
q: 0,
_marker: std::marker::PhantomData
}
}
}
/// Counts only multiplication gates (`n`) for a given synthesis driver.
pub struct CountN<S: SynthesisDriver> {
pub n: usize,
_marker: std::marker::PhantomData<S>
}
// Pure gate-counting backend; linear constraints are acknowledged but not counted.
impl<'a, E: Engine, S: SynthesisDriver> Backend<E> for &'a mut CountN<S> {
type LinearConstraintIndex = ();
fn new_linear_constraint(&mut self) -> Self::LinearConstraintIndex { () }
fn get_for_q(&self, _q: usize) -> Self::LinearConstraintIndex { () }
fn new_multiplication_gate(&mut self) {
self.n += 1;
}
}
impl<S: SynthesisDriver> CountN<S> {
pub fn new() -> Self {
Self {
n: 0,
_marker: std::marker::PhantomData
}
}
}
<file_sep>/src/plonk/utils.rs
use crate::worker::*;
use crate::pairing::ff::PrimeField;
/// Converts a slice of indexes into field elements in parallel, one worker
/// chunk at a time. Each usize is written into the lowest limb of a zero
/// representation, so every index must fit the field (true for typical
/// circuit sizes).
pub(crate) fn convert_to_field_elements<F: PrimeField>(indexes: &[usize], worker: &Worker) -> Vec<F> {
let mut result = vec![F::zero(); indexes.len()];
worker.scope(indexes.len(), |scope, chunk| {
// Pair each input chunk with the matching output chunk; scoped threads
// let each task own a disjoint &mut slice of `result`.
for (idx, fe) in indexes.chunks(chunk)
.zip(result.chunks_mut(chunk)) {
scope.spawn(move |_| {
// Reuse one repr per task; only limb 0 changes between iterations.
let mut repr = F::zero().into_repr();
for (idx, fe) in idx.iter().zip(fe.iter_mut()) {
repr.as_mut()[0] = *idx as u64;
*fe = F::from_repr(repr).expect("is a valid representation");
}
});
}
});
result
}
<file_sep>/src/plonk/better_better_cs/proof/mod.rs
use super::cs::*;
use super::data_structures::{self, *};
use crate::pairing::ff::*;
use crate::pairing::{Engine, CurveAffine, CurveProjective};
use crate::plonk::polynomials::*;
use std::collections::HashMap;
use crate::plonk::domains::*;
use crate::worker::Worker;
use crate::SynthesisError;
use crate::kate_commitment::*;
use super::super::better_cs::utils::*;
use super::setup::*;
use super::utils::*;
use crate::plonk::fft::cooley_tukey_ntt::*;
use crate::byteorder::BigEndian;
use crate::byteorder::ReadBytesExt;
use crate::byteorder::WriteBytesExt;
use std::io::{Read, Write};
use crate::plonk::better_cs::keys::*;
/// Serializes a `(index, field element)` pair: the index as a big-endian
/// `u64`, then the field element via `write_fr`.
pub fn write_tuple_with_one_index<F: PrimeField, W: Write>(
    tuple: &(usize, F),
    mut writer: W
) -> std::io::Result<()> {
    let (index, element) = tuple;
    writer.write_u64::<BigEndian>(*index as u64)?;
    write_fr(element, &mut writer)?;
    Ok(())
}
/// Serializes a slice of `(index, field element)` pairs: a big-endian `u64`
/// length prefix followed by each pair in order.
pub fn write_tuple_with_one_index_vec<F: PrimeField, W: Write>(p: &[(usize, F)], mut writer: W) -> std::io::Result<()> {
    writer.write_u64::<BigEndian>(p.len() as u64)?;
    for tuple in p {
        write_tuple_with_one_index(tuple, &mut writer)?;
    }
    Ok(())
}
/// Deserializes one `(index, field element)` pair written by
/// `write_tuple_with_one_index`.
pub fn read_tuple_with_one_index<F: PrimeField, R: Read>(mut reader: R) -> std::io::Result<(usize, F)> {
    let index = reader.read_u64::<BigEndian>()? as usize;
    let element: F = read_fr(&mut reader)?;
    Ok((index, element))
}
/// Deserializes a length-prefixed vector of `(index, field element)` pairs
/// written by `write_tuple_with_one_index_vec`.
pub fn read_tuple_with_one_index_vec<F: PrimeField, R: Read>(mut reader: R) -> std::io::Result<Vec<(usize, F)>> {
    let num_elements = reader.read_u64::<BigEndian>()?;
    // Collecting over io::Result short-circuits on the first read error.
    (0..num_elements)
        .map(|_| read_tuple_with_one_index(&mut reader))
        .collect()
}
/// Serializes a `(index, index, field element)` triple: both indexes as
/// big-endian `u64` values, then the field element via `write_fr`.
pub fn write_tuple_with_two_indexes<F: PrimeField, W: Write>(
    tuple: &(usize, usize, F),
    mut writer: W
) -> std::io::Result<()> {
    let (first, second, element) = tuple;
    writer.write_u64::<BigEndian>(*first as u64)?;
    writer.write_u64::<BigEndian>(*second as u64)?;
    write_fr(element, &mut writer)?;
    Ok(())
}
/// Serializes a slice of `(index, index, field element)` triples: a
/// big-endian `u64` length prefix followed by each triple in order.
pub fn write_tuple_with_two_indexes_vec<F: PrimeField, W: Write>(p: &[(usize, usize, F)], mut writer: W) -> std::io::Result<()> {
    writer.write_u64::<BigEndian>(p.len() as u64)?;
    for tuple in p {
        write_tuple_with_two_indexes(tuple, &mut writer)?;
    }
    Ok(())
}
/// Deserializes one `(index, index, field element)` triple written by
/// `write_tuple_with_two_indexes`.
pub fn read_tuple_with_two_indexes<F: PrimeField, R: Read>(mut reader: R) -> std::io::Result<(usize, usize, F)> {
    let first = reader.read_u64::<BigEndian>()? as usize;
    let second = reader.read_u64::<BigEndian>()? as usize;
    let element: F = read_fr(&mut reader)?;
    Ok((first, second, element))
}
/// Deserializes a length-prefixed vector of `(index, index, field element)`
/// triples written by `write_tuple_with_two_indexes_vec`.
pub fn read_tuple_with_two_indexes_vec<F: PrimeField, R: Read>(mut reader: R) -> std::io::Result<Vec<(usize, usize, F)>> {
    let num_elements = reader.read_u64::<BigEndian>()?;
    // Collecting over io::Result short-circuits on the first read error.
    (0..num_elements)
        .map(|_| read_tuple_with_two_indexes(&mut reader))
        .collect()
}
/// A PLONK proof for circuit `C`. The `lookup_*` fields are `Some` only when
/// the circuit uses lookup tables.
#[derive(Clone, PartialEq, Debug, Eq, serde::Serialize, serde::Deserialize)]
pub struct Proof<E: Engine, C: Circuit<E>> {
// Domain size the proof was made for.
pub n: usize,
pub inputs: Vec<E::Fr>,
// Round 1-3 commitments.
pub state_polys_commitments: Vec<E::G1Affine>,
pub witness_polys_commitments: Vec<E::G1Affine>,
pub copy_permutation_grand_product_commitment: E::G1Affine,
pub lookup_s_poly_commitment: Option<E::G1Affine>,
pub lookup_grand_product_commitment: Option<E::G1Affine>,
pub quotient_poly_parts_commitments: Vec<E::G1Affine>,
// Evaluations at z; "dilations" carry (poly index, dilation step, value).
pub state_polys_openings_at_z: Vec<E::Fr>,
pub state_polys_openings_at_dilations: Vec<(usize, usize, E::Fr)>,
pub witness_polys_openings_at_z: Vec<E::Fr>,
pub witness_polys_openings_at_dilations: Vec<(usize, usize, E::Fr)>,
pub gate_setup_openings_at_z: Vec<(usize, usize, E::Fr)>,
pub gate_selectors_openings_at_z: Vec<(usize, E::Fr)>,
pub copy_permutation_polys_openings_at_z: Vec<E::Fr>,
pub copy_permutation_grand_product_opening_at_z_omega: E::Fr,
pub lookup_s_poly_opening_at_z_omega: Option<E::Fr>,
pub lookup_grand_product_opening_at_z_omega: Option<E::Fr>,
pub lookup_t_poly_opening_at_z: Option<E::Fr>,
pub lookup_t_poly_opening_at_z_omega: Option<E::Fr>,
pub lookup_selector_poly_opening_at_z: Option<E::Fr>,
pub lookup_table_type_poly_opening_at_z: Option<E::Fr>,
pub quotient_poly_opening_at_z: E::Fr,
pub linearization_poly_opening_at_z: E::Fr,
// Final KZG opening proofs at z and z*omega.
pub opening_proof_at_z: E::G1Affine,
pub opening_proof_at_z_omega: E::G1Affine,
#[serde(skip_serializing, default)]
#[serde(bound(serialize = ""))]
#[serde(bound(deserialize = ""))]
_marker: std::marker::PhantomData<C>
}
impl<E: Engine, C: Circuit<E>> Proof<E, C> {
/// Creates a proof with every field empty / zero / `None`, suitable as a
/// placeholder before deserialization or assembly.
pub fn empty() -> Self {
Self {
n: 0,
inputs: vec![],
state_polys_commitments: vec![],
witness_polys_commitments: vec![],
copy_permutation_grand_product_commitment: E::G1Affine::zero(),
lookup_s_poly_commitment: None,
lookup_grand_product_commitment: None,
quotient_poly_parts_commitments: vec![],
state_polys_openings_at_z: vec![],
state_polys_openings_at_dilations: vec![],
witness_polys_openings_at_z: vec![],
witness_polys_openings_at_dilations: vec![],
gate_setup_openings_at_z: vec![],
gate_selectors_openings_at_z: vec![],
copy_permutation_polys_openings_at_z: vec![],
copy_permutation_grand_product_opening_at_z_omega: E::Fr::zero(),
lookup_s_poly_opening_at_z_omega: None,
lookup_grand_product_opening_at_z_omega: None,
lookup_t_poly_opening_at_z: None,
lookup_t_poly_opening_at_z_omega: None,
lookup_selector_poly_opening_at_z: None,
lookup_table_type_poly_opening_at_z: None,
quotient_poly_opening_at_z: E::Fr::zero(),
linearization_poly_opening_at_z: E::Fr::zero(),
opening_proof_at_z: E::G1Affine::zero(),
opening_proof_at_z_omega: E::G1Affine::zero(),
_marker: std::marker::PhantomData
}
}
/// Serializes the proof. Field order here and in `read` must stay in
/// lock-step: both follow the struct's declaration order.
pub fn write<W: Write>(&self, mut writer: W) -> std::io::Result<()> {
writer.write_u64::<BigEndian>(self.n as u64)?;
write_fr_vec(&self.inputs, &mut writer)?;
write_curve_affine_vec(&self.state_polys_commitments, &mut writer)?;
write_curve_affine_vec(&self.witness_polys_commitments, &mut writer)?;
write_curve_affine(&self.copy_permutation_grand_product_commitment, &mut writer)?;
write_optional_curve_affine(&self.lookup_s_poly_commitment, &mut writer)?;
write_optional_curve_affine(&self.lookup_grand_product_commitment, &mut writer)?;
write_curve_affine_vec(&self.quotient_poly_parts_commitments, &mut writer)?;
write_fr_vec(&self.state_polys_openings_at_z, &mut writer)?;
write_tuple_with_two_indexes_vec(&self.state_polys_openings_at_dilations, &mut writer)?;
write_fr_vec(&self.witness_polys_openings_at_z, &mut writer)?;
write_tuple_with_two_indexes_vec(&self.witness_polys_openings_at_dilations, &mut writer)?;
write_tuple_with_two_indexes_vec(&self.gate_setup_openings_at_z, &mut writer)?;
write_tuple_with_one_index_vec(&self.gate_selectors_openings_at_z, &mut writer)?;
write_fr_vec(&self.copy_permutation_polys_openings_at_z, &mut writer)?;
write_fr(&self.copy_permutation_grand_product_opening_at_z_omega, &mut writer)?;
write_optional_fr(&self.lookup_s_poly_opening_at_z_omega, &mut writer)?;
write_optional_fr(&self.lookup_grand_product_opening_at_z_omega, &mut writer)?;
write_optional_fr(&self.lookup_t_poly_opening_at_z, &mut writer)?;
write_optional_fr(&self.lookup_t_poly_opening_at_z_omega, &mut writer)?;
write_optional_fr(&self.lookup_selector_poly_opening_at_z, &mut writer)?;
write_optional_fr(&self.lookup_table_type_poly_opening_at_z, &mut writer)?;
write_fr(&self.quotient_poly_opening_at_z, &mut writer)?;
write_fr(&self.linearization_poly_opening_at_z, &mut writer)?;
write_curve_affine(&self.opening_proof_at_z, &mut writer)?;
write_curve_affine(&self.opening_proof_at_z_omega, &mut writer)?;
Ok(())
}
/// Deserializes a proof; the read order mirrors `write` exactly.
pub fn read<R: Read>(mut reader: R) -> std::io::Result<Self> {
let new = Self {
n: reader.read_u64::<BigEndian>()? as usize,
inputs: read_fr_vec(&mut reader)?,
state_polys_commitments: read_curve_affine_vector(&mut reader)?,
witness_polys_commitments: read_curve_affine_vector(&mut reader)?,
copy_permutation_grand_product_commitment: read_curve_affine(&mut reader)?,
lookup_s_poly_commitment: read_optional_curve_affine(&mut reader)?,
lookup_grand_product_commitment: read_optional_curve_affine(&mut reader)?,
quotient_poly_parts_commitments: read_curve_affine_vector(&mut reader)?,
state_polys_openings_at_z: read_fr_vec(&mut reader)?,
state_polys_openings_at_dilations: read_tuple_with_two_indexes_vec(&mut reader)?,
witness_polys_openings_at_z: read_fr_vec(&mut reader)?,
witness_polys_openings_at_dilations: read_tuple_with_two_indexes_vec(&mut reader)?,
gate_setup_openings_at_z: read_tuple_with_two_indexes_vec(&mut reader)?,
gate_selectors_openings_at_z: read_tuple_with_one_index_vec(&mut reader)?,
copy_permutation_polys_openings_at_z: read_fr_vec(&mut reader)?,
copy_permutation_grand_product_opening_at_z_omega: read_fr(&mut reader)?,
lookup_s_poly_opening_at_z_omega: read_optional_fr(&mut reader)?,
lookup_grand_product_opening_at_z_omega: read_optional_fr(&mut reader)?,
lookup_t_poly_opening_at_z: read_optional_fr(&mut reader)?,
lookup_t_poly_opening_at_z_omega: read_optional_fr(&mut reader)?,
lookup_selector_poly_opening_at_z: read_optional_fr(&mut reader)?,
lookup_table_type_poly_opening_at_z: read_optional_fr(&mut reader)?,
quotient_poly_opening_at_z: read_fr(&mut reader)?,
linearization_poly_opening_at_z: read_fr(&mut reader)?,
opening_proof_at_z: read_curve_affine(&mut reader)?,
opening_proof_at_z_omega: read_curve_affine(&mut reader)?,
_marker: std::marker::PhantomData
};
Ok(new)
}
}
use super::cs::*;
use crate::plonk::commitments::transcript::*;
impl<E: Engine, P: PlonkConstraintSystemParams<E>, MG: MainGate<E>, S: SynthesisMode> Assembly<E, P, MG, S> {
pub fn create_proof<C: Circuit<E>, T: Transcript<E::Fr>>(
self,
worker: &Worker,
setup: &Setup<E, C>,
mon_crs: &Crs<E, CrsForMonomialForm>,
transcript_params: Option<T::InitializationParameters>,
) -> Result<Proof<E, C>, SynthesisError> {
self.create_proof_by_ref::<_, T>(worker, setup, mon_crs, transcript_params)
}
pub fn create_proof_by_ref<C: Circuit<E>, T: Transcript<E::Fr>>(
&self,
worker: &Worker,
setup: &Setup<E, C>,
mon_crs: &Crs<E, CrsForMonomialForm>,
transcript_params: Option<T::InitializationParameters>,
) -> Result<Proof<E, C>, SynthesisError> {
assert!(S::PRODUCE_WITNESS);
assert!(self.is_finalized);
let mut transcript = if let Some(params) = transcript_params {
T::new_from_params(params)
} else {
T::new()
};
let mut proof = Proof::<E, C>::empty();
let input_values = self.input_assingments.clone();
proof.n = self.n();
proof.inputs = input_values.clone();
for inp in input_values.iter() {
transcript.commit_field_element(inp);
}
let num_state_polys = <Self as ConstraintSystem<E>>::Params::STATE_WIDTH;
let num_witness_polys = <Self as ConstraintSystem<E>>::Params::WITNESS_WIDTH;
let mut values_storage = self.make_assembled_poly_storage(worker, true)?;
let required_domain_size = self.n() + 1;
assert!(required_domain_size.is_power_of_two());
let omegas_bitreversed = BitReversedOmegas::<E::Fr>::new_for_domain_size(required_domain_size);
let omegas_inv_bitreversed = <OmegasInvBitreversed::<E::Fr> as CTPrecomputations::<E::Fr>>::new_for_domain_size(required_domain_size);
// if we simultaneously produce setup then grab permutation polys in values forms
if S::PRODUCE_SETUP {
let permutation_polys = self.make_permutations(&worker)?;
assert_eq!(permutation_polys.len(), num_state_polys);
for (idx, poly) in permutation_polys.into_iter().enumerate() {
let key = PolyIdentifier::PermutationPolynomial(idx);
let poly = PolynomialProxy::from_owned(poly);
values_storage.setup_map.insert(key, poly);
}
} else {
// compute from setup
for idx in 0..num_state_polys {
let key = PolyIdentifier::PermutationPolynomial(idx);
// let vals = setup.permutation_monomials[idx].clone().fft(&worker).into_coeffs();
let vals = setup.permutation_monomials[idx].clone().fft_using_bitreversed_ntt(
&worker,
&omegas_bitreversed,
&E::Fr::one()
)?.into_coeffs();
let poly = Polynomial::from_values_unpadded(vals)?;
let poly = PolynomialProxy::from_owned(poly);
values_storage.setup_map.insert(key, poly);
}
}
let mut ldes_storage = AssembledPolynomialStorage::<E>::new(
true,
self.max_constraint_degree.next_power_of_two()
);
let mut monomials_storage = Self::create_monomial_storage(
&worker,
&omegas_inv_bitreversed,
&values_storage,
true
)?;
monomials_storage.extend_from_setup(setup)?;
// step 1 - commit state and witness, enumerated. Also commit sorted polynomials for table arguments
for i in 0..num_state_polys {
let key = PolyIdentifier::VariablesPolynomial(i);
let poly_ref = monomials_storage.get_poly(key);
let commitment = commit_using_monomials(
poly_ref,
mon_crs,
&worker
)?;
commit_point_as_xy::<E, T>(
&mut transcript,
&commitment
);
proof.state_polys_commitments.push(commitment);
}
for i in 0..num_witness_polys {
let key = PolyIdentifier::VariablesPolynomial(i);
let poly_ref = monomials_storage.get_poly(key);
let commitment = commit_using_monomials(
poly_ref,
mon_crs,
&worker
)?;
commit_point_as_xy::<E, T>(
&mut transcript,
&commitment
);
proof.witness_polys_commitments.push(commitment);
}
let mut lookup_events = HashMap::<[E::Fr; 4], usize>::new();
// step 1.5 - if there are lookup tables then draw random "eta" to linearlize over tables
let mut lookup_data: Option<data_structures::LookupDataHolder<E>> = if self.tables.len() > 0 {
let eta = transcript.get_challenge();
// these are selected rows from witness (where lookup applies)
let (selector_poly, table_type_mononial, table_type_values) = if S::PRODUCE_SETUP {
let selector_for_lookup_values = self.calculate_lookup_selector_values()?;
assert!((selector_for_lookup_values.len() + 1).is_power_of_two());
let table_type_values = self.calculate_table_type_values()?;
assert_eq!(selector_for_lookup_values.len(), table_type_values.len());
let table_type_poly_monomial = {
let mon = Polynomial::from_values(table_type_values.clone())?;
let mon = mon.ifft_using_bitreversed_ntt(
&worker,
&omegas_inv_bitreversed,
&E::Fr::one()
)?;
mon
};
let selector_poly = Polynomial::<E::Fr, Values>::from_values(selector_for_lookup_values)?.ifft_using_bitreversed_ntt(
&worker,
&omegas_inv_bitreversed,
&E::Fr::one()
)?;
let selector_poly = PolynomialProxy::from_owned(selector_poly);
let table_type_poly = PolynomialProxy::from_owned(table_type_poly_monomial);
(selector_poly, table_type_poly, table_type_values)
} else {
let selector_poly_ref = setup.lookup_selector_monomial.as_ref().expect("setup must contain lookup selector poly");
let selector_poly = PolynomialProxy::from_borrowed(selector_poly_ref);
let table_type_poly_ref = setup.lookup_table_type_monomial.as_ref().expect("setup must contain lookup table type poly");
let table_type_poly = PolynomialProxy::from_borrowed(table_type_poly_ref);
// let mut table_type_values = table_type_poly_ref.clone().fft(&worker).into_coeffs();
let mut table_type_values = table_type_poly_ref.clone().fft_using_bitreversed_ntt(
&worker,
&omegas_bitreversed,
&E::Fr::one()
)?.into_coeffs();
table_type_values.pop().unwrap();
(selector_poly, table_type_poly, table_type_values)
};
assert!((table_type_values.len() + 1).is_power_of_two());
let witness_len = required_domain_size - 1;
assert!((witness_len + 1).is_power_of_two());
assert_eq!(table_type_values.len(), witness_len);
let f_poly_values_aggregated = {
let mut table_contributions_values = if S::PRODUCE_SETUP && S::PRODUCE_WITNESS {
let masked_entries_using_bookkept_bitmasks = self.calculate_masked_lookup_entries(&values_storage)?;
let typical_len = masked_entries_using_bookkept_bitmasks[0].len();
assert!((typical_len+1).is_power_of_two());
masked_entries_using_bookkept_bitmasks
} else {
assert!(S::PRODUCE_WITNESS);
// let selector_values = PolynomialProxy::from_owned(selector_poly.as_ref().clone().fft(&worker));
let selector_values = selector_poly.as_ref().clone().fft_using_bitreversed_ntt(
&worker,
&omegas_bitreversed,
&E::Fr::one()
)?;
let selector_values = PolynomialProxy::from_owned(selector_values);
self.calculate_masked_lookup_entries_using_selector(
&values_storage,
&selector_values
)?
};
assert_eq!(table_type_values.len(), table_contributions_values[0].len());
assert_eq!(table_contributions_values.len(), 3);
assert_eq!(witness_len, table_contributions_values[0].len());
let mut f_poly_values_aggregated = table_contributions_values.drain(0..1).collect::<Vec<_>>().pop().unwrap();
let mut current = eta;
for t in table_contributions_values.into_iter() {
let op = BinopAddAssignScaled::new(current);
binop_over_slices(&worker, &op, &mut f_poly_values_aggregated, &t);
current.mul_assign(&eta);
}
// add table type marker
let op = BinopAddAssignScaled::new(current);
binop_over_slices(&worker, &op, &mut f_poly_values_aggregated, &table_type_values);
Polynomial::from_values_unpadded(f_poly_values_aggregated)?
};
let (t_poly_values, t_poly_values_shifted, t_poly_monomial) = if S::PRODUCE_SETUP {
// these are unsorted rows of lookup tables
let mut t_poly_ends = self.calculate_t_polynomial_values_for_single_application_tables()?;
assert_eq!(t_poly_ends.len(), 4);
let mut t_poly_values_aggregated = t_poly_ends.drain(0..1).collect::<Vec<_>>().pop().unwrap();
let mut current = eta;
for t in t_poly_ends.into_iter() {
let op = BinopAddAssignScaled::new(current);
binop_over_slices(&worker, &op, &mut t_poly_values_aggregated, &t);
current.mul_assign(&eta);
}
let copy_start = witness_len - t_poly_values_aggregated.len();
let mut full_t_poly_values = vec![E::Fr::zero(); witness_len];
let mut full_t_poly_values_shifted = full_t_poly_values.clone();
full_t_poly_values[copy_start..].copy_from_slice(&t_poly_values_aggregated);
full_t_poly_values_shifted[(copy_start - 1)..(witness_len-1)].copy_from_slice(&t_poly_values_aggregated);
assert!(full_t_poly_values[0].is_zero());
let t_poly_monomial = {
let mon = Polynomial::from_values(full_t_poly_values.clone())?;
let mon = mon.ifft_using_bitreversed_ntt(
&worker,
&omegas_inv_bitreversed,
&E::Fr::one()
)?;
mon
};
(
PolynomialProxy::from_owned(Polynomial::from_values_unpadded(full_t_poly_values)?),
PolynomialProxy::from_owned(Polynomial::from_values_unpadded(full_t_poly_values_shifted)?),
PolynomialProxy::from_owned(t_poly_monomial)
)
} else {
let mut t_poly_values_monomial_aggregated = setup.lookup_tables_monomials[0].clone();
let mut current = eta;
for idx in 1..4 {
let to_aggregate_ref = &setup.lookup_tables_monomials[idx];
t_poly_values_monomial_aggregated.add_assign_scaled(
&worker,
to_aggregate_ref,
¤t
);
current.mul_assign(&eta);
}
assert!(t_poly_values_monomial_aggregated.size().is_power_of_two());
let mut t_poly_values = t_poly_values_monomial_aggregated.clone().fft_using_bitreversed_ntt(
&worker,
&omegas_bitreversed,
&E::Fr::one()
)?;
assert!(t_poly_values.as_ref().last().unwrap().is_zero());
assert!(t_poly_values.size().is_power_of_two());
// let mut t_values_shifted_coeffs = vec![E::Fr::zero(); t_poly_values.size()];
// // manually shift by 1
// t_values_shifted_coeffs[1..].copy_from_slice(&t_poly_values.as_ref()[0..(t_poly_values.size()-1)]);
// t_values_shifted_coeffs[0] = t_poly_values.as_ref()[(t_poly_values.size()-1)];
let mut t_values_shifted_coeffs = t_poly_values.clone().into_coeffs();
let _last = t_poly_values.pop_last()?;
assert!(_last.is_zero());
let _: Vec<_> = t_values_shifted_coeffs.drain(0..1).collect();
let t_poly_values_shifted = Polynomial::from_values_unpadded(t_values_shifted_coeffs)?;
assert_eq!(witness_len, t_poly_values.size());
assert_eq!(witness_len, t_poly_values_shifted.size());
(
PolynomialProxy::from_owned(t_poly_values),
PolynomialProxy::from_owned(t_poly_values_shifted),
PolynomialProxy::from_owned(t_poly_values_monomial_aggregated)
)
};
let (s_poly_monomial, s_poly_unpadded_values, s_shifted_unpadded_values) = {
let s_poly_values_aggregated = self.calculate_s_poly_contributions_from_witness(eta)?;
let sorted_copy_start = witness_len - s_poly_values_aggregated.len();
let mut full_s_poly_values = vec![E::Fr::zero(); witness_len];
let mut full_s_poly_values_shifted = full_s_poly_values.clone();
full_s_poly_values[sorted_copy_start..].copy_from_slice(&s_poly_values_aggregated);
full_s_poly_values_shifted[(sorted_copy_start - 1)..(witness_len-1)].copy_from_slice(&s_poly_values_aggregated);
assert!(full_s_poly_values[0].is_zero());
let s_poly_monomial = {
let mon = Polynomial::from_values(full_s_poly_values.clone())?;
let mon = mon.ifft_using_bitreversed_ntt(
&worker,
&omegas_inv_bitreversed,
&E::Fr::one()
)?;
mon
};
(
s_poly_monomial,
Polynomial::from_values_unpadded(full_s_poly_values)?,
Polynomial::from_values_unpadded(full_s_poly_values_shifted)?
)
};
let s_poly_commitment = commit_using_monomials(
&s_poly_monomial,
mon_crs,
&worker
)?;
commit_point_as_xy::<E, T>(
&mut transcript,
&s_poly_commitment
);
proof.lookup_s_poly_commitment = Some(s_poly_commitment);
let data = data_structures::LookupDataHolder::<E> {
eta,
f_poly_unpadded_values: Some(f_poly_values_aggregated),
t_poly_unpadded_values: Some(t_poly_values),
t_shifted_unpadded_values: Some(t_poly_values_shifted),
s_poly_unpadded_values: Some(s_poly_unpadded_values),
s_shifted_unpadded_values: Some(s_shifted_unpadded_values),
t_poly_monomial: Some(t_poly_monomial),
s_poly_monomial: Some(s_poly_monomial),
selector_poly_monomial: Some(selector_poly),
table_type_poly_monomial: Some(table_type_mononial),
};
Some(data)
} else {
None
};
if self.multitables.len() > 0 {
unimplemented!("do not support multitables yet")
}
// step 2 - grand product arguments
let beta_for_copy_permutation = transcript.get_challenge();
let gamma_for_copy_permutation = transcript.get_challenge();
// copy permutation grand product argument
let mut grand_products_protos_with_gamma = vec![];
for i in 0..num_state_polys {
let id = PolyIdentifier::VariablesPolynomial(i);
let mut p = values_storage.state_map.get(&id).unwrap().as_ref().clone();
p.add_constant(&worker, &gamma_for_copy_permutation);
grand_products_protos_with_gamma.push(p);
}
let required_domain_size = required_domain_size;
let domain = Domain::new_for_size(required_domain_size as u64)?;
let mut domain_elements = materialize_domain_elements_with_natural_enumeration(
&domain,
&worker
);
domain_elements.pop().expect("must pop last element for omega^i");
let non_residues = make_non_residues::<E::Fr>(num_state_polys - 1);
let mut domain_elements_poly_by_beta = Polynomial::from_values_unpadded(domain_elements)?;
domain_elements_poly_by_beta.scale(&worker, beta_for_copy_permutation);
// we take A, B, C, ... values and form (A + beta * X * non_residue + gamma), etc and calculate their grand product
let mut z_num = {
let mut grand_products_proto_it = grand_products_protos_with_gamma.iter().cloned();
let mut z_1 = grand_products_proto_it.next().unwrap();
z_1.add_assign(&worker, &domain_elements_poly_by_beta);
for (mut p, non_res) in grand_products_proto_it.zip(non_residues.iter()) {
p.add_assign_scaled(&worker, &domain_elements_poly_by_beta, non_res);
z_1.mul_assign(&worker, &p);
}
z_1
};
// we take A, B, C, ... values and form (A + beta * perm_a + gamma), etc and calculate their grand product
let mut permutation_polynomials_values_of_size_n_minus_one = vec![];
for idx in 0..num_state_polys {
let key = PolyIdentifier::PermutationPolynomial(idx);
let mut coeffs = values_storage.get_poly(key).clone().into_coeffs();
coeffs.pop().unwrap();
let p = Polynomial::from_values_unpadded(coeffs)?;
permutation_polynomials_values_of_size_n_minus_one.push(p);
}
let z_den = {
assert_eq!(
permutation_polynomials_values_of_size_n_minus_one.len(),
grand_products_protos_with_gamma.len()
);
let mut grand_products_proto_it = grand_products_protos_with_gamma.into_iter();
let mut permutation_polys_it = permutation_polynomials_values_of_size_n_minus_one.iter();
let mut z_2 = grand_products_proto_it.next().unwrap();
z_2.add_assign_scaled(&worker, permutation_polys_it.next().unwrap(), &beta_for_copy_permutation);
for (mut p, perm) in grand_products_proto_it
.zip(permutation_polys_it) {
// permutation polynomials
p.add_assign_scaled(&worker, &perm, &beta_for_copy_permutation);
z_2.mul_assign(&worker, &p);
}
z_2.batch_inversion(&worker)?;
z_2
};
z_num.mul_assign(&worker, &z_den);
drop(z_den);
let z = z_num.calculate_shifted_grand_product(&worker)?;
drop(z_num);
assert!(z.size().is_power_of_two());
assert!(z.as_ref()[0] == E::Fr::one());
let copy_permutation_z_in_monomial_form = z.ifft_using_bitreversed_ntt(
&worker,
&omegas_inv_bitreversed,
&E::Fr::one()
)?;
let copy_permutation_z_poly_commitment = commit_using_monomials(
©_permutation_z_in_monomial_form,
mon_crs,
&worker
)?;
commit_point_as_xy::<E, T>(
&mut transcript,
©_permutation_z_poly_commitment
);
proof.copy_permutation_grand_product_commitment = copy_permutation_z_poly_commitment;
let mut beta_for_lookup = None;
let mut gamma_for_lookup = None;
let lookup_z_poly_in_monomial_form = if let Some(data) = lookup_data.as_mut() {
let beta_for_lookup_permutation = transcript.get_challenge();
let gamma_for_lookup_permutation = transcript.get_challenge();
// let beta_for_lookup_permutation = E::Fr::from_str("789").unwrap();
// let gamma_for_lookup_permutation = E::Fr::from_str("1230").unwrap();
beta_for_lookup = Some(beta_for_lookup_permutation);
gamma_for_lookup = Some(gamma_for_lookup_permutation);
let mut beta_plus_one = beta_for_lookup_permutation;
beta_plus_one.add_assign(&E::Fr::one());
let mut gamma_beta = gamma_for_lookup_permutation;
gamma_beta.mul_assign(&beta_plus_one);
let expected = gamma_beta.pow([(required_domain_size-1) as u64]);
let f_poly_unpadded_values = data.f_poly_unpadded_values.take().unwrap();
let t_poly_unpadded_values = data.t_poly_unpadded_values.take().unwrap();
let t_shifted_unpadded_values = data.t_shifted_unpadded_values.take().unwrap();
let s_poly_unpadded_values = data.s_poly_unpadded_values.take().unwrap();
let s_shifted_unpadded_values = data.s_shifted_unpadded_values.take().unwrap();
// Z(x*omega) = Z(x) *
// (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega)) /
// (\gamma*(1 + \beta) + s(x) + \beta * s(x*omega)))
let mut z_num = {
// (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega))
let mut t = t_poly_unpadded_values.as_ref().clone();
t.add_assign_scaled(&worker, t_shifted_unpadded_values.as_ref(), &beta_for_lookup_permutation);
t.add_constant(&worker, &gamma_beta);
let mut tmp = f_poly_unpadded_values.clone();
tmp.add_constant(&worker, &gamma_for_lookup_permutation);
tmp.scale(&worker, beta_plus_one);
t.mul_assign(&worker, &tmp);
drop(tmp);
t
};
let z_den = {
// (\gamma*(1 + \beta) + s(x) + \beta * s(x*omega)))
let mut t = s_poly_unpadded_values.clone();
t.add_assign_scaled(&worker, &s_shifted_unpadded_values, &beta_for_lookup_permutation);
t.add_constant(&worker, &gamma_beta);
t.batch_inversion(&worker)?;
t
};
z_num.mul_assign(&worker, &z_den);
drop(z_den);
let z = z_num.calculate_shifted_grand_product(&worker)?;
drop(z_num);
assert!(z.size().is_power_of_two());
assert_eq!(z.as_ref()[0], E::Fr::one());
assert_eq!(*z.as_ref().last().unwrap(), expected);
// let t_poly_monomial = t_poly_unpadded_values.as_ref().clone_padded_to_domain()?.ifft_using_bitreversed_ntt(
// &worker,
// &omegas_inv_bitreversed,
// &E::Fr::one()
// )?;
// let s_poly_monomial = s_poly_unpadded_values.clone_padded_to_domain()?.ifft_using_bitreversed_ntt(
// &worker,
// &omegas_inv_bitreversed,
// &E::Fr::one()
// )?;
// data.t_poly_monomial = Some(t_poly_monomial);
// data.s_poly_monomial = Some(s_poly_monomial);
let z = z.ifft_using_bitreversed_ntt(
&worker,
&omegas_inv_bitreversed,
&E::Fr::one()
)?;
let lookup_z_poly_commitment = commit_using_monomials(
&z,
mon_crs,
&worker
)?;
commit_point_as_xy::<E, T>(
&mut transcript,
&lookup_z_poly_commitment
);
proof.lookup_grand_product_commitment = Some(lookup_z_poly_commitment);
Some(z)
} else {
None
};
// now draw alpha and add all the contributions to the quotient polynomial
let alpha = transcript.get_challenge();
// let alpha = E::Fr::from_str("1234567890").unwrap();
let mut total_powers_of_alpha_for_gates = 0;
for g in self.sorted_gates.iter() {
total_powers_of_alpha_for_gates += g.num_quotient_terms();
}
// println!("Have {} terms from {} gates", total_powers_of_alpha_for_gates, self.sorted_gates.len());
let mut current_alpha = E::Fr::one();
let mut powers_of_alpha_for_gates = Vec::with_capacity(total_powers_of_alpha_for_gates);
powers_of_alpha_for_gates.push(current_alpha);
for _ in 1..total_powers_of_alpha_for_gates {
current_alpha.mul_assign(&alpha);
powers_of_alpha_for_gates.push(current_alpha);
}
assert_eq!(powers_of_alpha_for_gates.len(), total_powers_of_alpha_for_gates);
let mut all_gates = self.sorted_gates.clone();
let num_different_gates = self.sorted_gates.len();
let mut challenges_slice = &powers_of_alpha_for_gates[..];
let mut lde_factor = num_state_polys;
for g in self.sorted_gates.iter() {
let degree = g.degree();
if degree > lde_factor {
lde_factor = degree;
}
}
assert!(lde_factor <= 4);
let coset_factor = E::Fr::multiplicative_generator();
let mut t_poly = {
let gate = all_gates.drain(0..1).into_iter().next().unwrap();
assert!(<Self as ConstraintSystem<E>>::MainGate::default().into_internal() == gate);
let gate = <Self as ConstraintSystem<E>>::MainGate::default();
let num_challenges = gate.num_quotient_terms();
let (for_gate, rest) = challenges_slice.split_at(num_challenges);
challenges_slice = rest;
let input_values = self.input_assingments.clone();
let mut t = gate.contribute_into_quotient_for_public_inputs(
required_domain_size,
&input_values,
&mut ldes_storage,
&monomials_storage,
for_gate,
&omegas_bitreversed,
&omegas_inv_bitreversed,
&worker
)?;
if num_different_gates > 1 {
// we have to multiply by the masking poly (selector)
let key = PolyIdentifier::GateSelector(gate.name());
let monomial_selector = monomials_storage.gate_selectors.get(&key).unwrap().as_ref();
let selector_lde = monomial_selector.clone_padded_to_domain()?.bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
&omegas_bitreversed,
&coset_factor
)?;
t.mul_assign(&worker, &selector_lde);
drop(selector_lde);
}
t
};
let non_main_gates = all_gates;
for gate in non_main_gates.into_iter() {
let num_challenges = gate.num_quotient_terms();
let (for_gate, rest) = challenges_slice.split_at(num_challenges);
challenges_slice = rest;
let mut contribution = gate.contribute_into_quotient(
required_domain_size,
&mut ldes_storage,
&monomials_storage,
for_gate,
&omegas_bitreversed,
&omegas_inv_bitreversed,
&worker
)?;
{
// we have to multiply by the masking poly (selector)
let key = PolyIdentifier::GateSelector(gate.name());
let monomial_selector = monomials_storage.gate_selectors.get(&key).unwrap().as_ref();
let selector_lde = monomial_selector.clone_padded_to_domain()?.bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
&omegas_bitreversed,
&coset_factor
)?;
contribution.mul_assign(&worker, &selector_lde);
drop(selector_lde);
}
t_poly.add_assign(&worker, &contribution);
}
assert_eq!(challenges_slice.len(), 0);
// println!("Power of alpha for a start of normal permutation argument = {}", total_powers_of_alpha_for_gates);
// perform copy-permutation argument
// we precompute L_{0} here cause it's necessary for both copy-permutation and lookup permutation
// z(omega^0) - 1 == 0
let l_0 = calculate_lagrange_poly::<E::Fr>(&worker, required_domain_size.next_power_of_two(), 0)?;
let l_0_coset_lde_bitreversed = l_0.bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
&omegas_bitreversed,
&coset_factor
)?;
let mut copy_grand_product_alphas = None;
let x_poly_lde_bitreversed = {
// now compute the permutation argument
// bump alpha
current_alpha.mul_assign(&alpha);
let alpha_0 = current_alpha;
let z_coset_lde_bitreversed = copy_permutation_z_in_monomial_form.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
&omegas_bitreversed,
&coset_factor
)?;
assert!(z_coset_lde_bitreversed.size() == required_domain_size*lde_factor);
let z_shifted_coset_lde_bitreversed = z_coset_lde_bitreversed.clone_shifted_assuming_bitreversed(
lde_factor,
&worker,
)?;
assert!(z_shifted_coset_lde_bitreversed.size() == required_domain_size*lde_factor);
// For both Z_1 and Z_2 we first check for grand products
// z*(X)(A + beta*X + gamma)(B + beta*k_1*X + gamma)(C + beta*K_2*X + gamma) -
// - (A + beta*perm_a(X) + gamma)(B + beta*perm_b(X) + gamma)(C + beta*perm_c(X) + gamma)*Z(X*Omega)== 0
// we use evaluations of the polynomial X and K_i * X on a large domain's coset
let mut contrib_z = z_coset_lde_bitreversed.clone();
// precompute x poly
let mut x_poly = Polynomial::from_values(vec![
coset_factor;
required_domain_size*lde_factor
])?;
x_poly.distribute_powers(&worker, z_shifted_coset_lde_bitreversed.omega);
x_poly.bitreverse_enumeration(&worker);
assert_eq!(x_poly.size(), required_domain_size * lde_factor);
// A + beta*X + gamma
let mut tmp = ldes_storage.state_map.get(&PolyIdentifier::VariablesPolynomial(0)).unwrap().as_ref().clone();
tmp.add_constant(&worker, &gamma_for_copy_permutation);
tmp.add_assign_scaled(&worker, &x_poly, &beta_for_copy_permutation);
contrib_z.mul_assign(&worker, &tmp);
assert_eq!(non_residues.len() + 1, num_state_polys);
for (poly_idx, non_res) in (1..num_state_polys).zip(non_residues.iter()) {
let mut factor = beta_for_copy_permutation;
factor.mul_assign(&non_res);
let key = PolyIdentifier::VariablesPolynomial(poly_idx);
tmp.reuse_allocation(&ldes_storage.state_map.get(&key).unwrap().as_ref());
tmp.add_constant(&worker, &gamma_for_copy_permutation);
tmp.add_assign_scaled(&worker, &x_poly, &factor);
contrib_z.mul_assign(&worker, &tmp);
}
t_poly.add_assign_scaled(&worker, &contrib_z, ¤t_alpha);
drop(contrib_z);
let mut contrib_z = z_shifted_coset_lde_bitreversed;
// A + beta*perm_a + gamma
for idx in 0..num_state_polys {
let key = PolyIdentifier::VariablesPolynomial(idx);
tmp.reuse_allocation(&ldes_storage.state_map.get(&key).unwrap().as_ref());
tmp.add_constant(&worker, &gamma_for_copy_permutation);
let key = PolyIdentifier::PermutationPolynomial(idx);
let perm = monomials_storage.get_poly(key).clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
&omegas_bitreversed,
&coset_factor
)?;
tmp.add_assign_scaled(&worker, &perm, &beta_for_copy_permutation);
contrib_z.mul_assign(&worker, &tmp);
drop(perm);
}
t_poly.sub_assign_scaled(&worker, &contrib_z, ¤t_alpha);
drop(contrib_z);
drop(tmp);
// Z(x) * L_{0}(x) - 1 == 0
current_alpha.mul_assign(&alpha);
let alpha_1 = current_alpha;
{
let mut z_minus_one_by_l_0 = z_coset_lde_bitreversed;
z_minus_one_by_l_0.sub_constant(&worker, &E::Fr::one());
z_minus_one_by_l_0.mul_assign(&worker, &l_0_coset_lde_bitreversed);
t_poly.add_assign_scaled(&worker, &z_minus_one_by_l_0, ¤t_alpha);
}
copy_grand_product_alphas = Some([alpha_0, alpha_1]);
x_poly
};
// add contribution from grand product for loopup polys if there is one
let mut lookup_grand_product_alphas = None;
if let Some(z_poly_in_monomial_form) = lookup_z_poly_in_monomial_form.as_ref() {
let beta_for_lookup_permutation = beta_for_lookup.unwrap();
let gamma_for_lookup_permutation = gamma_for_lookup.unwrap();
let mut beta_plus_one = beta_for_lookup_permutation;
beta_plus_one.add_assign(&E::Fr::one());
let mut gamma_beta = gamma_for_lookup_permutation;
gamma_beta.mul_assign(&beta_plus_one);
let expected = gamma_beta.pow([(required_domain_size-1) as u64]);
current_alpha.mul_assign(&alpha);
let alpha_0 = current_alpha;
// same grand product argument for lookup permutation except divisor is now with one point cut
let z_lde = z_poly_in_monomial_form.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
&omegas_bitreversed,
&coset_factor
)?;
let z_lde_shifted = z_lde.clone_shifted_assuming_bitreversed(
lde_factor,
&worker
)?;
// We make an small ad-hoc modification here and instead of dividing some contributions by
// (X^n - 1)/(X - omega^{n-1}) we move (X - omega^{n-1}) to the numerator and join the divisions
// Numerator degree is at max 4n, so it's < 4n after division
// ( Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega))) -
// - Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega)) )*(X - omega^{n-1})
let data = lookup_data.as_ref().unwrap();
let s_lde = data.s_poly_monomial.as_ref().unwrap().clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
&omegas_bitreversed,
&coset_factor
)?;
let s_lde_shifted = s_lde.clone_shifted_assuming_bitreversed(
lde_factor,
&worker
)?;
// Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega)))
let mut contribution = s_lde;
contribution.add_assign_scaled(&worker, &s_lde_shifted, &beta_for_lookup_permutation);
contribution.add_constant(&worker, &gamma_beta);
contribution.mul_assign(&worker, &z_lde_shifted);
drop(s_lde_shifted);
drop(z_lde_shifted);
let t_lde = data.t_poly_monomial.as_ref().unwrap().as_ref().clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
&omegas_bitreversed,
&coset_factor
)?;
let t_lde_shifted = t_lde.clone_shifted_assuming_bitreversed(
lde_factor,
&worker
)?;
let f_lde = {
// add up ldes of a,b,c and table_type poly and multiply by selector
let a_ref = get_from_map_unchecked(
PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(0)),
&ldes_storage
);
let mut tmp = a_ref.clone();
drop(a_ref);
let eta = lookup_data.as_ref().unwrap().eta;
let mut current = eta;
let b_ref = get_from_map_unchecked(
PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(1)),
&ldes_storage
);
tmp.add_assign_scaled(&worker, b_ref, ¤t);
drop(b_ref);
current.mul_assign(&eta);
let c_ref = get_from_map_unchecked(
PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(2)),
&ldes_storage
);
tmp.add_assign_scaled(&worker, c_ref, ¤t);
drop(c_ref);
current.mul_assign(&eta);
let table_type_lde = lookup_data.as_ref().unwrap().table_type_poly_monomial.as_ref().unwrap().as_ref().clone()
.bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
&omegas_bitreversed,
&coset_factor
)?;
tmp.add_assign_scaled(&worker, &table_type_lde, ¤t);
drop(table_type_lde);
let lookup_selector_lde = lookup_data.as_ref().unwrap().selector_poly_monomial.as_ref().unwrap().as_ref().clone()
.bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
&omegas_bitreversed,
&coset_factor
)?;
tmp.mul_assign(&worker, &lookup_selector_lde);
drop(lookup_selector_lde);
tmp
};
// - Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega))
let mut tmp = f_lde;
tmp.add_constant(&worker, &gamma_for_lookup_permutation);
tmp.mul_assign(&worker, &z_lde);
tmp.scale(&worker, beta_plus_one);
let mut t = t_lde;
t.add_assign_scaled(&worker, &t_lde_shifted, &beta_for_lookup_permutation);
t.add_constant(&worker, &gamma_beta);
tmp.mul_assign(&worker, &t);
drop(t);
drop(t_lde_shifted);
contribution.sub_assign(&worker, &tmp);
contribution.scale(&worker, current_alpha);
// multiply by (X - omega^{n-1})
let last_omega = domain.generator.pow(&[(required_domain_size - 1) as u64]);
let mut x_minus_last_omega = x_poly_lde_bitreversed;
x_minus_last_omega.sub_constant(&worker, &last_omega);
contribution.mul_assign(&worker, &x_minus_last_omega);
drop(x_minus_last_omega);
// we do not need to do addition multiplications for terms below cause multiplication by lagrange poly
// does everything for us
// check that (Z(x) - 1) * L_{0} == 0
current_alpha.mul_assign(&alpha);
let alpha_1 = current_alpha;
tmp.reuse_allocation(&z_lde);
tmp.sub_constant(&worker, &E::Fr::one());
tmp.mul_assign(&worker, &l_0_coset_lde_bitreversed);
drop(l_0_coset_lde_bitreversed);
contribution.add_assign_scaled(&worker, &tmp, ¤t_alpha);
// check that (Z(x) - expected) * L_{n-1} == 0
current_alpha.mul_assign(&alpha);
let alpha_2 = current_alpha;
let l_last = calculate_lagrange_poly::<E::Fr>(&worker, required_domain_size.next_power_of_two(), required_domain_size - 1)?;
let l_last_coset_lde_bitreversed = l_last.bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
&omegas_bitreversed,
&coset_factor
)?;
tmp.reuse_allocation(&z_lde);
tmp.sub_constant(&worker, &expected);
tmp.mul_assign(&worker, &l_last_coset_lde_bitreversed);
drop(l_last_coset_lde_bitreversed);
contribution.add_assign_scaled(&worker, &tmp, ¤t_alpha);
drop(tmp);
drop(z_lde);
t_poly.add_assign(&worker, &contribution);
drop(contribution);
lookup_grand_product_alphas = Some([alpha_0, alpha_1, alpha_2]);
} else {
drop(x_poly_lde_bitreversed);
drop(l_0_coset_lde_bitreversed);
}
// perform the division
let inverse_divisor_on_coset_lde_natural_ordering = {
let mut vanishing_poly_inverse_bitreversed =
evaluate_vanishing_polynomial_of_degree_on_domain_size::<E::Fr>(
required_domain_size as u64,
&E::Fr::multiplicative_generator(),
(required_domain_size * lde_factor) as u64,
&worker,
)?;
vanishing_poly_inverse_bitreversed.batch_inversion(&worker)?;
// vanishing_poly_inverse_bitreversed.bitreverse_enumeration(&worker)?;
vanishing_poly_inverse_bitreversed
};
// don't forget to bitreverse
t_poly.bitreverse_enumeration(&worker);
t_poly.mul_assign(&worker, &inverse_divisor_on_coset_lde_natural_ordering);
drop(inverse_divisor_on_coset_lde_natural_ordering);
let t_poly = t_poly.icoset_fft_for_generator(&worker, &coset_factor);
// println!("Lde factor = {}", lde_factor);
// println!("Quotient poly = {:?}", t_poly.as_ref());
{
// degree is 4n-4
let l = t_poly.as_ref().len();
// assert_eq!(&t_poly.as_ref()[(l-4)..], &[E::Fr::zero(); 4][..], "quotient degree is too large");
if &t_poly.as_ref()[(l-4)..] != &[E::Fr::zero(); 4][..] {
println!("End coeffs are {:?}", &t_poly.as_ref()[(l-4)..]);
return Err(SynthesisError::Unsatisfiable);
}
}
// println!("Quotient poly degree = {}", get_degree::<E::Fr>(&t_poly));
let mut t_poly_parts = t_poly.break_into_multiples(required_domain_size)?;
for part in t_poly_parts.iter() {
let commitment = commit_using_monomials(
part,
mon_crs,
&worker
)?;
commit_point_as_xy::<E, T>(
&mut transcript,
&commitment
);
proof.quotient_poly_parts_commitments.push(commitment);
}
// draw opening point
let z = transcript.get_challenge();
// let z = E::Fr::from_str("333444555").unwrap();
let omega = domain.generator;
// evaluate quotient at z
let quotient_at_z = {
let mut result = E::Fr::zero();
let mut current = E::Fr::one();
let z_in_domain_size = z.pow(&[required_domain_size as u64]);
for p in t_poly_parts.iter() {
let mut subvalue_at_z = p.evaluate_at(&worker, z);
subvalue_at_z.mul_assign(¤t);
result.add_assign(&subvalue_at_z);
current.mul_assign(&z_in_domain_size);
}
result
};
// commit quotient value
transcript.commit_field_element("ient_at_z);
proof.quotient_poly_opening_at_z = quotient_at_z;
// Now perform the linearization.
// First collect and evalute all the polynomials that are necessary for linearization
// and construction of the verification equation
const MAX_DILATION: usize = 1;
let queries_with_linearization = sort_queries_for_linearization(&self.sorted_gates, MAX_DILATION);
let mut query_values_map = std::collections::HashMap::new();
// go over all required queries
for (dilation_value, ids) in queries_with_linearization.state_polys.iter().enumerate() {
for id in ids.into_iter() {
let (poly_ref, poly_idx) = if let PolyIdentifier::VariablesPolynomial(idx) = id {
(monomials_storage.state_map.get(&id).unwrap().as_ref(), idx)
}
else {
unreachable!();
};
let mut opening_point = z;
for _ in 0..dilation_value {
opening_point.mul_assign(&omega);
}
let value = poly_ref.evaluate_at(&worker, opening_point);
transcript.commit_field_element(&value);
if dilation_value == 0 {
proof.state_polys_openings_at_z.push(value);
} else {
proof.state_polys_openings_at_dilations.push((dilation_value, *poly_idx, value));
}
let key = PolynomialInConstraint::from_id_and_dilation(*id, dilation_value);
query_values_map.insert(key, value);
}
}
for (dilation_value, ids) in queries_with_linearization.witness_polys.iter().enumerate() {
for id in ids.into_iter() {
let (poly_ref, poly_idx) = if let PolyIdentifier::WitnessPolynomial(idx) = id {
(monomials_storage.witness_map.get(&id).unwrap().as_ref(), idx)
}
else {
unreachable!();
};
let mut opening_point = z;
for _ in 0..dilation_value {
opening_point.mul_assign(&omega);
}
let value = poly_ref.evaluate_at(&worker, opening_point);
transcript.commit_field_element(&value);
if dilation_value == 0 {
proof.witness_polys_openings_at_z.push(value);
} else {
proof.witness_polys_openings_at_dilations.push((dilation_value, *poly_idx, value));
}
let key = PolynomialInConstraint::from_id_and_dilation(*id, dilation_value);
query_values_map.insert(key, value);
}
}
for (gate_idx, queries) in queries_with_linearization.gate_setup_polys.iter().enumerate() {
for (dilation_value, ids) in queries.iter().enumerate() {
for id in ids.into_iter() {
let (poly_ref, poly_idx) = if let PolyIdentifier::GateSetupPolynomial(_, idx) = id {
(monomials_storage.setup_map.get(&id).unwrap().as_ref(), idx)
}
else {
unreachable!();
};
let mut opening_point = z;
for _ in 0..dilation_value {
opening_point.mul_assign(&omega);
}
let value = poly_ref.evaluate_at(&worker, opening_point);
transcript.commit_field_element(&value);
if dilation_value == 0 {
proof.gate_setup_openings_at_z.push((gate_idx, *poly_idx, value));
} else {
unimplemented!("gate setup polynomials can not be time dilated");
}
let key = PolynomialInConstraint::from_id_and_dilation(*id, dilation_value);
query_values_map.insert(key, value);
}
}
}
// also open selectors
let mut selector_values = vec![];
for s in queries_with_linearization.gate_selectors.iter() {
let gate_index = self.sorted_gates.iter().position(|r| r == s).unwrap();
let key = PolyIdentifier::GateSelector(s.name());
let poly_ref = monomials_storage.gate_selectors.get(&key).unwrap().as_ref();
let value = poly_ref.evaluate_at(&worker, z);
transcript.commit_field_element(&value);
proof.gate_selectors_openings_at_z.push((gate_index, value));
selector_values.push(value);
}
// copy-permutation polynomials queries
let mut copy_permutation_queries = vec![];
for idx in 0..(num_state_polys-1) {
let key = PolyIdentifier::PermutationPolynomial(idx);
let value = monomials_storage.get_poly(key).evaluate_at(&worker, z);
transcript.commit_field_element(&value);
proof.copy_permutation_polys_openings_at_z.push(value);
copy_permutation_queries.push(value);
}
// copy-permutation grand product query
let mut z_omega = z;
z_omega.mul_assign(&domain.generator);
let copy_permutation_z_at_z_omega = copy_permutation_z_in_monomial_form.evaluate_at(&worker, z_omega);
transcript.commit_field_element(©_permutation_z_at_z_omega);
proof.copy_permutation_grand_product_opening_at_z_omega = copy_permutation_z_at_z_omega;
// we've computed everything, so perform linearization
let mut challenges_slice = &powers_of_alpha_for_gates[..];
let mut all_gates = self.sorted_gates.clone();
let mut r_poly = {
let gate = all_gates.drain(0..1).into_iter().next().unwrap();
assert!(gate.benefits_from_linearization(), "main gate is expected to benefit from linearization!");
assert!(<Self as ConstraintSystem<E>>::MainGate::default().into_internal() == gate);
let gate = <Self as ConstraintSystem<E>>::MainGate::default();
let num_challenges = gate.num_quotient_terms();
let (for_gate, rest) = challenges_slice.split_at(num_challenges);
challenges_slice = rest;
let input_values = self.input_assingments.clone();
let mut r = gate.contribute_into_linearization_for_public_inputs(
required_domain_size,
&input_values,
z,
&query_values_map,
&monomials_storage,
for_gate,
&worker
)?;
let mut selectors_it = selector_values.clone().into_iter();
if num_different_gates > 1 {
// first multiply r by the selector value at z
r.scale(&worker, selectors_it.next().unwrap());
}
// now proceed per gate
for gate in all_gates.into_iter() {
let num_challenges = gate.num_quotient_terms();
let (for_gate, rest) = challenges_slice.split_at(num_challenges);
challenges_slice = rest;
if gate.benefits_from_linearization() {
// gate benefits from linearization, so make temporary value
let tmp = gate.contribute_into_linearization(
required_domain_size,
z,
&query_values_map,
&monomials_storage,
for_gate,
&worker
)?;
let selector_value = selectors_it.next().unwrap();
r.add_assign_scaled(&worker, &tmp, &selector_value);
} else {
// we linearize over the selector, so take a selector and scale it
let gate_value_at_z = gate.contribute_into_verification_equation(
required_domain_size,
z,
&query_values_map,
for_gate
)?;
let key = PolyIdentifier::GateSelector(gate.name());
let gate_selector_ref = monomials_storage.gate_selectors.get(&key).expect("must get monomial form of gate selector").as_ref();
r.add_assign_scaled(&worker, gate_selector_ref, &gate_value_at_z);
}
}
assert!(selectors_it.next().is_none());
assert_eq!(challenges_slice.len(), 0);
r
};
// add contributions from copy-permutation and lookup-permutation
// copy-permutation linearization comtribution
{
// + (a(z) + beta*z + gamma)*()*()*()*Z(x)
let [alpha_0, alpha_1] = copy_grand_product_alphas.expect("there must be powers of alpha for copy permutation");
let some_one = Some(E::Fr::one());
let mut non_residues_iterator = some_one.iter().chain(&non_residues);
let mut factor = alpha_0;
for idx in 0..num_state_polys {
let key = PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(idx));
let wire_value = query_values_map.get(&key)
.ok_or(SynthesisError::AssignmentMissing)?;
let mut t = z;
let non_res = non_residues_iterator.next().unwrap();
t.mul_assign(&non_res);
t.mul_assign(&beta_for_copy_permutation);
t.add_assign(&wire_value);
t.add_assign(&gamma_for_copy_permutation);
factor.mul_assign(&t);
}
assert!(non_residues_iterator.next().is_none());
r_poly.add_assign_scaled(&worker, ©_permutation_z_in_monomial_form, &factor);
// - (a(z) + beta*perm_a + gamma)*()*()*z(z*omega) * beta * perm_d(X)
let mut factor = alpha_0;
factor.mul_assign(&beta_for_copy_permutation);
factor.mul_assign(©_permutation_z_at_z_omega);
for idx in 0..(num_state_polys-1) {
let key = PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(idx));
let wire_value = query_values_map.get(&key)
.ok_or(SynthesisError::AssignmentMissing)?;
let permutation_at_z = copy_permutation_queries[idx];
let mut t = permutation_at_z;
t.mul_assign(&beta_for_copy_permutation);
t.add_assign(&wire_value);
t.add_assign(&gamma_for_copy_permutation);
factor.mul_assign(&t);
}
let key = PolyIdentifier::PermutationPolynomial(num_state_polys - 1);
let last_permutation_poly_ref = monomials_storage.get_poly(key);
r_poly.sub_assign_scaled(&worker, last_permutation_poly_ref, &factor);
// + L_0(z) * Z(x)
let mut factor = evaluate_l0_at_point(required_domain_size as u64, z)?;
factor.mul_assign(&alpha_1);
r_poly.add_assign_scaled(&worker, ©_permutation_z_in_monomial_form, &factor);
}
// lookup grand product linearization
// due to separate divisor it's not obvious if this is beneficial without some tricks
// like multiplication by (1 - L_{n-1}) or by (x - omega^{n-1})
// Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega))) -
// Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega)) == 0
// check that (Z(x) - 1) * L_{0} == 0
// check that (Z(x) - expected) * L_{n-1} == 0, or (Z(x*omega) - expected)* L_{n-2} == 0
// f(x) does not need to be opened as it's made of table selector and witnesses
// if we pursue the strategy from the linearization of a copy-permutation argument
// then we leave something like s(x) from the Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega))) term,
// and Z(x) from Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega)) term,
// with terms with lagrange polys as multipliers left intact
let lookup_queries = if let Some(lookup_z_poly) = lookup_z_poly_in_monomial_form.as_ref() {
let [alpha_0, alpha_1, alpha_2] = lookup_grand_product_alphas.expect("there must be powers of alpha for lookup permutation");
let s_at_z_omega = lookup_data.as_ref().unwrap().s_poly_monomial.as_ref().unwrap().evaluate_at(&worker, z_omega);
let grand_product_at_z_omega = lookup_z_poly.evaluate_at(&worker, z_omega);
let t_at_z = lookup_data.as_ref().unwrap().t_poly_monomial.as_ref().unwrap().as_ref().evaluate_at(&worker, z);
let t_at_z_omega = lookup_data.as_ref().unwrap().t_poly_monomial.as_ref().unwrap().as_ref().evaluate_at(&worker, z_omega);
let selector_at_z = lookup_data.as_ref().unwrap().selector_poly_monomial.as_ref().unwrap().as_ref().evaluate_at(&worker, z);
let table_type_at_z = lookup_data.as_ref().unwrap().table_type_poly_monomial.as_ref().unwrap().as_ref().evaluate_at(&worker, z);
let l_0_at_z = evaluate_lagrange_poly_at_point(0, &domain, z)?;
let l_n_minus_one_at_z = evaluate_lagrange_poly_at_point(required_domain_size - 1, &domain, z)?;
let beta_for_lookup_permutation = beta_for_lookup.unwrap();
let gamma_for_lookup_permutation = gamma_for_lookup.unwrap();
let mut beta_plus_one = beta_for_lookup_permutation;
beta_plus_one.add_assign(&E::Fr::one());
let mut gamma_beta = gamma_for_lookup_permutation;
gamma_beta.mul_assign(&beta_plus_one);
// (Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega))) -
// Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega)))*(X - omega^{n-1})
let last_omega = domain.generator.pow(&[(required_domain_size - 1) as u64]);
let mut z_minus_last_omega = z;
z_minus_last_omega.sub_assign(&last_omega);
// s(x) from the Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega)))
let mut factor = grand_product_at_z_omega; // we do not need to account for additive terms
factor.mul_assign(&alpha_0);
factor.mul_assign(&z_minus_last_omega);
r_poly.add_assign_scaled(&worker, lookup_data.as_ref().unwrap().s_poly_monomial.as_ref().unwrap(), &factor);
// Z(x) from - alpha_0 * Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega))
// + alpha_1 * Z(x) * L_{0}(z) + alpha_2 * Z(x) * L_{n-1}(z)
// accumulate coefficient
let mut factor = t_at_z_omega;
factor.mul_assign(&beta_for_lookup_permutation);
factor.add_assign(&t_at_z);
factor.add_assign(&gamma_beta);
// (\gamma + f(x))
let mut f_reconstructed = E::Fr::zero();
let mut current = E::Fr::one();
let eta = lookup_data.as_ref().unwrap().eta;
// a,b,c
for idx in 0..(num_state_polys-1) {
let key = PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(idx));
let mut value = *query_values_map.get(&key)
.ok_or(SynthesisError::AssignmentMissing)?;
value.mul_assign(¤t);
f_reconstructed.add_assign(&value);
current.mul_assign(&eta);
}
// and table type
let mut t = table_type_at_z;
t.mul_assign(¤t);
f_reconstructed.add_assign(&t);
f_reconstructed.mul_assign(&selector_at_z);
f_reconstructed.add_assign(&gamma_for_lookup_permutation);
// end of (\gamma + f(x)) part
factor.mul_assign(&f_reconstructed);
factor.mul_assign(&beta_plus_one);
factor.negate(); // don't forget minus sign
factor.mul_assign(&alpha_0);
// Multiply by (z - omega^{n-1})
factor.mul_assign(&z_minus_last_omega);
// L_{0}(z) in front of Z(x)
let mut tmp = l_0_at_z;
tmp.mul_assign(&alpha_1);
factor.add_assign(&tmp);
// L_{n-1}(z) in front of Z(x)
let mut tmp = l_n_minus_one_at_z;
tmp.mul_assign(&alpha_2);
factor.add_assign(&tmp);
r_poly.add_assign_scaled(&worker, lookup_z_poly, &factor);
let query = LookupQuery::<E> {
s_at_z_omega,
grand_product_at_z_omega,
t_at_z,
t_at_z_omega,
selector_at_z,
table_type_at_z,
};
Some(query)
} else {
None
};
if let Some(queries) = lookup_queries.as_ref() {
// first commit values at z, and then at z*omega
transcript.commit_field_element(&queries.t_at_z);
transcript.commit_field_element(&queries.selector_at_z);
transcript.commit_field_element(&queries.table_type_at_z);
// now at z*omega
transcript.commit_field_element(&queries.s_at_z_omega);
transcript.commit_field_element(&queries.grand_product_at_z_omega);
transcript.commit_field_element(&queries.t_at_z_omega);
proof.lookup_s_poly_opening_at_z_omega = Some(queries.s_at_z_omega);
proof.lookup_grand_product_opening_at_z_omega = Some(queries.grand_product_at_z_omega);
proof.lookup_t_poly_opening_at_z = Some(queries.t_at_z);
proof.lookup_t_poly_opening_at_z_omega = Some(queries.t_at_z_omega);
proof.lookup_selector_poly_opening_at_z = Some(queries.selector_at_z);
proof.lookup_table_type_poly_opening_at_z = Some(queries.table_type_at_z);
}
let linearization_at_z = r_poly.evaluate_at(&worker, z);
transcript.commit_field_element(&linearization_at_z);
proof.linearization_poly_opening_at_z = linearization_at_z;
// linearization is done, now perform sanity check
// this is effectively a verification procedure
{
let vanishing_at_z = evaluate_vanishing_for_size(&z, required_domain_size as u64);
// first let's aggregate gates
let mut t_num_on_full_domain = E::Fr::zero();
let challenges_slice = &powers_of_alpha_for_gates[..];
let mut all_gates = self.sorted_gates.clone();
// we've suffered and linearization polynomial captures all the gates except the public input!
{
let mut tmp = linearization_at_z;
// add input values
let gate = all_gates.drain(0..1).into_iter().next().unwrap();
assert!(gate.benefits_from_linearization(), "main gate is expected to benefit from linearization!");
assert!(<Self as ConstraintSystem<E>>::MainGate::default().into_internal() == gate);
let gate = <Self as ConstraintSystem<E>>::MainGate::default();
let num_challenges = gate.num_quotient_terms();
let (for_gate, _) = challenges_slice.split_at(num_challenges);
let input_values = self.input_assingments.clone();
let mut inputs_term = gate.add_inputs_into_quotient(
required_domain_size,
&input_values,
z,
for_gate,
)?;
if num_different_gates > 1 {
let selector_value = selector_values[0];
inputs_term.mul_assign(&selector_value);
}
tmp.add_assign(&inputs_term);
t_num_on_full_domain.add_assign(&tmp);
}
// now aggregate leftovers from grand product for copy permutation
{
// - alpha_0 * (a + perm(z) * beta + gamma)*()*(d + gamma) * z(z*omega)
let [alpha_0, alpha_1] = copy_grand_product_alphas.expect("there must be powers of alpha for copy permutation");
let mut factor = alpha_0;
factor.mul_assign(©_permutation_z_at_z_omega);
for idx in 0..(num_state_polys-1) {
let key = PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(idx));
let wire_value = query_values_map.get(&key)
.ok_or(SynthesisError::AssignmentMissing)?;
let permutation_at_z = copy_permutation_queries[idx];
let mut t = permutation_at_z;
t.mul_assign(&beta_for_copy_permutation);
t.add_assign(&wire_value);
t.add_assign(&gamma_for_copy_permutation);
factor.mul_assign(&t);
}
let key = PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(num_state_polys-1));
let mut tmp = *query_values_map.get(&key)
.ok_or(SynthesisError::AssignmentMissing)?;
tmp.add_assign(&gamma_for_copy_permutation);
factor.mul_assign(&tmp);
t_num_on_full_domain.sub_assign(&factor);
// - L_0(z) * alpha_1
let mut l_0_at_z = evaluate_l0_at_point(required_domain_size as u64, z)?;
l_0_at_z.mul_assign(&alpha_1);
t_num_on_full_domain.sub_assign(&l_0_at_z);
}
// and if exists - grand product for lookup permutation
{
if lookup_queries.is_some() {
let [alpha_0, alpha_1, alpha_2] = lookup_grand_product_alphas.expect("there must be powers of alpha for lookup permutation");
let lookup_queries = lookup_queries.clone().expect("lookup queries must be made");
let beta_for_lookup_permutation = beta_for_lookup.unwrap();
let gamma_for_lookup_permutation = gamma_for_lookup.unwrap();
let mut beta_plus_one = beta_for_lookup_permutation;
beta_plus_one.add_assign(&E::Fr::one());
let mut gamma_beta = gamma_for_lookup_permutation;
gamma_beta.mul_assign(&beta_plus_one);
let expected = gamma_beta.pow([(required_domain_size-1) as u64]);
// in a linearization we've taken terms:
// - s(x) from the alpha_0 * Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega)))
// - and Z(x) from - alpha_0 * Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega)) (term in full) +
// + alpha_1 * (Z(x) - 1) * L_{0}(z) + alpha_2 * (Z(x) - expected) * L_{n-1}(z)
// first make alpha_0 * Z(x*omega)*(\gamma*(1 + \beta) + \beta * s(x*omega)))
let mut tmp = lookup_queries.s_at_z_omega;
tmp.mul_assign(&beta_for_lookup_permutation);
tmp.add_assign(&gamma_beta);
tmp.mul_assign(&lookup_queries.grand_product_at_z_omega);
tmp.mul_assign(&alpha_0);
// (z - omega^{n-1}) for this part
let last_omega = domain.generator.pow(&[(required_domain_size - 1) as u64]);
let mut z_minus_last_omega = z;
z_minus_last_omega.sub_assign(&last_omega);
tmp.mul_assign(&z_minus_last_omega);
t_num_on_full_domain.add_assign(&tmp);
// // - alpha_1 * L_{0}(z)
let mut l_0_at_z = evaluate_l0_at_point(required_domain_size as u64, z)?;
l_0_at_z.mul_assign(&alpha_1);
t_num_on_full_domain.sub_assign(&l_0_at_z);
// // - alpha_2 * expected L_{n-1}(z)
let mut l_n_minus_one_at_z = evaluate_lagrange_poly_at_point(required_domain_size - 1, &domain, z)?;
l_n_minus_one_at_z.mul_assign(&expected);
l_n_minus_one_at_z.mul_assign(&alpha_2);
t_num_on_full_domain.sub_assign(&l_n_minus_one_at_z);
}
}
let mut lhs = quotient_at_z;
lhs.mul_assign(&vanishing_at_z);
let rhs = t_num_on_full_domain;
if lhs != rhs {
dbg!("Circuit is not satisfied");
return Err(SynthesisError::Unsatisfiable);
}
}
let v = transcript.get_challenge();
// now construct two polynomials that are opened at z and z*omega
let mut multiopening_challenge = E::Fr::one();
let mut poly_to_divide_at_z = t_poly_parts.drain(0..1).collect::<Vec<_>>().pop().unwrap();
let z_in_domain_size = z.pow(&[required_domain_size as u64]);
let mut power_of_z = z_in_domain_size;
for t_part in t_poly_parts.into_iter() {
poly_to_divide_at_z.add_assign_scaled(&worker, &t_part, &power_of_z);
power_of_z.mul_assign(&z_in_domain_size);
}
// linearization polynomial
multiopening_challenge.mul_assign(&v);
poly_to_divide_at_z.add_assign_scaled(&worker, &r_poly, &multiopening_challenge);
debug_assert_eq!(multiopening_challenge, v.pow(&[1 as u64]));
// now proceed over all queries
const THIS_STEP_DILATION: usize = 0;
for id in queries_with_linearization.state_polys[THIS_STEP_DILATION].iter() {
multiopening_challenge.mul_assign(&v);
let poly_ref = monomials_storage.get_poly(*id);
poly_to_divide_at_z.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
}
for id in queries_with_linearization.witness_polys[THIS_STEP_DILATION].iter() {
multiopening_challenge.mul_assign(&v);
let poly_ref = monomials_storage.get_poly(*id);
poly_to_divide_at_z.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
}
for queries in queries_with_linearization.gate_setup_polys.iter() {
for id in queries[THIS_STEP_DILATION].iter() {
multiopening_challenge.mul_assign(&v);
let poly_ref = monomials_storage.get_poly(*id);
poly_to_divide_at_z.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
}
}
// also open selectors at z
for s in queries_with_linearization.gate_selectors.iter() {
multiopening_challenge.mul_assign(&v);
let key = PolyIdentifier::GateSelector(s.name());
let poly_ref = monomials_storage.get_poly(key);
poly_to_divide_at_z.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
}
for idx in 0..(num_state_polys-1) {
multiopening_challenge.mul_assign(&v);
let key = PolyIdentifier::PermutationPolynomial(idx);
let poly_ref = monomials_storage.get_poly(key);
poly_to_divide_at_z.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
}
// if lookup is present - add it
if let Some(data) = lookup_data.as_ref() {
// we need to add t(x), selector(x) and table type(x)
multiopening_challenge.mul_assign(&v);
let poly_ref = data.t_poly_monomial.as_ref().unwrap().as_ref();
poly_to_divide_at_z.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
multiopening_challenge.mul_assign(&v);
let poly_ref = data.selector_poly_monomial.as_ref().unwrap().as_ref();
poly_to_divide_at_z.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
multiopening_challenge.mul_assign(&v);
let poly_ref = data.table_type_poly_monomial.as_ref().unwrap().as_ref();
poly_to_divide_at_z.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
}
// now proceed at z*omega
multiopening_challenge.mul_assign(&v);
let mut poly_to_divide_at_z_omega = copy_permutation_z_in_monomial_form;
poly_to_divide_at_z_omega.scale(&worker, multiopening_challenge);
const NEXT_STEP_DILATION: usize = 1;
for id in queries_with_linearization.state_polys[NEXT_STEP_DILATION].iter() {
multiopening_challenge.mul_assign(&v);
let poly_ref = monomials_storage.get_poly(*id);
poly_to_divide_at_z_omega.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
}
for id in queries_with_linearization.witness_polys[NEXT_STEP_DILATION].iter() {
multiopening_challenge.mul_assign(&v);
let poly_ref = monomials_storage.get_poly(*id);
poly_to_divide_at_z_omega.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
}
for queries in queries_with_linearization.gate_setup_polys.iter() {
for id in queries[NEXT_STEP_DILATION].iter() {
multiopening_challenge.mul_assign(&v);
let poly_ref = monomials_storage.get_poly(*id);
poly_to_divide_at_z_omega.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
}
}
if let Some(data) = lookup_data {
// we need to add s(x), grand_product(x) and t(x)
multiopening_challenge.mul_assign(&v);
let poly_ref = data.s_poly_monomial.as_ref().unwrap();
poly_to_divide_at_z_omega.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
multiopening_challenge.mul_assign(&v);
let poly_ref = lookup_z_poly_in_monomial_form.as_ref().unwrap();
poly_to_divide_at_z_omega.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
multiopening_challenge.mul_assign(&v);
let poly_ref = data.t_poly_monomial.as_ref().unwrap().as_ref();
poly_to_divide_at_z_omega.add_assign_scaled(&worker, poly_ref, &multiopening_challenge);
}
// division in monomial form is sequential, so we parallelize the divisions
let mut z_by_omega = z;
z_by_omega.mul_assign(&domain.generator);
let mut polys = vec![(poly_to_divide_at_z, z), (poly_to_divide_at_z_omega, z_by_omega)];
worker.scope(polys.len(), |scope, chunk| {
for p in polys.chunks_mut(chunk) {
scope.spawn(move |_| {
let (poly, at) = &p[0];
let at = *at;
let result = divide_single::<E>(poly.as_ref(), at);
p[0] = (Polynomial::from_coeffs(result).unwrap(), at);
});
}
});
let open_at_z_omega = polys.pop().unwrap().0;
let open_at_z = polys.pop().unwrap().0;
let opening_at_z = commit_using_monomials(
&open_at_z,
&mon_crs,
&worker
)?;
let opening_at_z_omega = commit_using_monomials(
&open_at_z_omega,
&mon_crs,
&worker
)?;
proof.opening_proof_at_z = opening_at_z;
proof.opening_proof_at_z_omega = opening_at_z_omega;
Ok(proof)
}
}
#[derive(Debug)]
/// Opening queries required for the linearization step, grouped by the kind of
/// polynomial being opened. The outer `Vec` of each `..._polys` field is
/// indexed by time dilation (0 = opened at `z`, 1 = opened at `z*omega`, ...).
pub struct SortedGateQueries<E: Engine>{
/// State (variables) polynomials to open, bucketed by dilation.
pub state_polys: Vec<Vec<PolyIdentifier>>,
/// Witness polynomials to open, bucketed by dilation.
pub witness_polys: Vec<Vec<PolyIdentifier>>,
/// Gate selectors that must be opened, in gate-declaration order.
pub gate_selectors: Vec<Box<dyn GateInternal<E>>>,
/// Setup polynomials to open: indexed first by gate, then by dilation.
pub gate_setup_polys: Vec<Vec<Vec<PolyIdentifier>>>,
}
/// Sorts the polynomial opening queries demanded by `gates` into the order
/// used by the linearization step of the prover:
/// - witness/state polynomials first
/// - gate selectors
/// - gate setup polynomials in the order the gates appear
/// - additionally every category is split into buckets by dilation value
///   (0 = opened at `z`, 1 = opened at `z*omega`, ...), up to `max_dilation`
pub fn sort_queries_for_linearization<E: Engine>(gates: & Vec<Box<dyn GateInternal<E>>>, max_dilation: usize) -> SortedGateQueries<E>{
// one (initially empty) bucket per dilation value for state and witness polys,
// and one row of such buckets per gate for setup polys
let state_polys_sorted_by_dilation = vec![vec![]; max_dilation+1];
let witness_polys_sorted_by_dilation = vec![vec![]; max_dilation+1];
let gate_setup_polys_by_gate_and_dilation = vec![vec![vec![]; max_dilation+1]; gates.len()];
let mut queries = SortedGateQueries::<E> {
state_polys: state_polys_sorted_by_dilation,
witness_polys: witness_polys_sorted_by_dilation,
gate_selectors: vec![],
gate_setup_polys: gate_setup_polys_by_gate_and_dilation,
};
// queries we have already placed into a bucket (deduplication set)
let mut opening_requests_before_linearization = std::collections::HashSet::new();
// every polynomial any gate touches, used for the sanity check below
let mut all_queries = std::collections::HashSet::new();
// NOTE(review): `sorted_opening_requests` is accumulated but never returned or
// read after the loop — it appears to be dead state; confirm before removing.
let mut sorted_opening_requests = vec![];
let mut sorted_selector_for_opening = vec![];
// polynomials whose openings are implied by the linearization polynomial
let mut polys_in_linearization = std::collections::HashSet::new();
let num_gate_types = gates.len();
for (gate_idx, gate) in gates.iter().enumerate() {
// record everything this gate queries, regardless of linearization strategy
for q in gate.all_queried_polynomials().into_iter() {
all_queries.insert(q);
}
let queries_to_add = if gate.benefits_from_linearization() {
if num_gate_types > 1 {
// there are various gates, so we need to query the selector
sorted_selector_for_opening.push(gate.box_clone());
}
// it's better to linearize the gate
for q in gate.linearizes_over().into_iter() {
polys_in_linearization.insert(q);
}
gate.needs_opened_for_linearization()
} else {
// we will linearize over the selector, so we do not need to query it
// and instead have to query all other polynomials
// we blindly add all queried polys
gate.all_queried_polynomials()
};
for q in queries_to_add.into_iter() {
if !opening_requests_before_linearization.contains(q) {
opening_requests_before_linearization.insert(q.clone());
// push into the corresponding bucket
// (`q` is still used below — presumably `PolynomialInConstraint` is
// `Copy` or `into_id_and_raw_dilation` borrows; TODO confirm)
let (id, dilation_value) = q.into_id_and_raw_dilation();
match id {
p @ PolyIdentifier::VariablesPolynomial(..) => {
queries.state_polys[dilation_value].push(p);
},
p @ PolyIdentifier::WitnessPolynomial(..) => {
queries.witness_polys[dilation_value].push(p);
},
p @ PolyIdentifier::GateSetupPolynomial(..) => {
queries.gate_setup_polys[gate_idx][dilation_value].push(p);
},
_ => {
unreachable!();
}
};
sorted_opening_requests.push(q);
}
}
}
// Sanity check: we open everything either in linearization or in plain text!
{
let must_open_without_linearization: Vec<_> = all_queries.difference(&polys_in_linearization).collect();
for p in must_open_without_linearization.into_iter() {
assert!(opening_requests_before_linearization.contains(&p));
}
}
// gate selectors are always sorted by the gate order
queries.gate_selectors = sorted_selector_for_opening;
queries
}
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::{SynthesisError};
use std::marker::PhantomData;
use super::cs::*;
#[derive(Debug, Clone)]
/// A constraint system that only checks satisfiability of the circuit being
/// synthesized: it stores assignments and verifies each gate's equation on the
/// fly instead of building proving artifacts.
pub struct TestAssembly<E: Engine, P: PlonkConstraintSystemParams<E>> {
// number of "multiplication-like" gates (not updated in the visible code)
m: usize,
// total number of gates (rows) added so far
n: usize,
// number of public input variables allocated
num_inputs: usize,
// number of auxiliary (witness) variables allocated
num_aux: usize,
// assigned values of public inputs, 1-based via Index::Input
input_assingments: Vec<E::Fr>,
// assigned values of auxiliary variables, 1-based via Index::Aux
aux_assingments: Vec<E::Fr>,
// mapping of inputs to gate rows (not populated in the visible code)
inputs_map: Vec<usize>,
is_finalized: bool,
// partially evaluated gate value plus next-step coefficients, to be
// completed (and checked) against the NEXT gate's variables
next_step_leftover_from_previous_gate: Option<(E::Fr, P::NextTraceStepCoefficients)>,
_marker: std::marker::PhantomData<P>
}
impl<E: Engine, P: PlonkConstraintSystemParams<E>> ConstraintSystem<E, P> for TestAssembly<E, P> {
    /// Allocates a new auxiliary (witness) variable and stores its assignment.
    fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let assigned = value()?;
        // aux indices are 1-based; index 0 is reserved for the dummy variable
        self.num_aux += 1;
        let idx = self.num_aux;
        self.aux_assingments.push(assigned);

        Ok(Variable(Index::Aux(idx)))
    }

    /// Allocates a new public-input variable, stores its assignment and
    /// accounts for the implicit input row (`self.n += 1`).
    fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let assigned = value()?;
        // input indices are 1-based; index 0 signals a missing assignment
        self.num_inputs += 1;
        let idx = self.num_inputs;
        self.input_assingments.push(assigned);
        let input_var = Variable(Index::Input(idx));

        self.n += 1;

        Ok(input_var)
    }

    /// Adds an abstract gate and immediately checks that the current
    /// assignment satisfies it. This assembly verifies satisfiability only;
    /// it produces no proving artifacts.
    fn new_gate(&mut self,
        variables: P::StateVariables,
        this_step_coeffs: P::ThisTraceStepCoefficients,
        next_step_coeffs: P::NextTraceStepCoefficients
    ) -> Result<(), SynthesisError> {
        // First settle the "next step" term deferred by the previous gate:
        // its next-step coefficients pair with THIS gate's variables taken in
        // reverse order.
        if let Some((mut pending, pending_coeffs)) = self.next_step_leftover_from_previous_gate.take() {
            for (var, coeff) in variables.as_ref().iter().rev()
                .zip(pending_coeffs.as_ref().iter())
            {
                let mut term = self.get_value(*var)?;
                term.mul_assign(coeff);
                pending.add_assign(&term);
            }
            if !pending.is_zero() {
                return Err(SynthesisError::Unsatisfiable);
            }
        }

        // Accumulate the value of THIS gate: the linear combination first,
        // then the multiplication term, then the constant term.
        let mut gate_value = E::Fr::zero();
        let mut coeffs_iter = this_step_coeffs.as_ref().iter();
        for (var, coeff) in variables.as_ref().iter().zip(&mut coeffs_iter) {
            let mut term = self.get_value(*var)?;
            term.mul_assign(coeff);
            gate_value.add_assign(&term);
        }

        // multiplication term: q_m * a * b
        let mut mul_term = *(coeffs_iter.next().unwrap());
        mul_term.mul_assign(&self.get_value(variables.as_ref()[0])?);
        mul_term.mul_assign(&self.get_value(variables.as_ref()[1])?);
        gate_value.add_assign(&mul_term);

        // constant term
        gate_value.add_assign(coeffs_iter.next().unwrap());

        assert!(next_step_coeffs.as_ref().len() <= 1);
        if next_step_coeffs.as_ref().len() == 0 {
            // no next-step term: the gate equation must already hold
            if !gate_value.is_zero() {
                return Err(SynthesisError::Unsatisfiable);
            }
        } else {
            assert!(P::CAN_ACCESS_NEXT_TRACE_STEP == true);
            if !next_step_coeffs.as_ref()[0].is_zero() {
                // defer the check until the next gate supplies its variables
                self.next_step_leftover_from_previous_gate = Some((gate_value, next_step_coeffs));
            }
            // NOTE(review): when a next-step coefficient is present but equal
            // to zero, `gate_value` is not checked against zero (matches the
            // original behavior) — confirm this is intentional.
        }

        self.n += 1;

        Ok(())
    }

    /// Resolves a variable to its assigned field value.
    fn get_value(&self, var: Variable) -> Result<E::Fr, SynthesisError> {
        // indices are 1-based; index 0 is special-cased for both kinds
        match var {
            // the dummy variable reads as the constant zero
            Variable(Index::Aux(0)) => Ok(E::Fr::zero()),
            Variable(Index::Input(0)) => Err(SynthesisError::AssignmentMissing),
            Variable(Index::Input(idx)) => Ok(self.input_assingments[idx - 1]),
            Variable(Index::Aux(idx)) => Ok(self.aux_assingments[idx - 1]),
        }
    }

    fn get_dummy_variable(&self) -> Variable {
        self.dummy_variable()
    }
}
impl<E: Engine, P: PlonkConstraintSystemParams<E>> TestAssembly<E, P> {
    /// Creates an empty assembly with no preallocated storage.
    pub fn new() -> Self {
        let tmp = Self {
            n: 0,
            m: 0,
            num_inputs: 0,
            num_aux: 0,

            input_assingments: vec![],
            aux_assingments: vec![],

            inputs_map: vec![],

            is_finalized: false,

            next_step_leftover_from_previous_gate: None,

            _marker: std::marker::PhantomData
        };

        tmp
    }

    /// Creates an empty assembly with assignment storage preallocated for the
    /// expected number of inputs and auxiliary variables.
    pub fn new_with_size_hints(num_inputs: usize, num_aux: usize) -> Self {
        let tmp = Self {
            n: 0,
            m: 0,
            num_inputs: 0,
            num_aux: 0,

            input_assingments: Vec::with_capacity(num_inputs),
            aux_assingments: Vec::with_capacity(num_aux),

            inputs_map: Vec::with_capacity(num_inputs),

            is_finalized: false,

            next_step_leftover_from_previous_gate: None,

            _marker: std::marker::PhantomData
        };

        tmp
    }

    // return variable that is not in a constraint formally, but has some value
    fn dummy_variable(&self) -> Variable {
        Variable(Index::Aux(0))
    }

    /// Returns `true` when the last added gate does not chain into a
    /// (non-existent) next gate via a pending next-step leftover.
    pub fn is_well_formed(&self) -> bool {
        // check that last gate does not chain further!
        self.next_step_leftover_from_previous_gate.is_none()
    }

    // NOTE: a commented-out `is_satisfied(in_a_middle)` implementation used to
    // live here; it walked explicit input/aux `Gate` structures and re-checked
    // q_l*a + q_r*b + q_o*c + q_m*a*b + q_c (+ q_c_next*c_next) per row.
    // Satisfiability is now checked incrementally inside `new_gate`.

    /// Number of gate rows added so far (inputs each occupy one row).
    pub fn num_gates(&self) -> usize {
        self.n
    }
}
<file_sep>/src/plonk/better_better_cs/gadgets/mod.rs
mod num;
mod rescue;
mod assignment;<file_sep>/src/sonic/helped/adapted_prover.rs
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::parameters::{Parameters};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::{Circuit};
use crate::sonic::sonic::AdaptorCircuit;
use crate::sonic::srs::SRS;
use crate::sonic::sonic::Basic;
use super::prover::create_advice as create_advice_sonic_circuit;
use super::prover::create_advice_on_information_and_srs as create_advice_on_information_and_srs_sonic_circuit;
use super::prover::create_proof_on_srs as create_proof_on_srs_sonic_circuit;
use crate::sonic::sonic::CountN;
// pub fn create_advice_on_information_and_srs<E: Engine, C: Circuit<E> + Clone, S: SynthesisDriver>(
/// Produces s(z, y) advice for a Bellman-style circuit by adapting it to the
/// sonic circuit interface and delegating to the sonic implementation.
/// `n` is the multiplication-gate count of the circuit.
pub fn create_advice_on_information_and_srs<E: Engine, C: Circuit<E> + Clone>(
    circuit: C,
    proof: &Proof<E>,
    srs: &SRS<E>,
    n: usize
) -> Result<SxyAdvice<E>, SynthesisError>
{
    create_advice_on_information_and_srs_sonic_circuit::<_, _, Basic>(&AdaptorCircuit(circuit), proof, srs, n)
}
// pub fn create_advice<E: Engine, C: Circuit<E> + Clone, S: SynthesisDriver>(
/// Produces s(z, y) advice for a circuit using the `n` already stored in the
/// verifying key of `parameters` (avoids a counting synthesis pass).
pub fn create_advice<E: Engine, C: Circuit<E> + Clone>(
    circuit: C,
    proof: &Proof<E>,
    parameters: &Parameters<E>,
) -> Result<SxyAdvice<E>, SynthesisError>
{
    let n = parameters.vk.n;
    // Fixed: `&parameters.srs` had been corrupted to `¶meters.srs` by an
    // HTML-entity decoding of `&para`, which does not compile.
    create_advice_on_information_and_srs::<E, C>(circuit, proof, &parameters.srs, n)
}
// pub fn create_advice_on_srs<E: Engine, C: Circuit<E> + Clone, S: SynthesisDriver>(
/// Produces s(z, y) advice for a circuit given only an SRS.
///
/// The gate count `n` is required to compute s(z, y) but is not precomputed
/// anywhere yet, so a non-assigning counting synthesis pass is run first.
pub fn create_advice_on_srs<E: Engine, C: Circuit<E> + Clone>(
    circuit: C,
    proof: &Proof<E>,
    srs: &SRS<E>
) -> Result<SxyAdvice<E>, SynthesisError>
{
    use crate::sonic::sonic::Nonassigning;

    let adapted_circuit = AdaptorCircuit(circuit.clone());

    // Count multiplication gates without assigning any values.
    let mut counter = CountN::<Nonassigning>::new();
    Nonassigning::synthesize(&mut counter, &adapted_circuit)?;

    create_advice_on_information_and_srs::<E, C>(circuit, proof, srs, counter.n)
}
// pub fn create_proof<E: Engine, C: Circuit<E> + Clone, S: SynthesisDriver>(
/// Creates a SONIC proof for `circuit` using the SRS stored in `parameters`.
pub fn create_proof<E: Engine, C: Circuit<E> + Clone>(
    circuit: C,
    parameters: &Parameters<E>
) -> Result<Proof<E>, SynthesisError> {
    // Fixed: `&parameters.srs` had been corrupted to `¶meters.srs` by an
    // HTML-entity decoding of `&para`, which does not compile.
    create_proof_on_srs::<E, C>(circuit, &parameters.srs)
}
// pub fn create_proof_on_srs<E: Engine, C: Circuit<E> + Clone, S: SynthesisDriver>(
/// Creates a SONIC proof for a Bellman-style circuit directly on an SRS by
/// adapting it to the sonic circuit interface.
pub fn create_proof_on_srs<E: Engine, C: Circuit<E> + Clone>(
    circuit: C,
    srs: &SRS<E>
) -> Result<Proof<E>, SynthesisError>
{
    create_proof_on_srs_sonic_circuit::<_, _, Basic>(&AdaptorCircuit(circuit), srs)
}
// #[test]
// fn my_fun_circuit_test() {
// use crate::pairing::ff::PrimeField;
// use crate::pairing::bls12_381::{Bls12, Fr};
// use super::*;
// use crate::sonic::cs::{Basic, ConstraintSystem, LinearCombination};
// struct MyCircuit;
// impl<E: Engine> Circuit<E> for MyCircuit {
// fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
// let (a, b, _) = cs.multiply(|| {
// Ok((
// E::Fr::from_str("10").unwrap(),
// E::Fr::from_str("20").unwrap(),
// E::Fr::from_str("200").unwrap(),
// ))
// })?;
// cs.enforce_zero(LinearCombination::from(a) + a - b);
// //let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?;
// //cs.enforce_zero(LinearCombination::from(b) - multiplier);
// Ok(())
// }
// }
// let srs = SRS::<Bls12>::new(
// 20,
// Fr::from_str("22222").unwrap(),
// Fr::from_str("33333333").unwrap(),
// );
// let proof = create_proof_on_srs::<Bls12, _, Basic>(&MyCircuit, &srs).unwrap();
// use std::time::{Instant};
// let start = Instant::now();
// let mut batch = MultiVerifier::<Bls12, _, Basic>::new(MyCircuit, &srs).unwrap();
// for _ in 0..1 {
// batch.add_proof(&proof, &[/*Fr::from_str("20").unwrap()*/], |_, _| None);
// }
// assert!(batch.check_all());
// let elapsed = start.elapsed();
// println!("time to verify: {:?}", elapsed);
// }
<file_sep>/src/sonic/helped/parameters.rs
use crate::pairing::ff::{
Field,
PrimeField,
PrimeFieldRepr
};
use crate::pairing::{
Engine,
CurveAffine,
EncodedPoint
};
use crate::{
SynthesisError
};
use crate::source::SourceBuilder;
use std::io::{self, Read, Write};
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
// Number of blinding scalars mixed into the prover's polynomials —
// presumably for zero-knowledge; the zero variant below disables blinding.
pub const NUM_BLINDINGS: usize = 6;
// pub const NUM_BLINDINGS: usize = 0;
/// Helper data for the succinct evaluation of s(X, Y) at (z, y):
/// a commitment, its opening, and the claimed value.
#[derive(Clone, Debug, Eq)]
pub struct SxyAdvice<E: Engine> {
    pub s: E::G1Affine,        // commitment to s — presumably s(X, y); confirm against prover
    pub opening: E::G1Affine,  // opening proof for the commitment
    pub szy: E::Fr,            // claimed evaluation s(z, y)
}
impl<E: Engine> PartialEq for SxyAdvice<E> {
    /// Two advice values are equal iff all three components match.
    fn eq(&self, other: &SxyAdvice<E>) -> bool {
        if self.s != other.s {
            return false;
        }
        if self.opening != other.opening {
            return false;
        }
        self.szy == other.szy
    }
}
/// A SONIC proof: two commitments, two field-element evaluations, and two
/// opening proofs. Serialized/deserialized by `write`/`read` below.
#[derive(Clone, Debug, Eq)]
pub struct Proof<E: Engine> {
    pub r: E::G1Affine,          // commitment r
    pub t: E::G1Affine,          // commitment t
    pub rz: E::Fr,               // evaluation of r at z
    pub rzy: E::Fr,              // evaluation of r at z*y — presumably; confirm against verifier
    pub z_opening: E::G1Affine,  // opening proof at z
    pub zy_opening: E::G1Affine  // opening proof at z*y
}
impl<E: Engine> PartialEq for Proof<E> {
    /// Field-wise equality over all six proof components.
    fn eq(&self, other: &Proof<E>) -> bool {
        if self.r != other.r || self.t != other.t {
            return false;
        }
        if self.rz != other.rz || self.rzy != other.rzy {
            return false;
        }
        self.z_opening == other.z_opening && self.zy_opening == other.zy_opening
    }
}
impl<E: Engine> Proof<E> {
    /// Serializes the proof as: compressed `r`, compressed `t`, big-endian
    /// `rz`, big-endian `rzy`, compressed `z_opening`, compressed `zy_opening`.
    /// `read` must consume fields in exactly this order.
    pub fn write<W: Write>(
        &self,
        mut writer: W
    ) -> io::Result<()>
    {
        use crate::pairing::ff::{PrimeField, PrimeFieldRepr};
        writer.write_all(self.r.into_compressed().as_ref())?;
        writer.write_all(self.t.into_compressed().as_ref())?;
        let mut buffer = vec![];
        self.rz.into_repr().write_be(&mut buffer)?;
        writer.write_all(&buffer[..])?;
        let mut buffer = vec![];
        self.rzy.into_repr().write_be(&mut buffer)?;
        writer.write_all(&buffer[..])?;
        writer.write_all(self.z_opening.into_compressed().as_ref())?;
        writer.write_all(self.zy_opening.into_compressed().as_ref())?;

        Ok(())
    }

    /// Deserializes a proof written by `write`. Rejects points at infinity and
    /// zero field elements as malformed (they never occur in a valid proof).
    pub fn read<R: Read>(
        mut reader: R
    ) -> io::Result<Self>
    {
        // Reusable scratch buffers for a compressed G1 point and a field repr.
        let mut g1_repr = <E::G1Affine as CurveAffine>::Compressed::empty();
        let mut fr_repr = E::Fr::zero().into_repr();

        reader.read_exact(g1_repr.as_mut())?;
        let r = g1_repr
                .into_affine()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
                .and_then(|e| if e.is_zero() {
                    Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
                } else {
                    Ok(e)
                })?;

        reader.read_exact(g1_repr.as_mut())?;
        let t = g1_repr
                .into_affine()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
                .and_then(|e| if e.is_zero() {
                    Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
                } else {
                    Ok(e)
                })?;

        fr_repr.read_be(&mut reader)?;
        let rz = E::Fr::from_repr(fr_repr)
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
                .and_then(|e| if e.is_zero() {
                    Err(io::Error::new(io::ErrorKind::InvalidData, "field element is zero"))
                } else {
                    Ok(e)
                })?;

        fr_repr.read_be(&mut reader)?;
        let rzy = E::Fr::from_repr(fr_repr)
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
                .and_then(|e| if e.is_zero() {
                    Err(io::Error::new(io::ErrorKind::InvalidData, "field element is zero"))
                } else {
                    Ok(e)
                })?;

        reader.read_exact(g1_repr.as_mut())?;
        let z_opening = g1_repr
                .into_affine()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
                .and_then(|e| if e.is_zero() {
                    Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
                } else {
                    Ok(e)
                })?;

        reader.read_exact(g1_repr.as_mut())?;
        let zy_opening = g1_repr
                .into_affine()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
                .and_then(|e| if e.is_zero() {
                    Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
                } else {
                    Ok(e)
                })?;

        Ok(Proof {
            r: r,
            t: t,
            rz: rz,
            rzy: rzy,
            z_opening: z_opening,
            zy_opening: zy_opening
        })
    }
}
/// SONIC verifying key: four G2 points derived from the SRS plus the circuit
/// shape data collected during preprocessing (`k_map`, `n`, `q`).
#[derive(Clone, Debug, Eq)]
pub struct VerifyingKey<E: Engine> {
    pub alpha_x: E::G2Affine,          // h^{alpha * x} (taken from srs.h_positive_x_alpha[1])
    pub alpha: E::G2Affine,            // h^{alpha} (srs.h_positive_x_alpha[0])
    pub neg_h: E::G2Affine,            // negated h^{x^0}
    pub neg_x_n_minus_d: E::G2Affine,  // negated srs.h_negative_x[d - n]
    pub k_map: Vec<usize>,             // per-input constraint index map from preprocessing
    pub n: usize,                      // number of multiplication gates
    pub q: usize                       // number of linear constraints
}
impl<E: Engine> PartialEq for VerifyingKey<E> {
    /// Field-wise equality: all group elements and shape data must match.
    fn eq(&self, other: &VerifyingKey<E>) -> bool {
        if self.alpha_x != other.alpha_x || self.alpha != other.alpha {
            return false;
        }
        if self.neg_h != other.neg_h || self.neg_x_n_minus_d != other.neg_x_n_minus_d {
            return false;
        }
        if self.k_map != other.k_map {
            return false;
        }
        self.n == other.n && self.q == other.q
    }
}
impl<E: Engine> VerifyingKey<E> {
    /// Serializes the key: four uncompressed G2 points, then `k_map` as a u32
    /// length prefix followed by u32 entries, then `n` and `q` as u32
    /// (all integers big-endian). `read` must mirror this layout exactly.
    pub fn write<W: Write>(
        &self,
        mut writer: W
    ) -> io::Result<()>
    {
        writer.write_all(self.alpha_x.into_uncompressed().as_ref())?;
        writer.write_all(self.alpha.into_uncompressed().as_ref())?;
        writer.write_all(self.neg_h.into_uncompressed().as_ref())?;
        writer.write_all(self.neg_x_n_minus_d.into_uncompressed().as_ref())?;

        writer.write_u32::<BigEndian>(self.k_map.len() as u32)?;
        for k in &self.k_map {
            writer.write_u32::<BigEndian>(*k as u32)?;
        }
        writer.write_u32::<BigEndian>(self.n as u32)?;
        writer.write_u32::<BigEndian>(self.q as u32)?;

        Ok(())
    }

    /// Deserializes a key written by `write`. Unlike `Proof::read`, points at
    /// infinity are not rejected here — curve decoding errors still fail.
    pub fn read<R: Read>(
        mut reader: R
    ) -> io::Result<Self>
    {
        // Reusable scratch buffer for one uncompressed G2 point.
        let mut g2_repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();

        reader.read_exact(g2_repr.as_mut())?;
        let alpha_x = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        reader.read_exact(g2_repr.as_mut())?;
        let alpha = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        reader.read_exact(g2_repr.as_mut())?;
        let neg_h = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        reader.read_exact(g2_repr.as_mut())?;
        let neg_x_n_minus_d = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        let k_map_len = reader.read_u32::<BigEndian>()? as usize;

        let mut k_map = vec![];

        for _ in 0..k_map_len {
            let k = reader.read_u32::<BigEndian>()? as usize;

            k_map.push(k);
        }

        let n = reader.read_u32::<BigEndian>()? as usize;

        let q = reader.read_u32::<BigEndian>()? as usize;

        Ok(VerifyingKey {
            alpha_x: alpha_x,
            alpha: alpha,
            neg_h: neg_h,
            neg_x_n_minus_d: neg_x_n_minus_d,
            k_map: k_map,
            n: n,
            q: q
        })
    }
}
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::srs::SRS;
use crate::sonic::cs::Circuit as SonicCircuit;
use crate::sonic::sonic::{Basic, Preprocess};
use std::marker::PhantomData;
impl<E: Engine> VerifyingKey<E> {
    /// Builds a verifying key for `circuit` from an SRS by running a
    /// preprocessing synthesis pass that collects `n`, `q`, and `k_map`.
    pub fn new<C: SonicCircuit<E>, S: SynthesisDriver>(circuit: C, srs: &SRS<E>) -> Result<Self, SynthesisError> {
        let mut preprocess = Preprocess::new();

        S::synthesize(&mut preprocess, &circuit)?;

        Ok(Self {
            alpha_x: srs.h_positive_x_alpha[1],

            alpha: srs.h_positive_x_alpha[0],

            // negation of the zeroth negative power of x in G2
            neg_h: {
                let mut tmp = srs.h_negative_x[0];
                tmp.negate();

                tmp
            },

            // negation of srs.h_negative_x[d - n] — presumably encodes
            // x^{-(d-n)}; confirm against the SRS layout
            neg_x_n_minus_d: {
                let mut tmp = srs.h_negative_x[srs.d - preprocess.n];
                tmp.negate();

                tmp
            },

            k_map: preprocess.k_map,
            n: preprocess.n,
            q: preprocess.q
        })
    }
}
/// `VerifyingKey` with its G2 points converted to the pairing-engine
/// "prepared" form for faster repeated pairing checks.
pub struct PreparedVerifyingKey<E: Engine> {
    alpha_x: <E::G2Affine as CurveAffine>::Prepared,
    alpha: <E::G2Affine as CurveAffine>::Prepared,
    neg_h: <E::G2Affine as CurveAffine>::Prepared,
    neg_x_n_minus_d: <E::G2Affine as CurveAffine>::Prepared,
    k_map: Vec<usize>,  // same shape data as in VerifyingKey
    n: usize,
    q: usize
}
/// Everything needed to prove and verify: the verifying key plus the full SRS.
/// The commented-out fields document the raw SRS layout this struct replaced.
#[derive(Clone, Eq)]
pub struct Parameters<E: Engine> {
    pub vk: VerifyingKey<E>,

    pub srs: SRS<E>,
    // pub d: usize,

    // // g^{x^0}, g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}}
    // pub g_negative_x: Arc<Vec<E::G1Affine>>,

    // // g^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}}
    // pub g_positive_x: Arc<Vec<E::G1Affine>>,

    // // g^{x^0}, g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}}
    // pub h_negative_x: Arc<Vec<E::G2Affine>>,

    // // g^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}}
    // pub h_positive_x: Arc<Vec<E::G2Affine>>,

    // // alpha*(g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}})
    // pub g_negative_x_alpha: Arc<Vec<E::G1Affine>>,

    // // alpha*(g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}})
    // pub g_positive_x_alpha: Arc<Vec<E::G1Affine>>,

    // // alpha*(h^{x^0}, h^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}})
    // pub h_negative_x_alpha: Arc<Vec<E::G2Affine>>,

    // // alpha*(h^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}})
    // pub h_positive_x_alpha: Arc<Vec<E::G2Affine>>,
}
impl<E: Engine> PartialEq for Parameters<E> {
    /// Parameters are equal when both the verifying key and the SRS match.
    fn eq(&self, other: &Parameters<E>) -> bool {
        let keys_match = self.vk == other.vk;
        keys_match && self.srs == other.srs
    }
}
impl<E: Engine> Parameters<E> {
    /// Serializes the verifying key followed by the SRS.
    pub fn write<W: Write>(
        &self,
        mut writer: W
    ) -> io::Result<()>
    {
        self.vk.write(&mut writer)?;
        self.srs.write(&mut writer)
    }

    /// Deserializes in the same order as `write`: vk first, then the SRS.
    /// `checked` is forwarded to the SRS reader (point validation toggle).
    pub fn read<R: Read>(
        mut reader: R,
        checked: bool
    ) -> io::Result<Self>
    {
        let vk = VerifyingKey::<E>::read(&mut reader)?;
        let srs = SRS::<E>::read(&mut reader, checked)?;

        Ok(Parameters { vk, srs })
    }
}
#[test]
fn parameters_generation() {
    use crate::{ConstraintSystem, Circuit};
    use crate::pairing::bls12_381::{Bls12, Fr};

    // Toy circuit enforcing a * b = c, with c as the only public input.
    #[derive(Clone)]
    struct MySillyCircuit<E: Engine> {
        a: Option<E::Fr>,
        b: Option<E::Fr>
    }

    impl<E: Engine> Circuit<E> for MySillyCircuit<E> {
        fn synthesize<CS: ConstraintSystem<E>>(
            self,
            cs: &mut CS
        ) -> Result<(), SynthesisError>
        {
            let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
            let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?;
            let c = cs.alloc_input(|| "c", || {
                let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?;
                let b = self.b.ok_or(SynthesisError::AssignmentMissing)?;

                a.mul_assign(&b);
                Ok(a)
            })?;

            cs.enforce(
                || "a*b=c",
                |lc| lc + a,
                |lc| lc + b,
                |lc| lc + c
            );

            Ok(())
        }
    }

    use rand::{Rng, Rand, thread_rng};
    use super::{generate_parameters, get_circuit_parameters, generate_srs, generate_parameters_on_srs_and_information};
    use super::adapted_prover::create_proof;

    let info = get_circuit_parameters::<Bls12, _>(MySillyCircuit { a: None, b: None }).expect("Must get circuit info");
    println!("{:?}", info);

    let rng = &mut thread_rng();

    let x: Fr = rng.gen();
    let alpha: Fr = rng.gen();

    // Parameters built directly must agree with parameters built from an SRS.
    let params = generate_parameters::<Bls12, _>(MySillyCircuit { a: None, b: None }, alpha, x).unwrap();
    let srs = generate_srs::<Bls12>(alpha, x, info.n * 100).unwrap();
    let naive_srs = SRS::<Bls12>::new(
        info.n * 100,
        x,
        alpha,
    );

    assert!(srs == naive_srs);

    let params_on_srs = generate_parameters_on_srs_and_information::<Bls12>(&srs, info.clone()).unwrap();

    assert!(params == params_on_srs);

    // Round-trip serialization, both with and without point validation.
    {
        let mut v = vec![];
        params.write(&mut v).unwrap();

        let de_params = Parameters::read(&v[..], true).unwrap();
        assert!(params == de_params);

        let de_params = Parameters::read(&v[..], false).unwrap();
        assert!(params == de_params);
    }

    for _ in 0..100 {
        let a = Fr::rand(rng);
        let b = Fr::rand(rng);
        let mut c = a;
        c.mul_assign(&b);

        // Fixed: `&params` had been corrupted to `¶ms` by an HTML-entity
        // decoding of `&para`, which does not compile.
        let proof = create_proof(
            MySillyCircuit {
                a: Some(a),
                b: Some(b)
            },
            &params,
        ).unwrap();

        let mut v = vec![];
        proof.write(&mut v).unwrap();

        assert_eq!(v.len(), 256);

        let de_proof = Proof::read(&v[..]).unwrap();
        assert!(proof == de_proof);

        // assert!(verify_proof(&pvk, &proof, &[c]).unwrap());
        // assert!(!verify_proof(&pvk, &proof, &[a]).unwrap());
    }
}
/// Largeley this module is implementation of provable evaluation of s(z, y), that is represented in two parts
/// s2(X, Y) = \sum_{i=1}^{N} (Y^{-i} + Y^{i})X^{i}
/// s1(X, Y) = ...
/// s1 part requires grand product and permutation arguments, that are also implemented
mod s2_proof;
mod wellformed_argument;
pub mod grand_product_argument;
mod permutation_argument;
mod verifier;
pub mod permutation_structure;
mod aggregate;
pub use self::wellformed_argument::{WellformednessArgument, WellformednessProof};
pub use self::permutation_argument::{PermutationArgument, PermutationProof, PermutationArgumentProof};
pub use self::verifier::SuccinctMultiVerifier;
pub use self::aggregate::*;<file_sep>/src/plonk/better_better_cs/gates/selector_optimized_with_d_next.rs
use super::*;
/// Width-4 main gate that can access the D wire on the next row and carries
/// two multiplicative selectors (A*B and A*C).
#[derive(Clone, Debug, Hash, Default, serde::Serialize, serde::Deserialize)]
pub struct SelectorOptimizedWidth4MainGateWithDNext;

impl SelectorOptimizedWidth4MainGateWithDNext {
    // Indices into the 8-coefficient setup layout used throughout this gate:
    // [q_a, q_b, q_c, q_d, q_m_ab, q_m_ac, q_const, q_d_next]
    pub const AB_MULTIPLICATION_TERM_COEFF_INDEX: usize = 4;
    pub const AC_MULTIPLICATION_TERM_COEFF_INDEX: usize = 5;
    pub const CONSTANT_TERM_COEFF_INDEX: usize = 6;
    pub const D_NEXT_TERM_COEFF_INDEX: usize = 7;
}

// Unique identifier keying this gate's setup polynomials in storage.
const GATE_NAME: &'static str = "main gate of width 4 with D_next and selector optimization";
impl<E: Engine> GateInternal<E> for SelectorOptimizedWidth4MainGateWithDNext {
    fn name(&self) -> &'static str {
        GATE_NAME
    }

    /// Degree of the gate's constraint polynomial.
    fn degree(&self) -> usize {
        3
    }

    fn can_include_public_inputs(&self) -> bool {
        true
    }

    /// All polynomials this gate queries: 8 setup selectors, 4 state columns,
    /// and the D column shifted by one row (dilation 1).
    #[inline]
    fn all_queried_polynomials(&self) -> &'static [PolynomialInConstraint] {
        const ALL_QUERIED: [PolynomialInConstraint; 13] = [
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 0)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 1)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 2)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 3)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 4)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 5)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 6)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 7)),

            PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(0)),
            PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(1)),
            PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(2)),
            PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(3)),

            PolynomialInConstraint::from_id_and_dilation(PolyIdentifier::VariablesPolynomial(3), 1),
        ];

        &ALL_QUERIED
    }

    #[inline]
    fn setup_polynomials(&self) -> &'static [PolyIdentifier] {
        const SETUP_POLYS: [PolyIdentifier; 8] =[
            PolyIdentifier::GateSetupPolynomial(GATE_NAME, 0),
            PolyIdentifier::GateSetupPolynomial(GATE_NAME, 1),
            PolyIdentifier::GateSetupPolynomial(GATE_NAME, 2),
            PolyIdentifier::GateSetupPolynomial(GATE_NAME, 3),
            PolyIdentifier::GateSetupPolynomial(GATE_NAME, 4),
            PolyIdentifier::GateSetupPolynomial(GATE_NAME, 5),
            PolyIdentifier::GateSetupPolynomial(GATE_NAME, 6),
            PolyIdentifier::GateSetupPolynomial(GATE_NAME, 7),
        ];

        &SETUP_POLYS
    }

    #[inline]
    fn variable_polynomials(&self) -> &'static [PolyIdentifier] {
        const VARIABLE_POLYS: [PolyIdentifier; 4] = [
            PolyIdentifier::VariablesPolynomial(0),
            PolyIdentifier::VariablesPolynomial(1),
            PolyIdentifier::VariablesPolynomial(2),
            PolyIdentifier::VariablesPolynomial(3),
        ];

        &VARIABLE_POLYS
    }

    #[inline]
    fn benefits_from_linearization(&self) -> bool {
        true
    }

    /// When linearizing, all 8 setup selectors stay in the linearization poly.
    #[inline]
    fn linearizes_over(&self) -> &'static [PolynomialInConstraint] {
        const LINEARIZED_OVER_POLYS: [PolynomialInConstraint; 8] = [
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 0)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 1)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 2)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 3)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 4)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 5)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 6)),
            PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(GATE_NAME, 7)),
        ];

        &LINEARIZED_OVER_POLYS
    }

    /// Witness openings required for linearization: the 4 state columns at z
    /// and the D column at z*omega.
    #[inline]
    fn needs_opened_for_linearization(&self) -> &'static [PolynomialInConstraint] {
        const ALL_OPENED_FOR_LINEARIZATION: [PolynomialInConstraint; 5] = [
            PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(0)),
            PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(1)),
            PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(2)),
            PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(3)),

            PolynomialInConstraint::from_id_and_dilation(PolyIdentifier::VariablesPolynomial(3), 1),
        ];

        &ALL_OPENED_FOR_LINEARIZATION
    }

    #[inline]
    fn num_quotient_terms(&self) -> usize {
        1
    }

    /// Evaluates the gate equation on one row and returns the residual
    /// (zero iff the row satisfies the constraint). Prints diagnostics when
    /// the residual is non-zero. On the last row `q_d_next` must be zero
    /// because there is no next row to reference.
    fn verify_on_row(&self, row: usize, poly_storage: &AssembledPolynomialStorage<E>, last_row: bool) -> E::Fr {
        let name = <Self as GateInternal<E>>::name(&self);
        let q_a = poly_storage.get_poly_at_step(PolyIdentifier::GateSetupPolynomial(name, 0), row);
        let q_b = poly_storage.get_poly_at_step(PolyIdentifier::GateSetupPolynomial(name, 1), row);
        let q_c = poly_storage.get_poly_at_step(PolyIdentifier::GateSetupPolynomial(name, 2), row);
        let q_d = poly_storage.get_poly_at_step(PolyIdentifier::GateSetupPolynomial(name, 3), row);
        let q_m_ab = poly_storage.get_poly_at_step(PolyIdentifier::GateSetupPolynomial(name, Self::AB_MULTIPLICATION_TERM_COEFF_INDEX), row);
        let q_m_ac = poly_storage.get_poly_at_step(PolyIdentifier::GateSetupPolynomial(name, Self::AC_MULTIPLICATION_TERM_COEFF_INDEX), row);
        let q_const = poly_storage.get_poly_at_step(PolyIdentifier::GateSetupPolynomial(name, Self::CONSTANT_TERM_COEFF_INDEX), row);
        // Consistency fix: use the named constant instead of a magic `7`,
        // matching the lookups above (the value is unchanged).
        let q_d_next = poly_storage.get_poly_at_step(PolyIdentifier::GateSetupPolynomial(name, Self::D_NEXT_TERM_COEFF_INDEX), row);

        // println!("{}*A + {}*B + {}*C + {}*D + {} + {}*A*B + {}*A*C {}*D_next", q_a, q_b, q_c, q_d, q_const, q_m_ab, q_m_ac, q_d_next);
        let a_value = poly_storage.get_poly_at_step(PolyIdentifier::VariablesPolynomial(0), row);
        let b_value = poly_storage.get_poly_at_step(PolyIdentifier::VariablesPolynomial(1), row);
        let c_value = poly_storage.get_poly_at_step(PolyIdentifier::VariablesPolynomial(2), row);
        let d_value = poly_storage.get_poly_at_step(PolyIdentifier::VariablesPolynomial(3), row);
        let d_next_value = if last_row == false {
            Some(poly_storage.get_poly_at_step(PolyIdentifier::VariablesPolynomial(3), row+1))
        } else {
            None
        };

        // println!("A = {}, B = {}, C = {}, D = {}, D_Next = {:?}", a_value, b_value, c_value, d_value, d_next_value);

        // linear part: q_a*A + q_b*B + q_c*C + q_d*D
        let mut total = E::Fr::zero();

        for (q, v) in [a_value, b_value, c_value, d_value].iter()
                    .zip([q_a, q_b, q_c, q_d].iter())
        {
            let mut tmp = *q;
            tmp.mul_assign(v);
            total.add_assign(&tmp);
        }

        total.add_assign(&q_const);

        // multiplicative terms: q_m_ab*A*B and q_m_ac*A*C
        let mut tmp = q_m_ab;
        tmp.mul_assign(&a_value);
        tmp.mul_assign(&b_value);
        total.add_assign(&tmp);

        let mut tmp = q_m_ac;
        tmp.mul_assign(&a_value);
        tmp.mul_assign(&c_value);
        total.add_assign(&tmp);

        // next-row term: q_d_next * D(next row)
        if last_row == false {
            let mut tmp = d_next_value.expect("must be able to get d_next");
            tmp.mul_assign(&q_d_next);
            total.add_assign(&tmp);
        } else {
            assert!(q_d_next.is_zero());
        }

        if !total.is_zero() {
            println!("{}*A + {}*B + {}*C + {}*D + {} + {}*A*B + {}*A*C + {}*D_next", q_a, q_b, q_c, q_d, q_const, q_m_ab, q_m_ac, q_d_next);
            println!("A = {}, B = {}, C = {}, D = {}, D_Next = {:?}", a_value, b_value, c_value, d_value, d_next_value);
        }

        total
    }

    fn contribute_into_quotient(
        &self,
        _domain_size: usize,
        _poly_storage: &mut AssembledPolynomialStorage<E>,
        _monomial_storage: & AssembledPolynomialStorageForMonomialForms<E>,
        _challenges: &[E::Fr],
        _omegas_bitreversed: &BitReversedOmegas<E::Fr>,
        _omegas_inv_bitreversed: &OmegasInvBitreversed<E::Fr>,
        _worker: &Worker
    ) -> Result<Polynomial<E::Fr, Values>, SynthesisError> {
        unreachable!("this type of gate can only be used as a main gate");
    }

    fn contribute_into_linearization(
        &self,
        _domain_size: usize,
        _at: E::Fr,
        _queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
        _monomials_storage: & AssembledPolynomialStorageForMonomialForms<E>,
        _challenges: &[E::Fr],
        _worker: &Worker
    ) -> Result<Polynomial<E::Fr, Coefficients>, SynthesisError> {
        unreachable!("this gate is indended to be the main gate and should use main gate functions")
    }
    fn contribute_into_verification_equation(
        &self,
        _domain_size: usize,
        _at: E::Fr,
        _queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
        _challenges: &[E::Fr],
    ) -> Result<E::Fr, SynthesisError> {
        unreachable!("this gate is indended to be the main gate and should use main gate functions")
    }

    /// Public inputs are folded into the constant-term selector.
    fn put_public_inputs_into_selector_id(&self) -> Option<usize> {
        Some(Self::CONSTANT_TERM_COEFF_INDEX)
    }

    fn box_clone(&self) -> Box<dyn GateInternal<E>> {
        Box::from(self.clone())
    }
    fn contribute_into_linearization_commitment(
        &self,
        // Fix: prefix unused parameters with `_` like the sibling stub
        // methods above — the body is `unreachable!`, so the old names
        // produced unused-variable warnings.
        _domain_size: usize,
        _at: E::Fr,
        _queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
        _commitments_storage: &std::collections::HashMap<PolyIdentifier, E::G1Affine>,
        _challenges: &[E::Fr],
    ) -> Result<E::G1, SynthesisError> {
        unreachable!("this gate is indended to be the main gate and should use main gate functions")
    }
}
// Marker impl: all behavior comes from `GateInternal` and `MainGate`.
impl<E: Engine> Gate<E> for SelectorOptimizedWidth4MainGateWithDNext {

}
impl<E: Engine> MainGate<E> for SelectorOptimizedWidth4MainGateWithDNext {
const NUM_LINEAR_TERMS: usize = 4;
const NUM_VARIABLES: usize = 4;
const NUM_VARIABLES_ON_NEXT_STEP: usize = 1;
fn range_of_multiplicative_term() -> std::ops::Range<usize> {
4..6
}
fn range_of_linear_terms() -> std::ops::Range<usize> {
0..4
}
fn index_for_constant_term() -> usize {
6
}
fn range_of_next_step_linear_terms() -> std::ops::Range<usize> {
7..8
}
    /// Flattens a `MainGateTerm` into the gate's wire/coefficient layout:
    /// returns 4 variables (padded with `padding`) and 8 coefficients
    /// ([q_a, q_b, q_c, q_d, q_m_ab, q_m_ac, q_const, q_d_next]).
    /// Only the A*B multiplication slot is populated here; callers that need
    /// the second (A*C) multiplicative selector must not use this formatter
    /// (hence the `num_multiplicative_terms <= 1` assertion).
    fn format_term(mut instance: MainGateTerm<E>, padding: Variable) -> Result<(SmallVec<[Variable; DEFAULT_SMALLVEC_CAPACITY]>, SmallVec<[E::Fr; DEFAULT_SMALLVEC_CAPACITY]>), SynthesisError> {
        assert!(instance.num_multiplicative_terms <= 1,
            "should not use term formatting by this gate if you are using it for optimized selector with 2 multiplicative terms"
        );
        let mut flattened_variables = smallvec::smallvec![padding; 4];
        let mut flattened_coefficients = smallvec::smallvec![E::Fr::zero(); 8];
        // tracks which of the 4 wire slots are already occupied
        let mut bitmap = SimpleBitmap::new();

        let allowed_linear = 4;
        let allowed_multiplications = 2;
        let allowed_constants = 1;

        let mut used_in_multiplication = [padding; 2];

        debug_assert!(instance.num_constant_terms <= allowed_constants, "must not containt more constants than allowed");
        debug_assert!(instance.num_multiplicative_terms <= allowed_multiplications, "must not containt more multiplications than allowed");
        debug_assert!(instance.terms.len() <= allowed_constants + allowed_multiplications + allowed_linear, "gate can not fit that many terms");

        // Place the (single) multiplicative term first: its two variables are
        // pinned to wire slots 0 and 1, and its coefficient to the A*B slot.
        if instance.num_multiplicative_terms != 0 {
            let index = instance.terms.iter().position(
                |t| {
                    match t {
                        ArithmeticTerm::Product(_, _) => true,
                        _ => false,
                    }
                }
            ).unwrap();

            let term = instance.terms.swap_remove(index);
            match term {
                ArithmeticTerm::Product(vars, coeff) => {
                    debug_assert_eq!(vars.len(), 2, "multiplicative terms must contain two variables");

                    flattened_variables[0] = vars[0];
                    flattened_variables[1] = vars[1];
                    used_in_multiplication[0] = vars[0];
                    used_in_multiplication[1] = vars[1];
                    flattened_coefficients[Self::AB_MULTIPLICATION_TERM_COEFF_INDEX] = coeff;
                    bitmap.set(0);
                    bitmap.set(1);
                },
                _ => {
                    unreachable!("must be multiplicative term");
                }
            }
        }

        // Then the (single) constant term.
        if instance.num_constant_terms != 0 {
            let index = instance.terms.iter().position(
                |t| {
                    match t {
                        ArithmeticTerm::Constant(_) => true,
                        _ => false,
                    }
                }
            ).unwrap();

            let term = instance.terms.swap_remove(index);
            match term {
                ArithmeticTerm::Constant(coeff) => {
                    flattened_coefficients[Self::CONSTANT_TERM_COEFF_INDEX] = coeff;
                },
                _ => {
                    unreachable!("must be constant term");
                }
            }
        }

        // only additions left: reuse the slot of a variable that already
        // appeared in the multiplication, otherwise take the next free slot
        for term in instance.terms.into_iter() {
            match term {
                ArithmeticTerm::SingleVariable(var, coeff) => {
                    let index = flattened_variables.iter().position(
                        |&t| t == var
                    );
                    if let Some(index) = index {
                        // there is some variable there already, so it must have come from multiplication
                        assert!(used_in_multiplication[0] == var || used_in_multiplication[1] == var,
                            "variable in linear term must only happen already if it was in multiplication");
                        flattened_coefficients[index] = coeff;
                    } else {
                        let idx = bitmap.get_next_unused();
                        flattened_variables[idx] = var;
                        flattened_coefficients[idx] = coeff;
                        bitmap.set(idx);
                    }
                },
                _ => {
                    unreachable!("must be additive term");
                }
            }
        }

        Ok((flattened_variables, flattened_coefficients))
    }
/// Flattens a `MainGateTerm` containing only linear and constant terms (no
/// multiplications) into the gate's wire/coefficient layout. Unlike
/// `format_term`, duplicate variables are allowed: every linear term simply
/// takes the next free wire, so the same variable may occupy several wires.
///
/// Fix: the `unreachable!` messages on the non-constant and non-linear arms
/// were copy-pasted as "must be multiplicative term"; they now name the arm
/// that was actually expected (mirroring `format_term`).
fn format_linear_term_with_duplicates(mut instance: MainGateTerm<E>, padding: Variable) -> Result<(SmallVec<[Variable; DEFAULT_SMALLVEC_CAPACITY]>, SmallVec<[E::Fr; DEFAULT_SMALLVEC_CAPACITY]>), SynthesisError> {
    assert!(instance.num_multiplicative_terms <= 1,
        "should not use term formatting by this gate if you are using it for optimized selector with 2 multiplicative terms"
    );
    // Wires default to the padding variable, coefficients to zero.
    let mut flattened_variables = smallvec::smallvec![padding; 4];
    let mut flattened_coefficients = smallvec::smallvec![E::Fr::zero(); 8];
    let mut bitmap = SimpleBitmap::new();
    let allowed_linear = 4;
    let allowed_multiplications = 0;
    let allowed_constants = 1;
    debug_assert!(instance.num_constant_terms <= allowed_constants, "must not containt more constants than allowed");
    // NOTE(review): this stricter check (no multiplications at all) supersedes
    // the `<= 1` assert above, which appears copy-pasted from `format_term`.
    assert!(instance.num_multiplicative_terms <= allowed_multiplications, "must not containt multiplications");
    debug_assert!(instance.terms.len() <= allowed_constants + allowed_multiplications + allowed_linear, "gate can not fit that many terms");
    // Pull out the (single) constant term first, if any.
    if instance.num_constant_terms != 0 {
        let index = instance.terms.iter().position(
            |t| {
                match t {
                    ArithmeticTerm::Constant(_) => true,
                    _ => false,
                }
            }
        ).unwrap();
        let term = instance.terms.swap_remove(index);
        match term {
            ArithmeticTerm::Constant(coeff) => {
                flattened_coefficients[Self::CONSTANT_TERM_COEFF_INDEX] = coeff;
            },
            _ => {
                unreachable!("must be constant term");
            }
        }
    }
    // Only additive terms remain: assign each to the next unused wire,
    // intentionally allowing the same variable on multiple wires.
    for term in instance.terms.into_iter() {
        match term {
            ArithmeticTerm::SingleVariable(var, coeff) => {
                let idx = bitmap.get_next_unused();
                flattened_variables[idx] = var;
                flattened_coefficients[idx] = coeff;
                bitmap.set(idx);
            },
            _ => {
                unreachable!("must be additive term");
            }
        }
    }
    Ok((flattened_variables, flattened_coefficients))
}
/// Returns the dummy variable replicated across all 4 wires of this gate.
fn dummy_vars_to_inscribe(dummy: Variable) -> SmallVec<[Variable; DEFAULT_SMALLVEC_CAPACITY]> {
    smallvec::smallvec![dummy, dummy, dummy, dummy]
}
/// Returns an all-zero coefficient vector covering all 8 coefficient slots.
fn empty_coefficients() -> SmallVec<[E::Fr; DEFAULT_SMALLVEC_CAPACITY]> {
    let zero = E::Fr::zero();
    smallvec::smallvec![zero; 8]
}
/// Computes this gate's contribution to the quotient polynomial over the
/// bit-reversed coset LDE:
///   PI(X) + Q_const(X) + sum_i Q_i(X)*w_i(X)
///   + Q_m_ab(X)*A(X)*B(X) + Q_m_ac(X)*A(X)*C(X) + Q_dnext(X)*D(X*omega),
/// all scaled by the single challenge assigned to this gate.
fn contribute_into_quotient_for_public_inputs<'a, 'b>(
    &self,
    domain_size: usize,
    public_inputs: &[E::Fr],
    poly_storage: &mut AssembledPolynomialStorage<'a, E>,
    monomials_storage: & AssembledPolynomialStorageForMonomialForms<'b, E>,
    challenges: &[E::Fr],
    omegas_bitreversed: &BitReversedOmegas<E::Fr>,
    omegas_inv_bitreversed: &OmegasInvBitreversed<E::Fr>,
    worker: &Worker
) -> Result<Polynomial<E::Fr, Values>, SynthesisError> {
    assert!(domain_size.is_power_of_two());
    assert_eq!(challenges.len(), <Self as GateInternal<E>>::num_quotient_terms(&self));
    let lde_factor = poly_storage.lde_factor;
    assert!(lde_factor.is_power_of_two());
    assert!(poly_storage.is_bitreversed);
    let coset_factor = E::Fr::multiplicative_generator();
    // Include the public inputs: value i goes into evaluation slot i.
    let mut inputs_poly = Polynomial::<E::Fr, Values>::new_for_size(domain_size)?;
    for (idx, &input) in public_inputs.iter().enumerate() {
        inputs_poly.as_mut()[idx] = input;
    }
    // go into monomial form
    let mut inputs_poly = inputs_poly.ifft_using_bitreversed_ntt(&worker, omegas_inv_bitreversed, &E::Fr::one())?;
    // add constants selectors vector (Q_const) while still in monomial form
    let name = <Self as GateInternal<E>>::name(&self);
    let key = PolyIdentifier::GateSetupPolynomial(name, Self::CONSTANT_TERM_COEFF_INDEX);
    let constants_poly_ref = monomials_storage.get_poly(key);
    inputs_poly.add_assign(&worker, constants_poly_ref);
    drop(constants_poly_ref);
    // LDE onto the coset; this becomes the accumulator for all terms below.
    let mut t_1 = inputs_poly.bitreversed_lde_using_bitreversed_ntt(
        &worker,
        lde_factor,
        omegas_bitreversed,
        &coset_factor
    )?;
    // Make sure the LDE of every queried polynomial is present in the storage.
    for &p in <Self as GateInternal<E>>::all_queried_polynomials(&self).into_iter() {
        // skip public constants poly (was used in public inputs)
        if p == PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(name, Self::CONSTANT_TERM_COEFF_INDEX)) {
            continue;
        }
        ensure_in_map_or_create(&worker,
            p,
            domain_size,
            omegas_bitreversed,
            lde_factor,
            coset_factor,
            monomials_storage,
            poly_storage
        )?;
    }
    let ldes_storage = &*poly_storage;
    // Q_A * A
    let q_a_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(name, 0)),
        ldes_storage
    );
    let a_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(0)),
        ldes_storage
    );
    // `tmp` is a scratch buffer; later terms reuse its allocation instead of cloning.
    let mut tmp = q_a_ref.clone();
    tmp.mul_assign(&worker, a_ref);
    t_1.add_assign(&worker, &tmp);
    drop(q_a_ref);
    drop(a_ref);
    // Q_B * B
    let q_b_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(name, 1)),
        ldes_storage
    );
    let b_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(1)),
        ldes_storage
    );
    tmp.reuse_allocation(q_b_ref);
    tmp.mul_assign(&worker, b_ref);
    t_1.add_assign(&worker, &tmp);
    drop(q_b_ref);
    drop(b_ref);
    // Q_C * C
    let q_c_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(name, 2)),
        ldes_storage
    );
    let c_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(2)),
        ldes_storage
    );
    tmp.reuse_allocation(q_c_ref);
    tmp.mul_assign(&worker, c_ref);
    t_1.add_assign(&worker, &tmp);
    drop(q_c_ref);
    drop(c_ref);
    // Q_D * D
    let q_d_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(name, 3)),
        ldes_storage
    );
    let d_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(3)),
        ldes_storage
    );
    tmp.reuse_allocation(q_d_ref);
    tmp.mul_assign(&worker, d_ref);
    t_1.add_assign(&worker, &tmp);
    drop(q_d_ref);
    drop(d_ref);
    // Q_M_AB * A * B
    let q_m_ab_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(name, Self::AB_MULTIPLICATION_TERM_COEFF_INDEX)),
        ldes_storage
    );
    let a_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(0)),
        ldes_storage
    );
    let b_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(1)),
        ldes_storage
    );
    tmp.reuse_allocation(q_m_ab_ref);
    tmp.mul_assign(&worker, a_ref);
    tmp.mul_assign(&worker, b_ref);
    t_1.add_assign(&worker, &tmp);
    drop(q_m_ab_ref);
    drop(a_ref);
    drop(b_ref);
    // Q_M_AC * A * C (the second multiplicative selector of this gate)
    let q_m_ac_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(name, Self::AC_MULTIPLICATION_TERM_COEFF_INDEX)),
        ldes_storage
    );
    let a_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(0)),
        ldes_storage
    );
    let c_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(2)),
        ldes_storage
    );
    tmp.reuse_allocation(q_m_ac_ref);
    tmp.mul_assign(&worker, a_ref);
    tmp.mul_assign(&worker, c_ref);
    t_1.add_assign(&worker, &tmp);
    drop(q_m_ac_ref);
    drop(a_ref);
    drop(c_ref);
    // Q_D_next * D_next (wire d queried with dilation 1, i.e. on the next row)
    let q_d_next_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id(PolyIdentifier::GateSetupPolynomial(name, 7)),
        ldes_storage
    );
    let d_next_ref = get_from_map_unchecked(
        PolynomialInConstraint::from_id_and_dilation(PolyIdentifier::VariablesPolynomial(3), 1),
        ldes_storage
    );
    tmp.reuse_allocation(q_d_next_ref);
    tmp.mul_assign(&worker, d_next_ref);
    t_1.add_assign(&worker, &tmp);
    drop(q_d_next_ref);
    drop(d_next_ref);
    // Scale the whole gate contribution by its challenge.
    t_1.scale(&worker, challenges[0]);
    Ok(t_1)
}
/// Assembles this gate's part of the linearization polynomial r(X) in
/// monomial form: each setup polynomial is scaled by the corresponding wire
/// opening(s) at z, summed, and the total is scaled by the gate challenge.
/// The public-input and domain arguments are unused here but kept so the
/// signature matches the other main gates.
fn contribute_into_linearization_for_public_inputs(
    &self,
    _domain_size: usize,
    _public_inputs: &[E::Fr],
    _at: E::Fr,
    queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
    monomials_storage: & AssembledPolynomialStorageForMonomialForms<E>,
    challenges: &[E::Fr],
    worker: &Worker
) -> Result<Polynomial<E::Fr, Coefficients>, SynthesisError> {
    // we actually do not depend on public inputs, but we use this form for consistency
    assert_eq!(challenges.len(), 1);
    // Openings of the four wires at z, and of wire d at z*omega (dilation 1).
    let a_value = *queried_values.get(&PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(0)))
        .ok_or(SynthesisError::AssignmentMissing)?;
    let b_value = *queried_values.get(&PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(1)))
        .ok_or(SynthesisError::AssignmentMissing)?;
    let c_value = *queried_values.get(&PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(2)))
        .ok_or(SynthesisError::AssignmentMissing)?;
    let d_value = *queried_values.get(&PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(3)))
        .ok_or(SynthesisError::AssignmentMissing)?;
    let d_next_value = *queried_values.get(&PolynomialInConstraint::from_id_and_dilation(PolyIdentifier::VariablesPolynomial(3), 1))
        .ok_or(SynthesisError::AssignmentMissing)?;
    let name = <Self as GateInternal<E>>::name(&self);
    // Q_a * A
    let mut result = monomials_storage.get_poly(PolyIdentifier::GateSetupPolynomial(name, 0)).clone();
    result.scale(&worker, a_value);
    // Q_b * B
    let poly_ref = monomials_storage.get_poly(PolyIdentifier::GateSetupPolynomial(name, 1));
    result.add_assign_scaled(&worker, poly_ref, &b_value);
    // Q_c * C
    let poly_ref = monomials_storage.get_poly(PolyIdentifier::GateSetupPolynomial(name, 2));
    result.add_assign_scaled(&worker, poly_ref, &c_value);
    // Q_d * D
    let poly_ref = monomials_storage.get_poly(PolyIdentifier::GateSetupPolynomial(name, 3));
    result.add_assign_scaled(&worker, poly_ref, &d_value);
    // Q_m_AB * A*B
    let mut tmp = a_value;
    tmp.mul_assign(&b_value);
    let poly_ref = monomials_storage.get_poly(PolyIdentifier::GateSetupPolynomial(name, Self::AB_MULTIPLICATION_TERM_COEFF_INDEX));
    result.add_assign_scaled(&worker, poly_ref, &tmp);
    // Q_m_AC * A*C
    let mut tmp = a_value;
    tmp.mul_assign(&c_value);
    let poly_ref = monomials_storage.get_poly(PolyIdentifier::GateSetupPolynomial(name, Self::AC_MULTIPLICATION_TERM_COEFF_INDEX));
    result.add_assign_scaled(&worker, poly_ref, &tmp);
    // Q_const
    let poly_ref = monomials_storage.get_poly(PolyIdentifier::GateSetupPolynomial(name, Self::CONSTANT_TERM_COEFF_INDEX));
    result.add_assign(&worker, poly_ref);
    // Q_dNext * D_next
    let poly_ref = monomials_storage.get_poly(PolyIdentifier::GateSetupPolynomial(name, 7));
    result.add_assign_scaled(&worker, poly_ref, &d_next_value);
    // Scale the whole gate contribution by its challenge.
    result.scale(&worker, challenges[0]);
    Ok(result)
}
/// Evaluates the public-input contribution sum_i L_i(at) * input_i over the
/// domain of `domain_size`, scaled by the (single) gate challenge. Returns
/// zero when there are no public inputs.
fn add_inputs_into_quotient(
    &self,
    domain_size: usize,
    public_inputs: &[E::Fr],
    at: E::Fr,
    challenges: &[E::Fr],
) -> Result<E::Fr, SynthesisError> {
    if public_inputs.is_empty() {
        return Ok(E::Fr::zero());
    }
    assert_eq!(challenges.len(), 1);
    // just evaluate L_{i}(z) * value for every public input and accumulate
    let domain = Domain::<E::Fr>::new_for_size(domain_size as u64)?;
    let mut acc = E::Fr::zero();
    for (poly_idx, input_value) in public_inputs.iter().enumerate() {
        let mut term = evaluate_lagrange_poly_at_point(poly_idx, &domain, at)?;
        term.mul_assign(input_value);
        acc.add_assign(&term);
    }
    acc.mul_assign(&challenges[0]);
    Ok(acc)
}
/// Verifier-side counterpart of `contribute_into_linearization_for_public_inputs`:
/// builds the same linear combination, but over the setup-polynomial
/// commitments in G1 instead of the polynomials themselves. The public-input
/// and domain arguments are unused but kept for interface consistency.
fn contribute_into_linearization_commitment_for_public_inputs<'a>(
    &self,
    _domain_size: usize,
    _public_inputs: &[E::Fr],
    _at: E::Fr,
    queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
    commitments_storage: &std::collections::HashMap<PolyIdentifier, E::G1Affine>,
    challenges: &[E::Fr],
) -> Result<E::G1, SynthesisError> {
    // we actually do not depend on public inputs, but we use this form for consistency
    assert_eq!(challenges.len(), 1);
    let mut aggregate = E::G1::zero();
    // Openings of the four wires at z, and of wire d at z*omega (dilation 1).
    let a_value = *queried_values.get(&PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(0)))
        .ok_or(SynthesisError::AssignmentMissing)?;
    let b_value = *queried_values.get(&PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(1)))
        .ok_or(SynthesisError::AssignmentMissing)?;
    let c_value = *queried_values.get(&PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(2)))
        .ok_or(SynthesisError::AssignmentMissing)?;
    let d_value = *queried_values.get(&PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(3)))
        .ok_or(SynthesisError::AssignmentMissing)?;
    let d_next_value = *queried_values.get(&PolynomialInConstraint::from_id_and_dilation(PolyIdentifier::VariablesPolynomial(3), 1))
        .ok_or(SynthesisError::AssignmentMissing)?;
    let name = <Self as GateInternal<E>>::name(&self);
    // Q_a * A
    let commitment = commitments_storage.get(&PolyIdentifier::GateSetupPolynomial(name, 0)).ok_or(SynthesisError::AssignmentMissing)?;
    let scaled = commitment.mul(a_value.into_repr());
    aggregate.add_assign(&scaled);
    // Q_b * B
    let commitment = commitments_storage.get(&PolyIdentifier::GateSetupPolynomial(name, 1)).ok_or(SynthesisError::AssignmentMissing)?;
    let scaled = commitment.mul(b_value.into_repr());
    aggregate.add_assign(&scaled);
    // Q_c * C
    let commitment = commitments_storage.get(&PolyIdentifier::GateSetupPolynomial(name, 2)).ok_or(SynthesisError::AssignmentMissing)?;
    let scaled = commitment.mul(c_value.into_repr());
    aggregate.add_assign(&scaled);
    // Q_d * D
    let commitment = commitments_storage.get(&PolyIdentifier::GateSetupPolynomial(name, 3)).ok_or(SynthesisError::AssignmentMissing)?;
    let scaled = commitment.mul(d_value.into_repr());
    aggregate.add_assign(&scaled);
    // Q_m_AB * A*B
    let mut tmp = a_value;
    tmp.mul_assign(&b_value);
    let commitment = commitments_storage.get(&PolyIdentifier::GateSetupPolynomial(name, Self::AB_MULTIPLICATION_TERM_COEFF_INDEX)).ok_or(SynthesisError::AssignmentMissing)?;
    let scaled = commitment.mul(tmp.into_repr());
    aggregate.add_assign(&scaled);
    // Q_m_AC * A*C
    let mut tmp = a_value;
    tmp.mul_assign(&c_value);
    let commitment = commitments_storage.get(&PolyIdentifier::GateSetupPolynomial(name, Self::AC_MULTIPLICATION_TERM_COEFF_INDEX)).ok_or(SynthesisError::AssignmentMissing)?;
    let scaled = commitment.mul(tmp.into_repr());
    aggregate.add_assign(&scaled);
    // Q_const (coefficient is 1, so the affine point is added directly)
    let commitment = commitments_storage.get(&PolyIdentifier::GateSetupPolynomial(name, Self::CONSTANT_TERM_COEFF_INDEX)).ok_or(SynthesisError::AssignmentMissing)?;
    aggregate.add_assign_mixed(&commitment);
    // Q_dNext * D_next
    let commitment = commitments_storage.get(&PolyIdentifier::GateSetupPolynomial(name, 7)).ok_or(SynthesisError::AssignmentMissing)?;
    let scaled = commitment.mul(d_next_value.into_repr());
    aggregate.add_assign(&scaled);
    // Scale the whole gate contribution by its challenge.
    aggregate.mul_assign(challenges[0]);
    Ok(aggregate)
}
}<file_sep>/src/plonk/verifier/mod.rs
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::{SynthesisError};
use std::marker::PhantomData;
use crate::plonk::cs::gates::*;
use crate::plonk::cs::*;
use crate::worker::*;
use super::polynomials::*;
use super::domains::*;
use crate::plonk::commitments::*;
use crate::plonk::commitments::transcript::*;
use crate::plonk::utils::*;
use crate::plonk::generator::*;
use crate::plonk::prover::*;
/// Evaluates 1/Z(point) for the "almost vanishing" polynomial of the radix-2
/// domain of size `vanishing_size` (must be a power of two): the polynomial
/// that vanishes on every domain element except the last one, so
/// Z^{-1}(X) = (X - omega^{n-1}) / (X^n - 1) in this code's indexing.
/// Panics (via `expect`) if `point` lies in the domain (zero denominator).
/// Fix: parameter renamed from the misspelled `vahisning_size`.
fn evaluate_inverse_vanishing_poly<E: Engine>(vanishing_size: usize, point: E::Fr) -> E::Fr {
    assert!(vanishing_size.is_power_of_two());
    // update from the paper - it should not hold for the last generator, omega^(n) in original notations
    // Z(X) = (X^(n+1) - 1) / (X - omega^(n)) => Z^{-1}(X) = (X - omega^(n)) / (X^(n+1) - 1)
    let domain = Domain::<E::Fr>::new_for_size(vanishing_size as u64).expect("should fit");
    let n_domain_omega = domain.generator;
    // omega^(n-1): the domain element excluded from the vanishing set.
    let root = n_domain_omega.pow([(vanishing_size - 1) as u64]);
    let mut numerator = point;
    numerator.sub_assign(&root);
    let mut denominator = point.pow([vanishing_size as u64]);
    denominator.sub_assign(&E::Fr::one());
    let denominator = denominator.inverse().expect("must exist");
    numerator.mul_assign(&denominator);
    numerator
}
/// Evaluates the Lagrange base polynomial L_{poly_number} of the radix-2
/// domain of size `vanishing_size` at the point `at`, via the shifted closed
/// form L_i(X) = L_0(X * omega^{-i}) with L_0(Y) = (Y^n - 1) / (n * (Y - 1)).
/// Panics (via `expect`) if `at` equals omega^{poly_number}, where the
/// denominator vanishes.
/// Fix: parameter renamed from the misspelled `vahisning_size`.
fn evaluate_lagrange_poly<E: Engine>(vanishing_size: usize, poly_number: usize, at: E::Fr) -> E::Fr {
    assert!(vanishing_size.is_power_of_two());
    // Lift the domain size n into the field.
    let mut repr = E::Fr::zero().into_repr();
    repr.as_mut()[0] = vanishing_size as u64;
    let size_fe = E::Fr::from_repr(repr).expect("is a valid representation");
    // L_0(X) = (Z_H(X) / (X - 1)).(1/n) and L_0(1) = 1
    // L_1(omega) = 1 = L_0(omega * omega^-1)
    let domain = Domain::<E::Fr>::new_for_size(vanishing_size as u64).expect("domain of this size should exist");
    let omega = domain.generator;
    let omega_inv = omega.inverse().expect("must exist");
    // Shift the evaluation point: L_i(at) = L_0(at * omega^{-i}).
    let argument_multiplier = omega_inv.pow([poly_number as u64]);
    let mut argument = at;
    argument.mul_assign(&argument_multiplier);
    let mut numerator = argument.pow([vanishing_size as u64]);
    numerator.sub_assign(&E::Fr::one());
    let mut denom = argument;
    denom.sub_assign(&E::Fr::one());
    denom.mul_assign(&size_fe);
    let denom_inv = denom.inverse().expect("must exist");
    numerator.mul_assign(&denom_inv);
    numerator
}
/// Verifies a (non-chunked) PLONK proof produced with a non-homomorphic
/// commitment scheme `S`. The verifier replays the Fiat-Shamir transcript to
/// derive the same challenges as the prover, blindly re-assembles t(z) from
/// the claimed opening values and compares it with the claimed t(z), then
/// checks the aggregated multi-opening proof and the separate opening of the
/// `t` commitment (which lives on a 4x larger domain).
pub fn verify_nonhomomorphic<E: Engine, S: CommitmentScheme<E::Fr, Prng = T>, T: Transcript<E::Fr, Input = S::Commitment>>(
    setup: &PlonkSetup<E, S>,
    proof: &PlonkNonhomomorphicProof<E, S>,
    meta: S::Meta,
    large_meta: S::Meta
) -> Result<bool, SynthesisError> {
    assert!(S::IS_HOMOMORPHIC == false);
    let num_gates = setup.n;
    let committer = S::new_for_size(num_gates.next_power_of_two(), meta);
    // Separate committer for `t`, which is committed over a 4x larger domain.
    let large_committer = S::new_for_size(4 * num_gates.next_power_of_two(), large_meta);
    let mut transcript = T::new();
    // we need n+1 to be a power of two and can not have n to be power of two
    let required_domain_size = setup.n + 1;
    assert!(required_domain_size.is_power_of_two());
    // Replay the prover's transcript so the derived challenges match.
    transcript.commit_input(&proof.a_commitment);
    transcript.commit_input(&proof.b_commitment);
    transcript.commit_input(&proof.c_commitment);
    let beta = transcript.get_challenge();
    let gamma = transcript.get_challenge();
    transcript.commit_input(&proof.z_1_commitment);
    transcript.commit_input(&proof.z_2_commitment);
    // we do not commit those cause those are known already
    let n_fe = E::Fr::from_str(&setup.n.to_string()).expect("must be valid field element");
    let mut two_n_fe = n_fe;
    two_n_fe.double();
    let alpha = transcript.get_challenge();
    transcript.commit_input(&proof.t_commitment);
    let z = transcript.get_challenge();
    // this is a sanity check
    // Claimed opening values at z (the "shifted" ones are at z*omega).
    let a_at_z = proof.a_opening_value;
    let b_at_z = proof.b_opening_value;
    let c_at_z = proof.c_opening_value;
    let q_l_at_z = proof.q_l_opening_value;
    let q_r_at_z = proof.q_r_opening_value;
    let q_o_at_z = proof.q_o_opening_value;
    let q_m_at_z = proof.q_m_opening_value;
    let q_c_at_z = proof.q_c_opening_value;
    let s_id_at_z = proof.s_id_opening_value;
    let sigma_1_at_z = proof.sigma_1_opening_value;
    let sigma_2_at_z = proof.sigma_2_opening_value;
    let sigma_3_at_z = proof.sigma_3_opening_value;
    // This value also accumulates powers of alpha as each term is folded in below.
    let mut inverse_vanishing_at_z = evaluate_inverse_vanishing_poly::<E>(required_domain_size.next_power_of_two(), z);
    let z_1_at_z = proof.z_1_unshifted_opening_value;
    let z_2_at_z = proof.z_2_unshifted_opening_value;
    let z_1_shifted_at_z = proof.z_1_shifted_opening_value;
    let z_2_shifted_at_z = proof.z_2_shifted_opening_value;
    let l_0_at_z = evaluate_lagrange_poly::<E>(required_domain_size.next_power_of_two(), 0, z);
    let l_n_minus_one_at_z = evaluate_lagrange_poly::<E>(required_domain_size.next_power_of_two(), setup.n - 1, z);
    let t_at_z = proof.t_opening_value;
    // Absorb all claimed openings before sampling the aggregation challenge.
    {
        transcript.commit_field_element(&a_at_z);
        transcript.commit_field_element(&b_at_z);
        transcript.commit_field_element(&c_at_z);
        transcript.commit_field_element(&q_l_at_z);
        transcript.commit_field_element(&q_r_at_z);
        transcript.commit_field_element(&q_o_at_z);
        transcript.commit_field_element(&q_m_at_z);
        transcript.commit_field_element(&q_c_at_z);
        transcript.commit_field_element(&s_id_at_z);
        transcript.commit_field_element(&sigma_1_at_z);
        transcript.commit_field_element(&sigma_2_at_z);
        transcript.commit_field_element(&sigma_3_at_z);
        transcript.commit_field_element(&t_at_z);
        transcript.commit_field_element(&z_1_at_z);
        transcript.commit_field_element(&z_2_at_z);
        transcript.commit_field_element(&z_1_shifted_at_z);
        transcript.commit_field_element(&z_2_shifted_at_z);
    }
    let aggregation_challenge = transcript.get_challenge();
    // let shifted_opening_aggregation_challenge = transcript.get_challenge();
    // TODO: add public inputs
    // verify by blindly assembling a t poly
    // Arithmetic gate term: q_c + q_l*a + q_r*b + q_o*c + q_m*a*b, divided by Z(z).
    let mut t_1 = {
        let mut res = q_c_at_z;
        let mut tmp = q_l_at_z;
        tmp.mul_assign(&a_at_z);
        res.add_assign(&tmp);
        let mut tmp = q_r_at_z;
        tmp.mul_assign(&b_at_z);
        res.add_assign(&tmp);
        let mut tmp = q_o_at_z;
        tmp.mul_assign(&c_at_z);
        res.add_assign(&tmp);
        let mut tmp = q_m_at_z;
        tmp.mul_assign(&a_at_z);
        tmp.mul_assign(&b_at_z);
        res.add_assign(&tmp);
        // Fold the next power of alpha into the shared 1/Z(z) factor.
        inverse_vanishing_at_z.mul_assign(&alpha);
        res.mul_assign(&inverse_vanishing_at_z);
        res
    };
    // Grand-product step for z_1 (identity ordering via s_id, offset by n and 2n per column).
    {
        let mut res = z_1_at_z;
        let mut tmp = s_id_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&a_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        let mut tmp = s_id_at_z;
        tmp.add_assign(&n_fe);
        tmp.mul_assign(&beta);
        tmp.add_assign(&b_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        let mut tmp = s_id_at_z;
        tmp.add_assign(&two_n_fe);
        tmp.mul_assign(&beta);
        tmp.add_assign(&c_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        res.sub_assign(&z_1_shifted_at_z);
        inverse_vanishing_at_z.mul_assign(&alpha);
        res.mul_assign(&inverse_vanishing_at_z);
        t_1.add_assign(&res);
    }
    // Grand-product step for z_2 (permuted ordering via sigma_1..sigma_3).
    {
        let mut res = z_2_at_z;
        let mut tmp = sigma_1_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&a_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        let mut tmp = sigma_2_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&b_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        let mut tmp = sigma_3_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&c_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        res.sub_assign(&z_2_shifted_at_z);
        inverse_vanishing_at_z.mul_assign(&alpha);
        res.mul_assign(&inverse_vanishing_at_z);
        t_1.add_assign(&res);
    }
    // z_1 and z_2 must agree at the last step: (z_1 - z_2)(z*omega) * L_{n-1}(z).
    {
        let mut res = z_1_shifted_at_z;
        res.sub_assign(&z_2_shifted_at_z);
        res.mul_assign(&l_n_minus_one_at_z);
        inverse_vanishing_at_z.mul_assign(&alpha);
        res.mul_assign(&inverse_vanishing_at_z);
        t_1.add_assign(&res);
    }
    // ... and at the first step: (z_1 - z_2)(z) * L_0(z).
    {
        let mut res = z_1_at_z;
        res.sub_assign(&z_2_at_z);
        res.mul_assign(&l_0_at_z);
        inverse_vanishing_at_z.mul_assign(&alpha);
        res.mul_assign(&inverse_vanishing_at_z);
        t_1.add_assign(&res);
    }
    let domain = Domain::<E::Fr>::new_for_size(required_domain_size as u64)?;
    let mut z_by_omega = z;
    z_by_omega.mul_assign(&domain.generator);
    // The three vectors below are index-aligned; z_1 and z_2 appear twice:
    // once opened at z and once at z*omega (the shifted openings).
    let commitments = vec![
        &proof.a_commitment,
        &proof.b_commitment,
        &proof.c_commitment,
        &setup.q_l,
        &setup.q_r,
        &setup.q_o,
        &setup.q_m,
        &setup.q_c,
        &setup.s_id,
        &setup.sigma_1,
        &setup.sigma_2,
        &setup.sigma_3,
        &proof.z_1_commitment,
        &proof.z_2_commitment,
        &proof.z_1_commitment,
        &proof.z_2_commitment,
    ];
    let claimed_values = vec![
        a_at_z,
        b_at_z,
        c_at_z,
        q_l_at_z,
        q_r_at_z,
        q_o_at_z,
        q_m_at_z,
        q_c_at_z,
        s_id_at_z,
        sigma_1_at_z,
        sigma_2_at_z,
        sigma_3_at_z,
        z_1_at_z,
        z_2_at_z,
        z_1_shifted_at_z,
        z_2_shifted_at_z,
    ];
    let opening_points = vec![
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z_by_omega,
        z_by_omega
    ];
    // Cheap algebraic check first: the re-assembled t(z) must match the claim.
    if t_1 != t_at_z {
        println!("Recalculated t(z) is not equal to the provided value");
        return Ok(false);
    }
    let valid = committer.verify_multiple_openings(commitments, opening_points, &claimed_values, aggregation_challenge, &proof.openings_proof, &mut transcript);
    if !valid {
        println!("Multiopening is invalid");
        return Ok(false);
    }
    // `t` is opened separately with the large-domain committer.
    let valid = large_committer.verify_single(&proof.t_commitment, z, proof.t_opening_value, &proof.t_opening_proof, &mut transcript);
    if !valid {
        println!("T commitment opening is invalid");
        return Ok(false);
    }
    // let mut opening_point = z;
    // opening_point.mul_assign(&domain.generator);
    // let commitments = vec![
    //     &proof.z_1_commitment,
    //     &proof.z_2_commitment,
    // ];
    // let claimed_values = vec![
    //     proof.z_1_shifted_opening_value,
    //     proof.z_2_shifted_opening_value
    // ];
    // let valid = committer.verify_multiple_openings(commitments, opening_point, &claimed_values, shifted_opening_aggregation_challenge, &proof.shifted_openings_proof, &mut transcript);
    Ok(valid)
}
#[track_caller]
/// Same verification as `verify_nonhomomorphic`, but for proofs where the
/// quotient t(X) is split into three chunks (t_low, t_mid, t_high) committed
/// with the same small-domain committer and opened inside the aggregated
/// multi-opening. t(z) is reconstructed as
///   t_low(z) + z^n * t_mid(z) + z^{2n} * t_high(z).
/// Also prints wall-clock timings for the main verification phases.
pub fn verify_nonhomomorphic_chunked<E: Engine, S: CommitmentScheme<E::Fr, Prng = T>, T: Transcript<E::Fr, Input = S::Commitment>>(
    setup: &PlonkSetup<E, S>,
    proof: &PlonkChunkedNonhomomorphicProof<E, S>,
    meta: S::Meta
) -> Result<bool, SynthesisError> {
    assert!(S::IS_HOMOMORPHIC == false);
    let num_gates = setup.n;
    let t = std::time::Instant::now();
    let committer = S::new_for_size(num_gates.next_power_of_two(), meta);
    println!("Committer creation taken {:?}", t.elapsed());
    let t0 = std::time::Instant::now();
    let mut transcript = T::new();
    let t = std::time::Instant::now();
    // we need n+1 to be a power of two and can not have n to be power of two
    let required_domain_size = setup.n + 1;
    assert!(required_domain_size.is_power_of_two());
    // Replay the prover's transcript so the derived challenges match.
    transcript.commit_input(&proof.a_commitment);
    transcript.commit_input(&proof.b_commitment);
    transcript.commit_input(&proof.c_commitment);
    let beta = transcript.get_challenge();
    let gamma = transcript.get_challenge();
    transcript.commit_input(&proof.z_1_commitment);
    transcript.commit_input(&proof.z_2_commitment);
    // we do not commit those cause those are known already
    let n_fe = E::Fr::from_str(&setup.n.to_string()).expect("must be valid field element");
    let mut two_n_fe = n_fe;
    two_n_fe.double();
    let alpha = transcript.get_challenge();
    transcript.commit_input(&proof.t_low_commitment);
    transcript.commit_input(&proof.t_mid_commitment);
    transcript.commit_input(&proof.t_high_commitment);
    let z = transcript.get_challenge();
    // this is a sanity check
    // Claimed opening values at z (the "shifted" ones are at z*omega).
    let a_at_z = proof.a_opening_value;
    let b_at_z = proof.b_opening_value;
    let c_at_z = proof.c_opening_value;
    let q_l_at_z = proof.q_l_opening_value;
    let q_r_at_z = proof.q_r_opening_value;
    let q_o_at_z = proof.q_o_opening_value;
    let q_m_at_z = proof.q_m_opening_value;
    let q_c_at_z = proof.q_c_opening_value;
    let s_id_at_z = proof.s_id_opening_value;
    let sigma_1_at_z = proof.sigma_1_opening_value;
    let sigma_2_at_z = proof.sigma_2_opening_value;
    let sigma_3_at_z = proof.sigma_3_opening_value;
    // This value also accumulates powers of alpha as each term is folded in below.
    let mut inverse_vanishing_at_z = evaluate_inverse_vanishing_poly::<E>(required_domain_size, z);
    let z_1_at_z = proof.z_1_unshifted_opening_value;
    let z_2_at_z = proof.z_2_unshifted_opening_value;
    let z_1_shifted_at_z = proof.z_1_shifted_opening_value;
    let z_2_shifted_at_z = proof.z_2_shifted_opening_value;
    let l_0_at_z = evaluate_lagrange_poly::<E>(required_domain_size, 0, z);
    let l_n_minus_one_at_z = evaluate_lagrange_poly::<E>(required_domain_size, setup.n - 1, z);
    let t_low_at_z = proof.t_low_opening_value;
    let t_mid_at_z = proof.t_mid_opening_value;
    let t_high_at_z = proof.t_high_opening_value;
    // Recombine the chunks: t(z) = t_low(z) + z^n * t_mid(z) + z^{2n} * t_high(z).
    let z_in_pow_of_domain_size = z.pow([required_domain_size as u64]);
    let mut t_at_z = E::Fr::zero();
    t_at_z.add_assign(&t_low_at_z);
    let mut tmp = z_in_pow_of_domain_size;
    tmp.mul_assign(&t_mid_at_z);
    t_at_z.add_assign(&tmp);
    let mut tmp = z_in_pow_of_domain_size;
    tmp.mul_assign(&z_in_pow_of_domain_size);
    tmp.mul_assign(&t_high_at_z);
    t_at_z.add_assign(&tmp);
    // Absorb all claimed openings before sampling the aggregation challenge.
    {
        transcript.commit_field_element(&a_at_z);
        transcript.commit_field_element(&b_at_z);
        transcript.commit_field_element(&c_at_z);
        transcript.commit_field_element(&q_l_at_z);
        transcript.commit_field_element(&q_r_at_z);
        transcript.commit_field_element(&q_o_at_z);
        transcript.commit_field_element(&q_m_at_z);
        transcript.commit_field_element(&q_c_at_z);
        transcript.commit_field_element(&s_id_at_z);
        transcript.commit_field_element(&sigma_1_at_z);
        transcript.commit_field_element(&sigma_2_at_z);
        transcript.commit_field_element(&sigma_3_at_z);
        transcript.commit_field_element(&t_low_at_z);
        transcript.commit_field_element(&t_mid_at_z);
        transcript.commit_field_element(&t_high_at_z);
        transcript.commit_field_element(&z_1_at_z);
        transcript.commit_field_element(&z_2_at_z);
        transcript.commit_field_element(&z_1_shifted_at_z);
        transcript.commit_field_element(&z_2_shifted_at_z);
    }
    let aggregation_challenge = transcript.get_challenge();
    // TODO: add public inputs
    // verify by blindly assembling a t poly
    // Arithmetic gate term: q_c + q_l*a + q_r*b + q_o*c + q_m*a*b, divided by Z(z).
    let mut t_1 = {
        let mut res = q_c_at_z;
        let mut tmp = q_l_at_z;
        tmp.mul_assign(&a_at_z);
        res.add_assign(&tmp);
        let mut tmp = q_r_at_z;
        tmp.mul_assign(&b_at_z);
        res.add_assign(&tmp);
        let mut tmp = q_o_at_z;
        tmp.mul_assign(&c_at_z);
        res.add_assign(&tmp);
        let mut tmp = q_m_at_z;
        tmp.mul_assign(&a_at_z);
        tmp.mul_assign(&b_at_z);
        res.add_assign(&tmp);
        // Fold the next power of alpha into the shared 1/Z(z) factor.
        inverse_vanishing_at_z.mul_assign(&alpha);
        res.mul_assign(&inverse_vanishing_at_z);
        res
    };
    // Grand-product step for z_1 (identity ordering via s_id, offset by n and 2n per column).
    {
        let mut res = z_1_at_z;
        let mut tmp = s_id_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&a_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        let mut tmp = s_id_at_z;
        tmp.add_assign(&n_fe);
        tmp.mul_assign(&beta);
        tmp.add_assign(&b_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        let mut tmp = s_id_at_z;
        tmp.add_assign(&two_n_fe);
        tmp.mul_assign(&beta);
        tmp.add_assign(&c_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        res.sub_assign(&z_1_shifted_at_z);
        inverse_vanishing_at_z.mul_assign(&alpha);
        res.mul_assign(&inverse_vanishing_at_z);
        t_1.add_assign(&res);
    }
    // Grand-product step for z_2 (permuted ordering via sigma_1..sigma_3).
    {
        let mut res = z_2_at_z;
        let mut tmp = sigma_1_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&a_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        let mut tmp = sigma_2_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&b_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        let mut tmp = sigma_3_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&c_at_z);
        tmp.add_assign(&gamma);
        res.mul_assign(&tmp);
        res.sub_assign(&z_2_shifted_at_z);
        inverse_vanishing_at_z.mul_assign(&alpha);
        res.mul_assign(&inverse_vanishing_at_z);
        t_1.add_assign(&res);
    }
    // z_1 and z_2 must agree at the last step: (z_1 - z_2)(z*omega) * L_{n-1}(z).
    {
        let mut res = z_1_shifted_at_z;
        res.sub_assign(&z_2_shifted_at_z);
        res.mul_assign(&l_n_minus_one_at_z);
        inverse_vanishing_at_z.mul_assign(&alpha);
        res.mul_assign(&inverse_vanishing_at_z);
        t_1.add_assign(&res);
    }
    // ... and at the first step: (z_1 - z_2)(z) * L_0(z).
    {
        let mut res = z_1_at_z;
        res.sub_assign(&z_2_at_z);
        res.mul_assign(&l_0_at_z);
        inverse_vanishing_at_z.mul_assign(&alpha);
        res.mul_assign(&inverse_vanishing_at_z);
        t_1.add_assign(&res);
    }
    let domain = Domain::<E::Fr>::new_for_size(required_domain_size as u64)?;
    let mut z_by_omega = z;
    z_by_omega.mul_assign(&domain.generator);
    // The three vectors below are index-aligned; z_1 and z_2 appear twice:
    // once opened at z and once at z*omega (the shifted openings). The three
    // t-chunks are appended at the end, all opened at z.
    let commitments = vec![
        &proof.a_commitment,
        &proof.b_commitment,
        &proof.c_commitment,
        &setup.q_l,
        &setup.q_r,
        &setup.q_o,
        &setup.q_m,
        &setup.q_c,
        &setup.s_id,
        &setup.sigma_1,
        &setup.sigma_2,
        &setup.sigma_3,
        &proof.z_1_commitment,
        &proof.z_2_commitment,
        &proof.z_1_commitment,
        &proof.z_2_commitment,
        &proof.t_low_commitment,
        &proof.t_mid_commitment,
        &proof.t_high_commitment,
    ];
    let claimed_values = vec![
        a_at_z,
        b_at_z,
        c_at_z,
        q_l_at_z,
        q_r_at_z,
        q_o_at_z,
        q_m_at_z,
        q_c_at_z,
        s_id_at_z,
        sigma_1_at_z,
        sigma_2_at_z,
        sigma_3_at_z,
        z_1_at_z,
        z_2_at_z,
        z_1_shifted_at_z,
        z_2_shifted_at_z,
        t_low_at_z,
        t_mid_at_z,
        t_high_at_z,
    ];
    let opening_points = vec![
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z,
        z_by_omega,
        z_by_omega,
        z,
        z,
        z,
    ];
    // Cheap algebraic check first: the re-assembled t(z) must match the claim.
    if t_1 != t_at_z {
        println!("Recalculated t(z) is not equal to the provided value");
        return Ok(false);
    }
    println!("Initial verification taken {:?}", t.elapsed());
    let t = std::time::Instant::now();
    let valid = committer.verify_multiple_openings(
        commitments,
        opening_points,
        &claimed_values,
        aggregation_challenge,
        &proof.openings_proof,
        &mut transcript
    );
    println!("Verification of multiple openings taken {:?}", t.elapsed());
    if !valid {
        println!("Multiopening is invalid");
        return Ok(false);
    }
    println!("Verification without overhead taken {:?}", t0.elapsed());
    Ok(valid)
}
#[cfg(test)]
mod test {
use super::*;
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::{SynthesisError};
use std::marker::PhantomData;
use crate::plonk::cs::gates::*;
use crate::plonk::cs::*;
// Minimal satisfiable test circuit. Allocates witnesses a = 10, b = 20,
// c = 200 and enforces three constraints that all hold for these values:
//   2*a - b = 0, 10*b - c = 0, and a * b = c.
struct TestCircuit<E:Engine>{
_marker: PhantomData<E>
}
impl<E: Engine> Circuit<E> for TestCircuit<E> {
fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
// Hard-coded witness values chosen to satisfy every constraint below.
let a = cs.alloc(|| {
Ok(E::Fr::from_str("10").unwrap())
})?;
let b = cs.alloc(|| {
Ok(E::Fr::from_str("20").unwrap())
})?;
let c = cs.alloc(|| {
Ok(E::Fr::from_str("200").unwrap())
})?;
// Build the constants 2 and -1 from field one; `double`/`negate` mutate in place.
let one = E::Fr::one();
let mut two = one;
two.double();
let mut negative_one = one;
negative_one.negate();
// 2*a - b = 0
cs.enforce_zero_2((a, b), (two, negative_one))?;
let ten = E::Fr::from_str("10").unwrap();
// 10*b - c = 0
cs.enforce_zero_2((b, c), (ten, negative_one))?;
// a * b = c
cs.enforce_mul_3((a, b, c))?;
Ok(())
}
}
// Deliberately unsatisfiable variant of `TestCircuit`: a = 11 breaks the
// constraints (2*11 != 20 and 11*20 != 200), so any honest verifier must
// reject a proof produced from this witness.
struct InvalidTestCircuit<E:Engine>{
_marker: PhantomData<E>
}
impl<E: Engine> Circuit<E> for InvalidTestCircuit<E> {
fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
// The only difference from `TestCircuit`: a = 11 instead of 10.
let a = cs.alloc(|| {
Ok(E::Fr::from_str("11").unwrap())
})?;
let b = cs.alloc(|| {
Ok(E::Fr::from_str("20").unwrap())
})?;
let c = cs.alloc(|| {
Ok(E::Fr::from_str("200").unwrap())
})?;
let one = E::Fr::one();
let mut two = one;
two.double();
let mut negative_one = one;
negative_one.negate();
// 2*a - b = 0 (violated: 22 != 20)
cs.enforce_zero_2((a, b), (two, negative_one))?;
let ten = E::Fr::from_str("10").unwrap();
// 10*b - c = 0 (holds)
cs.enforce_zero_2((b, c), (ten, negative_one))?;
// a * b = c (violated: 220 != 200)
cs.enforce_mul_3((a, b, c))?;
Ok(())
}
}
#[test]
fn test_small_circuit_transparent_verification() {
    use crate::pairing::bn256::{Bn256, Fr};
    use crate::plonk::utils::*;
    use crate::plonk::commitments::transparent::fri::*;
    use crate::plonk::commitments::transparent::iop::*;
    use crate::plonk::commitments::transcript::*;
    use crate::plonk::commitments::transparent::fri::naive_fri::naive_fri::*;
    use crate::plonk::commitments::transparent::iop::blake2s_trivial_iop::*;
    use crate::plonk::commitments::*;
    use crate::plonk::commitments::transparent::*;

    type Iop = TrivialBlake2sIOP<Fr>;
    type Fri = NaiveFriIop<Fr, Iop>;
    type Committer = StatelessTransparentCommitter<Fr, Fri, Blake2sTranscript<Fr>>;

    // End-to-end happy path: a valid witness for `TestCircuit` must yield a
    // proof that verifies. The FRI parameters (2 queries, degree-1 output)
    // are tiny to keep the test fast; they are not sound for production use.
    let meta = TransparentCommitterParameters {
        lde_factor: 16,
        num_queries: 2,
        output_coeffs_at_degree_plus_one: 1,
        fri_params: ()
    };
    let meta_large = TransparentCommitterParameters {
        lde_factor: 16,
        num_queries: 2,
        output_coeffs_at_degree_plus_one: 1,
        fri_params: ()
    };
    let circuit = TestCircuit::<Bn256> {
        _marker: PhantomData
    };
    // Pass a clone so `meta` stays usable below, instead of rebuilding an
    // identical struct literal after the move (consistent with
    // `test_small_circuit_invalid_witness_transparent_verification`).
    let (setup, aux) = setup::<Bn256, Committer, _>(&circuit, meta.clone()).unwrap();
    println!("Proving");
    let proof = prove_nonhomomorphic::<Bn256, Committer, Blake2sTranscript::<Fr>, _>(&circuit, &setup, &aux, meta.clone(), meta_large.clone()).unwrap();
    println!("Verifying");
    let valid = verify_nonhomomorphic::<Bn256, Committer, Blake2sTranscript::<Fr>>(&setup, &proof, meta, meta_large).unwrap();
    assert!(valid);
}
#[test]
// Negative path: the unsatisfiable `InvalidTestCircuit` must still let the
// prover run to completion, but verification has to return `false`.
fn test_small_circuit_invalid_witness_transparent_verification() {
use crate::pairing::bn256::{Bn256, Fr};
use crate::plonk::utils::*;
use crate::plonk::commitments::transparent::fri::*;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::commitments::transcript::*;
use crate::plonk::commitments::transparent::fri::naive_fri::naive_fri::*;
use crate::plonk::commitments::transparent::iop::blake2s_trivial_iop::*;
use crate::plonk::commitments::*;
use crate::plonk::commitments::transparent::*;
type Iop = TrivialBlake2sIOP<Fr>;
type Fri = NaiveFriIop<Fr, Iop>;
type Committer = StatelessTransparentCommitter<Fr, Fri, Blake2sTranscript<Fr>>;
// Same deliberately weak FRI parameters as the positive-path test above.
let meta = TransparentCommitterParameters {
lde_factor: 16,
num_queries: 2,
output_coeffs_at_degree_plus_one: 1,
fri_params: ()
};
let meta_large = TransparentCommitterParameters {
lde_factor: 16,
num_queries: 2,
output_coeffs_at_degree_plus_one: 1,
fri_params: ()
};
let circuit = InvalidTestCircuit::<Bn256> {
_marker: PhantomData
};
let (setup, aux) = setup::<Bn256, Committer, _>(&circuit, meta.clone()).unwrap();
println!("Proving");
let proof = prove_nonhomomorphic::<Bn256, Committer, Blake2sTranscript::<Fr>, _>(&circuit, &setup, &aux, meta.clone(), meta_large.clone()).unwrap();
println!("Verifying");
let valid = verify_nonhomomorphic::<Bn256, Committer, Blake2sTranscript::<Fr>>(&setup, &proof, meta, meta_large).unwrap();
// The proof over a broken witness must be rejected.
assert!(!valid);
}
// Benchmark circuit computing a Fibonacci-like sequence: two inputs fixed to
// one, then `num_steps` additive constraints of the form a + b - c = 0.
// Circuit size therefore scales linearly with `num_steps`.
#[derive(Clone)]
struct BenchmarkCircuit<E:Engine>{
// number of addition steps (and hence gates) to synthesize
num_steps: usize,
_marker: PhantomData<E>
}
impl<E: Engine> Circuit<E> for BenchmarkCircuit<E> {
fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
// yeah, fibonacci...
// Constants 1, -1 and 2 built from field one; `double`/`negate` mutate in place.
let one = E::Fr::one();
let mut negative_one = one;
negative_one.negate();
let mut two = one;
two.double();
// Seed values: a = b = 1, pinned by constant constraints below.
let mut a = cs.alloc(|| {
Ok(E::Fr::one())
})?;
let mut b = cs.alloc(|| {
Ok(E::Fr::one())
})?;
cs.enforce_constant(a, E::Fr::one())?;
cs.enforce_constant(b, E::Fr::one())?;
let mut c = cs.alloc(|| {
Ok(two)
})?;
// a + b - c = 0
cs.enforce_zero_3((a, b, c), (one, one, negative_one))?;
// Track the witness values alongside the variables so each new `c` can be
// allocated with the correct value.
let mut a_value = one;
let mut b_value = one;
let mut c_value = two;
for _ in 0..self.num_steps {
// Shift the window: (a, b) <- (b, c), then c <- a + b.
a = b;
b = c;
a_value = b_value;
b_value = c_value;
c_value.add_assign(&a_value);
c = cs.alloc(|| {
Ok(c_value)
})?;
cs.enforce_zero_3((a, b, c), (one, one, negative_one))?;
}
Ok(())
}
}
#[test]
// Benchmark: full setup/prove/verify cycle over a ~1M-gate Fibonacci circuit
// on Bn256, using the transparent (Blake2s/FRI) committer. Timings are
// printed; the assertion only checks the proof verifies.
fn test_bench_fibonacci_circuit() {
use crate::pairing::bn256::{Bn256, Fr};
use crate::plonk::utils::*;
use crate::plonk::commitments::transparent::fri::*;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::commitments::transcript::*;
use crate::plonk::commitments::transparent::fri::naive_fri::naive_fri::*;
use crate::plonk::commitments::transparent::iop::blake2s_trivial_iop::*;
use crate::plonk::commitments::*;
use crate::plonk::commitments::transparent::*;
use crate::plonk::tester::*;
use std::time::Instant;
type Iop = TrivialBlake2sIOP<Fr>;
type Fri = NaiveFriIop<Fr, Iop>;
type Committer = StatelessTransparentCommitter<Fr, Fri, Blake2sTranscript<Fr>>;
let meta = TransparentCommitterParameters {
lde_factor: 16,
num_queries: 10,
output_coeffs_at_degree_plus_one: 16,
fri_params: ()
};
let meta_large = TransparentCommitterParameters {
lde_factor: 16,
num_queries: 10,
output_coeffs_at_degree_plus_one: 16,
fri_params: ()
};
let circuit = BenchmarkCircuit::<Bn256> {
num_steps: 1_000_000,
_marker: PhantomData
};
// Sanity-check satisfiability with the testing assembly before spending
// time on the real setup and proof.
{
let mut tester = TestingAssembly::<Bn256>::new();
circuit.synthesize(&mut tester).expect("must synthesize");
let satisfied = tester.is_satisfied();
assert!(satisfied);
println!("Circuit is satisfied");
}
println!("Start setup");
let start = Instant::now();
let (setup, aux) = setup::<Bn256, Committer, _>(&circuit, meta).unwrap();
println!("Setup taken {:?}", start.elapsed());
println!("Using circuit with N = {}", setup.n);
// `meta` was moved into `setup` above, so an identical parameter set is
// rebuilt here for proving and verification.
let meta = TransparentCommitterParameters {
lde_factor: 16,
num_queries: 10,
output_coeffs_at_degree_plus_one: 16,
fri_params: ()
};
println!("Start proving");
let start = Instant::now();
let proof = prove_nonhomomorphic::<Bn256, Committer, Blake2sTranscript::<Fr>, _>(&circuit, &setup, &aux, meta.clone(), meta_large.clone()).unwrap();
println!("Proof taken {:?}", start.elapsed());
println!("Start verifying");
let start = Instant::now();
let valid = verify_nonhomomorphic::<Bn256, Committer, Blake2sTranscript::<Fr>>(&setup, &proof, meta, meta_large).unwrap();
println!("Verification with unnecessary precomputation taken {:?}", start.elapsed());
assert!(valid);
}
// #[test]
// fn test_bench_keccak_for_fibonacci_circuit() {
// use crate::pairing::bn256::{Bn256, Fr};
// use crate::plonk::utils::*;
// use crate::plonk::commitments::transparent::fri::*;
// use crate::plonk::commitments::transparent::iop::*;
// use crate::plonk::commitments::transcript::*;
// use crate::plonk::commitments::transparent::fri::naive_fri::naive_fri::*;
// use crate::plonk::commitments::transparent::iop::keccak_trivial_iop::*;
// use crate::plonk::commitments::*;
// use crate::plonk::commitments::transparent::*;
// use std::time::Instant;
// type Iop = TrivialKeccakIOP<Fr>;
// type Fri = NaiveFriIop<Fr, Iop>;
// type Committer = StatelessTransparentCommitter<Fr, Fri, Blake2sTranscript<Fr>>;
// let meta = TransparentCommitterParameters {
// lde_factor: 16,
// num_queries: 10,
// output_coeffs_at_degree_plus_one: 16,
// };
// let meta_large = TransparentCommitterParameters {
// lde_factor: 16,
// num_queries: 10,
// output_coeffs_at_degree_plus_one: 16,
// };
// let circuit = BenchmarkCircuit::<Bn256> {
// num_steps: 1_000_000,
// _marker: PhantomData
// };
// println!("Start setup");
// let start = Instant::now();
// let setup = setup::<Bn256, Committer, _>(&circuit, meta).unwrap();
// println!("Setup taken {:?}", start.elapsed());
// println!("Using circuit with N = {}", setup.n);
// let meta = TransparentCommitterParameters {
// lde_factor: 16,
// num_queries: 10,
// output_coeffs_at_degree_plus_one: 16,
// };
// println!("Start proving");
// let start = Instant::now();
// let proof = prove_nonhomomorphic::<Bn256, Committer, Blake2sTranscript::<Fr>, _>(&circuit, meta, meta_large).unwrap();
// println!("Proof taken {:?}", start.elapsed());
// let meta = TransparentCommitterParameters {
// lde_factor: 16,
// num_queries: 10,
// output_coeffs_at_degree_plus_one: 16,
// };
// let meta_large = TransparentCommitterParameters {
// lde_factor: 16,
// num_queries: 10,
// output_coeffs_at_degree_plus_one: 16,
// };
// println!("Start verifying");
// let start = Instant::now();
// let valid = verify_nonhomomorphic::<Bn256, Committer, Blake2sTranscript::<Fr>>(&setup, &proof, meta, meta_large).unwrap();
// println!("Verification with unnecessary precomputation taken {:?}", start.elapsed());
// assert!(!valid);
// }
#[test]
// Estimates the cost of a hypothetical homomorphic (multiexp-based) PLONK
// prover: times one 2^20-point multiexp, then extrapolates by the number of
// multiexps such a prover would perform. Prints the estimate; no assertions.
fn test_bench_homomorphic_plonk() {
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
use crate::pairing::bn256::Bn256;
use num_cpus;
use crate::pairing::ff::ScalarEngine;
use crate::pairing::CurveProjective;
use crate::multiexp::*;
use crate::worker::*;
use crate::source::*;
use std::sync::Arc;
use futures::{Future};
const SAMPLES: usize = 1 << 20;
// Fixed seed keeps the benchmark input deterministic across runs.
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v = (0..SAMPLES).map(|_| <Bn256 as ScalarEngine>::Fr::rand(rng).into_repr()).collect::<Vec<_>>();
let g = (0..SAMPLES).map(|_| <Bn256 as Engine>::G1::rand(rng).into_affine()).collect::<Vec<_>>();
println!("Done generating test points and scalars");
let pool = Worker::new();
let start = std::time::Instant::now();
// One full-density multiexp over all SAMPLES points; this is the unit cost.
let _sparse = multiexp(
&pool,
(Arc::new(g), 0),
FullDensity,
Arc::new(v)
).wait().unwrap();
let per_one_poly = start.elapsed().as_micros();
// a, b, c, z_1, z_2, t, opening at z (of length t), opening at z*omega(of length a)
let total_expected_plonk = per_one_poly * (5 + 1 + 3 + 3 + 1);
println!("{} ms for expected plonk with ~ {} gates", total_expected_plonk/1000u128, SAMPLES);
}
#[test]
// Same setup/prove/verify cycle as `test_bench_fibonacci_circuit`, but over
// the `Transparent252` engine (FFT-friendly Proth prime) and a tiny circuit
// of 20 steps. Asserts the proof verifies.
fn test_bench_transparent_engine() {
use crate::plonk::transparent_engine::proth_engine::*;
use crate::plonk::utils::*;
use crate::plonk::commitments::transparent::fri::*;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::commitments::transcript::*;
use crate::plonk::commitments::transparent::fri::naive_fri::naive_fri::*;
use crate::plonk::commitments::transparent::iop::blake2s_trivial_iop::*;
use crate::plonk::commitments::*;
use crate::plonk::commitments::transparent::*;
use crate::plonk::tester::*;
use std::time::Instant;
type Iop = TrivialBlake2sIOP<Fr>;
type Fri = NaiveFriIop<Fr, Iop>;
type Committer = StatelessTransparentCommitter<Fr, Fri, Blake2sTranscript<Fr>>;
// Print -1 in this field as a quick visual sanity check of the engine.
let mut negative_one = Fr::one();
negative_one.negate();
println!("-1 = {}", negative_one);
let meta = TransparentCommitterParameters {
lde_factor: 16,
num_queries: 10,
output_coeffs_at_degree_plus_one: 16,
fri_params: ()
};
let meta_large = TransparentCommitterParameters {
lde_factor: 16,
num_queries: 10,
output_coeffs_at_degree_plus_one: 16,
fri_params: ()
};
let circuit = BenchmarkCircuit::<Transparent252> {
num_steps: 20,
_marker: PhantomData
};
// Satisfiability sanity check before the real setup.
{
let mut tester = TestingAssembly::<Transparent252>::new();
circuit.synthesize(&mut tester).expect("must synthesize");
let satisfied = tester.is_satisfied();
assert!(satisfied);
println!("Circuit is satisfied");
}
println!("Start setup");
let start = Instant::now();
let (setup, aux) = setup::<Transparent252, Committer, _>(&circuit, meta).unwrap();
println!("Setup taken {:?}", start.elapsed());
println!("Using circuit with N = {}", setup.n);
// Rebuild `meta` because the previous value was moved into `setup`.
let meta = TransparentCommitterParameters {
lde_factor: 16,
num_queries: 10,
output_coeffs_at_degree_plus_one: 16,
fri_params: ()
};
println!("Start proving");
let start = Instant::now();
let proof = prove_nonhomomorphic::<Transparent252, Committer, Blake2sTranscript::<Fr>, _>(&circuit, &setup, &aux, meta.clone(), meta_large.clone()).unwrap();
println!("Proof taken {:?}", start.elapsed());
println!("Start verifying");
let start = Instant::now();
let valid = verify_nonhomomorphic::<Transparent252, Committer, Blake2sTranscript::<Fr>>(&setup, &proof, meta, meta_large).unwrap();
println!("Verification with unnecessary precomputation taken {:?}", start.elapsed());
assert!(valid);
}
#[test]
// Exercises the chunked prover/verifier pair (`prove_nonhomomorphic_chunked`
// / `verify_nonhomomorphic_chunked`) on the Transparent252 engine with a
// 20-step circuit; also prints the estimated proof size.
fn test_bench_chunked_proof_on_transparent_engine() {
use crate::plonk::transparent_engine::proth_engine::*;
use crate::plonk::utils::*;
use crate::plonk::commitments::transparent::fri::*;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::commitments::transcript::*;
use crate::plonk::commitments::transparent::fri::naive_fri::naive_fri::*;
use crate::plonk::commitments::transparent::iop::blake2s_trivial_iop::*;
use crate::plonk::commitments::*;
use crate::plonk::commitments::transparent::*;
use crate::plonk::tester::*;
use std::time::Instant;
type Iop = TrivialBlake2sIOP<Fr>;
type Fri = NaiveFriIop<Fr, Iop>;
type Committer = StatelessTransparentCommitter<Fr, Fri, Blake2sTranscript<Fr>>;
// FRI parameters are the unit type for this committer configuration.
let params = ();
let mut negative_one = Fr::one();
negative_one.negate();
println!("-1 = {}", negative_one);
let meta = TransparentCommitterParameters {
lde_factor: 16,
num_queries: 10,
output_coeffs_at_degree_plus_one: 2,
fri_params: params
};
let circuit = BenchmarkCircuit::<Transparent252> {
num_steps: 20,
_marker: PhantomData
};
// Satisfiability sanity check before the real setup.
{
let mut tester = TestingAssembly::<Transparent252>::new();
circuit.synthesize(&mut tester).expect("must synthesize");
let satisfied = tester.is_satisfied();
assert!(satisfied);
println!("Circuit is satisfied");
}
println!("Start setup");
let start = Instant::now();
let (setup, aux) = setup::<Transparent252, Committer, _>(&circuit, meta.clone()).unwrap();
println!("Setup taken {:?}", start.elapsed());
println!("Using circuit with N = {}", setup.n);
println!("Start proving");
let start = Instant::now();
// Note: the chunked prover takes only the aux data, not the full setup.
let proof = prove_nonhomomorphic_chunked::<Transparent252, Committer, Blake2sTranscript::<Fr>, _>(&circuit, &aux, meta.clone()).unwrap();
println!("Proof taken {:?}", start.elapsed());
let proof_size = proof.estimate_proof_size();
dbg!(&proof_size);
println!("Start verifying");
let start = Instant::now();
let valid = verify_nonhomomorphic_chunked::<Transparent252, Committer, Blake2sTranscript::<Fr>>(&setup, &proof, meta).unwrap();
println!("Verification with unnecessary precomputation taken {:?}", start.elapsed());
assert!(valid);
}
#[test]
// Sweeps circuit sizes from ~2^10 to ~2^20 gates, running the chunked
// prover/verifier at each size and printing timings and proof sizes.
fn test_bench_chunked_proof_on_transparent_engine_over_sizes() {
use crate::plonk::transparent_engine::proth_engine::*;
use crate::plonk::utils::*;
use crate::plonk::commitments::transparent::fri::*;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::commitments::transcript::*;
use crate::plonk::commitments::transparent::fri::naive_fri::naive_fri::*;
use crate::plonk::commitments::transparent::iop::blake2s_trivial_iop::*;
use crate::plonk::commitments::*;
use crate::plonk::commitments::transparent::*;
use crate::plonk::tester::*;
use std::time::Instant;
type Iop = TrivialBlake2sIOP<Fr>;
type Fri = NaiveFriIop<Fr, Iop>;
type Committer = StatelessTransparentCommitter<Fr, Fri, Blake2sTranscript<Fr>>;
let params = ();
let mut negative_one = Fr::one();
negative_one.negate();
println!("-1 = {}", negative_one);
let num_queries = 20;
for log2 in 10..=20 {
// Slightly under a power of two, so padding to the next power is exercised.
let size = (1<<log2) - 10;
let meta = TransparentCommitterParameters {
lde_factor: 16,
num_queries: num_queries,
output_coeffs_at_degree_plus_one: 16,
fri_params: params
};
let circuit = BenchmarkCircuit::<Transparent252> {
num_steps: size,
_marker: PhantomData
};
// Satisfiability sanity check for this size before timing anything.
{
let mut tester = TestingAssembly::<Transparent252>::new();
circuit.synthesize(&mut tester).expect("must synthesize");
let satisfied = tester.is_satisfied();
assert!(satisfied);
println!("Circuit is satisfied");
}
println!("Start setup");
let start = Instant::now();
let (setup, aux) = setup::<Transparent252, Committer, _>(&circuit, meta.clone()).unwrap();
println!("Setup taken {:?}", start.elapsed());
// Report sizes as log2 of the padded domain.
let size_log_2 = setup.n.next_power_of_two().trailing_zeros();
println!("Using circuit with N = {}", setup.n);
println!("Start proving");
let start = Instant::now();
let proof = prove_nonhomomorphic_chunked::<Transparent252, Committer, Blake2sTranscript::<Fr>, _>(&circuit, &aux, meta.clone()).unwrap();
println!("Proof taken {:?} for 2^{}", start.elapsed(), size_log_2);
let proof_size = proof.estimate_proof_size();
println!("Proof size is {} for 2^{} for {} queries", proof_size, size_log_2, num_queries);
println!("Start verifying");
let start = Instant::now();
let valid = verify_nonhomomorphic_chunked::<Transparent252, Committer, Blake2sTranscript::<Fr>>(&setup, &proof, meta).unwrap();
println!("Verification with unnecessary precomputation taken {:?} for 2^{}", start.elapsed(), size_log_2);
assert!(valid);
}
}
#[test]
// Checks `Polynomial::evaluate_at` against a straightforward serial Horner-
// style evaluation at a random point, for several polynomial sizes.
fn test_poly_eval_correctness() {
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
use crate::pairing::bn256::Fr;
use num_cpus;
use crate::pairing::ff::ScalarEngine;
use crate::pairing::CurveProjective;
use crate::multiexp::*;
use crate::worker::*;
use crate::source::*;
use std::sync::Arc;
use futures::{Future};
// Fixed seed keeps inputs deterministic across runs.
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let poly_sizes = vec![1, 10, 100, 1000, 10_000, 1_000_000];
let x: Fr = rng.gen();
let worker = Worker::new();
for poly_size in poly_sizes.into_iter() {
let coeffs = (0..poly_size).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
// Reference evaluation: accumulate sum(coeffs[i] * x^i) serially.
let mut point = Fr::one();
let mut result = Fr::zero();
for c in coeffs.iter() {
let mut tmp = point;
tmp.mul_assign(&c);
result.add_assign(&tmp);
point.mul_assign(&x);
}
let poly = Polynomial::<Fr, _>::from_coeffs(coeffs).unwrap();
// Parallel evaluation under test must match the serial reference.
let eval_result = poly.evaluate_at(&worker, x);
assert!(eval_result == result, "failed for size {}", poly_size);
}
}
#[test]
// Checks that the parallel grand-product computation agrees with the serial
// reference implementation for several polynomial sizes.
fn test_poly_grand_product_correctness() {
    use rand::{XorShiftRng, SeedableRng, Rand, Rng};
    use crate::pairing::bn256::Fr;
    use num_cpus;
    use crate::pairing::ff::ScalarEngine;
    use crate::pairing::CurveProjective;
    use crate::multiexp::*;
    use crate::worker::*;
    use crate::source::*;
    use std::sync::Arc;
    use futures::{Future};

    // Fixed seed keeps inputs deterministic across runs.
    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
    let poly_sizes = vec![1, 10, 100, 1000, 10_000, 1_000_000];
    let worker = Worker::new();
    for poly_size in poly_sizes.into_iter() {
        // Zero coefficients are filtered out: grand products require
        // invertible (non-zero) elements.
        let coeffs = (0..poly_size).map(|_| Fr::rand(rng)).filter(|el| !el.is_zero()).collect::<Vec<_>>();
        let poly = Polynomial::<Fr, _>::from_values_unpadded(coeffs).unwrap();
        let parallel_result = poly.calculate_grand_product(&worker).unwrap();
        let serial_result = poly.calculate_grand_product_serial().unwrap();
        // Compare lengths explicitly: the element-wise walk below zips the two
        // results, so on its own it would silently pass when one result is a
        // strict prefix of the other.
        assert_eq!(
            parallel_result.as_ref().len(),
            serial_result.as_ref().len(),
            "length mismatch for size {}", poly_size
        );
        if parallel_result != serial_result {
            // Walk element by element to report the first diverging index.
            for (i, (c0, c1)) in parallel_result.as_ref().iter()
                .zip(serial_result.as_ref().iter())
                .enumerate()
            {
                assert!(c0 == c1, "failed at value number {} for size {}", i, poly_size);
            }
        }
    }
}
#[test]
// Benchmarks low-degree extension (factor 16) against a plain FFT of the
// same output size, and — when the size allows — a radix-4 FFT, checking
// that radix-4 produces the same coefficients as the radix-2 path.
fn test_bench_lde() {
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
use crate::pairing::bn256::Fr;
use crate::pairing::ff::ScalarEngine;
use crate::pairing::CurveProjective;
use std::time::Instant;
use crate::worker::*;
use crate::plonk::commitments::transparent::utils::*;
// Fixed seed keeps inputs deterministic across runs.
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let poly_sizes = vec![1, 10, 100, 1000, 10_000, 1_000_000, 2_000_000];
let worker = Worker::new();
for poly_size in poly_sizes.into_iter() {
let coeffs = (0..poly_size).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let poly = Polynomial::<Fr, _>::from_coeffs(coeffs).unwrap();
let start = Instant::now();
let _eval_result = poly.lde(&worker, 16);
println!("LDE with factor 16 for size {} taken {:?}", poly_size, start.elapsed());
// Compare against an FFT over a polynomial of the already-extended size.
let coeffs = (0..(16*poly_size)).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let poly = Polynomial::<Fr, _>::from_coeffs(coeffs).unwrap();
let start = Instant::now();
let eval_result = poly.clone().fft(&worker);
println!("FFT of the same size taken {:?}", start.elapsed());
// Radix-4 requires an even log2 size; also cross-check its output.
if log2_floor(poly.size()) % 2 == 0 {
let log_n = log2_floor(poly.size());
let omega = poly.omega;
let mut coeffs = poly.into_coeffs();
let start = Instant::now();
crate::plonk::fft::radix_4::best_fft(&mut coeffs, &worker, &omega, log_n as u32);
println!("Radix-4 FFT of the same size taken {:?}", start.elapsed());
let to_compare = eval_result.into_coeffs();
assert!(to_compare == coeffs);
}
}
}
}<file_sep>/src/plonk/commitments/transparent/fri/naive_fri/mod.rs
// Naive (reference) FRI implementation, split into the core protocol, the
// query-producing side, and the verifier.
pub mod naive_fri;
pub mod query_producer;
pub mod verifier;
// Logging macros and a wall-clock `Stopwatch`, selected at compile time:
// - feature "nolog": all logging compiles to nothing, Stopwatch is a stub;
// - feature "wasm":  logging goes to the browser console, Stopwatch uses
//   `Performance.now()`;
// - otherwise:       logging uses stdout/stderr, Stopwatch uses
//   `std::time::Instant`.
// The *_verbose variants additionally check the crate-level verbose flag.
#[allow(unused_macros)]
cfg_if! {
if #[cfg(feature = "nolog")] {
// Logging disabled: every macro expands to the empty expression.
macro_rules! log {
($($t:tt)*) => ()
}
macro_rules! elog {
($($t:tt)*) => ()
}
macro_rules! log_verbose {
($($t:tt)*) => ()
}
macro_rules! elog_verbose {
($($t:tt)*) => ()
}
// Stub stopwatch with no timing capability (note: no `elapsed` here).
pub struct Stopwatch {}
impl Stopwatch {
pub fn new() -> Stopwatch {
Stopwatch {}
}
}
} else if #[cfg(feature = "wasm")] {
use web_sys;
use web_sys::Performance;
// In wasm both log! and elog! go to console.log (there is no stderr).
macro_rules! log {
($($t:tt)*) => (web_sys::console::log_1(&format_args!($($t)*).to_string().into()))
}
macro_rules! elog {
($($t:tt)*) => (web_sys::console::log_1(&format_args!($($t)*).to_string().into()))
}
macro_rules! log_verbose {
($($t:tt)*) => (if $crate::verbose_flag() { web_sys::console::log_1(&format_args!($($t)*).to_string().into()) })
}
macro_rules! elog_verbose {
($($t:tt)*) => (if $crate::verbose_flag() { web_sys::console::log_1(&format_args!($($t)*).to_string().into()) })
}
// Stopwatch backed by the browser's high-resolution performance clock.
pub struct Stopwatch {
start: f64,
perf: Performance
}
impl Stopwatch {
pub fn new() -> Stopwatch {
let perf = web_sys::window().unwrap().performance().unwrap();
Stopwatch { start: perf.now(), perf }
}
// Elapsed time in seconds (Performance.now() reports milliseconds).
pub fn elapsed(&self) -> f64 {
(self.perf.now() - self.start) / 1000.0
}
}
} else {
// Native target: plain println!/eprintln!.
macro_rules! log {
($($t:tt)*) => (println!($($t)*))
}
macro_rules! elog {
($($t:tt)*) => (eprintln!($($t)*))
}
macro_rules! log_verbose {
($($t:tt)*) => (if $crate::verbose_flag() { println!($($t)*) })
}
macro_rules! elog_verbose {
($($t:tt)*) => (if $crate::verbose_flag() { eprintln!($($t)*) })
}
// Stopwatch backed by std::time::Instant.
pub struct Stopwatch {
start: std::time::Instant
}
impl Stopwatch {
pub fn new() -> Stopwatch {
Stopwatch { start: std::time::Instant::now() }
}
// Elapsed time in seconds, with millisecond resolution.
pub fn elapsed(&self) -> f64 {
self.start.elapsed().as_millis() as f64 / 1000.0
}
}
}
}
/// One must prove that for commitments to two polynomials of degree n products of the coefficients
/// in those two polynomials are equal (part of the permutation argument) with additional assumption that
/// those coefficients are never equal to zero
use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use crate::pairing::{Engine, CurveProjective, CurveAffine};
use std::marker::PhantomData;
use crate::sonic::srs::SRS;
use crate::sonic::util::*;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use super::wellformed_argument::{WellformednessSignature, WellformednessArgument};
/// Prover state for the grand-product argument. For each pair of input
/// polynomials it keeps the concatenated coefficients, the running products,
/// and the inverse of the full product (see `new` for the exact layout).
#[derive(Clone)]
pub struct GrandProductArgument<E: Engine> {
// per pair: [a_1..a_n, 0 (placeholder for the n+1 term), b_1..b_n]
a_polynomials: Vec<Vec<E::Fr>>,
// per pair: running products c_i, with c_{n+1} = 1 (length 2n+1)
c_polynomials: Vec<Vec<E::Fr>>,
// per pair: v = c_n^{-1}
v_elements: Vec<E::Fr>,
// aggregated t polynomial, populated by `commit_to_t_polynomial`
t_polynomial: Option<Vec<E::Fr>>,
// length n of each individual input polynomial
n: usize
}
/// Opening proof elements for the grand-product argument.
/// (Field semantics are produced by `make_argument`, which is defined below;
/// names suggest openings of t, e and f with claimed evaluations — confirm
/// against `make_argument` and the verifier.)
#[derive(Clone)]
pub struct GrandProductProof<E: Engine> {
pub t_opening: E::G1Affine,
pub e_zinv: E::Fr,
pub e_opening: E::G1Affine,
pub f_y: E::Fr,
pub f_opening: E::G1Affine,
}
/// Everything the verifier needs for the grand-product argument: the
/// individual c-polynomial commitments (with their v scalars), the aggregated
/// t commitment, the per-pair openings, the final proof, and the
/// wellformedness sub-argument over all input polynomials.
#[derive(Clone)]
pub struct GrandProductSignature<E: Engine> {
pub c_commitments: Vec<(E::G1Affine, E::Fr)>,
pub t_commitment: E::G1Affine,
pub grand_product_openings: Vec<(E::Fr, E::G1Affine)>,
pub proof: GrandProductProof<E>,
pub wellformedness_signature: WellformednessSignature<E>,
}
impl<E: Engine> GrandProductArgument<E> {
/// Runs the full prover side of the grand-product argument for the given
/// pairs of polynomials and returns the complete `GrandProductSignature`.
///
/// Challenge derivation order from `transcript` is protocol-critical:
/// first one challenge per pair, then two wellformedness challenges per
/// pair — do not reorder.
pub fn create_signature(
transcript: &mut Transcript,
grand_products: Vec<(Vec<E::Fr>, Vec<E::Fr>)>,
y: E::Fr,
z: E::Fr,
srs: &SRS<E>,
) -> GrandProductSignature<E> {
// One aggregation challenge per grand-product pair.
let mut grand_product_challenges = vec![];
for _ in 0..grand_products.len() {
let c = transcript.get_challenge_scalar();
grand_product_challenges.push(c);
}
let mut all_polys = vec![];
// Two wellformedness challenges per pair (one per polynomial).
let mut wellformed_challenges = vec![];
for _ in 0..(grand_products.len()*2) {
let c = transcript.get_challenge_scalar();
wellformed_challenges.push(c);
}
// Flatten the pairs into a single list for the wellformedness argument.
for p in grand_products.iter() {
let (a, b) = p;
all_polys.push(a.clone());
all_polys.push(b.clone());
}
let wellformedness_signature = WellformednessArgument::create_signature(
all_polys,
wellformed_challenges,
&srs
);
// Build the argument state, then commit and open in protocol order.
let mut grand_product_argument = GrandProductArgument::new(grand_products);
let c_commitments = grand_product_argument.commit_to_individual_c_polynomials(&srs);
let t_commitment = grand_product_argument.commit_to_t_polynomial(&grand_product_challenges, y, &srs);
let grand_product_openings = grand_product_argument.open_commitments_for_grand_product(y, z, &srs);
// Claimed evaluations a(yz) extracted from the openings feed the final argument.
let a_zy: Vec<E::Fr> = grand_product_openings.iter().map(|el| el.0.clone()).collect();
let proof = grand_product_argument.make_argument(&a_zy, &grand_product_challenges, y, z, &srs);
GrandProductSignature {
c_commitments,
t_commitment,
grand_product_openings,
// a_zy,
proof,
wellformedness_signature
}
}
/// Builds the argument state from pairs of equal-length polynomials whose
/// grand products are claimed equal. For each pair it constructs the
/// concatenated `a` polynomial, the running-product `c` polynomial, and
/// v = c_n^{-1}. Panics (via asserts) if the input is empty, lengths differ
/// within a pair, or a grand product is zero (inverse would not exist).
pub fn new(polynomials: Vec<(Vec<E::Fr>, Vec<E::Fr>)>) -> Self {
assert!(polynomials.len() > 0);
// All pairs must share the length of the first polynomial.
let n = polynomials[0].0.len();
let mut a_polynomials = vec![];
let mut c_polynomials = vec![];
let mut v_elements = vec![];
// a_{1..n} = first poly
// a_{n+1..2n+1} = b_{1..n} = second poly
// c_1 = a_1
// c_2 = a_2 * c_1 = a_2 * a_1
// c_3 = a_3 * c_2 = a_3 * a_2 * a_1
// ...
// c_n = a_n * c_{n-1} = \prod a_i
// a_{n+1} = c_{n}^-1
// c_{n+1} = 1
// c_{n+1} = a_{n+2} * c_{n+1} = a_{n+2}
// ...
// c_{2n+1} = \prod a_{n+1+i} = \prod b_{i}
// v = c_{n}^-1
// calculate c, serially for now
for p in polynomials.into_iter() {
let (p0, p1) = p;
assert!(p0.len() == p1.len());
assert!(p0.len() == n);
let mut c_poly: Vec<E::Fr> = Vec::with_capacity(2*n + 1);
let mut a_poly: Vec<E::Fr> = Vec::with_capacity(2*n + 1);
let mut c_coeff = E::Fr::one();
// add a
for a in p0.iter() {
c_coeff.mul_assign(a);
c_poly.push(c_coeff);
}
assert_eq!(c_poly.len(), n);
a_poly.extend(p0);
assert_eq!(a_poly.len(), n);
// v = a_{n+1} = c_{n}^-1
// let v = c_poly[n-1].inverse().unwrap();
let v = c_coeff.inverse().unwrap();
// ! IMPORTANT
// This line is indeed assigning a_{n+1} to zero instead of v
// for the practical purpose later we manually evaluate T polynomial
// and assign v to the term X^{n+1}
a_poly.push(E::Fr::zero());
// a_poly.push(v);
// add c_{n+1}
let mut c_coeff = E::Fr::one();
c_poly.push(c_coeff);
// add b
for b in p1.iter() {
c_coeff.mul_assign(b);
c_poly.push(c_coeff);
}
assert_eq!(c_poly.len(), 2*n + 1);
a_poly.extend(p1);
// Equal grand products: prod(a) at index n-1 equals prod(b) at index 2n.
assert_eq!(c_poly[n-1], c_poly[2*n]);
assert_eq!(c_poly[n], E::Fr::one());
a_polynomials.push(a_poly);
c_polynomials.push(c_poly);
v_elements.push(v);
}
GrandProductArgument {
a_polynomials: a_polynomials,
c_polynomials: c_polynomials,
v_elements: v_elements,
t_polynomial: None,
n: n
}
}
// // Make a commitment to a polynomial in a form A*B^{x+1} = [a_1...a_{n}, 0, b_1...b_{n}]
// pub fn commit_for_grand_product(a: &[E::Fr], b: &[E::Fr], srs: &SRS<E>) -> E::G1Affine {
// assert_eq!(a.len(), b.len());
// let n = a.len();
// multiexp(
// srs.g_positive_x_alpha[0..(2*n+1)].iter(),
// a.iter()
// .chain_ext(Some(E::Fr::zero()).iter())
// .chain_ext(b.iter())
// ).into_affine()
// }
/// Commits to two coefficient slices of equal length under the positive
/// `x * alpha` powers of the SRS, returning one commitment per slice.
/// Panics if the slices differ in length.
pub fn commit_for_individual_products(a: &[E::Fr], b: &[E::Fr], srs: &SRS<E>) -> (E::G1Affine, E::G1Affine) {
    let len = a.len();
    assert_eq!(len, b.len());
    // Both commitments use the same basis slice of the SRS.
    let basis = &srs.g_positive_x_alpha[0..len];
    let commitment_a = multiexp(basis.iter(), a.iter()).into_affine();
    let commitment_b = multiexp(basis.iter(), b.iter()).into_affine();
    (commitment_a, commitment_b)
}
/// For every stored `a` polynomial, evaluates it at the point yz and produces
/// a polynomial-commitment opening at that point. Returns one
/// (evaluation, opening) pair per polynomial.
pub fn open_commitments_for_grand_product(&self, y: E::Fr, z: E::Fr, srs: &SRS<E>) -> Vec<(E::Fr, E::G1Affine)> {
let n = self.n;
// Opening point is the product yz.
let mut yz = y;
yz.mul_assign(&z);
let mut results = vec![];
for a_poly in self.a_polynomials.iter() {
assert_eq!(a_poly[n], E::Fr::zero()); // there is no term for n+1 power
// Evaluate the whole concatenated polynomial at yz; the zero placeholder
// at index n contributes nothing, so no term-skipping is needed (the
// commented-out split-evaluation variant below is equivalent).
let val = evaluate_at_consequitive_powers(&a_poly[..], yz, yz);
// let a = & a_poly[0..n]; // powers [1, n]
// let b = & a_poly[(n+1)..]; // there is no n+1 term (numerated as `n`), skip it and start b
// assert_eq!(a.len(), n);
// assert_eq!(b.len(), n);
// let mut val = evaluate_at_consequitive_powers(a, yz, yz);
// {
// let tmp = yz.pow([(n+2) as u64]);
// let v = evaluate_at_consequitive_powers(b, tmp, yz);
// val.add_assign(&v);
// }
// The opening polynomial is a(X) - val, i.e. prepend -val as the
// constant term so the opened polynomial vanishes at yz.
let mut constant_term = val;
constant_term.negate();
let opening = polynomial_commitment_opening(
0,
2*n + 1,
Some(constant_term).iter()
.chain_ext(a_poly.iter()),
yz,
&srs
);
// let opening = polynomial_commitment_opening(
// 0,
// 2*n + 1,
// Some(constant_term).iter()
// .chain_ext(a.iter())
// .chain_ext(Some(E::Fr::zero()).iter())
// .chain_ext(b.iter()),
// yz,
// &srs);
results.push((val, opening));
}
results
}
// Make a commitment for the begining of the protocol, returns commitment and `v` scalar
pub fn commit_to_individual_c_polynomials(&self, srs: &SRS<E>) -> Vec<(E::G1Affine, E::Fr)> {
let mut results = vec![];
let two_n_plus_1 = self.c_polynomials[0].len();
for (p, v) in self.c_polynomials.iter().zip(self.v_elements.iter()) {
let n = self.n;
assert_eq!(p[n], E::Fr::one(), "C_(n+1) must be one");
let c = multiexp(
srs.g_positive_x_alpha[0..two_n_plus_1].iter(),
p.iter()
).into_affine();
results.push((c, *v));
}
results
}
// Argument is based on an approach of main SONIC construction, but with a custom S(X,Y) polynomial of a simple form
pub fn commit_to_t_polynomial(&mut self, challenges: & Vec<E::Fr>, y: E::Fr, srs: &SRS<E>) -> E::G1Affine {
assert_eq!(challenges.len(), self.a_polynomials.len());
let n = self.n;
let mut t_polynomial: Option<Vec<E::Fr>> = None;
for (((a, c), v), challenge) in self.a_polynomials.iter()
.zip(self.c_polynomials.iter())
.zip(self.v_elements.iter())
.zip(challenges.iter())
{
let mut a_xy = a.clone();
let c_xy = c.clone();
let v = *v;
assert_eq!(a_xy.len(), 2*n + 1);
assert_eq!(c_xy.len(), 2*n + 1);
// make a T polynomial
let r: Vec<E::Fr> = {
// p_a(X,Y)*Y
let mut tmp = y;
tmp.square();
mut_distribute_consequitive_powers(&mut a_xy[..], tmp, y);
// add extra terms
//v*(XY)^{n+1}*Y + X^{n+2} + X^{n+1}Y − X^{2n+2}*Y
// n+1 term v*(XY)^{n+1}*Y + X^{n+1}Y
let tmp = y.pow(&[(n+2) as u64]);
let mut x_n_plus_one_term = v;
x_n_plus_one_term.mul_assign(&tmp);
x_n_plus_one_term.add_assign(&y);
a_xy[n].add_assign(&x_n_plus_one_term);
// n+2 term
a_xy[n+1].add_assign(&E::Fr::one());
// 2n+2 term
let mut tmp = y;
tmp.negate();
a_xy.push(tmp);
assert_eq!(a_xy.len(), 2*n + 2);
let mut r = vec![E::Fr::zero(); 2*n + 3];
r.extend(a_xy);
r
};
let r_prime: Vec<E::Fr> = {
let mut c_prime: Vec<E::Fr> = c_xy.iter().rev().map(|el| *el).collect();
c_prime.push(E::Fr::one());
c_prime.push(E::Fr::zero());
assert_eq!(c_prime.len(), 2*n + 3);
c_prime
};
// multiply polynomials with powers [-2n-2, -1] and [1, 2n+2],
// expect result to be [-2n+1, 2n+1]
let mut t: Vec<E::Fr> = multiply_polynomials::<E>(r, r_prime);
assert_eq!(t.len(), 6*n + 7);
// drain first powers due to the padding and last element due to requirement of being zero
for (i, el) in t[0..(2*n+3)].iter().enumerate() {
assert_eq!(*el, E::Fr::zero(), "{}", format!("Element {} is non-zero", i));
}
t.drain(0..(2*n+3));
let last = t.pop();
assert_eq!(last.unwrap(), E::Fr::zero(), "last element should be zero");
assert_eq!(t.len(), 4*n + 3);
let mut val = {
let mut tmp = y;
tmp.square();
evaluate_at_consequitive_powers(&c_xy, tmp, y)
};
val.add_assign(&E::Fr::one());
// subtract a constant term
assert_eq!(t[2*n+1], val);
t[2*n+1].sub_assign(&val);
if t_polynomial.is_some() {
if let Some(t_poly) = t_polynomial.as_mut() {
mul_add_polynomials(&mut t_poly[..], &t, *challenge);
}
} else {
mul_polynomial_by_scalar(&mut t, *challenge);
t_polynomial = Some(t);
}
}
let t_polynomial = t_polynomial.unwrap();
let c = multiexp(srs.g_negative_x_alpha[0..(2*n+1)].iter().rev()
.chain_ext(srs.g_positive_x_alpha[0..(2*n+1)].iter()),
t_polynomial[0..(2*n+1)].iter()
.chain_ext(t_polynomial[(2*n+2)..].iter())).into_affine();
self.t_polynomial = Some(t_polynomial);
c
}
// Argument is based on an approach of the main SONIC construction, but with a custom S(X,Y) polynomial of a simple form
// Produce the final grand-product proof: openings of the aggregated
// e (at z^-1), f (at y) and t (at z) polynomials, together with the claimed
// evaluations e(z^-1) and f(y). `a_zy` are the a-polynomial evaluations
// obtained earlier, `challenges` the same randomness used for the t commitment.
// Consumes `self` since the stored polynomials are moved into the aggregation.
pub fn make_argument(self, a_zy: & Vec<E::Fr>, challenges: & Vec<E::Fr>, y: E::Fr, z: E::Fr, srs: &SRS<E>) -> GrandProductProof<E> {
    assert_eq!(a_zy.len(), self.a_polynomials.len());
    assert_eq!(challenges.len(), self.a_polynomials.len());
    let n = self.n;
    let c_polynomials = self.c_polynomials;
    // random linear combinations of the c polynomials, scaled by r*c(z,y) and r*y
    let mut e_polynomial: Option<Vec<E::Fr>> = None;
    let mut f_polynomial: Option<Vec<E::Fr>> = None;
    let mut yz = y;
    yz.mul_assign(&z);
    let z_inv = z.inverse().unwrap();
    // accumulates the expected value t(z, y), checked below against the actual evaluation
    let mut t_subcomponent = E::Fr::zero();
    for (((a, c), challenge), v) in a_zy.iter()
        .zip(c_polynomials.into_iter())
        .zip(challenges.iter())
        .zip(self.v_elements.iter())
    {
        // cj = ((aj + vj(yz)n+1)y + zn+2 + zn+1y − z2n+2y)z−1
        let mut c_zy = yz.pow([(n + 1) as u64]);
        c_zy.mul_assign(v);
        c_zy.add_assign(a);
        c_zy.mul_assign(&y);
        let mut z_n_plus_1 = z.pow([(n + 1) as u64]);
        let mut z_n_plus_2 = z_n_plus_1;
        z_n_plus_2.mul_assign(&z);
        let mut z_2n_plus_2 = z_n_plus_1;
        z_2n_plus_2.square();
        z_2n_plus_2.mul_assign(&y);
        z_n_plus_1.mul_assign(&y);
        c_zy.add_assign(&z_n_plus_1);
        c_zy.add_assign(&z_n_plus_2);
        c_zy.sub_assign(&z_2n_plus_2);
        c_zy.mul_assign(&z_inv);
        let mut rc = c_zy;
        rc.mul_assign(challenge);
        let mut ry = y;
        ry.mul_assign(challenge);
        // contribution r*(c(z,y) - 1) to t(z,y)
        t_subcomponent.add_assign(&rc);
        t_subcomponent.sub_assign(&challenge);
        if e_polynomial.is_some() && f_polynomial.is_some() {
            if let Some(e_poly) = e_polynomial.as_mut() {
                if let Some(f_poly) = f_polynomial.as_mut() {
                    mul_add_polynomials(&mut e_poly[..], &c, rc);
                    mul_add_polynomials(&mut f_poly[..], &c, ry);
                }
            }
        } else {
            // first instance seeds the accumulators
            let mut e = c.clone();
            let mut f = c;
            mul_polynomial_by_scalar(&mut e, rc);
            mul_polynomial_by_scalar(&mut f, ry);
            e_polynomial = Some(e);
            f_polynomial = Some(f);
        }
    }
    let e_polynomial = e_polynomial.unwrap();
    let f_polynomial = f_polynomial.unwrap();
    // evaluate e at z^-1
    let mut e_val = evaluate_at_consequitive_powers(&e_polynomial, z_inv, z_inv);
    e_val.negate();
    // evaluate f at y
    let mut f_val = evaluate_at_consequitive_powers(&f_polynomial, y, y);
    f_val.negate();
    // openings are of (poly - value), hence the negated evaluations prepended as constant terms
    let e_opening = polynomial_commitment_opening(
        0,
        2*n + 1,
        Some(e_val).iter().chain_ext(e_polynomial.iter()),
        z_inv,
        srs);
    let f_opening = polynomial_commitment_opening(
        0,
        2*n + 1,
        Some(f_val).iter().chain_ext(f_polynomial.iter()),
        y,
        srs);
    // restore the positive evaluations for the proof and the t(z,y) accumulator
    e_val.negate();
    f_val.negate();
    t_subcomponent.add_assign(&e_val);
    t_subcomponent.sub_assign(&f_val);
    let mut t_poly = self.t_polynomial.unwrap();
    assert_eq!(t_poly.len(), 4*n + 3);
    assert!(t_poly[2*n + 1].is_zero());
    // largest negative power of t is -2n-1
    let t_zy = {
        let tmp = z_inv.pow([(2*n+1) as u64]);
        evaluate_at_consequitive_powers(&t_poly, tmp, z)
    };
    // sanity: the evaluation must match the reconstructed sub-components
    assert_eq!(t_zy, t_subcomponent);
    assert!(t_poly[2*n + 1].is_zero());
    // shift by -t(z,y) so the opening proves the claimed evaluation
    t_poly[2*n + 1].sub_assign(&t_zy);
    let t_opening = polynomial_commitment_opening(
        2*n + 1,
        2*n + 1,
        t_poly.iter(),
        z,
        srs);
    GrandProductProof {
        t_opening: t_opening,
        e_zinv: e_val,
        e_opening: e_opening,
        f_y: f_val,
        f_opening: f_opening,
    }
}
// Verify the batched openings of the A and B commitments at the point yz.
// All instances are combined with `randomness` into a single pairing check;
// returns true iff the aggregated pairing equation holds.
pub fn verify_ab_commitment(
    n: usize,
    randomness: & Vec<E::Fr>,
    a_commitments: &Vec<E::G1Affine>,
    b_commitments: &Vec<E::G1Affine>,
    openings: &Vec<(E::Fr, E::G1Affine)>,
    y: E::Fr,
    z: E::Fr,
    srs: &SRS<E>
) -> bool {
    assert_eq!(randomness.len(), a_commitments.len());
    assert_eq!(openings.len(), a_commitments.len());
    assert_eq!(b_commitments.len(), a_commitments.len());
    // e(Dj,hαx)e(D−yz,hα) = e(Aj,h)e(Bj,hxn+1)e(g−aj ,hα)
    let g = srs.g_positive_x[0];
    let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
    let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare();
    // H^(x^(n+1)) is n+1 indexed
    // negated so the pairing product can be compared against one
    let mut h_x_n_plus_one_precomp = srs.h_positive_x[n+1];
    h_x_n_plus_one_precomp.negate();
    let h_x_n_plus_one_precomp = h_x_n_plus_one_precomp.prepare();
    let mut h_prep = srs.h_positive_x[0];
    h_prep.negate();
    let h_prep = h_prep.prepare();
    // random linear combinations of the A and B commitments
    let a = multiexp(
        a_commitments.iter(),
        randomness.iter(),
    ).into_affine();
    let a = a.prepare();
    let b = multiexp(
        b_commitments.iter(),
        randomness.iter(),
    ).into_affine();
    let b = b.prepare();
    let mut yz_neg = y;
    yz_neg.mul_assign(&z);
    yz_neg.negate();
    // combine the claimed values and the opening proofs with the same randomness
    let mut ops = vec![];
    let mut value = E::Fr::zero();
    for (el, r) in openings.iter().zip(randomness.iter()) {
        let (v, o) = el;
        ops.push(o.clone());
        let mut val = *v;
        val.mul_assign(&r);
        value.add_assign(&val);
    }
    let value = g.mul(value.into_repr()).into_affine().prepare();
    let openings = multiexp(
        ops.iter(),
        randomness.iter(),
    ).into_affine();
    let openings_zy = openings.mul(yz_neg.into_repr()).into_affine().prepare();
    let openings = openings.prepare();
    // e(Dj,hαx)e(D−yz,hα) = e(Aj,h)e(Bj,hxn+1)e(g−aj ,hα)
    E::final_exponentiation(&E::miller_loop(&[
        (&openings, &h_alpha_x_precomp),
        (&openings_zy, &h_alpha_precomp),
        (&a, &h_prep),
        (&b, &h_x_n_plus_one_precomp),
        (&value, &h_alpha_precomp)
    ])).unwrap() == E::Fqk::one()
}
// Verify the full grand-product proof: re-derives each c(z,y) and the claimed
// t(z,y) from the supplied evaluations, then checks the openings of the e, f
// and t commitments in one batched pairing equation, combined with three
// verifier-chosen `randomness` scalars.
pub fn verify(
    n: usize,
    randomness: & Vec<E::Fr>,
    a_zy: & Vec<E::Fr>,
    challenges: &Vec<E::Fr>,
    t_commitment: E::G1Affine,
    commitments: &Vec<(E::G1Affine, E::Fr)>,
    proof: &GrandProductProof<E>,
    y: E::Fr,
    z: E::Fr,
    srs: &SRS<E>
) -> bool {
    // exactly one random scalar per opening (e, f, t)
    assert_eq!(randomness.len(), 3);
    assert_eq!(a_zy.len(), challenges.len());
    assert_eq!(commitments.len(), challenges.len());
    let g = srs.g_positive_x[0];
    let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
    let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare();
    let mut h_prep = srs.h_positive_x[0];
    h_prep.negate();
    let h_prep = h_prep.prepare();
    // first re-calculate cj and t(z,y)
    let mut yz = y;
    yz.mul_assign(&z);
    let z_inv = z.inverse().unwrap();
    let mut t_zy = E::Fr::zero();
    t_zy.add_assign(&proof.e_zinv);
    t_zy.sub_assign(&proof.f_y);
    let mut commitments_points = vec![];
    let mut rc_vec = vec![];
    let mut ry_vec = vec![];
    for ((r, commitment), a) in challenges.iter()
        .zip(commitments.iter())
        .zip(a_zy.iter()) {
        let (c, v) = commitment;
        commitments_points.push(c.clone());
        // cj = ((aj + vj(yz)n+1)y + zn+2 + zn+1y − z2n+2y)z−1
        let mut c_zy = yz.pow([(n + 1) as u64]);
        c_zy.mul_assign(v);
        c_zy.add_assign(a);
        c_zy.mul_assign(&y);
        let mut z_n_plus_1 = z.pow([(n + 1) as u64]);
        let mut z_n_plus_2 = z_n_plus_1;
        z_n_plus_2.mul_assign(&z);
        let mut z_2n_plus_2 = z_n_plus_1;
        z_2n_plus_2.square();
        z_2n_plus_2.mul_assign(&y);
        z_n_plus_1.mul_assign(&y);
        c_zy.add_assign(&z_n_plus_1);
        c_zy.add_assign(&z_n_plus_2);
        c_zy.sub_assign(&z_2n_plus_2);
        c_zy.mul_assign(&z_inv);
        let mut rc = c_zy;
        rc.mul_assign(&r);
        rc_vec.push(rc);
        let mut ry = y;
        ry.mul_assign(&r);
        ry_vec.push(ry);
        // t(z,y) accumulates r*(c(z,y) - 1) per instance
        let mut val = rc;
        val.sub_assign(r);
        t_zy.add_assign(&val);
    }
    // random combinations of the c commitments with the rc and ry weights
    let c_rc = multiexp(
        commitments_points.iter(),
        rc_vec.iter(),
    ).into_affine();
    let c_ry = multiexp(
        commitments_points.iter(),
        ry_vec.iter(),
    ).into_affine();
    // for each opening W at point p with value v build W^{-p} * g^{v}
    let mut minus_y = y;
    minus_y.negate();
    let mut f_y = proof.f_opening.mul(minus_y.into_repr());
    let g_f = g.mul(proof.f_y.into_repr());
    f_y.add_assign(&g_f);
    let mut minus_z = z;
    minus_z.negate();
    let mut t_z = proof.t_opening.mul(minus_z.into_repr());
    let g_tzy = g.mul(t_zy.into_repr());
    t_z.add_assign(&g_tzy);
    let mut minus_z_inv = z_inv;
    minus_z_inv.negate();
    let mut e_z_inv = proof.e_opening.mul(minus_z_inv.into_repr());
    let g_e = g.mul(proof.e_zinv.into_repr());
    e_z_inv.add_assign(&g_e);
    // batch the three opening checks with the verifier randomness
    let h_alpha_term = multiexp(
        vec![e_z_inv.into_affine(), f_y.into_affine(), t_z.into_affine()].iter(),
        randomness.iter(),
    ).into_affine();
    let h_alpha_x_term = multiexp(
        Some(proof.e_opening).iter()
        .chain_ext(Some(proof.f_opening).iter())
        .chain_ext(Some(proof.t_opening).iter()),
        randomness.iter(),
    ).into_affine();
    let h_term = multiexp(
        Some(c_rc).iter()
        .chain_ext(Some(c_ry).iter())
        .chain_ext(Some(t_commitment).iter()),
        randomness.iter(),
    ).into_affine();
    E::final_exponentiation(&E::miller_loop(&[
        (&h_alpha_x_term.prepare(), &h_alpha_x_precomp),
        (&h_alpha_term.prepare(), &h_alpha_precomp),
        (&h_term.prepare(), &h_prep),
    ])).unwrap() == E::Fqk::one()
}
}
// End-to-end test of the grand-product argument: commits to a vector and a
// random permutation of it (which therefore have equal products), runs the
// full prover flow, and checks both the a/b openings and the final proof.
#[test]
fn test_grand_product_argument() {
    use crate::pairing::bls12_381::{Fr, G1Affine, G1, Bls12};
    use rand::{XorShiftRng, SeedableRng, Rand, Rng};
    use crate::sonic::srs::SRS;
    let srs_x = Fr::from_str("23923").unwrap();
    let srs_alpha = Fr::from_str("23728792").unwrap();
    // let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
    let srs = SRS::<Bls12>::new(128, srs_x, srs_alpha);
    let n: usize = 1 << 5;
    // fixed seed for reproducibility
    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
    let coeffs = (1..=n).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
    let mut permutation = coeffs.clone();
    rng.shuffle(&mut permutation);
    // sanity: a permutation preserves the grand product
    let coeffs_product = coeffs.iter().fold(Fr::one(), |mut sum, x| {
        sum.mul_assign(&x);
        sum
    });
    let permutation_product = permutation.iter().fold(Fr::one(), |mut sum, x| {
        sum.mul_assign(&x);
        sum
    });
    assert_eq!(coeffs_product, permutation_product);
    assert!(!coeffs_product.is_zero());
    // the argument's individual commitments must match direct multiexp commitments
    let a_commitment = multiexp(srs.g_positive_x_alpha[0..n].iter(), coeffs.iter()).into_affine();
    let b_commitment = multiexp(srs.g_positive_x_alpha[0..n].iter(), permutation.iter()).into_affine();
    let (a, b) = GrandProductArgument::commit_for_individual_products(&coeffs[..], &permutation[..], &srs);
    assert_eq!(a_commitment, a);
    assert_eq!(b_commitment, b);
    let mut argument = GrandProductArgument::new(vec![(coeffs, permutation)]);
    let commitments_and_v_values = argument.commit_to_individual_c_polynomials(&srs);
    assert_eq!(commitments_and_v_values.len(), 1);
    let y : Fr = rng.gen();
    let challenges = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
    let t_commitment = argument.commit_to_t_polynomial(&challenges, y, &srs);
    let z : Fr = rng.gen();
    let grand_product_openings = argument.open_commitments_for_grand_product(y, z, &srs);
    let randomness = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
    let valid = GrandProductArgument::verify_ab_commitment(
        n,
        &randomness,
        &vec![a_commitment],
        &vec![b_commitment],
        &grand_product_openings,
        y,
        z,
        &srs
    );
    assert!(valid, "grand product commitments should be valid");
    let a_zy: Vec<Fr> = grand_product_openings.iter().map(|el| el.0.clone()).collect();
    let proof = argument.make_argument(&a_zy, &challenges, y, z, &srs);
    // final verification uses exactly 3 random scalars (one per opening)
    let randomness = (0..3).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
    let valid = GrandProductArgument::verify(
        n,
        &randomness,
        &a_zy,
        &challenges,
        t_commitment,
        &commitments_and_v_values,
        &proof,
        y,
        z,
        &srs);
    assert!(valid, "t commitment should be valid");
}
<file_sep>/src/plonk/tester/mod.rs
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::{SynthesisError};
use std::marker::PhantomData;
use crate::plonk::cs::gates::*;
use crate::plonk::cs::*;
use crate::worker::*;
use super::polynomials::*;
use super::domains::*;
use crate::plonk::commitments::*;
use crate::plonk::commitments::transcript::*;
use crate::plonk::utils::*;
use crate::plonk::generator::*;
// In-memory implementation of `ConstraintSystem` used for testing: it records
// every gate and every assigned value so that `is_satisfied` can evaluate all
// gate equations directly, without building a proof.
#[derive(Debug)]
pub struct TestingAssembly<E: Engine> {
    m: usize,                          // NOTE(review): never updated in this file — presumably reserved; confirm
    n: usize,                          // number of gates added so far (excluding padding)
    input_gates: Vec<Gate<E::Fr>>,     // one constraint gate per public input
    aux_gates: Vec<Gate<E::Fr>>,       // all other gates
    num_inputs: usize,                 // count of allocated public inputs
    num_aux: usize,                    // count of allocated auxiliary (witness) variables
    input_assingments: Vec<E::Fr>,     // values of the public inputs, 1-indexed via Variable
    aux_assingments: Vec<E::Fr>,       // values of the auxiliary variables, 1-indexed via Variable
    inputs_map: Vec<usize>,            // NOTE(review): unused in this file — confirm against other assemblies
    is_finalized: bool                 // set once gates are padded to 2^k - 1
}
impl<E: Engine> ConstraintSystem<E> for TestingAssembly<E> {
    // Allocate a fresh auxiliary (witness) variable holding the given value.
    fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let assigned = value()?;
        self.aux_assingments.push(assigned);
        self.num_aux += 1;
        Ok(Variable(Index::Aux(self.num_aux)))
    }

    // Allocate a fresh public input variable holding the given value.
    fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let assigned = value()?;
        self.input_assingments.push(assigned);
        self.num_inputs += 1;
        let input_var = Variable(Index::Input(self.num_inputs));
        // inputs get a placeholder constant-zero gate
        let gate = Gate::<E::Fr>::new_enforce_constant_gate(input_var, Some(E::Fr::zero()), self.dummy_variable());
        // let gate = Gate::<E>::new_enforce_constant_gate(input_var, Some(value), self.dummy_variable());
        self.input_gates.push(gate);
        Ok(input_var)
    }

    // Constrain `variable` to be 0 or 1.
    fn enforce_boolean(&mut self, variable: Variable) -> Result<(), SynthesisError> {
        self.aux_gates.push(Gate::<E::Fr>::new_enforce_boolean_gate(variable, self.dummy_variable()));
        self.n += 1;
        Ok(())
    }

    // Add a generic gate with arbitrary selector coefficients.
    fn new_gate(&mut self, variables: (Variable, Variable, Variable),
        coeffs:(E::Fr,E::Fr,E::Fr,E::Fr,E::Fr)) -> Result<(), SynthesisError>
    {
        self.aux_gates.push(Gate::<E::Fr>::new_gate(variables, coeffs));
        self.n += 1;
        Ok(())
    }

    // Constrain `variable` to equal the given constant.
    fn enforce_constant(&mut self, variable: Variable, constant: E::Fr) -> Result<(), SynthesisError>
    {
        self.aux_gates.push(Gate::<E::Fr>::new_enforce_constant_gate(variable, Some(constant), self.dummy_variable()));
        self.n += 1;
        Ok(())
    }

    // Add a two-variable multiplication gate (v_0 * v_1 = 0 pattern).
    fn enforce_mul_2(&mut self, variables: (Variable, Variable)) -> Result<(), SynthesisError> {
        // q_l, q_r, q_o, q_c = 0, q_m = 1
        let (left, right) = variables;
        let nil = E::Fr::zero();
        let gate = Gate::<E::Fr>::new_gate(
            (left, right, self.dummy_variable()),
            (nil, nil, nil, E::Fr::one(), nil)
        );
        self.aux_gates.push(gate);
        self.n += 1;
        Ok(())
    }

    // Add a three-variable multiplication gate: a * b = c.
    fn enforce_mul_3(&mut self, variables: (Variable, Variable, Variable)) -> Result<(), SynthesisError> {
        self.aux_gates.push(Gate::<E::Fr>::new_multiplication_gate(variables));
        self.n += 1;
        Ok(())
    }

    // Constrain c_0 * v_0 + c_1 * v_1 = 0.
    fn enforce_zero_2(&mut self, variables: (Variable, Variable), coeffs:(E::Fr, E::Fr)) -> Result<(), SynthesisError>
    {
        let (left, right) = variables;
        let (k0, k1) = coeffs;
        let nil = E::Fr::zero();
        let gate = Gate::<E::Fr>::new_gate(
            (left, right, self.dummy_variable()),
            (k0, k1, nil, nil, nil)
        );
        self.aux_gates.push(gate);
        self.n += 1;
        Ok(())
    }

    // Constrain c_0 * v_0 + c_1 * v_1 + c_2 * v_2 = 0.
    fn enforce_zero_3(&mut self, variables: (Variable, Variable, Variable), coeffs:(E::Fr, E::Fr, E::Fr)) -> Result<(), SynthesisError>
    {
        self.aux_gates.push(Gate::<E::Fr>::new_enforce_zero_gate(variables, coeffs));
        self.n += 1;
        Ok(())
    }

    // Expose the assembly's dummy variable (the pre-allocated zero).
    fn get_dummy_variable(&self) -> Variable {
        self.dummy_variable()
    }
}
impl<E: Engine> TestingAssembly<E> {
pub fn new() -> Self {
let mut tmp = Self {
n: 0,
m: 0,
input_gates: vec![],
aux_gates: vec![],
num_inputs: 0,
num_aux: 0,
input_assingments: vec![],
aux_assingments: vec![],
inputs_map: vec![],
is_finalized: false,
};
let zero = tmp.alloc(|| Ok(E::Fr::zero())).expect("should have no issues");
tmp.enforce_constant(zero, E::Fr::zero()).expect("should have no issues");
match (tmp.dummy_variable(), zero) {
(Variable(Index::Aux(1)), Variable(Index::Aux(1))) => {},
_ => panic!("zero variable is incorrect")
}
tmp
}
// return variable that is not in a constraint formally, but has some value
fn dummy_variable(&self) -> Variable {
// <Self as ConstraintSystem<E>>::ZERO
Variable(Index::Aux(1))
}
pub fn num_gates(&self) -> usize {
assert!(self.is_finalized);
self.input_gates.len() + self.aux_gates.len()
}
fn finalize(&mut self) {
if self.is_finalized {
return;
}
let n = self.input_gates.len() + self.aux_gates.len();
if (n+1).is_power_of_two() {
return;
}
let empty_gate = Gate::<E::Fr>::new_empty_gate(self.dummy_variable());
let new_aux_len = (n+1).next_power_of_two() - 1 - self.input_gates.len();
self.aux_gates.resize(new_aux_len, empty_gate);
self.is_finalized = true;
}
fn get_value(&self, var: &Variable) -> E::Fr {
match var {
Variable(Index::Input(input)) => {
self.input_assingments[*input - 1]
},
Variable(Index::Aux(aux)) => {
self.aux_assingments[*aux - 1]
}
}
}
pub fn is_satisfied(mut self) -> bool {
self.finalize();
assert!(self.is_finalized);
fn coeff_into_field_element<F: PrimeField>(coeff: & Coeff<F>) -> F {
match coeff {
Coeff::Zero => {
F::zero()
},
Coeff::One => {
F::one()
},
Coeff::NegativeOne => {
let mut tmp = F::one();
tmp.negate();
tmp
},
Coeff::Full(c) => {
*c
},
}
}
// expect a small number of inputs
for (i, gate) in self.input_gates.iter().enumerate()
{
let q_l = coeff_into_field_element(&gate.q_l);
let q_r = coeff_into_field_element(&gate.q_r);
let q_o = coeff_into_field_element(&gate.q_o);
let q_m = coeff_into_field_element(&gate.q_m);
let q_c = coeff_into_field_element(&gate.q_c);
let a_value = self.get_value(gate.a_wire());
let b_value = self.get_value(gate.b_wire());
let c_value = self.get_value(gate.c_wire());
let mut res = q_c;
let mut tmp = q_l;
tmp.mul_assign(&a_value);
res.add_assign(&tmp);
let mut tmp = q_r;
tmp.mul_assign(&b_value);
res.add_assign(&tmp);
let mut tmp = q_o;
tmp.mul_assign(&c_value);
res.add_assign(&tmp);
let mut tmp = q_m;
tmp.mul_assign(&a_value);
tmp.mul_assign(&b_value);
res.add_assign(&tmp);
if !res.is_zero() {
println!("Unsatisfied at input gate {}", i+1);
return false;
}
}
for (i, gate) in self.aux_gates.iter().enumerate()
{
let q_l = coeff_into_field_element(&gate.q_l);
let q_r = coeff_into_field_element(&gate.q_r);
let q_o = coeff_into_field_element(&gate.q_o);
let q_m = coeff_into_field_element(&gate.q_m);
let q_c = coeff_into_field_element(&gate.q_c);
let a_value = self.get_value(gate.a_wire());
let b_value = self.get_value(gate.b_wire());
let c_value = self.get_value(gate.c_wire());
let mut res = q_c;
let mut tmp = q_l;
tmp.mul_assign(&a_value);
res.add_assign(&tmp);
let mut tmp = q_r;
tmp.mul_assign(&b_value);
res.add_assign(&tmp);
let mut tmp = q_o;
tmp.mul_assign(&c_value);
res.add_assign(&tmp);
let mut tmp = q_m;
tmp.mul_assign(&a_value);
tmp.mul_assign(&b_value);
res.add_assign(&tmp);
if !res.is_zero() {
println!("Unsatisfied at aux gate {}", i+1);
println!("Gate {:?}", *gate);
println!("A = {}, B = {}, C = {}", a_value, b_value, c_value);
return false;
}
}
true
}
}<file_sep>/src/plonk/commitments/transparent/iop_compiler/mod.rs
use crate::ff::PrimeField;
pub mod coset_combining_blake2s_tree;
// Marker trait for oracle commitments (e.g. a Merkle tree root).
pub trait Commitment: Clone + Eq + PartialEq + std::fmt::Debug {}
// An instantiated IOP oracle (e.g. the blake2s coset-combining Merkle tree in
// the sibling module) built over a list of field elements.
pub trait IopInstance<F: PrimeField>: PartialEq + Eq {
    type Commitment: Clone + Eq + PartialEq + std::fmt::Debug;
    type Query: IopQuery<F>;
    type Params: Clone + Eq + PartialEq + std::fmt::Debug;
    // Build the oracle over `values` with the given parameters.
    fn create(values: &[F], params: &Self::Params) -> Self;
    // Number of committed values.
    fn size(&self) -> usize;
    // Short commitment to the whole oracle.
    fn get_commitment(&self) -> Self::Commitment;
    // Check that `query` is consistent with `commitment`.
    fn verify_query(commitment: &Self::Commitment, query: &Self::Query, params: &Self::Params) -> bool;
    // Open the oracle at the given indexes; `values` is the committed data.
    fn produce_query(&self, indexes: Vec<usize>, values: &[F]) -> Self::Query;
}
// A single opening of an IOP oracle: the queried indexes and their values.
pub trait IopQuery<F: PrimeField>: 'static + PartialEq + Eq + Clone + std::fmt::Debug {
    fn indexes(&self) -> Vec<usize>;
    fn values(&self) -> &[F];
}
// const fn byte_size<F: PrimeField>() -> usize {
// (((F::NUM_BITS as usize) / 64) + 1) * 8
// }<file_sep>/src/plonk/better_better_cs/redshift/simple_fri/mod.rs
// pub mod fri;
// pub mod query_producer;
// pub mod verifier;
// pub mod precomputation;
use crate::SynthesisError;
use crate::worker::Worker;
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::Engine;
use crate::plonk::commitments::transcript::*;
use crate::plonk::better_better_cs::redshift::binary_tree::*;
use crate::plonk::better_better_cs::redshift::tree_hash::*;
use crate::plonk::polynomials::*;
use crate::plonk::commitments::transcript::Prng;
use crate::plonk::fft::cooley_tukey_ntt::*;
// Driver for the FRI low-degree protocol over a fixed evaluation domain:
// holds the folding schedule and precomputed inverse roots of unity.
pub struct FriCombiner<E:Engine, H: BinaryTreeHasher<E::Fr>> {
    precomputations: OmegasInvBitreversed<E::Fr>,  // bit-reversed inverse omegas for the initial domain
    fri_domain_size: usize,                        // size of the initial (LDE) domain
    lde_factor: usize,                             // blowup factor of the LDE
    output_coeffs_at_degree_plus_one: usize,       // degree bound (plus one) of the final polynomial
    folding_schedule: Vec<usize>,                  // log2 folding factor per FRI step
    tree_hasher: H,                                // hasher used for intermediate oracles
    optimal_values_per_leaf: usize,                // target leaf width of the oracle trees
    coset_factor: E::Fr,                           // coset generator (one => plain domain)
}
// Everything the FRI prover produces: per-step oracles, their roots and leaf
// values, the challenges used at each step, and the final low-degree coefficients.
pub struct FriOraclesSet<E:Engine, H: BinaryTreeHasher<E::Fr>> {
    pub intermediate_oracles: Vec<BinaryTree<E, H>>,
    pub intermediate_roots: Vec<H::Output>,
    pub intermediate_leaf_values: Vec<Vec<E::Fr>>,
    pub intermediate_challenges: Vec<Vec<E::Fr>>,
    pub final_coefficients: Vec<E::Fr>
}
impl<E: Engine, H: BinaryTreeHasher<E::Fr>> FriCombiner<E, H> {
    // Build a combiner for a domain of `size` points with the given LDE
    // blowup, target output degree bound and leaf width. The folding
    // schedule is greedily packed: as many full `optimal_values_per_leaf`
    // steps as fit, plus one smaller remainder step if needed.
    pub fn initialize_for_domain_size(
        size: usize,
        lde_factor: usize,
        output_coeffs_at_degree_plus_one: usize,
        coset_factor: E::Fr,
        optimal_values_per_leaf: usize,
        hasher: H
    ) -> Self {
        assert!(output_coeffs_at_degree_plus_one.is_power_of_two());
        assert!(lde_factor.is_power_of_two());
        let precomputations = OmegasInvBitreversed::new_for_domain_size(size);
        let mut schedule = vec![];
        // total folding needed to go from degree (size/lde_factor) down to the output bound
        let folding_factor = size / lde_factor / output_coeffs_at_degree_plus_one;
        assert!(folding_factor.is_power_of_two());
        let mut size_left = log2_floor(folding_factor) as usize;
        let base = log2_floor(optimal_values_per_leaf) as usize;
        while size_left >= base {
            size_left -= base;
            schedule.push(base);
        }
        if size_left != 0 {
            schedule.push(size_left)
        }
        Self {
            precomputations,
            fri_domain_size: size,
            lde_factor,
            output_coeffs_at_degree_plus_one,
            folding_schedule: schedule,
            tree_hasher: hasher,
            optimal_values_per_leaf,
            coset_factor
        }
    }

    // Run the FRI folding over `lde_values` (already in bit-reversed order),
    // drawing challenges from `prng` and committing each intermediate domain
    // in a binary Merkle tree. Returns the oracles, roots, leaf values,
    // challenges, and the final low-degree coefficients.
    pub fn perform_fri_assuming_bitreversed<P: Prng<E::Fr, Input = H::Output>>(
        &self,
        lde_values: &[E::Fr],
        prng: &mut P,
        worker: &Worker,
    ) -> Result<FriOraclesSet<E, H>, SynthesisError> {
        let mut coset_schedule_index = 0;
        let coset_factor = self.folding_schedule[coset_schedule_index];
        // product of all per-step wrapping factors
        let mut total_wrap_factor = 1;
        for s in self.folding_schedule.iter() {
            let coeff = 1 << *s;
            total_wrap_factor *= coeff;
        }
        let initial_domain_size = lde_values.len();
        assert_eq!(self.precomputations.domain_size(), initial_domain_size);
        let mut two = E::Fr::one();
        two.double();
        let two_inv = two.inverse().expect("should exist");
        let initial_degree_plus_one = initial_domain_size / self.lde_factor;
        assert_eq!(initial_degree_plus_one / total_wrap_factor, self.output_coeffs_at_degree_plus_one,
            "number of FRI round does not match the ouput degree: initial degree+1 = {}, wrapping factor {}, output at degree+1 = {}",
            initial_degree_plus_one, total_wrap_factor, self.output_coeffs_at_degree_plus_one);
        let mut intermediate_oracles = vec![];
        let mut intermediate_values = vec![];
        let mut intermediate_roots = vec![];
        let mut challenges = vec![];
        // one challenge per halving within the first step
        let num_challenges = coset_factor;
        let mut next_domain_challenges = {
            let mut challenges = vec![];
            for _ in 0..num_challenges {
                challenges.push(prng.get_challenge());
            }
            challenges
        };
        challenges.push(next_domain_challenges.clone());
        let mut values_slice = lde_values.as_ref();
        let omegas_inv_bitreversed: &[E::Fr] = self.precomputations.bit_reversed_omegas();
        // if we would precompute all N we would have
        // [0, N/2, N/4, 3N/4, N/8, N/2 + N/8, N/8 + N/4, N/8 + N/4 + N/2, ...]
        // but we only precompute half of them and have
        // [0, N/4, N/8, N/8 + N/4, ...]
        let mut this_domain_size = lde_values.len();
        // step 0: fold totally by 2
        // step 1: fold totally by 4
        // etc...
        let num_steps = self.folding_schedule.len();
        // we do NOT need to make the first (largest) tree because its values are simulated
        // so we will cover the first step later on separately
        for (fri_step, coset_factor) in self.folding_schedule.iter().enumerate() {
            let coset_factor = *coset_factor;
            let wrapping_factor = 1 << coset_factor;
            let next_domain_size = this_domain_size / wrapping_factor;
            let mut next_values = vec![E::Fr::zero(); next_domain_size];
            // we combine like this with FRI trees being aware of the FRI computations
            //            next_value(omega**)
            //          /                     \
            //    intermediate(omega*)       intermediate(-omega*)
            //    /           \                   /            \
            // this(omega) this(-omega)     this(omega')    this(-omega')
            //
            // so omega* = omega^2i. omega' = sqrt(-omega^2i) = sqrt(omega^(N/2 + 2i)) = omega^N/4 + i
            //
            // we expect values to come bitreversed, so this(omega) and this(-omega) are always adjustent to each other
            // because in normal emumeration it would be elements b0XYZ and b1XYZ, and now it's bZYX0 and bZYX1
            //
            // this(omega^(N/4 + i)) for b00YZ has a form b01YZ, so bitreversed it's bZY00 and bZY10
            // this(-omega^(N/4 + i)) obviously has bZY11, so they are all near in initial values
            worker.scope(next_values.len(), |scope, chunk| {
                for (i, v) in next_values.chunks_mut(chunk).enumerate() {
                    let next_domain_challenges = next_domain_challenges.clone();
                    scope.spawn(move |_| {
                        let initial_k = i*chunk;
                        let mut this_level_values = Vec::with_capacity(wrapping_factor);
                        let mut next_level_values = vec![E::Fr::zero(); wrapping_factor];
                        for (j, v) in v.iter_mut().enumerate() {
                            let batch_id = initial_k + j;
                            let values_offset = batch_id*wrapping_factor;
                            for (wrapping_step, challenge) in next_domain_challenges.iter().enumerate() {
                                let base_omega_idx = (batch_id * wrapping_factor) >> (1 + wrapping_step);
                                let expected_this_level_values = wrapping_factor >> wrapping_step;
                                let expected_next_level_values = wrapping_factor >> (wrapping_step + 1);
                                let inputs = if wrapping_step == 0 {
                                    &values_slice[values_offset..(values_offset + wrapping_factor)]
                                } else {
                                    &this_level_values[..expected_this_level_values]
                                };
                                // imagine first FRI step, first wrapping step
                                // in values we have f(i), f(i + N/2), f(i + N/4), f(i + N/4 + N/2), f(i + N/8), ...
                                // so we need to use omega(i) for the first pair, omega(i + N/4) for the second, omega(i + N/8)
                                // on the next step we would have f'(2i), f'(2i + N/2), f'(2i + N/4), f'(2i + N/4 + N/2)
                                // so we would have to pick omega(2i) and omega(2i + N/4)
                                // this means LSB is always the same an only depend on the pair_idx below
                                // MSB is more tricky
                                // for a batch number 0 we have i = 0
                                // for a batch number 1 due to bitreverse we have index equal to b000001xxx where LSB are not important in the batch
                                // such value actually gives i = bxxx100000 that is a bitreverse of the batch number with proper number of bits
                                // due to precomputed omegas also being bitreversed we just need a memory location b000001xxx >> 1
                                debug_assert_eq!(inputs.len() / 2, expected_next_level_values);
                                for (pair_idx, (pair, o)) in inputs.chunks(2).zip(next_level_values[..expected_next_level_values].iter_mut()).enumerate() {
                                    debug_assert!(base_omega_idx & pair_idx == 0);
                                    let omega_idx = base_omega_idx + pair_idx;
                                    let omega_inv = omegas_inv_bitreversed[omega_idx];
                                    let f_at_omega = pair[0];
                                    let f_at_minus_omega = pair[1];
                                    // split into even/odd parts and fold with the challenge:
                                    // f'(x^2) = (f_even + challenge * f_odd) / 2
                                    let mut v_even_coeffs = f_at_omega;
                                    v_even_coeffs.add_assign(&f_at_minus_omega);
                                    let mut v_odd_coeffs = f_at_omega;
                                    v_odd_coeffs.sub_assign(&f_at_minus_omega);
                                    v_odd_coeffs.mul_assign(&omega_inv);
                                    let mut tmp = v_odd_coeffs;
                                    tmp.mul_assign(&challenge);
                                    tmp.add_assign(&v_even_coeffs);
                                    tmp.mul_assign(&two_inv);
                                    *o = tmp;
                                }
                                this_level_values.clear();
                                this_level_values.clone_from(&next_level_values);
                            }
                            *v = next_level_values[0];
                        }
                    });
                }
            });
            // until we hit the last step we take newly produced values
            // and make an oracle from them
            if fri_step < num_steps - 1 {
                coset_schedule_index += 1;
                this_domain_size = next_domain_size;
                let coset_factor = self.folding_schedule[coset_schedule_index];
                let tree_params = BinaryTreeParams {
                    values_per_leaf: (1 << coset_factor)
                };
                let intermediate_oracle = BinaryTree::create(
                    &next_values,
                    self.tree_hasher.clone(),
                    &tree_params
                );
                let root = intermediate_oracle.get_commitment();
                let num_challenges = coset_factor;
                // Fiat-Shamir: absorb the root, then draw the next step's challenges
                next_domain_challenges = {
                    prng.commit_input(&root);
                    let mut challenges = vec![];
                    for _ in 0..num_challenges {
                        challenges.push(prng.get_challenge());
                    }
                    challenges
                };
                challenges.push(next_domain_challenges.clone());
                intermediate_roots.push(root);
                intermediate_oracles.push(intermediate_oracle);
            }
            intermediate_values.push(next_values);
            values_slice = intermediate_values.last().expect("is something").as_ref();
        }
        assert_eq!(challenges.len(), num_steps);
        assert_eq!(intermediate_roots.len(), num_steps-1);
        assert_eq!(intermediate_oracles.len(), num_steps-1);
        assert_eq!(intermediate_values.len(), num_steps);
        // interpolate the final small domain back to coefficient form
        let mut final_poly_values = Polynomial::from_values(values_slice.to_vec())?;
        final_poly_values.bitreverse_enumeration(&worker);
        let final_poly_coeffs = if self.coset_factor == E::Fr::one() {
            final_poly_values.icoset_fft(&worker)
        } else {
            final_poly_values.icoset_fft_for_generator(&worker, &self.coset_factor)
        };
        let mut final_poly_coeffs = final_poly_coeffs.into_coeffs();
        // measure the actual degree (index of the highest non-zero coefficient)
        let mut degree = final_poly_coeffs.len() - 1;
        for c in final_poly_coeffs.iter().rev() {
            if c.is_zero() {
                degree -= 1;
            } else {
                break
            }
        }
        assert!(degree < self.output_coeffs_at_degree_plus_one, "polynomial degree is too large, coeffs = {:?}", final_poly_coeffs);
        final_poly_coeffs.truncate(self.output_coeffs_at_degree_plus_one);
        let set = FriOraclesSet {
            intermediate_oracles,
            intermediate_roots,
            intermediate_leaf_values: intermediate_values,
            intermediate_challenges: challenges,
            final_coefficients: final_poly_coeffs
        };
        Ok(set)
    }
}
<file_sep>/src/kate_commitment/mod.rs
use crate::pairing::{Engine, CurveAffine, CurveProjective};
use crate::ff::{Field, PrimeField};
use crate::worker::Worker;
use crate::plonk::polynomials::*;
use std::sync::Arc;
use crate::multiexp;
use crate::SynthesisError;
// Type-level tag selecting how the G1 bases of a `Crs` are laid out.
pub trait CrsType {}

// Bases are monomial powers of the secret: g, g^x, g^(x^2), ...
pub struct CrsForMonomialForm;
// presumably: bases are Lagrange basis polynomials evaluated at the secret — confirm against generators elsewhere
pub struct CrsForLagrangeForm;
// presumably: Lagrange form over a coset of the main domain — confirm against generators elsewhere
pub struct CrsForLagrangeFormOnCoset;

impl CrsType for CrsForMonomialForm {}
impl CrsType for CrsForLagrangeForm {}
impl CrsType for CrsForLagrangeFormOnCoset {}
// Common reference string for Kate (KZG) commitments: G1 bases in the layout
// selected by `T`, plus the two G2 monomial bases [h, h^x] used for verification.
pub struct Crs<E: Engine, T: CrsType> {
    pub g1_bases: Arc<Vec<E::G1Affine>>,
    pub g2_monomial_bases: Arc<Vec<E::G2Affine>>,
    _marker: std::marker::PhantomData<T>
}
use std::io::{Read, Write};
use crate::byteorder::ReadBytesExt;
use crate::byteorder::WriteBytesExt;
use crate::byteorder::BigEndian;
// Two CRS values are equal iff their G1 and G2 bases match element-wise.
impl<E: Engine, T: CrsType> PartialEq for Crs<E, T> {
    fn eq(&self, other: &Self) -> bool {
        self.g1_bases == other.g1_bases
        && self.g2_monomial_bases == other.g2_monomial_bases
    }
}

impl<E: Engine, T: CrsType> Eq for Crs<E, T> { }
impl<E: Engine, T: CrsType> Crs<E, T> {
    // Serialize the CRS: a big-endian u64 count followed by uncompressed
    // points, first for G1 bases, then for the G2 monomial bases.
    pub fn write<W: Write>(
        &self,
        mut writer: W
    ) -> std::io::Result<()>
    {
        writer.write_u64::<BigEndian>(self.g1_bases.len() as u64)?;
        for g in &self.g1_bases[..] {
            writer.write_all(g.into_uncompressed().as_ref())?;
        }
        writer.write_u64::<BigEndian>(self.g2_monomial_bases.len() as u64)?;
        for g in &self.g2_monomial_bases[..] {
            writer.write_all(g.into_uncompressed().as_ref())?;
        }
        Ok(())
    }

    // Deserialize a CRS written by `write`. Returns `InvalidData` on any
    // malformed point or if the number of G2 bases is not exactly 2.
    pub fn read<R: Read>(
        mut reader: R
    ) -> std::io::Result<Self>
    {
        use crate::pairing::EncodedPoint;

        let mut g1_repr = <E::G1Affine as CurveAffine>::Uncompressed::empty();
        let mut g2_repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();

        let num_g1 = reader.read_u64::<BigEndian>()?;
        let mut g1_bases = Vec::with_capacity(num_g1 as usize);
        for _ in 0..num_g1 {
            reader.read_exact(g1_repr.as_mut())?;
            let p = g1_repr.into_affine().map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
            g1_bases.push(p);
        }

        let num_g2 = reader.read_u64::<BigEndian>()?;
        // ROBUSTNESS FIX: this is a fallible deserializer operating on
        // external data, so a malformed count must surface as an IO error,
        // not a panic (previously `assert!(num_g2 == 2u64)`).
        if num_g2 != 2u64 {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "expected exactly 2 G2 monomial bases"
            ));
        }
        let mut g2_bases = Vec::with_capacity(num_g2 as usize);
        for _ in 0..num_g2 {
            reader.read_exact(g2_repr.as_mut())?;
            let p = g2_repr.into_affine().map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
            g2_bases.push(p);
        }

        let new = Self {
            g1_bases: Arc::new(g1_bases),
            g2_monomial_bases: Arc::new(g2_bases),
            _marker: std::marker::PhantomData
        };
        Ok(new)
    }
}
impl<E: Engine> Crs<E, CrsForMonomialForm> {
/// Completely invalid CRS (every G1 base is the generator); testing only.
pub fn dummy_crs(size: usize) -> Self {
assert!(size.is_power_of_two());
let g1 = vec![E::G1Affine::one(); size];
let g2 = vec![E::G2Affine::one(); 2];
Self {
g1_bases: Arc::new(g1),
g2_monomial_bases: Arc::new(g2),
_marker: std::marker::PhantomData
}
}
/// Deterministic test CRS using the public "toxic waste" tau = 42:
/// G1 bases are [g1^{tau^i}] for i in 0..size, G2 bases are [g2, g2^tau].
pub fn crs_42(size: usize, worker: &Worker) -> Self {
// kind of how ceremony would work
assert!(size.is_power_of_two());
let mut g2 = vec![E::G2Affine::one(); 2];
use crate::group::Scalar;
use crate::domain::EvaluationDomain;
use crate::pairing::Wnaf;
let mut coeffs = vec![Scalar::<E>(E::Fr::one()); size];
{
let gen = E::Fr::from_str("42").unwrap();
g2[1] = g2[1].mul(gen.into_repr()).into_affine();
// fill coeffs[i] = tau^i in parallel; each chunk starts at tau^(i*chunk)
worker.scope(coeffs.len(), |scope, chunk| {
for (i, p) in coeffs.chunks_mut(chunk).enumerate()
{
scope.spawn(move |_| {
let mut current_p = gen.pow(&[(i*chunk) as u64]);
for p in p.iter_mut() {
p.0 = current_p;
current_p.mul_assign(&gen);
}
});
}
});
}
// multiply the G1 generator by every tau^i using a shared wNAF table
let mut g1_wnaf = Wnaf::new();
let g1_wnaf = g1_wnaf.base(E::G1Affine::one().into_projective(), size);
let mut g1 = vec![E::G1Affine::zero().into_projective(); size];
worker.scope(g1.len(), |scope, chunk| {
for (g1, p) in g1.chunks_mut(chunk).zip(coeffs.chunks(chunk))
{
let mut g1_wnaf = g1_wnaf.shared();
scope.spawn(move |_| {
for (g1, p) in g1.iter_mut().zip(p.iter())
{
// Compute final exponent
let exp = p.0;
// Exponentiate
*g1 = g1_wnaf.scalar(exp.into_repr());
}
// Batch normalize
E::G1::batch_normalization(g1);
});
}
});
let g1: Vec<_> = g1.into_iter().map(|el| el.into_affine()).collect();
Self {
g1_bases: Arc::new(g1),
g2_monomial_bases: Arc::new(g2),
_marker: std::marker::PhantomData
}
}
}
impl<E: Engine> Crs<E, CrsForLagrangeForm> {
// Completely invalid, only for testing purposes
pub fn dummy_crs(size: usize) -> Self {
assert!(size.is_power_of_two());
let g1 = vec![E::G1Affine::one(); size];
let g2 = vec![E::G2Affine::one(); 2];
Self {
g1_bases: Arc::new(g1),
g2_monomial_bases: Arc::new(g2),
_marker: std::marker::PhantomData
}
}
/// Test CRS in Lagrange form, derived from the tau = 42 monomial CRS.
pub fn crs_42(size: usize, worker: &Worker) -> Self {
let tmp = Crs::<E, CrsForMonomialForm>::crs_42(size, &worker);
Self::from_powers(&tmp, size, &worker)
}
/// Converts the first `size` monomial-form G1 bases into Lagrange-basis
/// bases by running the powers-of-tau -> Lagrange transform over the
/// `size`-point evaluation domain. The G2 part is copied unchanged.
pub fn from_powers(powers: &Crs::<E, CrsForMonomialForm>, size: usize, worker: &Worker) -> Self {
assert!(size.is_power_of_two());
assert!(size <= powers.g1_bases.len());
let g2 = powers.g2_monomial_bases.as_ref().to_vec();
let g1 = powers.g1_bases.as_ref()[..size].to_vec();
let g1 = g1.into_iter().map(|el| Point(el.into_projective())).collect();
use crate::group::Point;
use crate::domain::EvaluationDomain;
let mut g1 = EvaluationDomain::from_coeffs(g1).expect("must fit into the domain");
g1.transform_powers_of_tau_into_lagrange_basis(&worker);
let mut g1: Vec<_> = g1.into_coeffs().into_iter().map(|el| el.0).collect();
// batch-normalize the projective results in parallel before going affine
worker.scope(g1.len(), |scope, chunk| {
for g1 in g1.chunks_mut(chunk)
{
scope.spawn(move |_| {
// Batch normalize
E::G1::batch_normalization(g1);
});
}
});
let g1: Vec<_> = g1.into_iter().map(|el| el.into_affine()).collect();
Self {
g1_bases: Arc::new(g1),
g2_monomial_bases: Arc::new(g2),
_marker: std::marker::PhantomData
}
}
}
impl<E: Engine> Crs<E, CrsForLagrangeFormOnCoset> {
// Completely invalid, only for testing purposes
pub fn dummy_crs(size: usize) -> Self {
assert!(size.is_power_of_two());
let g1 = vec![E::G1Affine::one(); size];
let g2 = vec![E::G2Affine::one(); 2];
Self {
g1_bases: Arc::new(g1),
g2_monomial_bases: Arc::new(g2),
_marker: std::marker::PhantomData
}
}
/// Test CRS in coset-Lagrange form, derived from the tau = 42 monomial CRS.
pub fn crs_42(size: usize, worker: &Worker) -> Self {
let tmp = Crs::<E, CrsForMonomialForm>::crs_42(size, &worker);
Self::from_powers(&tmp, size, &worker)
}
/// Like `Crs::<_, CrsForLagrangeForm>::from_powers`, but transforms the
/// powers into the Lagrange basis taken over a multiplicative coset of the
/// domain (see `transform_powers_of_tau_into_lagrange_basis_on_coset`).
pub fn from_powers(powers: &Crs::<E, CrsForMonomialForm>, size: usize, worker: &Worker) -> Self {
assert!(size.is_power_of_two());
assert!(size <= powers.g1_bases.len());
let g2 = powers.g2_monomial_bases.as_ref().to_vec();
let g1 = powers.g1_bases.as_ref()[..size].to_vec();
let g1: Vec<_> = g1.into_iter().map(|el| Point(el.into_projective())).collect();
use crate::group::Point;
use crate::domain::EvaluationDomain;
let mut g1 = EvaluationDomain::from_coeffs(g1).expect("must fit into the domain");
g1.transform_powers_of_tau_into_lagrange_basis_on_coset(&worker);
let mut g1: Vec<_> = g1.into_coeffs().into_iter().map(|el| el.0).collect();
// batch-normalize the projective results in parallel before going affine
worker.scope(g1.len(), |scope, chunk| {
for g1 in g1.chunks_mut(chunk)
{
scope.spawn(move |_| {
// Batch normalize
E::G1::batch_normalization(g1);
});
}
});
let g1: Vec<_> = g1.into_iter().map(|el| el.into_affine()).collect();
Self {
g1_bases: Arc::new(g1),
g2_monomial_bases: Arc::new(g2),
_marker: std::marker::PhantomData
}
}
}
/// Converts a slice of field elements into their raw representations,
/// parallelized over the worker's thread pool. Always succeeds; the
/// `Result` is kept for call-site uniformity.
pub(crate) fn elements_into_representations<E: Engine>(
    worker: &Worker,
    scalars: &[E::Fr]
) -> Result<Vec<<E::Fr as PrimeField>::Repr>, SynthesisError>
{
    let mut reprs = vec![<E::Fr as PrimeField>::Repr::default(); scalars.len()];
    worker.scope(scalars.len(), |scope, chunk| {
        for (src, dst) in scalars.chunks(chunk).zip(reprs.chunks_mut(chunk)) {
            scope.spawn(move |_| {
                for (s, d) in src.iter().zip(dst.iter_mut()) {
                    *d = s.into_repr();
                }
            });
        }
    });
    Ok(reprs)
}
/// Commits to a polynomial in coefficient (monomial) form by a dense
/// multiexponentiation against the first `poly.size()` monomial G1 bases.
pub fn commit_using_monomials<E: Engine>(
    poly: &Polynomial<E::Fr, Coefficients>,
    crs: &Crs<E, CrsForMonomialForm>,
    worker: &Worker
) -> Result<E::G1Affine, SynthesisError> {
    let reprs = elements_into_representations::<E>(worker, poly.as_ref())?;
    let commitment = multiexp::dense_multiexp::<E::G1Affine>(
        worker,
        &crs.g1_bases[..reprs.len()],
        &reprs
    )?;
    Ok(commitment.into_affine())
}
/// Commits to a polynomial given by its values on the domain, using the
/// Lagrange-form G1 bases. The CRS size must equal the polynomial size.
pub fn commit_using_values<E: Engine>(
    poly: &Polynomial<E::Fr, Values>,
    crs: &Crs<E, CrsForLagrangeForm>,
    worker: &Worker
) -> Result<E::G1Affine, SynthesisError> {
    assert_eq!(poly.size(), crs.g1_bases.len());
    let reprs = elements_into_representations::<E>(worker, poly.as_ref())?;
    let commitment = multiexp::dense_multiexp::<E::G1Affine>(
        worker,
        &crs.g1_bases,
        &reprs
    )?;
    Ok(commitment.into_affine())
}
/// Commits to a raw value slice using a prefix of the Lagrange-form bases.
/// The value count, rounded up to the next power of two, must equal the
/// CRS size; only the first `values.len()` bases participate.
pub fn commit_using_raw_values<E: Engine>(
    values: &[E::Fr],
    crs: &Crs<E, CrsForLagrangeForm>,
    worker: &Worker
) -> Result<E::G1Affine, SynthesisError> {
    assert_eq!(values.len().next_power_of_two(), crs.g1_bases.len());
    let reprs = elements_into_representations::<E>(worker, values)?;
    let commitment = multiexp::dense_multiexp::<E::G1Affine>(
        worker,
        &crs.g1_bases[0..values.len()],
        &reprs
    )?;
    Ok(commitment.into_affine())
}
use crate::source::QueryDensity;
/// Commits using a sparse multiexp: only the entries selected by `density`
/// contribute. Bases are the Lagrange-form G1 bases starting at offset 0.
/// Note: unlike `commit_using_values`, sizes are not asserted here.
pub fn commit_using_values_with_density<E: Engine, D, Q> (
values: &[E::Fr],
density: D,
crs: &Crs<E, CrsForLagrangeForm>,
worker: &Worker
) -> Result<E::G1Affine, SynthesisError>
where for<'a> &'a Q: QueryDensity,
D: Send + Sync + 'static + Clone + AsRef<Q>
{
use futures::Future;
// assert_eq!(values.len(), crs.g1_bases.len());
let scalars_repr = elements_into_representations::<E>(
&worker,
&values
)?;
// scalars_repr.resize(crs.g1_bases.len(), <E::Fr as PrimeField>::Repr::default());
// future-based multiexp; block on the result
let res = multiexp::multiexp(
&worker,
(crs.g1_bases.clone(), 0),
density,
Arc::new(scalars_repr)
).wait()?;
Ok(res.into_affine())
}
/// Commits to a polynomial given by its values on a coset of the domain,
/// using the coset-Lagrange CRS. The CRS size must equal the value count.
pub fn commit_using_values_on_coset<E: Engine>(
    poly: &Polynomial<E::Fr, Values>,
    crs: &Crs<E, CrsForLagrangeFormOnCoset>,
    worker: &Worker
) -> Result<E::G1Affine , SynthesisError> {
    assert_eq!(poly.size(), crs.g1_bases.len());
    let reprs = elements_into_representations::<E>(worker, poly.as_ref())?;
    let commitment = multiexp::dense_multiexp::<E::G1Affine>(
        worker,
        &crs.g1_bases,
        &reprs
    )?;
    Ok(commitment.into_affine())
}
/// Aggregates `polys` with `challenges` as linear-combination weights and
/// divides the aggregate by (X - at), returning the quotient polynomial.
/// The remainder is discarded (see `divide_single`).
pub fn calculate_batch_opening_quotient_from_monomials<E: Engine>(
    polys: &[Polynomial<E::Fr, Coefficients>],
    challenges: &[E::Fr],
    at: E::Fr,
    worker: &Worker,
) -> Result<Polynomial<E::Fr, Coefficients>, SynthesisError> {
    assert_eq!(polys.len(), challenges.len());
    assert!(polys.len() > 0);
    // seed the aggregate with the first scaled polynomial, then fold the rest in
    let mut aggregate = polys[0].clone();
    aggregate.scale(worker, challenges[0]);
    for idx in 1..polys.len() {
        aggregate.add_assign_scaled(worker, &polys[idx], &challenges[idx]);
    }
    let quotient_coeffs = divide_single::<E>(aggregate.as_ref(), at);
    Polynomial::from_coeffs(quotient_coeffs)
}
/// Produces a KZG opening proof for `poly` at `at` by committing to the
/// quotient (poly(X) - poly(at)) / (X - at). The claimed value parameter is
/// unused: `divide_single` discards the remainder.
pub fn open_from_monomials<E: Engine>(
    poly: &Polynomial<E::Fr, Coefficients>,
    at: E::Fr,
    _expected_value: E::Fr,
    crs: &Crs<E, CrsForMonomialForm>,
    worker: &Worker
) -> Result<E::G1Affine, SynthesisError> {
    assert!(poly.size().is_power_of_two());
    let quotient_coeffs = divide_single::<E>(poly.as_ref(), at);
    assert!(quotient_coeffs.len().is_power_of_two());
    let quotient = Polynomial::from_coeffs(quotient_coeffs)?;
    commit_using_monomials(&quotient, crs, worker)
}
/// Produces an opening proof at `at` for a polynomial given by its values on
/// the domain, without interpolating: computes the quotient's values
/// q(omega^i) = (f(omega^i) - expected_value) / (omega^i - at)
/// pointwise and commits to them in Lagrange form. Errors if `at` lies on
/// the domain (batch inversion of a zero denominator fails).
pub fn open_from_values<E: Engine>(
poly: &Polynomial<E::Fr, Values>,
at: E::Fr,
expected_value: E::Fr,
crs: &Crs<E, CrsForLagrangeForm>,
worker: &Worker
) -> Result<E::G1Affine, SynthesisError> {
assert!(poly.size().is_power_of_two());
// [1, 1, ...] -> [1, omega, omega^2, ...] -> [1 - at, omega - at, ...] -> inverses
let division_result = vec![E::Fr::one(); poly.size()];
let mut division_result = Polynomial::from_values(division_result)?;
division_result.distribute_powers(&worker, division_result.omega);
division_result.sub_constant(&worker, &at);
division_result.batch_inversion(&worker)?;
// multiply each inverse denominator by the numerator f(omega^i) - expected_value
worker.scope(division_result.size(), |scope, chunk_size| {
for (result, values) in division_result.as_mut().chunks_mut(chunk_size)
.zip(poly.as_ref().chunks(chunk_size))
{
scope.spawn(move |_| {
for (r, &val) in result.iter_mut().zip(values.iter()) {
let mut tmp = val;
tmp.sub_assign(&expected_value);
r.mul_assign(&tmp);
}
});
}
});
let opening_proof = commit_using_values(&division_result, &crs, &worker)?;
Ok(opening_proof)
}
/// Coset variant of `open_from_values`: the polynomial's values are taken on
/// the coset `coset_factor * <omega>`, so the quotient denominators are
/// (coset_factor * omega^i - at). Errors if `at` lies on that coset.
pub fn open_from_values_on_coset<E: Engine>(
poly: &Polynomial<E::Fr, Values>,
coset_factor: E::Fr,
at: E::Fr,
expected_value: E::Fr,
crs: &Crs<E, CrsForLagrangeFormOnCoset>,
worker: &Worker
) -> Result<E::G1Affine, SynthesisError> {
assert!(poly.size().is_power_of_two());
let division_result = vec![coset_factor; poly.size()];
let mut division_result = Polynomial::from_values(division_result)?; // [g, g, g, g, ...]
division_result.distribute_powers(&worker, division_result.omega); // [g, g*omega, g*omega^2, ...]
division_result.sub_constant(&worker, &at); // g - z, g*omega - z, g*omega^2 - z, ...]
division_result.batch_inversion(&worker)?;
// multiply each inverse denominator by the numerator f(g*omega^i) - expected_value
worker.scope(division_result.size(), |scope, chunk_size| {
for (result, values) in division_result.as_mut().chunks_mut(chunk_size)
.zip(poly.as_ref().chunks(chunk_size))
{
scope.spawn(move |_| {
for (r, &val) in result.iter_mut().zip(values.iter()) {
let mut tmp = val;
tmp.sub_assign(&expected_value);
r.mul_assign(&tmp);
}
});
}
});
let opening_proof = commit_using_values_on_coset(&division_result, &crs, &worker)?;
Ok(opening_proof)
}
/// Computes the aggregated division result used for a batched opening, in
/// value form: sum_i c_i * (poly_i - opening_values[i]) / (X - open_at)
/// evaluated pointwise over the domain, where the weights c_i are
/// consecutive powers of `challenge` starting from `challenge_start`.
/// Returns the aggregate and the next unused challenge power.
/// Errors if `open_at` lies on the domain (batch inversion fails).
pub fn perform_batched_divisor_for_opening<E: Engine>(
mut polynomials: Vec<Polynomial<E::Fr, Values>>,
open_at: E::Fr,
opening_values: &[E::Fr],
challenge: E::Fr,
challenge_start: E::Fr,
worker: &Worker
) -> Result<(Polynomial<E::Fr, Values>, E::Fr), SynthesisError> {
assert!(polynomials.len() == opening_values.len(), "different number of polynomials and opening values");
// assert!(polynomials.len() > 1, "should aggregate only two or more polynomials");
let size = polynomials[0].size();
assert!(size.is_power_of_two());
// inverse denominators 1 / (omega^i - open_at), shared by every polynomial
let common_divisor = vec![E::Fr::one(); size];
let mut common_divisor = Polynomial::from_values(common_divisor)?;
common_divisor.distribute_powers(&worker, common_divisor.omega);
common_divisor.sub_constant(&worker, &open_at);
common_divisor.batch_inversion(&worker)?;
// subtract the claimed opening value from each polynomial pointwise
for (p, v) in polynomials.iter_mut().zip(opening_values.iter()) {
assert!(p.size() == size);
p.sub_constant(&worker, v);
}
let rest: Vec<_> = polynomials.drain(1..).collect();
let mut aggregation = polynomials.pop().expect("one polynomial left");
// NOTE(review): when challenge_start != 1 the first polynomial is scaled by
// `challenge` rather than `challenge_start`, which looks inconsistent with
// the later terms (scaled by challenge_start * challenge^i). The only caller
// in this file passes challenge_start = 1, making this branch dead there —
// confirm the intent before reusing with a different starting power.
if challenge_start != E::Fr::one() {
aggregation.scale(&worker, challenge);
}
let mut this_challenge = challenge_start;
this_challenge.mul_assign(&challenge);
for other in rest.into_iter() {
aggregation.add_assign_scaled(&worker, &other, &this_challenge);
this_challenge.mul_assign(&challenge);
}
// divide the aggregate by (X - open_at) pointwise
aggregation.mul_assign(&worker, &common_divisor);
drop(common_divisor);
// return next challenge and aggregation
Ok((aggregation, this_challenge))
}
/// Opens a batch of value-form polynomials at `open_at`, aggregating them
/// with powers of `challenge` starting from 1, and commits to the resulting
/// quotient in Lagrange form to obtain a single opening proof.
pub fn perform_batch_opening_from_values<E: Engine>(
    polynomials: Vec<Polynomial<E::Fr, Values>>,
    crs: &Crs::<E, CrsForLagrangeForm>,
    open_at: E::Fr,
    opening_values: &[E::Fr],
    challenge: E::Fr,
    worker: &Worker
) -> Result<E::G1Affine, SynthesisError> {
    let (aggregate, _next_challenge) = perform_batched_divisor_for_opening::<E>(
        polynomials,
        open_at,
        opening_values,
        challenge,
        E::Fr::one(),
        worker
    )?;
    commit_using_values(&aggregate, crs, worker)
}
/// Verifies a single KZG opening via one pairing check. With commitment
/// C = [f(x)]_1, claimed value v = f(z) and proof W = [(f(x) - v)/(x - z)]_1,
/// checks e(C - [v]_1 + z*W, g2) * e(-W, g2^x) == 1.
pub fn is_valid_opening<E: Engine>(
commitment: E::G1Affine,
z: E::Fr,
opening_value: E::Fr,
opening_proof: E::G1Affine,
g2_by_x: E::G2Affine
) -> bool {
// (f(x) - f(z))/(x - z) = op(x)
// f(x) = f(z) + op(x) * (x - z)
// e(f(x) - f(z) + z*op(x), 1) = e(op(x), x)
// e(f(x) - f(z) + z*op(x), 1) * e(-op(x), x) == 1 // e(0, 0)
let mut pair_with_1_part = commitment.into_projective();
let gen_by_opening_value = E::G1Affine::one().mul(opening_value.into_repr());
let proof_by_z = opening_proof.mul(z.into_repr());
pair_with_1_part.sub_assign(&gen_by_opening_value);
pair_with_1_part.add_assign(&proof_by_z);
let mut pair_with_x_part = opening_proof;
pair_with_x_part.negate();
let result = E::final_exponentiation(
&E::miller_loop(
&[
(&pair_with_1_part.into_affine().prepare(), &E::G2Affine::one().prepare()),
(&pair_with_x_part.prepare(), &g2_by_x.prepare()),
]
));
// final exponentiation yields None on degenerate input; treat it as failure
if let Some(res) = result {
return res == E::Fqk::one();
}
false
}
/// Verifies a batched KZG opening of several commitments at the same point
/// `z` with a single proof. The commitments and claimed values are folded
/// with powers of `challenge` (starting at challenge^0), then the same
/// two-pairing check as `is_valid_opening` is applied to the aggregate.
pub fn is_valid_multiopening<E: Engine>(
commitments: &[E::G1Affine],
z: E::Fr,
opening_values: &[E::Fr],
opening_proof: E::G1Affine,
challenge: E::Fr,
g2_by_x: E::G2Affine
) -> bool {
assert!(commitments.len() == opening_values.len());
// \sum_{i} alpha^i (f(x) - f(z))/(x - z) = op(x)
// \sum_{i} alpha^i (f(x) - f(z)) - op(x) * (x - z) = 0
// e(\sum_{i} alpha^i (f(x) - f(z)) + z*op(x), 1) = e(op(x), x)
// e(\sum_{i} alpha^i (f(x) - f(z)) + z*op(x), 1) * e(-op(x), x) == 1 // e(0, 0)
let mut aggregation = E::G1::zero();
let mut this_challenge = E::Fr::one();
// later change for efficiency
for (c, v) in commitments.iter().zip(opening_values.iter()) {
// accumulate challenge^i * (C_i - [v_i]_1)
let mut pair_with_1_part = c.into_projective();
let gen_by_opening_value = E::G1Affine::one().mul(v.into_repr());
pair_with_1_part.sub_assign(&gen_by_opening_value);
pair_with_1_part.mul_assign(this_challenge.into_repr());
aggregation.add_assign(&pair_with_1_part);
this_challenge.mul_assign(&challenge);
}
let proof_by_z = opening_proof.mul(z.into_repr());
aggregation.add_assign(&proof_by_z);
let mut pair_with_x_part = opening_proof;
pair_with_x_part.negate();
let result = E::final_exponentiation(
&E::miller_loop(
&[
(&aggregation.into_affine().prepare(), &E::G2Affine::one().prepare()),
(&pair_with_x_part.prepare(), &g2_by_x.prepare()),
]
));
// final exponentiation yields None on degenerate input; treat it as failure
if let Some(res) = result {
return res == E::Fqk::one();
}
false
}
/// Synthetic (Horner-style) division of `poly` by (X - opening_point),
/// returning the quotient coefficients. The output has the same length as
/// the input (the top slot stays zero); the remainder — the polynomial's
/// value at the opening point — is discarded.
pub(crate) fn divide_single<E: Engine>(
poly: &[E::Fr],
opening_point: E::Fr,
) -> Vec<E::Fr> {
// we are only interested in quotient without a reminder, so we actually don't need opening value
let mut b = opening_point;
b.negate();
let mut q = vec![E::Fr::zero(); poly.len()];
// tmp carries b * q_{i+1}; the recurrence below gives q_i = r_{i+1} + opening_point * q_{i+1}
let mut tmp = E::Fr::zero();
// skip leading zero coefficients so the quotient's leading slots stay zero
let mut found_one = false;
for (q, r) in q.iter_mut().rev().skip(1).zip(poly.iter().rev()) {
if !found_one {
if r.is_zero() {
continue
} else {
found_one = true;
}
}
let mut lead_coeff = *r;
lead_coeff.sub_assign(&tmp);
*q = lead_coeff;
tmp = lead_coeff;
tmp.mul_assign(&b);
}
q
}
/// Builds a monomial-form CRS for Bn256 from Aztec Ignition ceremony
/// transcript files (`transcript00.dat` .. `transcript19.dat`) in `path`.
/// Each of the 20 chunks holds 5,040,000 G1 points, for a total of
/// 100,800,000 powers of tau plus the generator. Every G1 point is checked
/// to lie on the curve; g2^tau (from the first chunk) is additionally
/// sanity-checked with a pairing equation.
///
/// Fix: the second `from_raw_repr` expect message now says "c1" (it was a
/// copy-paste of the "c0" message above it).
pub fn make_crs_from_ignition_transcript<S: AsRef<std::ffi::OsStr> + ?Sized>(
path: &S
) -> Result<Crs<crate::pairing::bn256::Bn256, CrsForMonomialForm>, SynthesisError> {
use crate::pairing::bn256::{Bn256, Fq, Fq2, Fq12};
use crate::pairing::EncodedPoint;
use crate::ff::{PrimeField, PrimeFieldRepr};
use std::io::BufRead;
const CHUNKS: usize = 20;
let base_path = std::path::Path::new(&path);
let mut g1_bases = Vec::with_capacity(100800000 + 1);
g1_bases.push(<Bn256 as Engine>::G1Affine::one());
let mut g2_bases = vec![<Bn256 as Engine>::G2Affine::one()];
for i in 0..CHUNKS {
let full_path = base_path.join(&format!("transcript{:02}.dat", i));
println!("Opening {}", full_path.to_string_lossy());
let file = std::fs::File::open(full_path).map_err(|e| SynthesisError::IoError(e))?;
let mut reader = std::io::BufReader::with_capacity(1 << 24, file);
// skip 28 bytes of chunk header
let mut tmp = [0u8; 28];
reader.read_exact(&mut tmp).expect("must skip 28 bytes");
let mut fq_repr = <Fq as PrimeField>::Repr::default();
// G1 curve equation is y^2 = x^3 + 3
let b_coeff = Fq::from_str("3").unwrap();
// raw-limb encoding of the G2 curve's B coefficient (an Fq2 element)
fq_repr.as_mut()[0] = 0x3bf938e377b802a8;
fq_repr.as_mut()[1] = 0x020b1b273633535d;
fq_repr.as_mut()[2] = 0x26b7edf049755260;
fq_repr.as_mut()[3] = 0x2514c6324384a86d;
let c0 = Fq::from_raw_repr(fq_repr).expect("c0 for B coeff for G2");
fq_repr.as_mut()[0] = 0x38e7ecccd1dcff67;
fq_repr.as_mut()[1] = 0x65f0b37d93ce0d3e;
fq_repr.as_mut()[2] = 0xd749d0dd22ac00aa;
fq_repr.as_mut()[3] = 0x0141b9ce4a688d4d;
let c1 = Fq::from_raw_repr(fq_repr).expect("c1 for B coeff for G2");
let b_coeff_fq2 = Fq2 {
c0: c0,
c1: c1
};
for _ in 0..5_040_000{
// we have to manually read X and Y coordinates
for k in 0..4 {
fq_repr.as_mut()[k] = reader.read_u64::<BigEndian>().expect("must read u64");
}
let x = Fq::from_repr(fq_repr).expect("must be valid field element encoding");
for k in 0..4 {
fq_repr.as_mut()[k] = reader.read_u64::<BigEndian>().expect("must read u64");
}
let y = Fq::from_repr(fq_repr).expect("must be valid field element encoding");
// manual on-curve check
{
let mut lhs = y;
lhs.square();
let mut rhs = x;
rhs.square();
rhs.mul_assign(&x);
rhs.add_assign(&b_coeff);
assert!(lhs == rhs);
}
let p = <Bn256 as Engine>::G1Affine::from_xy_unchecked(x, y);
g1_bases.push(p);
}
if i == 0 {
// read G2
{
for k in 0..4 {
fq_repr.as_mut()[k] = reader.read_u64::<BigEndian>().expect("must read u64");
}
let x_c0 = Fq::from_repr(fq_repr).expect("must be valid field element encoding");
for k in 0..4 {
fq_repr.as_mut()[k] = reader.read_u64::<BigEndian>().expect("must read u64");
}
let x_c1 = Fq::from_repr(fq_repr).expect("must be valid field element encoding");
for k in 0..4 {
fq_repr.as_mut()[k] = reader.read_u64::<BigEndian>().expect("must read u64");
}
let y_c0 = Fq::from_repr(fq_repr).expect("must be valid field element encoding");
for k in 0..4 {
fq_repr.as_mut()[k] = reader.read_u64::<BigEndian>().expect("must read u64");
}
let y_c1 = Fq::from_repr(fq_repr).expect("must be valid field element encoding");
let x = Fq2 {
c0: x_c0,
c1: x_c1
};
let y = Fq2 {
c0: y_c0,
c1: y_c1
};
// manual on-curve check for the G2 point
{
let mut lhs = y;
lhs.square();
let mut rhs = x;
rhs.square();
rhs.mul_assign(&x);
rhs.add_assign(&b_coeff_fq2);
assert!(lhs == rhs);
}
let g2 = <Bn256 as Engine>::G2Affine::from_xy_unchecked(x, y);
g2_bases.push(g2);
// sanity check by using pairing
{
// check e(g1, g2^x) == e(g1^{x}, g2)
let valid = Bn256::final_exponentiation(
&Bn256::miller_loop(
&[
(&g1_bases[0].prepare(), &g2.prepare())
]
)
).unwrap() == Bn256::final_exponentiation(
&Bn256::miller_loop(
&[
(&g1_bases[1].prepare(), &g2_bases[0].prepare())
]
)
).unwrap();
assert!(valid);
}
}
// read G2
let mut tmp = [0u8; 128];
reader.read_exact(&mut tmp).expect("must skip 128 bytes of irrelevant G2 point");
}
// read to end: 64 trailing bytes, after which the chunk must be exhausted
reader.consume(64);
assert_eq!(reader.fill_buf().unwrap().len(), 0);
}
assert_eq!(g1_bases.len(), 100800000 + 1);
assert_eq!(g2_bases.len(), 2);
let new = Crs::<crate::pairing::bn256::Bn256, CrsForMonomialForm> {
g1_bases: Arc::new(g1_bases),
g2_monomial_bases: Arc::new(g2_bases),
_marker: std::marker::PhantomData
};
Ok(new)
}
#[cfg(test)]
pub(crate) mod test {
use super::*;
use crate::pairing::bn256::{Bn256, Fr};
use crate::worker::Worker;
use crate::ff::{PrimeField, Field};
use crate::plonk::polynomials::*;
#[test]
fn test_transformations_of_crs_1() {
    // Smallest possible domain (size 1): print all three CRS forms.
    let worker = Worker::new();
    let crs_monomial = Crs::<Bn256, CrsForMonomialForm>::crs_42(1, &worker);
    let crs_lagrange = Crs::<Bn256, CrsForLagrangeForm>::crs_42(1, &worker);
    let crs_coset = Crs::<Bn256, CrsForLagrangeFormOnCoset>::crs_42(1, &worker);
    println!("Monomial = {:?}", crs_monomial.g1_bases);
    println!("Lagrange = {:?}", crs_lagrange.g1_bases);
    println!("Lagrange coset = {:?}", crs_coset.g1_bases);
}
#[test]
fn test_transformations_of_crs_2() {
// Size-2 domain: derive expected Lagrange/coset bases by hand (comments
// below) and check all three CRS forms commit a + bx to the same point.
let worker = Worker::new();
let monomial = Crs::<Bn256, CrsForMonomialForm>::crs_42(2, &worker);
let lagrange = Crs::<Bn256, CrsForLagrangeForm>::crs_42(2, &worker);
let lagrange_coset = Crs::<Bn256, CrsForLagrangeFormOnCoset>::crs_42(2, &worker);
println!("Monomial = {:?}", monomial.g1_bases);
println!("Lagrange = {:?}", lagrange.g1_bases);
println!("Lagrange coset = {:?}", lagrange_coset.g1_bases);
// for a poly in a form a + bx
// commitment is a + b*tau
// values on domain are a+b, a-b
// commitment bases are (1+tau)/2, (1-tau)/2
// commitment is (a+b)(1+tau)/2 + (a-b)(1-tau)/2 = a/2 + a*tau/2 + b/2 + b*tau/2 + a/2 - a*tau/2 - b/2 + b*tau/2 = a + tau*b
// valus on coset are a + gen*b, a - gen*b
// commitment is a*(b_0 + b_1) + gen*b*(b_0 - b_1) = a * tau*b
// so bases must be b_0 + b_1 = 1 and b_0 - b_1 = tau / gen
// so b_0 = 1 + tau/gen/2, b_1 = 1 - tau/gen/2
let one = Fr::one();
let mut two = Fr::one();
two.double();
let poly = Polynomial::<Fr, Coefficients>::from_coeffs(vec![one, two]).unwrap();
let values = poly.clone().fft(&worker);
let values_on_coset = poly.clone().coset_fft(&worker);
// first coset value must be a + gen*b
let mut tmp = Fr::multiplicative_generator();
tmp.mul_assign(&two);
tmp.add_assign(&one);
assert!(tmp == values_on_coset.as_ref()[0]);
let commitment = commit_using_monomials(&poly, &monomial, &worker).unwrap();
let commitment_values = commit_using_values(&values, &lagrange, &worker).unwrap();
let commitment_values_on_coset = commit_using_values_on_coset(&values_on_coset, &lagrange_coset, &worker).unwrap();
assert!(commitment == commitment_values);
assert!(commitment == commitment_values_on_coset);
}
#[test]
fn test_transformations_of_crs_4() {
    // A degree-3 polynomial must commit to the same point under the
    // monomial, Lagrange and coset-Lagrange forms of the CRS.
    let worker = Worker::new();
    let monomial_crs = Crs::<Bn256, CrsForMonomialForm>::crs_42(4, &worker);
    let lagrange_crs = Crs::<Bn256, CrsForLagrangeForm>::crs_42(4, &worker);
    let coset_crs = Crs::<Bn256, CrsForLagrangeFormOnCoset>::crs_42(4, &worker);
    let one = Fr::one();
    let mut two = Fr::one();
    two.double();
    let poly = Polynomial::<Fr, Coefficients>::from_coeffs(vec![one, two, one, two]).unwrap();
    let values = poly.clone().fft(&worker);
    let coset_values = poly.clone().coset_fft(&worker);
    let c_monomial = commit_using_monomials(&poly, &monomial_crs, &worker).unwrap();
    let c_values = commit_using_values(&values, &lagrange_crs, &worker).unwrap();
    let c_coset = commit_using_values_on_coset(&coset_values, &coset_crs, &worker).unwrap();
    assert!(c_monomial == c_values);
    assert!(c_monomial == c_coset);
}
#[test]
fn test_transformations_of_crs_large() {
    // Same consistency check as the small cases, on a 1024-point domain.
    let worker = Worker::new();
    let size = 1024;
    let monomial_crs = Crs::<Bn256, CrsForMonomialForm>::crs_42(size, &worker);
    let lagrange_crs = Crs::<Bn256, CrsForLagrangeForm>::crs_42(size, &worker);
    let coset_crs = Crs::<Bn256, CrsForLagrangeFormOnCoset>::crs_42(size, &worker);
    let mut two = Fr::one();
    two.double();
    let poly = Polynomial::<Fr, Coefficients>::from_coeffs(vec![two; size]).unwrap();
    let values = poly.clone().fft(&worker);
    let coset_values = poly.clone().coset_fft(&worker);
    let c_monomial = commit_using_monomials(&poly, &monomial_crs, &worker).unwrap();
    let c_values = commit_using_values(&values, &lagrange_crs, &worker).unwrap();
    let c_coset = commit_using_values_on_coset(&coset_values, &coset_crs, &worker).unwrap();
    assert!(c_monomial == c_values);
    assert!(c_monomial == c_coset);
}
#[test]
fn test_opening_large() {
// End-to-end check on a 1024-point domain: evaluation, commitment and
// opening must agree across monomial, Lagrange and coset-Lagrange forms,
// and the resulting proof must pass the pairing-based verification.
let worker = Worker::new();
let size = 1024;
let monomial = Crs::<Bn256, CrsForMonomialForm>::crs_42(size, &worker);
let lagrange = Crs::<Bn256, CrsForLagrangeForm>::crs_42(size, &worker);
let lagrange_coset = Crs::<Bn256, CrsForLagrangeFormOnCoset>::crs_42(size, &worker);
let mut two = Fr::one();
two.double();
let poly = Polynomial::<Fr, Coefficients>::from_coeffs(vec![two; size]).unwrap();
let values = poly.clone().fft(&worker);
let values_on_coset = poly.clone().coset_fft(&worker);
let z = Fr::from_str("1337").unwrap();
// all three evaluation strategies must give the same value at z
let poly_at_z = poly.evaluate_at(&worker, z);
let values_at_z = values.barycentric_evaluate_at(&worker, z).unwrap();
let valus_on_coset_at_z = values_on_coset.barycentric_over_coset_evaluate_at(&worker, z, &Fr::multiplicative_generator()).unwrap();
assert!(poly_at_z == values_at_z);
assert!(poly_at_z == valus_on_coset_at_z);
// commitments must coincide across CRS forms
let commitment = commit_using_monomials(&poly, &monomial, &worker).unwrap();
let commitment_values = commit_using_values(&values, &lagrange, &worker).unwrap();
let commitment_values_on_coset = commit_using_values_on_coset(&values_on_coset, &lagrange_coset, &worker).unwrap();
assert!(commitment == commitment_values);
assert!(commitment == commitment_values_on_coset);
// opening proofs must coincide, and the proof must verify
let opening_poly = open_from_monomials(&poly, z, poly_at_z, &monomial, &worker).unwrap();
let opening_values = open_from_values(&values, z, poly_at_z, &lagrange, &worker).unwrap();
let opening_values_on_coset = open_from_values_on_coset(&values_on_coset, Fr::multiplicative_generator(), z, poly_at_z, &lagrange_coset, &worker).unwrap();
assert!(opening_poly == opening_values);
assert!(opening_poly == opening_values_on_coset);
let valid = is_valid_opening::<Bn256>(commitment, z, poly_at_z, opening_poly, monomial.g2_monomial_bases[1]);
assert!(valid);
}
#[test]
#[ignore] // depends on a machine-local Ignition transcript path; run manually
fn test_open_ignition_setup() {
    // Reads the full Ignition CRS and writes truncated keys of sizes 2^20..2^26.
    let large_setup = make_crs_from_ignition_transcript("/Users/alexvlasov/Downloads/setup").unwrap();
    let base_path = std::path::Path::new("/Users/alexvlasov/Downloads/setup/processed");
    for n in 20..=26 {
        let full_path = base_path.join(&format!("setup_2^{}.key", n));
        println!("Opening {}", full_path.to_string_lossy());
        let file = std::fs::File::create(full_path).unwrap();
        let size = 1 << n;
        // truncation keeps a prefix of the G1 powers; the G2 part is shared
        let truncated_key = Crs::<Bn256, CrsForMonomialForm> {
            g1_bases: Arc::new(large_setup.g1_bases[..size].to_vec()),
            g2_monomial_bases: large_setup.g2_monomial_bases.clone(),
            _marker: std::marker::PhantomData
        };
        let mut writer = std::io::BufWriter::with_capacity(1 << 24, file);
        truncated_key.write(&mut writer).unwrap();
    }
}
#[test]
#[ignore] // depends on machine-local key files produced by test_open_ignition_setup
fn transform_ignition_setup() {
    // Converts each truncated monomial-form key into Lagrange form on disk.
    let base_path = std::path::Path::new("/Users/alexvlasov/Downloads/setup/processed");
    let worker = crate::worker::Worker::new();
    for n in 20..=26 {
        let full_path = base_path.join(&format!("setup_2^{}.key", n));
        println!("Opening {}", full_path.to_string_lossy());
        let file = std::fs::File::open(full_path).unwrap();
        let mut reader = std::io::BufReader::with_capacity(1 << 24, file);
        let monomial_form = Crs::<Bn256, CrsForMonomialForm>::read(&mut reader).unwrap();
        let size = 1 << n;
        let lagrange = Crs::<Bn256, CrsForLagrangeForm>::from_powers(&monomial_form, size, &worker);
        let full_path = base_path.join(&format!("setup_2^{}_lagrange.key", n));
        println!("Opening {}", full_path.to_string_lossy());
        let file = std::fs::File::create(full_path).unwrap();
        let mut writer = std::io::BufWriter::with_capacity(1 << 24, file);
        lagrange.write(&mut writer).unwrap();
    }
}
#[test]
fn test_crs_serialization() {
    // Round-trip a monomial-form CRS through the binary format and compare.
    let worker = Worker::new();
    let original = Crs::<Bn256, CrsForMonomialForm>::crs_42(1024, &worker);
    let mut buffer = Vec::with_capacity(1 << 28);
    original.write(&mut buffer).expect("must serialize CRS");
    let restored = Crs::<Bn256, CrsForMonomialForm>::read(&buffer[..]).expect("must deserialize CRS");
    assert!(restored == original);
}
use rand::{Rng};
/// Deterministic pseudo-random field elements (fixed XorShift seed) for tests.
pub(crate) fn make_random_field_elements<F: PrimeField>(
worker: &Worker,
num_elements: usize,
) -> Vec<F> {
use rand::{XorShiftRng, SeedableRng, Rand, Rng, ChaChaRng};
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
make_random_field_elements_for_rng(worker, num_elements, rng)
}
/// Fills a vector with `num_elements` random field elements in parallel.
/// Each worker chunk uses its own ChaCha rng seeded from the caller's rng,
/// so the output is deterministic for a deterministic input rng.
pub(crate) fn make_random_field_elements_for_rng<F: PrimeField, R: Rng>(
worker: &Worker,
num_elements: usize,
mut rng: R
) -> Vec<F> {
let mut result = vec![F::zero(); num_elements];
use rand::{XorShiftRng, SeedableRng, Rand, Rng, ChaChaRng};
worker.scope(result.len(), |scope, chunk| {
for r in result.chunks_mut(chunk)
{
// derive an independent per-chunk rng before moving into the thread
let seed: [u32; 4] = rng.gen();
let subrng = ChaChaRng::from_seed(&seed);
scope.spawn(move |_| {
let mut subrng = subrng;
for r in r.iter_mut() {
*r = subrng.gen();
}
});
}
});
result
}
/// Deterministic pseudo-random curve points (fixed XorShift seed) for tests.
fn make_random_g1_points<G: CurveAffine>(
worker: &Worker,
num_elements: usize,
) -> Vec<G> {
use rand::{XorShiftRng, SeedableRng, Rand, Rng, ChaChaRng};
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
make_random_g1_points_for_rng(worker, num_elements, rng)
}
/// Fills a vector with `num_elements` random affine points in parallel.
/// Points are sampled in projective form per-chunk (ChaCha rng seeded from
/// the caller's rng) and converted to affine.
fn make_random_g1_points_for_rng<G: CurveAffine, R: Rng>(
worker: &Worker,
num_elements: usize,
mut rng: R
) -> Vec<G> {
let mut result = vec![G::zero(); num_elements];
use rand::{XorShiftRng, SeedableRng, Rand, Rng, ChaChaRng};
worker.scope(result.len(), |scope, chunk| {
for r in result.chunks_mut(chunk)
{
// derive an independent per-chunk rng before moving into the thread
let seed: [u32; 4] = rng.gen();
let subrng = ChaChaRng::from_seed(&seed);
scope.spawn(move |_| {
let mut subrng = subrng;
for r in r.iter_mut() {
let p: G::Projective = subrng.gen();
*r = p.into_affine();
}
});
}
});
result
}
#[test]
#[ignore]
fn test_multiexp_performance_on_large_data() {
// Benchmark (not a correctness test): times dense_multiexp for sizes
// 2^23..2^26 across several worker CPU counts. Run manually on big machines.
use crate::pairing::bn256::{Bn256, Fr};
use std::time::Instant;
let max_size = 1 << 26;
let worker = Worker::new();
assert!(worker.cpus >= 16, "should be tested only on large machines");
println!("Generating scalars");
let scalars = make_random_field_elements::<Fr>(&worker, max_size);
println!("Generating points");
let points = make_random_g1_points::<<Bn256 as Engine>::G1Affine>(&worker, max_size);
println!("Done");
for size in vec![1 << 23, 1 << 24, 1 << 25, 1 << 26] {
for cpus in vec![16, 32, 48, 64] {
// for cpus in vec![16, 24, 32] {
let s = &scalars[..size];
let g = &points[..size];
let subworker = Worker::new_with_cpus(cpus);
let now = Instant::now();
// copy-paste, but ok
let subtime = Instant::now();
let scalars_repr = super::elements_into_representations::<Bn256>(
&subworker,
s
).unwrap();
println!("Scalars conversion taken {:?}", subtime.elapsed());
let subtime = Instant::now();
let _ = multiexp::dense_multiexp::<<Bn256 as Engine>::G1Affine>(
&subworker,
g,
&scalars_repr
).unwrap();
println!("Multiexp taken {:?}", subtime.elapsed());
println!("Total time taken for {} points on {} cpus = {:?}", size, cpus, now.elapsed());
}
}
}
#[test]
#[ignore]
fn test_future_based_multiexp_performance_on_large_data() {
// Benchmark twin of test_multiexp_performance_on_large_data, exercising the
// future-based multiexp implementation instead of the dense one.
use crate::pairing::bn256::{Bn256, Fr};
use std::time::Instant;
use std::sync::Arc;
let max_size = 1 << 26;
let worker = Worker::new();
assert!(worker.cpus >= 16, "should be tested only on large machines");
println!("Generating scalars");
let scalars = make_random_field_elements::<Fr>(&worker, max_size);
println!("Generating points");
let points = make_random_g1_points::<<Bn256 as Engine>::G1Affine>(&worker, max_size);
println!("Done");
for size in vec![1 << 23, 1 << 24, 1 << 25, 1 << 26] {
for cpus in vec![16, 32, 48, 64] {
// for cpus in vec![16, 24, 32] {
let s = &scalars[..size];
let g = points[..size].to_vec();
let g = Arc::from(g);
let subworker = Worker::new_with_cpus(cpus);
let now = Instant::now();
// copy-paste, but ok
let subtime = Instant::now();
let scalars_repr = super::elements_into_representations::<Bn256>(
&subworker,
s
).unwrap();
let scalars_repr = Arc::from(scalars_repr);
println!("Scalars conversion taken {:?}", subtime.elapsed());
let subtime = Instant::now();
let _ = multiexp::future_based_multiexp::<<Bn256 as Engine>::G1Affine>(
&subworker,
Arc::clone(&g),
Arc::clone(&scalars_repr)
).wait();
println!("Future based multiexp taken {:?}", subtime.elapsed());
println!("Total time taken for {} points on {} cpus = {:?}", size, cpus, now.elapsed());
}
}
}
#[test]
#[ignore]
fn test_long_naive_division() {
// Benchmark: times divide_single (synthetic division) on 2^23..2^26 inputs.
use crate::pairing::bn256::{Bn256, Fr};
use std::time::Instant;
let max_size = 1 << 26;
let worker = Worker::new();
assert!(worker.cpus >= 16, "should be tested only on large machines");
println!("Generating scalars");
let scalars = make_random_field_elements::<Fr>(&worker, max_size);
let divide_at = Fr::from_str("1234567890").unwrap();
println!("Done");
for size in vec![1 << 23, 1 << 24, 1 << 25, 1 << 26] {
let s = &scalars[..size];
let now = Instant::now();
let _ = divide_single::<Bn256>(s, divide_at);
println!("Total time taken for {} points division = {:?}", size, now.elapsed());
}
}
/// Writes affine G1 points to `dst` in the layout the FPGA test bench
/// consumes: for each point in order, the raw (Montgomery-form) X then Y
/// coordinate, little-endian.
///
/// Returns any I/O error from the underlying writer.
fn serialize_affine_points_for_fpga<E: Engine, W: std::io::Write>(
    points: &[E::G1Affine],
    mut dst: W
) -> Result<(), std::io::Error> {
    use crate::pairing::ff::PrimeFieldRepr;
    // Debug aid only; guarded so an empty slice no longer panics here.
    if let Some(first) = points.first() {
        println!("First point = {}", first);
    }
    for p in points.iter() {
        // `into_xy_unchecked` skips the on-curve check: fine for test vectors
        // we generated ourselves.
        let (x, y) = p.into_xy_unchecked();
        let repr = x.into_raw_repr();
        repr.write_le(&mut dst)?;
        let repr = y.into_raw_repr();
        repr.write_le(&mut dst)?;
    }
    Ok(())
}
/// Writes field scalars to `dst` for the FPGA test bench: each scalar's
/// canonical representation (`into_repr`), little-endian, in order.
///
/// Returns any I/O error from the underlying writer.
fn serialize_scalars_for_fpga<E: Engine, W: std::io::Write>(
    scalars: &[E::Fr],
    mut dst: W
) -> Result<(), std::io::Error> {
    use crate::pairing::ff::PrimeFieldRepr;
    // Debug aid only; guarded so an empty slice no longer panics here.
    if let Some(first) = scalars.first() {
        println!("First scalar = {}", first);
    }
    for s in scalars.iter() {
        let repr = s.into_repr();
        repr.write_le(&mut dst)?;
    }
    Ok(())
}
/// Writes projective G1 points (e.g. simulated buckets) to `dst` for the
/// FPGA test bench: for each point in order, the raw (Montgomery-form)
/// X, Y, Z coordinates, little-endian.
///
/// Returns any I/O error from the underlying writer.
fn serialize_projective_points_for_fpga<E: Engine, W: std::io::Write>(
    points: &[E::G1],
    mut dst: W
) -> Result<(), std::io::Error> {
    use crate::pairing::ff::PrimeFieldRepr;
    // Debug aid only; previously `points[1]` panicked for slices shorter
    // than two elements.
    if let Some(second) = points.get(1) {
        let (x, y, z) = second.into_xyz_unchecked();
        println!("Second bucket (for scalar = 1): X = {}, Y = {}, Z = {}", x, y, z);
    }
    for p in points.iter() {
        let (x, y, z) = p.into_xyz_unchecked();
        let repr = x.into_raw_repr();
        repr.write_le(&mut dst)?;
        let repr = y.into_raw_repr();
        repr.write_le(&mut dst)?;
        let repr = z.into_raw_repr();
        repr.write_le(&mut dst)?;
    }
    Ok(())
}
/// Reference ("golden") model of the first bucket-accumulation pass of a
/// windowed multiexp with window width `c`: every bucket starts from the
/// same blinding point, then each input point is added into the bucket
/// selected by the lowest `c` bits of its scalar.
fn simulate_first_buckets<E: Engine>(points: &[E::G1Affine], scalars: &[E::Fr], c: usize, random_point: E::G1Affine) -> Vec<E::G1> {
    use crate::pairing::ff::ScalarEngine;
    use crate::pairing::ff::PrimeFieldRepr;

    // Window 0: no bits are skipped before masking.
    let window_offset = 0;
    let low_bits_mask = (1u64 << c) - 1u64;

    // All 2^c buckets are seeded with the blinding point of unknown
    // discrete log rather than the identity.
    let blinding = random_point.into_projective();
    let mut buckets = vec![blinding; 1 << c];

    for (scalar, point) in scalars.iter().zip(points.iter()) {
        let mut repr = scalar.into_repr();
        repr.shr(window_offset);
        let bucket_index = (repr.as_ref()[0] & low_bits_mask) as usize;
        buckets[bucket_index].add_assign_mixed(point);
    }

    buckets
}
/// Benchmark entry point: runs the multiexp suite over the standard bn256 curve.
fn test_multiexp_bn254(max_size: usize, sizes: Vec<usize>, num_cpus: Vec<usize>) {
    test_multiexps_inner::<crate::pairing::bn256::Bn256>(max_size, sizes, num_cpus);
}
/// Benchmark entry point: runs the multiexp suite over the compact bn256 curve.
fn test_multiexp_bn254_compact(max_size: usize, sizes: Vec<usize>, num_cpus: Vec<usize>) {
    test_multiexps_inner::<crate::pairing::compact_bn256::Bn256>(max_size, sizes, num_cpus);
}
// Core benchmark loop shared by the bn254 entry points above: for every
// requested (size, cpus) combination it converts the scalars to representation
// form and times `dense_multiexp`. The commented-out sections below are
// alternative implementations kept around for ad-hoc comparison runs.
fn test_multiexps_inner<E: Engine>(max_size: usize, sizes: Vec<usize>, num_cpus: Vec<usize>) {
use std::time::Instant;
use std::sync::Arc;
// Generate once at the maximum size; each run below reuses a prefix slice.
let worker = Worker::new();
println!("Generating scalars");
let scalars = make_random_field_elements::<E::Fr>(&worker, max_size);
println!("Generating points");
let points = make_random_g1_points::<E::G1Affine>(&worker, max_size);
println!("Done");
for size in sizes {
for &cpus in &num_cpus {
let s = &scalars[..size];
let g = points[..size].to_vec();
let subworker = Worker::new_with_cpus(cpus);
let scalars_repr = super::elements_into_representations::<E>(
&subworker,
s
).unwrap();
// Result intentionally discarded: only the wall-clock time matters.
let subtime = Instant::now();
let _ = multiexp::dense_multiexp::<E::G1Affine>(
&subworker,
&g,
&scalars_repr
).unwrap();
println!("Dense simple multiexp of size {} taken {:?} on {} cpus", size, subtime.elapsed(), cpus);
// let subtime = Instant::now();
// let _ = multiexp::dense_unrolled_multiexp_with_prefetch::<<Bn256 as Engine>::G1Affine>(
// &subworker,
// &g,
// &scalars_repr
// ).unwrap();
// println!("Dense unrolled multiexp of size {} taken {:?} on {} cpus", size, subtime.elapsed(), cpus);
// let subtime = Instant::now();
// let _ = multiexp::dense_multiexp_uniform::<E::G1Affine>(
// &subworker,
// &g,
// &scalars_repr
// ).unwrap();
// println!("Dense uniform multiexp of size {} taken {:?} on {} cpus", size, subtime.elapsed(), cpus);
// let subtime = Instant::now();
// let _ = multiexp::stack_allocated_dense_multiexp::<<Bn256 as Engine>::G1Affine>(
// &subworker,
// &g,
// &scalars_repr
// ).unwrap();
// println!("Dense stack multiexp of size {} taken {:?} on {} cpus", size, subtime.elapsed(), cpus);
// let subtime = Instant::now();
// let _ = multiexp::map_reduce_multiexp::<E::G1Affine>(
// &subworker,
// &g,
// &scalars_repr
// ).unwrap();
// println!("Map reduce multiexp of size {} taken {:?} on {} cpus", size, subtime.elapsed(), cpus);
// let subtime = Instant::now();
// let _ = multiexp::producer_consumer_dense_multiexp::<<Bn256 as Engine>::G1Affine>(
// &subworker,
// &g,
// &scalars_repr
// ).unwrap();
// println!("ProdCons stack multiexp of size {} taken {:?} on {} cpus", size, subtime.elapsed(), cpus);
// let subtime = Instant::now();
// let _ = multiexp::stack_allocated_uncompensated_dense_multiexp::<<Bn256 as Engine>::G1Affine>(
// &subworker,
// &g,
// &scalars_repr
// ).unwrap();
// println!("Uncompensated stack multiexp of size {} taken {:?} on {} cpus", size, subtime.elapsed(), cpus);
// let subtime = Instant::now();
// let _ = multiexp::dense_multiexp_with_manual_unrolling::<<Bn256 as Engine>::G1Affine>(
// &subworker,
// &g,
// &scalars_repr
// ).unwrap();
// println!("Dense manually unrolled multiexp taken {:?} on {} cpus", subtime.elapsed(), cpus);
// let g = Arc::from(g);
// let scalars_repr = Arc::from(scalars_repr);
// let subtime = Instant::now();
// let _ = multiexp::future_based_multiexp::<<Bn256 as Engine>::G1Affine>(
// &subworker,
// Arc::clone(&g),
// Arc::clone(&scalars_repr)
// ).wait();
// println!("Future based multiexp taken {:?} on {} cpus", subtime.elapsed(), cpus);
}
}
}
// fn test_multiexps_over_window_sizes_bn254(max_size: usize, sizes: Vec<usize>, num_cpus: Vec<usize>, windows: Vec<usize>) {
// use crate::pairing::bn256::Bn256;
// test_multiexps_over_window_sizes::<Bn256>(max_size, sizes, num_cpus, windows);
// }
// fn test_multiexps_over_window_sizes_bn254_compact(max_size: usize, sizes: Vec<usize>, num_cpus: Vec<usize>, windows: Vec<usize>) {
// use crate::pairing::compact_bn256::Bn256;
// test_multiexps_over_window_sizes::<Bn256>(max_size, sizes, num_cpus, windows);
// }
// fn test_multiexps_over_window_sizes<E: Engine>(max_size: usize, sizes: Vec<usize>, num_cpus: Vec<usize>, windows: Vec<usize>) {
// use std::time::Instant;
// use std::sync::Arc;
// let worker = Worker::new();
// println!("Generating scalars");
// let scalars = make_random_field_elements::<E::Fr>(&worker, max_size);
// println!("Generating points");
// let points = make_random_g1_points::<E::G1Affine>(&worker, max_size);
// println!("Done");
// for size in sizes {
// for &cpus in &num_cpus {
// let mut subresults = vec![];
// for &window in &windows {
// let s = &scalars[..size];
// let g = points[..size].to_vec();
// let subworker = Worker::new_with_cpus(cpus);
// let scalars_repr = super::elements_into_representations::<E>(
// &subworker,
// s
// ).unwrap();
// let subtime = Instant::now();
// let window = window as u32;
// let _ = multiexp::map_reduce_multiexp_over_fixed_window::<E::G1Affine>(
// &subworker,
// &g,
// &scalars_repr,
// window
// ).unwrap();
// subresults.push((window, subtime.elapsed().as_millis()));
// // println!("Map reduce multiexp of size {} taken {:?} on {} cpus with window size = {}", size, subtime.elapsed(), cpus, window);
// }
// subresults.sort_by(|a, b| {
// a.1.cmp(&b.1)
// });
// println!("Map reduce multiexp of size {} on {} CPUs:", size, cpus);
// for (window, time_ms) in &subresults[0..3] {
// println!("Window = {}, time = {} ms", window, time_ms);
// }
// }
// }
// }
// fn test_buffered_multiexps_bn254_compact(max_size: usize, sizes: Vec<usize>, num_cpus: Vec<usize>, windows: Vec<usize>, buffer_sizes: Vec<usize>) {
// use crate::pairing::compact_bn256::Bn256;
// test_buffered_multiexp::<Bn256>(max_size, sizes, num_cpus, windows, buffer_sizes);
// }
// fn test_buffered_multiexp<E: Engine>(max_size: usize, sizes: Vec<usize>, num_cpus: Vec<usize>, windows: Vec<usize>, buffer_sizes: Vec<usize>) {
// use std::time::Instant;
// use std::sync::Arc;
// let worker = Worker::new();
// println!("Generating scalars");
// let scalars = make_random_field_elements::<E::Fr>(&worker, max_size);
// println!("Generating points");
// let points = make_random_g1_points::<E::G1Affine>(&worker, max_size);
// println!("Done");
// for size in sizes {
// for &cpus in &num_cpus {
// for &buffer_size in &buffer_sizes {
// let mut subresults = vec![];
// for &window in &windows {
// let s = &scalars[..size];
// let g = points[..size].to_vec();
// let subworker = Worker::new_with_cpus(cpus);
// let scalars_repr = super::elements_into_representations::<E>(
// &subworker,
// s
// ).unwrap();
// let subtime = Instant::now();
// let window = window as u32;
// let _ = multiexp::buffered_multiexp_over_fixed_window_and_buffer_size::<E::G1Affine>(
// &subworker,
// &g,
// &scalars_repr,
// window,
// buffer_size
// ).unwrap();
// subresults.push((window, subtime.elapsed().as_millis()));
// // println!("Map reduce multiexp of size {} taken {:?} on {} cpus with window size = {}", size, subtime.elapsed(), cpus, window);
// }
// subresults.sort_by(|a, b| {
// a.1.cmp(&b.1)
// });
// println!("Map reduce multiexp of size {} on {} CPUs with buffer size {}:", size, cpus, buffer_size);
// for (window, time_ms) in &subresults[0..3] {
// println!("Window = {}, time = {} ms", window, time_ms);
// }
// }
// }
// }
// }
// fn test_future_based_multiexps_over_window_sizes_bn254_compact(max_size: usize, sizes: Vec<usize>, num_cpus: Vec<usize>, windows: Vec<usize>) {
// use crate::pairing::compact_bn256::Bn256;
// test_future_based_multiexps_over_window_sizes::<Bn256>(max_size, sizes, num_cpus, windows);
// }
// fn test_future_based_multiexps_over_window_sizes_bn254(max_size: usize, sizes: Vec<usize>, num_cpus: Vec<usize>, windows: Vec<usize>) {
// use crate::pairing::bn256::Bn256;
// test_future_based_multiexps_over_window_sizes::<Bn256>(max_size, sizes, num_cpus, windows);
// }
// #[ignore]
// #[test]
// fn test_optimal_bn254_multiexp() {
// // use crate::pairing::bn256::Bn256;
// use crate::pairing::compact_bn256::Bn256;
// test_optimal_multiexp::<Bn256>(2, 1 << 24, 24, 12, true);
// test_optimal_multiexp::<Bn256>(2, 1 << 24, 24, 12, false);
// test_optimal_multiexp::<Bn256>(2, 1 << 25, 24, 11, true);
// test_optimal_multiexp::<Bn256>(2, 1 << 25, 24, 11, false);
// }
// fn test_optimal_multiexp<E: Engine>(max_parallel_jobs: usize, max_size: usize, cpus_per_job: usize, window: usize, same_base: bool) {
// use futures::executor::block_on;
// use futures::future::join_all;
// use std::time::Instant;
// use std::sync::Arc;
// use crate::source::FullDensity;
// let mut bases = vec![];
// let mut scalars = vec![];
// let worker = Worker::new();
// assert!(max_parallel_jobs >= 1);
// use rand::{XorShiftRng, SeedableRng, Rand, Rng, ChaChaRng};
// let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
// for _ in 0..max_parallel_jobs {
// let seed: [u32; 4] = rng.gen();
// let mut subrng = ChaChaRng::from_seed(&seed);
// let sc = make_random_field_elements_for_rng::<E::Fr, _>(&worker, max_size, &mut subrng);
// let p = make_random_g1_points_for_rng::<E::G1Affine, _>(&worker, max_size, &mut subrng);
// let s = super::elements_into_representations::<E>(
// &worker,
// &sc
// ).unwrap();
// bases.push(Arc::from(p));
// scalars.push(Arc::from(s));
// }
// for num_jobs in 1..=max_parallel_jobs {
// let mut jobs = vec![];
// let subworker = Worker::new_with_cpus(cpus_per_job * num_jobs);
// let subtime = Instant::now();
// let window = window as u32;
// for idx in 0..num_jobs {
// let id = if same_base {
// 0
// } else {
// idx
// };
// let p = Arc::clone(&bases[id]);
// let s = Arc::clone(&scalars[idx]);
// let job = multiexp::future_based_dense_multiexp_over_fixed_width_windows(
// &subworker,
// p,
// s,
// window
// );
// // let job = multiexp::multiexp_with_fixed_width::<_, _, _, _>(
// // &subworker,
// // (p, 0),
// // FullDensity,
// // s,
// // window
// // );
// jobs.push(job);
// }
// let joiner = join_all(jobs);
// let _ = block_on(joiner);
// let elapsed = subtime.elapsed();
// if same_base {
// print!("For same bases: ");
// } else {
// print!("For different bases: ");
// }
// println!("{} jobs of size {} with {} CPUs per job and {} bits window taken {:?}", num_jobs, max_size, cpus_per_job, window, elapsed);
// }
// }
// #[ignore]
// #[test]
// fn test_l3_shared_multiexp_bn254() {
// // use crate::pairing::bn256::Bn256;
// use crate::pairing::compact_bn256::Bn256;
// test_l3_shared_multiexp::<Bn256>(4, 1 << 24, 24, 12);
// test_l3_shared_multiexp::<Bn256>(4, 1 << 25, 24, 12);
// test_optimal_multiexp::<Bn256>(1, 1 << 24, 24, 12, true);
// test_optimal_multiexp::<Bn256>(1, 1 << 25, 24, 12, true);
// test_optimal_multiexp::<Bn256>(1, 1 << 24, 16, 16, true);
// test_optimal_multiexp::<Bn256>(1, 1 << 25, 16, 16, true);
// test_optimal_multiexp::<Bn256>(2, 1 << 24, 24, 12, true);
// test_optimal_multiexp::<Bn256>(2, 1 << 25, 24, 12, true);
// test_optimal_multiexp::<Bn256>(3, 1 << 24, 16, 16, true);
// test_optimal_multiexp::<Bn256>(3, 1 << 25, 16, 16, true);
// test_optimal_multiexp::<Bn256>(4, 1 << 24, 16, 16, true);
// test_optimal_multiexp::<Bn256>(4, 1 << 25, 16, 16, true);
// }
// fn test_l3_shared_multiexp<E: Engine>(max_parallel_jobs: usize, max_size: usize, cpus_per_job: usize, window: usize) {
// use std::time::Instant;
// let mut bases = vec![];
// let mut scalars = vec![];
// let worker = Worker::new();
// use rand::{XorShiftRng, SeedableRng, Rand, Rng, ChaChaRng};
// let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
// for _ in 0..max_parallel_jobs {
// let seed: [u32; 4] = rng.gen();
// let mut subrng = ChaChaRng::from_seed(&seed);
// let sc = make_random_field_elements_for_rng::<E::Fr, _>(&worker, max_size, &mut subrng);
// let p = make_random_g1_points_for_rng::<E::G1Affine, _>(&worker, max_size, &mut subrng);
// let s = super::elements_into_representations::<E>(
// &worker,
// &sc
// ).unwrap();
// bases.push(p);
// scalars.push(s);
// }
// for j in 1..=max_parallel_jobs {
// let subworker = Worker::new_with_cpus(cpus_per_job * j);
// let subtime = Instant::now();
// let mut exps = vec![];
// for i in 0..j {
// exps.push(&scalars[i][..]);
// }
// println!("Running for {} parallel job", j);
// let _ = multiexp::l3_shared_multexp(
// &subworker,
// &bases[0][..],
// &exps[..],
// ).unwrap();
// let elapsed = subtime.elapsed();
// println!("L3 shared multiexp for {} jobs of size {} with {} CPUs per job and {} bits window taken {:?}", j, max_size, cpus_per_job, window, elapsed);
// }
// }
// Benchmark helper: for every (size, cpus) pair, times two future-based
// multiexp implementations over all candidate window widths, then prints
// the three fastest windows for each implementation.
fn test_future_based_multiexps_over_window_sizes<E: Engine>(max_size: usize, sizes: Vec<usize>, num_cpus: Vec<usize>, windows: Vec<usize>) {
    use std::time::Instant;
    use std::sync::Arc;
    use crate::source::FullDensity;
    // Generate once at the maximum size; each run below reuses a prefix slice.
    let worker = Worker::new();
    println!("Generating scalars");
    let scalars = make_random_field_elements::<E::Fr>(&worker, max_size);
    println!("Generating points");
    let points = make_random_g1_points::<E::G1Affine>(&worker, max_size);
    println!("Done");
    for size in sizes {
        for &cpus in &num_cpus {
            let mut subresults = vec![];
            let mut alt_subresults = vec![];
            let s = &scalars[..size];
            let g = points[..size].to_vec();
            let scalars_repr = super::elements_into_representations::<E>(
                &worker,
                s
            ).unwrap();
            // Shared ownership for the future-based APIs.
            let g = Arc::from(g);
            let s = Arc::from(scalars_repr);
            for &window in &windows {
                let subworker = Worker::new_with_cpus(cpus);
                let subtime = Instant::now();
                let window = window as u32;
                let _ = multiexp::future_based_dense_multiexp_over_fixed_width_windows(
                    &subworker,
                    Arc::clone(&g),
                    Arc::clone(&s),
                    window
                ).wait();
                alt_subresults.push((window, subtime.elapsed().as_millis()));
                let subtime = Instant::now();
                let _ = multiexp::multiexp_with_fixed_width::<_, _, _, _>(
                    &subworker,
                    (Arc::clone(&g), 0),
                    FullDensity,
                    Arc::clone(&s),
                    window
                ).wait();
                subresults.push((window, subtime.elapsed().as_millis()));
            }
            // Sort each result set by elapsed milliseconds, fastest first.
            subresults.sort_by(|a, b| {
                a.1.cmp(&b.1)
            });
            alt_subresults.sort_by(|a, b| {
                a.1.cmp(&b.1)
            });
            // `take(3)` instead of indexing `[0..3]`: no panic when fewer
            // than three window widths were benchmarked.
            println!("Standard future based multiexp of size {} on {} CPUs:", size, cpus);
            for (window, time_ms) in subresults.iter().take(3) {
                println!("Window = {}, time = {} ms", window, time_ms);
            }
            println!("Tuned future based multiexp of size {} on {} CPUs:", size, cpus);
            for (window, time_ms) in alt_subresults.iter().take(3) {
                println!("Window = {}, time = {} ms", window, time_ms);
            }
        }
    }
}
#[test]
#[ignore]
// Small-size benchmark entry point: 2^20 points on 3/4/6 CPUs, standard bn256.
fn test_different_multiexps() {
test_multiexp_bn254(1<<20, vec![1 << 20], vec![3, 4, 6]);
}
#[test]
#[ignore]
// Large-machine benchmark entry point (requires >= 16 CPUs): 2^23..2^26
// points over the compact bn256 backend; the standard backend call is kept
// commented out for comparison runs.
fn test_large_data_different_multiexps() {
let max_size = 1 << 26;
let worker = Worker::new();
assert!(worker.cpus >= 16, "should be tested only on large machines");
let sizes = vec![1 << 23, 1 << 24, 1 << 25, 1 << 26];
let cpus = vec![8, 12, 16, 24, 32, 48];
// test_multiexp_bn254(max_size, sizes, cpus);
test_multiexp_bn254_compact(max_size, sizes, cpus);
}
// #[test]
// #[ignore]
// fn test_small_data_different_windows() {
// let max_size = 1 << 20;
// let sizes = vec![1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20];
// let cpus = vec![3, 4, 6];
// let windows = vec![7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18];
// test_multiexps_over_window_sizes_bn254(max_size, sizes, cpus, windows);
// }
// #[test]
// #[ignore]
// fn test_large_data_different_windows_multiexp() {
// let max_size = 1 << 26;
// let worker = Worker::new();
// assert!(worker.cpus >= 16, "should be tested only on large machines");
// let sizes = vec![1 << 20, 1 << 21, 1 << 22, 1 << 23, 1 << 24, 1 << 25, 1 << 26];
// let cpus = vec![8, 12, 16, 24, 32, 48];
// let windows = vec![7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18];
// // test_multiexps_over_window_sizes_bn254(max_size, sizes, cpus, windows);
// test_multiexps_over_window_sizes_bn254_compact(max_size, sizes, cpus, windows);
// }
// #[test]
// #[ignore]
// fn test_large_data_buffered_multiexp() {
// let max_size = 1 << 26;
// let worker = Worker::new();
// assert!(worker.cpus >= 16, "should be tested only on large machines");
// let sizes = vec![1 << 20, 1 << 21, 1 << 22, 1 << 23, 1 << 24, 1 << 25, 1 << 26];
// let cpus = vec![8, 12, 16, 24, 32, 48];
// let windows = vec![10, 11, 12, 13, 14, 15, 16];
// let buffer_sizes = vec![4, 8, 16, 32, 64, 128];
// // test_multiexps_over_window_sizes_bn254(max_size, sizes, cpus, windows);
// test_buffered_multiexps_bn254_compact(max_size, sizes, cpus, windows, buffer_sizes);
// }
// #[test]
// #[ignore]
// fn future_based_test_large_data_different_windows() {
// let max_size = 1 << 26;
// let worker = Worker::new();
// assert!(worker.cpus >= 16, "should be tested only on large machines");
// let sizes = vec![1 << 20, 1 << 21, 1 << 22, 1 << 23, 1 << 24, 1 << 25, 1 << 26];
// let cpus = vec![8, 12, 16, 24, 32, 48];
// let windows = vec![7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18];
// // test_future_based_multiexps_over_window_sizes_bn254(max_size, sizes, cpus, windows);
// test_future_based_multiexps_over_window_sizes_bn254_compact(max_size, sizes, cpus, windows);
// }
// Deterministically derives `num_points` G1 points whose discrete logs are
// unknown: seeds a ChaCha RNG from blake2s(dst || seed) and samples points
// from it. Same (dst, seed, num_points) always yields the same points.
fn make_random_points_with_unknown_discrete_log<E: Engine>(
dst: &[u8],
seed: &[u8],
num_points: usize
) -> Vec<E::G1Affine> {
let mut result = vec![];
use rand::{Rng, SeedableRng};
use rand::chacha::ChaChaRng;
// Create an RNG based on the outcome of the random beacon
let mut rng = {
// if we use Blake hasher
let input: Vec<u8> = dst.iter().chain(seed.iter()).cloned().collect();
let h = blake2s_simd::blake2s(&input);
assert!(h.as_bytes().len() == 32);
let mut seed = [0u32; 8];
// NOTE(review): `chunks_exact(8)` over the 32-byte digest yields only 4
// chunks, and `read_u32` consumes just the first 4 bytes of each, so
// seed[4..8] stay zero and half the digest is discarded. Likely intended
// `chunks_exact(4)` — confirm before changing, because a fix alters every
// previously generated test vector.
for (i, chunk) in h.as_bytes().chunks_exact(8).enumerate() {
seed[i] = (&chunk[..]).read_u32::<BigEndian>().expect("digest is large enough for this to work");
}
ChaChaRng::from_seed(&seed)
};
for _ in 0..num_points {
let point: E::G1 = rng.gen();
result.push(point.into_affine());
}
result
}
#[test]
// Generates BLS12-381 FPGA test-vector files (points, scalars, and the
// simulated first bucket pass with window width 13) for sizes 2^6, 2^7, 2^20.
fn produce_fpga_test_vectors() {
    use crate::pairing::ff::ScalarEngine;
    use crate::pairing::bls12_381::Bls12;

    let worker = crate::worker::Worker::new();

    // Deterministic blinding point derived from a fixed public beacon.
    let random_point = make_random_points_with_unknown_discrete_log::<Bls12>(
        &b"fpga_dst"[..],
        &hex::decode(crate::constants::ETH_BLOCK_10_000_000_HASH).unwrap(),
        1
    )[0];
    let (x, y) = random_point.into_xy_unchecked();
    println!("Random point in Montgomery form: X = {}, Y = {}", x.into_raw_repr(), y.into_raw_repr());

    let base_path = std::path::Path::new("./");

    // Creates a file and wraps it in a 16 MiB buffered writer.
    let open_buffered = |path: std::path::PathBuf| {
        let file = std::fs::File::create(path).unwrap();
        std::io::BufWriter::with_capacity(1 << 24, file)
    };

    for n in vec![6, 7, 20] {
        let points_path = base_path.join(&format!("input_points_2^{}.key", n));
        let scalars_path = base_path.join(&format!("input_scalars_2^{}.key", n));
        let buckets_path = base_path.join(&format!("output_buckets_2^{}.key", n));
        println!("Opening {}", points_path.to_string_lossy());

        let mut points_file = open_buffered(points_path);
        let mut scalars_file = open_buffered(scalars_path);
        let mut buckets_file = open_buffered(buckets_path);

        let size = 1 << n;
        let scalars = make_random_field_elements::<<Bls12 as ScalarEngine>::Fr>(&worker, size);
        let points = make_random_g1_points::<<Bls12 as Engine>::G1Affine>(&worker, size);
        let buckets = simulate_first_buckets::<Bls12>(&points, &scalars, 13, random_point);

        serialize_affine_points_for_fpga::<Bls12, _>(&points, &mut points_file).unwrap();
        serialize_scalars_for_fpga::<Bls12, _>(&scalars, &mut scalars_file).unwrap();
        serialize_projective_points_for_fpga::<Bls12, _>(&buckets, &mut buckets_file).unwrap();
    }
}
#[test]
// Generates BN254 FPGA test-vector files for window width 16: input points,
// scalars, the initial bucket state (all buckets set to the blinding point),
// and the simulated first bucket pass, for sizes 2^6, 2^7, 2^20.
fn produce_bn254_fpga_test_vectors() {
use crate::pairing::ff::ScalarEngine;
use crate::pairing::bn256::{Bn256, Fr};
let bucket_width = 16;
let worker = crate::worker::Worker::new();
// Deterministic blinding point derived from a fixed public beacon.
let random_point = make_random_points_with_unknown_discrete_log::<Bn256>(
&b"fpga_dst"[..],
&hex::decode(crate::constants::ETH_BLOCK_10_000_000_HASH).unwrap(),
1
)[0];
let (x, y) = random_point.into_xy_unchecked();
println!("Random point in Montgomery form: X = {}, Y = {}", x.into_raw_repr(), y.into_raw_repr());
let base_path = std::path::Path::new("./");
// Ceiling division: number of windows needed to cover all scalar bits.
let mut num_buckets = (Fr::NUM_BITS as usize) / bucket_width;
if (Fr::NUM_BITS as usize) % bucket_width != 0 {
num_buckets += 1;
}
for n in vec![6, 7, 20] {
let points_path = base_path.join(&format!("bn_254_input_points_2^{}_width_{}.key", n, bucket_width));
let scalars_path = base_path.join(&format!("bn_254_input_scalars_2^{}_width_{}.key", n, bucket_width));
let initial_buckets_path = base_path.join(&format!("bn_254_input_buckets_2^{}_width_{}.key", n, bucket_width));
let buckets_path = base_path.join(&format!("bn_254_output_buckets_2^{}_width_{}.key", n, bucket_width));
println!("Opening {}", points_path.to_string_lossy());
let file = std::fs::File::create(points_path).unwrap();
let mut points_file = std::io::BufWriter::with_capacity(1 << 24, file);
let file = std::fs::File::create(scalars_path).unwrap();
let mut scalars_file = std::io::BufWriter::with_capacity(1 << 24, file);
let file = std::fs::File::create(initial_buckets_path).unwrap();
let mut initial_buckets_file = std::io::BufWriter::with_capacity(1 << 24, file);
let file = std::fs::File::create(buckets_path).unwrap();
let mut buckets_file = std::io::BufWriter::with_capacity(1 << 24, file);
let size = 1 << n;
let scalars = make_random_field_elements::<<Bn256 as ScalarEngine>::Fr>(&worker, size);
let points = make_random_g1_points::<<Bn256 as Engine>::G1Affine>(&worker, size);
// One blinding point per bucket slot across all windows.
let initial_buckets = vec![random_point.into_projective(); num_buckets * (1 << bucket_width)];
let buckets = simulate_first_buckets::<Bn256>(&points, &scalars, bucket_width, random_point);
serialize_affine_points_for_fpga::<Bn256, _>(&points, &mut points_file).unwrap();
serialize_scalars_for_fpga::<Bn256, _>(&scalars, &mut scalars_file).unwrap();
serialize_projective_points_for_fpga::<Bn256, _>(&initial_buckets, &mut initial_buckets_file).unwrap();
serialize_projective_points_for_fpga::<Bn256, _>(&buckets, &mut buckets_file).unwrap();
}
}
#[test]
// Variant of `produce_fpga_test_vectors` with window width 12. Note it reuses
// the same input file names ("input_points_2^{n}.key" / "input_scalars_2^{n}.key"),
// so running both tests overwrites each other's input files; only the bucket
// output file name carries the width.
fn produce_fpga_window_12_test_vectors() {
let width = 12;
use crate::pairing::ff::ScalarEngine;
use crate::pairing::bls12_381::Bls12;
let worker = crate::worker::Worker::new();
// Deterministic blinding point derived from a fixed public beacon.
let random_point = make_random_points_with_unknown_discrete_log::<Bls12>(
&b"fpga_dst"[..],
&hex::decode(crate::constants::ETH_BLOCK_10_000_000_HASH).unwrap(),
1
)[0];
let (x, y) = random_point.into_xy_unchecked();
println!("Random point in Montgomery form: X = {}, Y = {}", x.into_raw_repr(), y.into_raw_repr());
let base_path = std::path::Path::new("./");
for n in vec![6, 7, 20] {
let points_path = base_path.join(&format!("input_points_2^{}.key", n));
let scalars_path = base_path.join(&format!("input_scalars_2^{}.key", n));
let buckets_path = base_path.join(&format!("width_{}_output_buckets_2^{}.key", width, n));
println!("Opening {}", points_path.to_string_lossy());
let file = std::fs::File::create(points_path).unwrap();
let mut points_file = std::io::BufWriter::with_capacity(1 << 24, file);
let file = std::fs::File::create(scalars_path).unwrap();
let mut scalars_file = std::io::BufWriter::with_capacity(1 << 24, file);
let file = std::fs::File::create(buckets_path).unwrap();
let mut buckets_file = std::io::BufWriter::with_capacity(1 << 24, file);
let size = 1 << n;
let scalars = make_random_field_elements::<<Bls12 as ScalarEngine>::Fr>(&worker, size);
let points = make_random_g1_points::<<Bls12 as Engine>::G1Affine>(&worker, size);
let buckets = simulate_first_buckets::<Bls12>(&points, &scalars, width, random_point);
serialize_affine_points_for_fpga::<Bls12, _>(&points, &mut points_file).unwrap();
serialize_scalars_for_fpga::<Bls12, _>(&scalars, &mut scalars_file).unwrap();
serialize_projective_points_for_fpga::<Bls12, _>(&buckets, &mut buckets_file).unwrap();
}
}
}
<file_sep>/src/group.rs
use crate::pairing::{
Engine,
CurveProjective
};
use crate::pairing::ff::{
Field,
PrimeField
};
use super::{
SynthesisError
};
/// Additive-group abstraction with scalar multiplication by `E::Fr`.
/// Both `Point` (projective curve points) and `Scalar` (field elements)
/// in this module implement it, so generic code can operate on either.
pub trait Group<E: Engine>: Sized + Copy + Clone + Send + Sync {
// Additive identity of the group.
fn group_zero() -> Self;
// Scalar multiplication: `self *= by`.
fn group_mul_assign(&mut self, by: &E::Fr);
fn group_add_assign(&mut self, other: &Self);
fn group_sub_assign(&mut self, other: &Self);
}
/// Newtype over a projective curve point, giving it `Group` semantics.
pub struct Point<G: CurveProjective>(pub G);
impl<G: CurveProjective> PartialEq for Point<G> {
fn eq(&self, other: &Point<G>) -> bool {
self.0 == other.0
}
}
// Manual Copy/Clone impls because derive would require `G: Copy`/`G: Clone`
// bounds on the wrapper itself.
impl<G: CurveProjective> Copy for Point<G> { }
impl<G: CurveProjective> Clone for Point<G> {
fn clone(&self) -> Point<G> {
*self
}
}
impl<G: CurveProjective> Group<G::Engine> for Point<G> {
// All operations delegate to the underlying projective point; scalar
// multiplication converts the field element into its representation first.
fn group_zero() -> Self {
Point(G::zero())
}
fn group_mul_assign(&mut self, by: &G::Scalar) {
self.0.mul_assign(by.into_repr());
}
fn group_add_assign(&mut self, other: &Self) {
self.0.add_assign(&other.0);
}
fn group_sub_assign(&mut self, other: &Self) {
self.0.sub_assign(&other.0);
}
}
/// Newtype over a field element, giving it `Group` semantics (the field's
/// additive group, with "scalar multiplication" being field multiplication).
pub struct Scalar<E: Engine>(pub E::Fr);
impl<E: Engine> PartialEq for Scalar<E> {
fn eq(&self, other: &Scalar<E>) -> bool {
self.0 == other.0
}
}
// Manual Copy/Clone impls to avoid placing bounds on `E` via derive.
impl<E: Engine> Copy for Scalar<E> { }
impl<E: Engine> Clone for Scalar<E> {
fn clone(&self) -> Scalar<E> {
*self
}
}
impl<E: Engine> Group<E> for Scalar<E> {
// All operations delegate to the wrapped field element.
fn group_zero() -> Self {
Scalar(E::Fr::zero())
}
fn group_mul_assign(&mut self, by: &E::Fr) {
self.0.mul_assign(by);
}
fn group_add_assign(&mut self, other: &Self) {
self.0.add_assign(&other.0);
}
fn group_sub_assign(&mut self, other: &Self) {
self.0.sub_assign(&other.0);
}
}
# bellman "Community edition"
Originally developed for ZCash, it has diverged now and focuses solely on the [PLONK](https://eprint.iacr.org/2019/953) proof system. Uses our "community edition" pairing for Ethereum's BN256 curve.
## Features
Allows one to design PLONK circuits with custom gates and lookup tables, in conjunction with the [franklin-crypto](https://github.com/matter-labs/franklin-crypto) gadget library. At the moment the lookup argument implies using the lookup over the first three state columns (usually referred to as A/B/C) and allows a gate and a lookup to be applied simultaneously on the same row of the trace.
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Code Examples:
- [Edcon2019_material](https://github.com/matter-labs/Edcon2019_material)
- [EDCON Workshop record (youtube): Intro to bellman: Practical zkSNARKs constructing for Ethereum](https://www.youtube.com/watch?v=tUY0YGTpehg&t=74s)
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.
<file_sep>/src/plonk/commitments/transparent/iop/mod.rs
use crate::pairing::ff::PrimeField;
pub mod trivial_coset_combiner;
pub mod blake2s_trivial_iop;
pub mod keccak_trivial_iop;
/// Compile-time description of a coset: its fixed size.
pub trait CosetInformation: Sized + Clone + Copy {
const COSET_SIZE: usize;
}
/// Maps between "natural" evaluation-domain indices and the tree (leaf)
/// ordering used by the IOP, and groups indices into cosets so that all
/// members of a coset can be queried together.
pub trait CosetCombiner<F: PrimeField> {
const EXPECTED_DEGREE: usize;
const COSET_SIZE: usize;
// type CosetData: CosetInformation;
// Element access by either indexing scheme.
fn get_for_natural_index(leafs: &[F], natural_index: usize) -> &F;
fn get_for_tree_index(leafs: &[F], tree_index: usize) -> &F;
// Bijective index conversions between the two orderings.
fn tree_index_into_natural_index(tree_index: usize) -> usize;
fn natural_index_into_tree_index(natural_index: usize) -> usize;
// Indices of all members of the coset containing the given index
// (ordering convention is implementation-defined — see implementors).
fn get_coset_for_natural_index(natural_index: usize, domain_size: usize) -> Vec<usize>;
fn get_coset_for_tree_index(tree_index: usize, domain_size: usize) -> Vec<usize>;
}
/// Marker for types usable as a hash digest (comparable and printable).
pub trait HashFunctionOutput: Clone + Eq + PartialEq + std::fmt::Debug {}
/// Converts a field element into the byte/word form that is hashed as a leaf.
pub trait LeafEncoder<F: PrimeField> {
type Output;
fn encode_leaf(value: &F) -> Self::Output;
}
/// Maps a hash output (e.g. a Merkle root) into a field element, used to
/// derive Fiat-Shamir challenges.
pub trait FiatShamirHasher<F: PrimeField> {
type Input;
fn transform(value: &Self::Input) -> F;
}
/// Hash functions used to build the IOP's Merkle tree: one for leaves
/// (field elements, optionally pre-encoded) and one for internal nodes.
pub trait IopTreeHasher<F: PrimeField> {
type HashOutput: HashFunctionOutput;
type LeafEncoder: LeafEncoder<F>;
fn hash_leaf(value: &F) -> Self::HashOutput;
fn hash_encoded_leaf(value: &<Self::LeafEncoder as LeafEncoder<F>>::Output) -> Self::HashOutput;
// `level` is the node's height in the tree, allowing domain separation.
fn hash_node(values: &[Self::HashOutput], level: usize) -> Self::HashOutput;
}
/// A Merkle-tree commitment over a vector of field elements, with
/// Fiat-Shamir challenge derivation from the root.
pub trait IopTree<F: PrimeField> {
type Combiner: CosetCombiner<F>;
type TreeHasher: IopTreeHasher<F>;
type FiatShamirTransformer: FiatShamirHasher<F, Input = <Self::TreeHasher as IopTreeHasher<F>>::HashOutput>;
fn create(leafs: &[F]) -> Self;
fn size(&self) -> usize;
fn get_root(&self) -> <Self::TreeHasher as IopTreeHasher<F>>::HashOutput;
// Static and instance forms of root-to-challenge derivation.
fn encode_root_into_challenge(root: & <Self::TreeHasher as IopTreeHasher<F>>::HashOutput) -> F;
fn get_challenge_scalar_from_root(&self) -> F;
// Verifies a single leaf against a root using its authentication path.
fn verify(root: &<Self::TreeHasher as IopTreeHasher<F>>::HashOutput, leaf_value: &F, path: &[<Self::TreeHasher as IopTreeHasher<F>>::HashOutput], index: usize) -> bool;
fn get_path(&self, index: usize, leafs_values: &[F]) -> Vec< <Self::TreeHasher as IopTreeHasher<F>>::HashOutput >;
}
/// A single opening of the committed vector: queried value(s), their
/// indices in both orderings, and the Merkle authentication path.
pub trait IopQuery<F: PrimeField>: 'static + PartialEq + Eq + Clone + std::fmt::Debug {
type TreeHasher: IopTreeHasher<F>;
fn tree_index(&self) -> usize;
fn natural_index(&self) -> usize;
// Plural accessors exist because one query may open a whole coset.
fn natural_indexes(&self) -> Vec<usize>;
fn value(&self) -> F;
fn values(&self) -> &[F];
fn path(&self) -> &[<Self::TreeHasher as IopTreeHasher<F>>::HashOutput];
}
/// Top-level interactive-oracle-proof interface tying together the tree,
/// its coset combiner, and the query type: commit to a leaf vector, open
/// positions, and verify openings against the root.
pub trait IOP<F: PrimeField> {
type Combiner: CosetCombiner<F>;
type Tree: IopTree<F, Combiner = Self::Combiner>;
type Query: IopQuery<F, TreeHasher = <Self::Tree as IopTree<F> >::TreeHasher>;
fn create(leafs: & [F]) -> Self;
// Convenience element access, delegating to the combiner's index schemes.
fn get_for_natural_index(leafs: &[F], natural_index: usize) -> &F;
fn get_for_tree_index(leafs: &[F], tree_index: usize) -> &F;
fn get_root(&self) -> < <Self::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput;
fn verify_query(query: &Self::Query, root: &< <Self::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput) -> bool;
fn query(&self, natural_index: usize, leafs: &[F]) -> Self::Query;
}
use crate::pairing::{Engine};
use crate::sonic::cs::Backend;
use std::marker::PhantomData;
use std::iter::Peekable;
use crate::SynthesisError;
use crate::sonic::cs::SynthesisDriver;
use crate::sonic::cs::{Circuit, ConstraintSystem, Variable, LinearCombination, Coeff};
use crate::pairing::ff::Field;
use super::M;
/// Synthesis driver that records circuit shape (gates and linear constraints)
/// with the backend but never assigns witness values.
pub struct NonassigningSynthesizer<E: Engine, B: Backend<E>> {
backend: B,
// Index of a multiplication gate whose B wire is still free for allocation.
current_variable: Option<usize>,
_marker: PhantomData<E>,
// Number of linear constraints issued so far.
q: usize,
// Number of multiplication gates issued so far.
n: usize,
}
impl<E: Engine, B: Backend<E>> NonassigningSynthesizer<E, B> {
    /// Wraps `backend` with fresh gate (`n`) and linear-constraint (`q`) counters.
    pub fn new(backend: B) -> Self {
        Self {
            // Field-init shorthand (the redundant `backend: backend` was a lint).
            backend,
            current_variable: None,
            _marker: PhantomData,
            q: 0,
            n: 0,
        }
    }
}
impl<E: Engine, B: Backend<E>> ConstraintSystem<E> for NonassigningSynthesizer<E, B> {
    // By convention the constant ONE lives on the A wire of the first gate.
    const ONE: Variable = Variable::A(1);

    /// Allocates a variable without assigning a value (the closure is ignored).
    /// Allocations are packed two per multiplication gate: first the A wire,
    /// then the B wire of the same gate.
    fn alloc<F>(&mut self, _value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        match self.current_variable.take() {
            Some(index) => {
                // NOTE: `take()` above already cleared `current_variable`;
                // the original explicit reset to `None` was redundant and removed.
                Ok(Variable::B(index))
            },
            None => {
                self.n += 1;
                let index = self.n;
                self.backend.new_multiplication_gate();

                self.current_variable = Some(index);

                Ok(Variable::A(index))
            }
        }
    }

    /// Allocates a public input: an ordinary allocation plus a dedicated
    /// linear constraint, whose k-power is registered with the backend.
    fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let input_var = self.alloc(value)?;

        self.enforce_zero(LinearCombination::zero() + input_var);
        // `enforce_zero` has just incremented `self.q`, so this is the index
        // of the constraint created above.
        self.backend.new_k_power(self.q);

        Ok(input_var)
    }

    /// Records `lc = 0` as a new linear constraint with the backend.
    fn enforce_zero(&mut self, lc: LinearCombination<E>)
    {
        self.q += 1;
        let y = self.backend.new_linear_constraint();

        for (var, coeff) in lc.as_ref() {
            self.backend.insert_coefficient(*var, *coeff, &y);
        }
    }

    /// Creates a fresh multiplication gate and returns its (A, B, C) wires.
    /// Values are ignored by this non-assigning driver.
    fn multiply<F>(&mut self, _values: F) -> Result<(Variable, Variable, Variable), SynthesisError>
    where
        F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError>
    {
        self.n += 1;
        let index = self.n;
        self.backend.new_multiplication_gate();

        Ok((Variable::A(index), Variable::B(index), Variable::C(index)))
    }

    /// Fetches a previously assigned value from the backend, if any.
    fn get_value(&self, var: Variable) -> Result<E::Fr, ()> {
        self.backend.get_var(var).ok_or(())
    }
}
/// Synthesis driver that records circuit shape and also assigns witness
/// values to wires via the backend.
pub struct Synthesizer<E: Engine, B: Backend<E>> {
backend: B,
// Index of a multiplication gate whose B/C wires are still free for allocation.
current_variable: Option<usize>,
_marker: PhantomData<E>,
// Number of linear constraints issued so far.
q: usize,
// Number of multiplication gates issued so far.
n: usize,
}
impl<E: Engine, B: Backend<E>> ConstraintSystem<E> for Synthesizer<E, B> {
    // By convention the constant ONE lives on the A wire of the first gate.
    const ONE: Variable = Variable::A(1);

    /// Allocates a variable and assigns its value via the backend.
    /// Allocations are packed two per multiplication gate: the A wire first,
    /// then the B wire, with the C wire assigned the product a * b.
    fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        match self.current_variable.take() {
            Some(index) => {
                let var_a = Variable::A(index);
                let var_b = Variable::B(index);
                let var_c = Variable::C(index);

                let mut product = None;

                let value_a = self.backend.get_var(var_a);

                self.backend.set_var(var_b, || {
                    let value_b = value()?;
                    // C must hold a * b; an unassigned A wire is an error here.
                    product = Some(value_a.ok_or(SynthesisError::AssignmentMissing)?);
                    product.as_mut().map(|product| product.mul_assign(&value_b));

                    Ok(value_b)
                })?;

                self.backend.set_var(var_c, || {
                    product.ok_or(SynthesisError::AssignmentMissing)
                })?;

                // NOTE: `take()` above already cleared `current_variable`;
                // the original explicit reset to `None` was redundant and removed.
                Ok(var_b)
            },
            None => {
                self.n += 1;
                let index = self.n;
                self.backend.new_multiplication_gate();

                let var_a = Variable::A(index);

                self.backend.set_var(var_a, value)?;

                self.current_variable = Some(index);

                Ok(var_a)
            }
        }
    }

    /// Allocates a public input: an ordinary allocation plus a dedicated
    /// linear constraint, whose k-power is registered with the backend.
    fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let input_var = self.alloc(value)?;

        self.enforce_zero(LinearCombination::zero() + input_var);
        // `enforce_zero` has just incremented `self.q`, so this is the index
        // of the constraint created above.
        self.backend.new_k_power(self.q);

        Ok(input_var)
    }

    /// Records `lc = 0` as a new linear constraint with the backend.
    fn enforce_zero(&mut self, lc: LinearCombination<E>)
    {
        self.q += 1;
        let y = self.backend.new_linear_constraint();

        for (var, coeff) in lc.as_ref() {
            self.backend.insert_coefficient(*var, *coeff, &y);
        }
    }

    /// Creates a fresh multiplication gate, assigns all three wires from the
    /// closure, and returns its (A, B, C) variables.
    fn multiply<F>(&mut self, values: F) -> Result<(Variable, Variable, Variable), SynthesisError>
    where
        F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError>
    {
        self.n += 1;
        let index = self.n;
        self.backend.new_multiplication_gate();

        let a = Variable::A(index);
        let b = Variable::B(index);
        let c = Variable::C(index);

        let mut b_val = None;
        let mut c_val = None;

        // The closure for `a` stashes the b/c values for the later calls.
        self.backend.set_var(a, || {
            let (a, b, c) = values()?;
            b_val = Some(b);
            c_val = Some(c);

            Ok(a)
        })?;

        self.backend.set_var(b, || {
            b_val.ok_or(SynthesisError::AssignmentMissing)
        })?;

        self.backend.set_var(c, || {
            c_val.ok_or(SynthesisError::AssignmentMissing)
        })?;

        Ok((a, b, c))
    }

    /// Fetches a previously assigned value from the backend, if any.
    fn get_value(&self, var: Variable) -> Result<E::Fr, ()> {
        self.backend.get_var(var).ok_or(())
    }
}
impl<E: Engine, B: Backend<E>> Synthesizer<E, B> {
    /// Wraps `backend` with fresh gate (`n`) and linear-constraint (`q`) counters.
    pub fn new(backend: B) -> Self {
        Self {
            // Field-init shorthand (the redundant `backend: backend` was a lint).
            backend,
            current_variable: None,
            _marker: PhantomData,
            q: 0,
            n: 0,
        }
    }
}
/// Synthesis driver that, in addition to assigning values, tracks which of
/// the `M` permutation "slots" each wire occupies, for the s_1(X, Y)
/// permutation argument.
pub struct PermutationSynthesizer<E: Engine, B: Backend<E>> {
backend: B,
// Index of a multiplication gate whose B/C wires are still free for allocation.
current_variable: Option<usize>,
_marker: PhantomData<E>,
// Number of linear constraints issued so far.
q: usize,
// Number of multiplication gates issued so far.
n: usize,
// These vectors will hold, for all of the wires, the terms related to these
// wires for each of the M permutation polynomials. The Coeff<E> is the
// coefficient, and the usize is q, the index of the linear constraint and is
// related to the power of Y in the s_1(X, Y) polynomial.
pub a: Vec<[Option<(Coeff<E>, usize)>; M]>,
pub b: Vec<[Option<(Coeff<E>, usize)>; M]>,
pub c: Vec<[Option<(Coeff<E>, usize)>; M]>,
}
impl<E: Engine, B: Backend<E>> ConstraintSystem<E> for PermutationSynthesizer<E, B> {
    // By convention the constant ONE lives on the A wire of the first gate.
    const ONE: Variable = Variable::A(1);

    /// Allocates a variable and assigns its value. Allocations are packed two
    /// per multiplication gate (A wire, then B wire; C holds their product),
    /// and each new gate gets fresh permutation slots.
    fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        match self.current_variable.take() {
            Some(index) => {
                let var_a = Variable::A(index);
                let var_b = Variable::B(index);
                let var_c = Variable::C(index);

                let mut product = None;

                let value_a = self.backend.get_var(var_a);

                self.backend.set_var(var_b, || {
                    let value_b = value()?;
                    // C must hold a * b; an unassigned A wire is an error here.
                    product = Some(value_a.ok_or(SynthesisError::AssignmentMissing)?);
                    product.as_mut().map(|product| product.mul_assign(&value_b));

                    Ok(value_b)
                })?;

                self.backend.set_var(var_c, || {
                    product.ok_or(SynthesisError::AssignmentMissing)
                })?;

                // NOTE: `take()` above already cleared `current_variable`;
                // the original explicit reset to `None` was redundant and removed.
                Ok(var_b)
            },
            None => {
                self.n += 1;
                let index = self.n;
                self.backend.new_multiplication_gate();

                // Create slots for the new wires.
                self.a.push([None; M]);
                self.b.push([None; M]);
                self.c.push([None; M]);

                let var_a = Variable::A(index);

                self.backend.set_var(var_a, value)?;

                self.current_variable = Some(index);

                Ok(var_a)
            }
        }
    }

    /// Allocates a public input: an ordinary allocation plus a dedicated
    /// linear constraint, whose k-power is registered with the backend.
    fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let input_var = self.alloc(value)?;

        self.enforce_zero(LinearCombination::zero() + input_var);
        // The new variable has all free slots, so this shouldn't create
        // more than one linear combination.
        self.backend.new_k_power(self.q);

        Ok(input_var)
    }

    /// Records `lc = 0` by delegating to the recursive `enforce_equals`,
    /// which splits the combination across constraints when it needs more
    /// than `M` slots.
    fn enforce_zero(&mut self, lc: LinearCombination<E>)
    {
        // We just redirect things into the (recursing) enforce_equals method which
        // does the actual work. Annoyingly, we need to use dynamic dispatch on the
        // underlying iterator because once you've taken a Peekable<I> you can't get
        // the underlying iterator (since .next() may have been called on it) so
        // at each depth of recursion we'd end up with a new type, which is
        // impossible for the compiler to reason about.
        let lc = lc.as_ref();
        let lc: &mut Iterator<Item=&(Variable, Coeff<E>)> = &mut lc.into_iter();
        let lc = lc.peekable();

        self.enforce_equals(lc, None);
    }

    /// Creates a fresh multiplication gate (with fresh permutation slots),
    /// assigns all three wires from the closure, and returns (A, B, C).
    fn multiply<F>(&mut self, values: F) -> Result<(Variable, Variable, Variable), SynthesisError>
    where
        F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError>
    {
        self.n += 1;
        let index = self.n;
        self.backend.new_multiplication_gate();

        // Create slots for the new wires.
        self.a.push([None; M]);
        self.b.push([None; M]);
        self.c.push([None; M]);

        let a = Variable::A(index);
        let b = Variable::B(index);
        let c = Variable::C(index);

        let mut b_val = None;
        let mut c_val = None;

        // The closure for `a` stashes the b/c values for the later calls.
        self.backend.set_var(a, || {
            let (a, b, c) = values()?;
            b_val = Some(b);
            c_val = Some(c);

            Ok(a)
        })?;

        self.backend.set_var(b, || {
            b_val.ok_or(SynthesisError::AssignmentMissing)
        })?;

        self.backend.set_var(c, || {
            c_val.ok_or(SynthesisError::AssignmentMissing)
        })?;

        Ok((a, b, c))
    }

    /// Fetches a previously assigned value from the backend, if any.
    fn get_value(&self, var: Variable) -> Result<E::Fr, ()> {
        self.backend.get_var(var).ok_or(())
    }
}
impl<E: Engine, B: Backend<E>> PermutationSynthesizer<E, B> {
/// Wraps `backend` with fresh counters and empty per-wire slot tables.
pub fn new(backend: B) -> Self {
Self {
backend: backend,
current_variable: None,
_marker: PhantomData,
q: 0,
n: 0,
a: vec![],
b: vec![],
c: vec![],
}
}
// Enforces that the value of `lhs` equals the value
// of `rhs`, returning the value of the left hand side
// as determined by the assignment. If rhs is none, it
// is interpreted to be zero.
// Note: each call consumes one linear constraint (bumps `self.q`) and may
// recurse, creating ephemeral variables, when `lhs` has more terms than
// the M available slots.
fn enforce_equals<'a>(
&mut self,
mut lhs: Peekable<&mut Iterator<Item=&'a (Variable, Coeff<E>)>>,
rhs: Option<Variable>
) -> Option<E::Fr>
{
// First, let's create a new linear constraint. We'll save its y value
// for the backend and q as well.
self.q += 1;
let q = self.q;
let y = self.backend.new_linear_constraint();
let mut slots_available = [true; M];
let mut num_slots_available = M;
// If the caller is enforce_equals we need to return the value of the lhs
// so that rhs can be assigned properly, so we keep track of it here.
let mut current_value = if rhs.is_some() { Some(E::Fr::zero()) } else { None };
// If rhs is Some, then we _need_ to involve it in this
// linear constraint, so let's just handle it right away. (This also
// helps avoid an infinite recursion issue described later.)
if let Some(rhs) = rhs {
self.emplace_variable(&mut slots_available, &y, rhs, Coeff::NegativeOne, q);
num_slots_available -= 1;
}
// Iterate through the linear combination
loop {
if let Some(term) = lhs.next() {
assert!(num_slots_available > 0);
if num_slots_available == 1 && lhs.peek().is_some() {
// We'll be out of slots if we add this variable to the linear
// combination; instead, create an ephemeral variable to hold
// the value of the remaining terms and use that. Temporarily,
// give the variable "zero" value.
let ephemeral = self.alloc(|| Ok(E::Fr::zero())).expect("assignment is provided so this should not fail");
// One of the annoying "tricks" we have to embrace is that the ephemeral
// variable has all of its slots available, and so because it's the rhs
// when we recursively call `enforce_equals` we know that it will not trigger
// a condition in `emplace_variable` that results in the variable being
// duplicated; otherwise, the duplicate variable will have a value of zero
// and we'd have to somehow track all of the duplicates when we later assign.
let mut iter = Some(term).into_iter().chain(lhs);
let iter: &mut Iterator<Item=&(Variable, Coeff<E>)> = &mut iter;
let value = self.enforce_equals(iter.peekable(), Some(ephemeral));
// Set the correct ephemeral value right away
self.backend.set_var(ephemeral, || {
value.ok_or(SynthesisError::AssignmentMissing)
}).expect("assignment is provided so this should not fail");
// Fix the underlying assignment -- the c-wire value will change if the ephemeral
// value was a b-wire.
self.fix_variable_assignment(ephemeral);
// Now we emplace the variable into the linear combination.
self.emplace_variable(&mut slots_available, &y, ephemeral, Coeff::One, q);
num_slots_available -= 1;
match (&mut current_value, &value) {
(Some(ref mut current_value), Some(ref value)) => {
current_value.add_assign(&value);
},
_ => {
current_value = None;
}
}
assert!(num_slots_available == 0);
// We're done, so return.
return current_value;
} else {
self.emplace_variable(&mut slots_available, &y, term.0, term.1, q);
num_slots_available -= 1;
match (&mut current_value, self.backend.get_var(term.0)) {
(Some(ref mut current_value), Some(mut value)) => {
term.1.multiply(&mut value);
current_value.add_assign(&value);
},
_ => {
current_value = None;
}
}
}
} else {
// We're done, so return.
return current_value;
}
}
}
// This takes a variable and coefficient and places it into a linear combination,
// given a set of slots that are available, and updates the slot availability to
// reflect which slot was chosen.
fn emplace_variable(&mut self, slots_available: &mut [bool; M], y: &B::LinearConstraintIndex, var: Variable, coeff: Coeff<E>, q: usize)
{
// Get the slots for this wire.
let wire_slots = self.get_wire_slots(var);
// Let's handle the simple case where the linear combination and the
// variable have a slot that coincides.
let mut available_i = None;
for i in 0..M {
if slots_available[i] {
available_i = Some(i);
if wire_slots[i] {
self.emplace_slot(var, i, coeff, y, q);
slots_available[i] = false;
return;
}
}
}
let available_i = available_i.expect("there is always at least one slot open");
// available_i corresponds to a slot that is available in the linear
// combination; clearly, it is not available for the wire. In order
// to rectify this, we will create a new wire with the same value.
let ephemeral_value = self.backend.get_var(var);
let ephemeral = self.alloc(|| {
ephemeral_value.ok_or(SynthesisError::AssignmentMissing)
}).expect("assignment is provided so this should not fail");
// Now, we'll emplace the slot for _this_ variable.
self.emplace_slot(ephemeral, available_i, coeff, y, q);
slots_available[available_i] = false;
// Next, we'll free up a slot in the original wire
let free_i = (available_i + 1) % M;
// by moving the term to the ephemeral wire.
self.move_slot(free_i, var, ephemeral);
// The original wire has slot free_i available now, and
// the new wire has only available_i and (available_i + 1) % M
// occupied. As long as M>=3, this means available_i + 2 % M
// is a free wire for the ephemeral and it is distinct from
// free_i! So, we can relate the ephemeral variable to the
// original.
let iter = [(var, Coeff::One), (ephemeral, Coeff::NegativeOne)];
let mut iter = iter.into_iter();
let iter: &mut Iterator<Item=&(Variable, Coeff<E>)> = &mut iter;
self.enforce_equals(iter.peekable(), None);
}
// Move slot value from wire to another
fn move_slot(&mut self, slot: usize, from: Variable, to: Variable) {
let slot_val;
{
let from_vals = match from {
Variable::A(index) => &mut self.a[index - 1],
Variable::B(index) => &mut self.b[index - 1],
Variable::C(index) => &mut self.c[index - 1],
};
if from_vals[slot].is_none() {
// In this case, we do nothing.
return;
}
slot_val = from_vals[slot].unwrap();
from_vals[slot] = None;
}
// We need the backend to compute the cached y^q value for us,
// if it needs it.
let y = self.backend.get_for_q(slot_val.1);
self.backend.insert_coefficient(from, -slot_val.0, &y); // Negate coefficient to undo
{
let to_vals = match to {
Variable::A(index) => &mut self.a[index - 1],
Variable::B(index) => &mut self.b[index - 1],
Variable::C(index) => &mut self.c[index - 1],
};
to_vals[slot] = Some(slot_val);
self.backend.insert_coefficient(to, slot_val.0, &y);
}
}
// Place a coefficient in a slot
fn emplace_slot(&mut self, var: Variable, slot_index: usize, coeff: Coeff<E>, y: &B::LinearConstraintIndex, q: usize)
{
let vals = match var {
Variable::A(index) => &mut self.a[index - 1],
Variable::B(index) => &mut self.b[index - 1],
Variable::C(index) => &mut self.c[index - 1],
};
vals[slot_index] = Some((coeff, q));
self.backend.insert_coefficient(var, coeff, &y);
}
// Get available slots for a wire
fn get_wire_slots(&self, var: Variable) -> [bool; M] {
let vals = match var {
Variable::A(index) => &self.a[index - 1],
Variable::B(index) => &self.b[index - 1],
Variable::C(index) => &self.c[index - 1],
};
let mut slots = [true; M];
for i in 0..M {
if vals[i].is_some() {
slots[i] = false;
}
}
slots
}
// If a variable changes value, we probably need to adjust.
// Recomputes the C wire of the gate holding `var` as the product of its
// current A and B wire values.
fn fix_variable_assignment(&mut self, var: Variable) {
let index = var.get_index();
let a_value = self.backend.get_var(Variable::A(index));
let b_value = self.backend.get_var(Variable::B(index));
let c_value = match (a_value, b_value) {
(Some(mut a), Some(b)) => {
a.mul_assign(&b);
Some(a)
},
_ => { None }
};
self.backend.set_var(Variable::C(index), || {
c_value.ok_or(SynthesisError::AssignmentMissing)
}).expect("assignment exists if the closure is called");
}
}<file_sep>/src/plonk/redshift/mod.rs
mod prover;
mod generator;
// use crate::Engine;
// use crate::plonk::plonk::prover::PlonkSetup;
// use crate::plonk::commitments::CommitmentScheme;
// use crate::plonk::Transcript;
// use crate::SynthesisError;
// use crate::pairing::ff::{Field, PrimeField};
// pub struct PlonkChunkedNonhomomorphicProof<E: Engine, S: CommitmentScheme<E::Fr> >{
// pub a_opening_value: E::Fr,
// pub b_opening_value: E::Fr,
// pub c_opening_value: E::Fr,
// pub q_l_opening_value: E::Fr,
// pub q_r_opening_value: E::Fr,
// pub q_o_opening_value: E::Fr,
// pub q_m_opening_value: E::Fr,
// pub q_c_opening_value: E::Fr,
// pub s_id_opening_value: E::Fr,
// pub sigma_1_opening_value: E::Fr,
// pub sigma_2_opening_value: E::Fr,
// pub sigma_3_opening_value: E::Fr,
// pub z_1_unshifted_opening_value: E::Fr,
// pub z_2_unshifted_opening_value: E::Fr,
// pub z_1_shifted_opening_value: E::Fr,
// pub z_2_shifted_opening_value: E::Fr,
// pub t_low_opening_value: E::Fr,
// pub t_mid_opening_value: E::Fr,
// pub t_high_opening_value: E::Fr,
// pub a_commitment: S::Commitment,
// pub b_commitment: S::Commitment,
// pub c_commitment: S::Commitment,
// pub z_1_commitment: S::Commitment,
// pub z_2_commitment: S::Commitment,
// pub t_low_commitment: S::Commitment,
// pub t_mid_commitment: S::Commitment,
// pub t_high_commitment: S::Commitment,
// pub openings_proof: S::OpeningProof,
// }
// use crate::plonk::domains::Domain;
// fn evaluate_inverse_vanishing_poly<E: Engine>(vahisning_size: usize, point: E::Fr) -> E::Fr {
// assert!(vahisning_size.is_power_of_two());
// // update from the paper - it should not hold for the last generator, omega^(n) in original notations
// // Z(X) = (X^(n+1) - 1) / (X - omega^(n)) => Z^{-1}(X) = (X - omega^(n)) / (X^(n+1) - 1)
// let domain = Domain::<E::Fr>::new_for_size(vahisning_size as u64).expect("should fit");
// let n_domain_omega = domain.generator;
// let root = n_domain_omega.pow([(vahisning_size - 1) as u64]);
// let mut numerator = point;
// numerator.sub_assign(&root);
// let mut denominator = point.pow([vahisning_size as u64]);
// denominator.sub_assign(&E::Fr::one());
// let denominator = denominator.inverse().expect("must exist");
// numerator.mul_assign(&denominator);
// numerator
// }
// fn evaluate_lagrange_poly<E: Engine>(vahisning_size:usize, poly_number: usize, at: E::Fr) -> E::Fr {
// assert!(vahisning_size.is_power_of_two());
// let mut repr = E::Fr::zero().into_repr();
// repr.as_mut()[0] = vahisning_size as u64;
// let size_fe = E::Fr::from_repr(repr).expect("is a valid representation");
// // let size_inv = n_fe.inverse().expect("must exist");
// // L_0(X) = (Z_H(X) / (X - 1)).(1/n) and L_0(1) = 1
// // L_1(omega) = 1 = L_0(omega * omega^-1)
// let domain = Domain::<E::Fr>::new_for_size(vahisning_size as u64).expect("domain of this size should exist");
// let omega = domain.generator;
// let omega_inv = omega.inverse().expect("must exist");
// let argument_multiplier = omega_inv.pow([poly_number as u64]);
// let mut argument = at;
// argument.mul_assign(&argument_multiplier);
// let mut numerator = argument.pow([vahisning_size as u64]);
// numerator.sub_assign(&E::Fr::one());
// let mut denom = argument;
// denom.sub_assign(&E::Fr::one());
// denom.mul_assign(&size_fe);
// let denom_inv = denom.inverse().expect("must exist");
// numerator.mul_assign(&denom_inv);
// numerator
// }
<file_sep>/src/sonic/helped/adapted_verifier.rs
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use rand::{Rand, Rng};
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::parameters::{Parameters};
use super::helper::{Aggregate};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::{Circuit};
use crate::sonic::sonic::AdaptorCircuit;
use crate::sonic::srs::SRS;
use crate::sonic::sonic::Nonassigning;
use super::verifier::verify_aggregate_on_srs as verify_aggregate_on_srs_sonic_circuit;
use super::verifier::verify_proofs_on_srs as verify_proofs_on_srs_sonic_circuit;
/// Verifies a batch of Sonic proofs for `circuit` (no aggregation) using the
/// SRS stored in `params`.
pub fn verify_proofs<E: Engine, C: Circuit<E> + Clone, R: Rng>(
    proofs: &[Proof<E>],
    inputs: &[Vec<E::Fr>],
    circuit: C,
    rng: R,
    params: &Parameters<E>,
) -> Result<bool, SynthesisError>
{
    let adapted_circuit = AdaptorCircuit(circuit);

    // `&params.srs` restored — the source had the `&par` prefix mangled into
    // a pilcrow (`¶ms.srs`) by an HTML-entity decoding pass.
    verify_proofs_on_srs_sonic_circuit::<_, _, Nonassigning, _>(proofs, inputs, adapted_circuit, rng, &params.srs)
}
/// Check multiple proofs with aggregation. Verifier's work is
/// not succinct due to `S(X, Y)` evaluation
pub fn verify_aggregate<E: Engine, C: Circuit<E> + Clone, R: Rng>(
    proofs: &[(Proof<E>, SxyAdvice<E>)],
    aggregate: &Aggregate<E>,
    inputs: &[Vec<E::Fr>],
    circuit: C,
    rng: R,
    params: &Parameters<E>,
) -> Result<bool, SynthesisError> {
    let adapted_circuit = AdaptorCircuit(circuit);

    // `&params.srs` restored — the source had the `&par` prefix mangled into
    // a pilcrow (`¶ms.srs`) by an HTML-entity decoding pass.
    verify_aggregate_on_srs_sonic_circuit::<_, _, Nonassigning, _>(proofs, aggregate, inputs, adapted_circuit, rng, &params.srs)
}
// #[test]
// fn my_fun_circuit_test() {
// use crate::pairing::ff::PrimeField;
// use crate::pairing::bls12_381::{Bls12, Fr};
// use super::*;
// use crate::sonic::cs::{Basic, ConstraintSystem, LinearCombination};
// struct MyCircuit;
// impl<E: Engine> Circuit<E> for MyCircuit {
// fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
// let (a, b, _) = cs.multiply(|| {
// Ok((
// E::Fr::from_str("10").unwrap(),
// E::Fr::from_str("20").unwrap(),
// E::Fr::from_str("200").unwrap(),
// ))
// })?;
// cs.enforce_zero(LinearCombination::from(a) + a - b);
// //let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?;
// //cs.enforce_zero(LinearCombination::from(b) - multiplier);
// Ok(())
// }
// }
// let srs = SRS::<Bls12>::new(
// 20,
// Fr::from_str("22222").unwrap(),
// Fr::from_str("33333333").unwrap(),
// );
// let proof = create_proof_on_srs::<Bls12, _, Basic>(&MyCircuit, &srs).unwrap();
// use std::time::{Instant};
// let start = Instant::now();
// let mut batch = MultiVerifier::<Bls12, _, Basic>::new(MyCircuit, &srs).unwrap();
// for _ in 0..1 {
// batch.add_proof(&proof, &[/*Fr::from_str("20").unwrap()*/], |_, _| None);
// }
// assert!(batch.check_all());
// let elapsed = start.elapsed();
// println!("time to verify: {:?}", elapsed);
// }
<file_sep>/src/plonk/plonk/prover.rs
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::plonk::transparent_engine::TransparentEngine;
use crate::{SynthesisError};
use std::marker::PhantomData;
use crate::plonk::cs::gates::*;
use crate::plonk::cs::*;
use crate::worker::*;
use crate::plonk::domains::*;
use crate::plonk::commitments::*;
use crate::plonk::commitments::transcript::*;
use crate::plonk::utils::*;
use crate::plonk::polynomials::*;
/// Assembly collecting gates and witness assignments while a circuit is
/// being synthesized, prior to proving.
#[derive(Debug, Clone)]
pub struct ProvingAssembly<E: Engine> {
m: usize,
// Number of gates enforced so far (input gates are tracked separately).
n: usize,
input_gates: Vec<Gate<E::Fr>>,
aux_gates: Vec<Gate<E::Fr>>,
num_inputs: usize,
num_aux: usize,
// Witness values for public inputs. (The "assingments" spelling is
// historical and used throughout the file; do not rename in isolation.)
input_assingments: Vec<E::Fr>,
aux_assingments: Vec<E::Fr>,
inputs_map: Vec<usize>,
is_finalized: bool
}
impl<E: Engine> ConstraintSystem<E> for ProvingAssembly<E> {
// const ZERO: Variable = Variable(Index::Aux(1));
// const ONE: Variable = Variable(Index::Aux(2));
// allocate a variable
// The closure is evaluated eagerly and the value stored; no gate is created.
fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>
{
let value = value()?;
self.num_aux += 1;
let index = self.num_aux;
self.aux_assingments.push(value);
// println!("Allocated variable Aux({}) with value {}", index, value);
Ok(Variable(Index::Aux(index)))
}
// allocate an input variable
// Creates an "enforce constant zero" gate for the input wire; the actual
// value lives in `input_assingments`. NOTE(review): the commented-out
// alternative below would enforce the value directly — presumably the zero
// form is adjusted by the public-input handling later; confirm in the prover.
fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>
{
let value = value()?;
self.num_inputs += 1;
let index = self.num_inputs;
self.input_assingments.push(value);
let input_var = Variable(Index::Input(index));
let gate = Gate::<E::Fr>::new_enforce_constant_gate(input_var, Some(E::Fr::zero()), self.dummy_variable());
// let gate = Gate::<E>::new_enforce_constant_gate(input_var, Some(value), self.dummy_variable());
self.input_gates.push(gate);
// println!("Allocated input Input({}) with value {}", index, value);
Ok(input_var)
}
// enforce variable as boolean
fn enforce_boolean(&mut self, variable: Variable) -> Result<(), SynthesisError> {
let gate = Gate::<E::Fr>::new_enforce_boolean_gate(variable, self.dummy_variable());
self.aux_gates.push(gate);
self.n += 1;
Ok(())
}
// allocate an abstract gate
// `coeffs` is (q_l, q_r, q_o, q_m, q_c) for the three `variables`.
fn new_gate(&mut self, variables: (Variable, Variable, Variable),
coeffs:(E::Fr,E::Fr,E::Fr,E::Fr,E::Fr)) -> Result<(), SynthesisError>
{
let gate = Gate::<E::Fr>::new_gate(variables, coeffs);
// println!("Enforced new gate number {}: {:?}", self.n, gate);
self.aux_gates.push(gate);
self.n += 1;
// let satisfied = self.clone().is_satisfied();
// if !satisfied {
// return Err(SynthesisError::Unsatisfiable);
// }
Ok(())
}
// allocate a constant
fn enforce_constant(&mut self, variable: Variable, constant: E::Fr) -> Result<(), SynthesisError>
{
let gate = Gate::<E::Fr>::new_enforce_constant_gate(variable, Some(constant), self.dummy_variable());
// println!("Enforced new constant gate number {}: {:?}", self.n, gate);
self.aux_gates.push(gate);
self.n += 1;
Ok(())
}
// allocate a multiplication gate
fn enforce_mul_2(&mut self, variables: (Variable, Variable)) -> Result<(), SynthesisError> {
// q_l, q_r, q_o, q_c = 0, q_m = 1
let (v_0, v_1) = variables;
let zero = E::Fr::zero();
let one = E::Fr::one();
let gate = Gate::<E::Fr>::new_gate((v_0, v_1, self.dummy_variable()), (zero, zero, zero, one, zero));
self.aux_gates.push(gate);
self.n += 1;
Ok(())
}
// allocate a multiplication gate
fn enforce_mul_3(&mut self, variables: (Variable, Variable, Variable)) -> Result<(), SynthesisError> {
let gate = Gate::<E::Fr>::new_multiplication_gate(variables);
self.aux_gates.push(gate);
self.n += 1;
Ok(())
}
// allocate a linear combination gate
// Enforces c_0 * v_0 + c_1 * v_1 = 0 (q_o, q_m, q_c all zero).
fn enforce_zero_2(&mut self, variables: (Variable, Variable), coeffs:(E::Fr, E::Fr)) -> Result<(), SynthesisError>
{
let (v_0, v_1) = variables;
let (c_0, c_1) = coeffs;
let zero = E::Fr::zero();
let gate = Gate::<E::Fr>::new_gate((v_0, v_1, self.dummy_variable()), (c_0, c_1, zero, zero, zero));
self.aux_gates.push(gate);
self.n += 1;
Ok(())
}
// allocate a linear combination gate
fn enforce_zero_3(&mut self, variables: (Variable, Variable, Variable), coeffs:(E::Fr, E::Fr, E::Fr)) -> Result<(), SynthesisError>
{
let gate = Gate::<E::Fr>::new_enforce_zero_gate(variables, coeffs);
self.aux_gates.push(gate);
self.n += 1;
Ok(())
}
// Looks up the stored assignment for `var`. Indexes the assignment vectors
// directly, so an index that was never allocated will panic rather than
// return an Err.
fn get_value(&self, var: Variable) -> Result<E::Fr, SynthesisError> {
let value = match var {
Variable(Index::Input(input)) => {
self.input_assingments[input - 1]
},
Variable(Index::Aux(aux)) => {
self.aux_assingments[aux - 1]
}
};
Ok(value)
}
fn get_dummy_variable(&self) -> Variable {
self.dummy_variable()
}
}
impl<E: Engine> ProvingAssembly<E> {
/// Creates an assembly with a single preallocated zero constant (Aux(1)),
/// which also serves as the dummy variable used for unused gate wires.
pub fn new() -> Self {
let mut tmp = Self {
n: 0,
m: 0,
input_gates: vec![],
aux_gates: vec![],
num_inputs: 0,
num_aux: 0,
input_assingments: vec![],
aux_assingments: vec![],
inputs_map: vec![],
is_finalized: false,
};
let zero = tmp.alloc(|| Ok(E::Fr::zero())).expect("should have no issues");
tmp.enforce_constant(zero, E::Fr::zero()).expect("should have no issues");
// let one = tmp.alloc(|| Ok(E::Fr::one())).expect("should have no issues");
// tmp.enforce_constant(one, E::Fr::one()).expect("should have no issues");
// match (zero, <Self as ConstraintSystem<E>>::ZERO) {
// (Variable(Index::Aux(1)), Variable(Index::Aux(1))) => {},
// _ => panic!("zero variable is incorrect")
// }
// match (one, <Self as ConstraintSystem<E>>::ONE) {
// (Variable(Index::Aux(2)), Variable(Index::Aux(2))) => {},
// _ => panic!("one variable is incorrect")
// }
// Sanity check: the dummy variable must coincide with the zero allocation.
match (tmp.dummy_variable(), zero) {
(Variable(Index::Aux(1)), Variable(Index::Aux(1))) => {},
_ => panic!("zero variable is incorrect")
}
tmp
}
/// Reserves an empty aux gate and returns its 1-based gate index,
/// to be filled in later via `set_gate`.
fn new_empty_gate(&mut self) -> usize {
    self.aux_gates.push(Gate::<E::Fr>::empty());
    self.n += 1;

    self.n
}
/// Replaces the aux gate previously reserved at 1-based `index`.
fn set_gate(&mut self, gate: Gate<E::Fr>, index: usize) {
    let slot = index - 1;
    self.aux_gates[slot] = gate;
}
// return variable that is not in a constraint formally, but has some value
// (this is the zero constant allocated first in `new()`, i.e. Aux(1)).
fn dummy_variable(&self) -> Variable {
// <Self as ConstraintSystem<E>>::ZERO
Variable(Index::Aux(1))
}
/// Collects the witness values of the a/b/c wires for every gate
/// (input gates first, then aux gates), in gate order.
pub(crate) fn make_wire_assingments(&self) -> (Vec<E::Fr>, Vec<E::Fr>, Vec<E::Fr>) {
assert!(self.is_finalized);
// create a vector of gate assingments
// if a_i = j then w_j = f_l(g^i)
let total_num_gates = self.input_gates.len() + self.aux_gates.len();
let mut f_l = vec![E::Fr::zero(); total_num_gates];
let mut f_r = vec![E::Fr::zero(); total_num_gates];
let mut f_o = vec![E::Fr::zero(); total_num_gates];
for (i, gate) in self.input_gates.iter().chain(&self.aux_gates).enumerate()
{
match gate.a_wire() {
Variable(Index::Input(index)) => {
f_l[i] = self.input_assingments[index - 1];
},
Variable(Index::Aux(index)) => {
f_l[i] = self.aux_assingments[index - 1];
},
}
match gate.b_wire() {
Variable(Index::Input(index)) => {
f_r[i] = self.input_assingments[index - 1];
},
Variable(Index::Aux(index)) => {
f_r[i] = self.aux_assingments[index - 1];
},
}
match gate.c_wire() {
Variable(Index::Input(index)) => {
f_o[i] = self.input_assingments[index - 1];
},
Variable(Index::Aux(index)) => {
f_o[i] = self.aux_assingments[index - 1];
},
}
}
(f_l, f_r, f_o)
}
/// Builds the five selector polynomials (q_l, q_r, q_o, q_m, q_c) in values
/// form, one entry per gate: input gates are filled serially, aux gates in
/// parallel via `worker`.
pub(crate) fn make_circuit_description_polynomials(&self, worker: &Worker) -> Result<(
Polynomial::<E::Fr, Values>, Polynomial::<E::Fr, Values>, Polynomial::<E::Fr, Values>,
Polynomial::<E::Fr, Values>, Polynomial::<E::Fr, Values>
), SynthesisError> {
assert!(self.is_finalized);
let total_num_gates = self.input_gates.len() + self.aux_gates.len();
let mut q_l = vec![E::Fr::zero(); total_num_gates];
let mut q_r = vec![E::Fr::zero(); total_num_gates];
let mut q_o = vec![E::Fr::zero(); total_num_gates];
let mut q_m = vec![E::Fr::zero(); total_num_gates];
let mut q_c = vec![E::Fr::zero(); total_num_gates];
// Convert a symbolic gate coefficient into a concrete field element.
fn coeff_into_field_element<F: PrimeField>(coeff: & Coeff<F>) -> F {
match coeff {
Coeff::Zero => {
F::zero()
},
Coeff::One => {
F::one()
},
Coeff::NegativeOne => {
let mut tmp = F::one();
tmp.negate();
tmp
},
Coeff::Full(c) => {
*c
},
}
}
// expect a small number of inputs
for (((((gate, q_l), q_r), q_o), q_m), q_c) in self.input_gates.iter()
.zip(q_l.iter_mut())
.zip(q_r.iter_mut())
.zip(q_o.iter_mut())
.zip(q_m.iter_mut())
.zip(q_c.iter_mut())
{
*q_l = coeff_into_field_element(&gate.q_l);
*q_r = coeff_into_field_element(&gate.q_r);
*q_o = coeff_into_field_element(&gate.q_o);
*q_m = coeff_into_field_element(&gate.q_m);
*q_c = coeff_into_field_element(&gate.q_c);
}
// The aux-gate entries start right after the input-gate entries.
let num_input_gates = self.input_gates.len();
let q_l_aux = &mut q_l[num_input_gates..];
let q_r_aux = &mut q_r[num_input_gates..];
let q_o_aux = &mut q_o[num_input_gates..];
let q_m_aux = &mut q_m[num_input_gates..];
let q_c_aux = &mut q_c[num_input_gates..];
debug_assert!(self.aux_gates.len() == q_l_aux.len());
// Fill the aux portion in parallel, chunk by chunk.
worker.scope(self.aux_gates.len(), |scope, chunk| {
for (((((gate, q_l), q_r), q_o), q_m), q_c) in self.aux_gates.chunks(chunk)
.zip(q_l_aux.chunks_mut(chunk))
.zip(q_r_aux.chunks_mut(chunk))
.zip(q_o_aux.chunks_mut(chunk))
.zip(q_m_aux.chunks_mut(chunk))
.zip(q_c_aux.chunks_mut(chunk))
{
scope.spawn(move |_| {
for (((((gate, q_l), q_r), q_o), q_m), q_c) in gate.iter()
.zip(q_l.iter_mut())
.zip(q_r.iter_mut())
.zip(q_o.iter_mut())
.zip(q_m.iter_mut())
.zip(q_c.iter_mut())
{
*q_l = coeff_into_field_element(&gate.q_l);
*q_r = coeff_into_field_element(&gate.q_r);
*q_o = coeff_into_field_element(&gate.q_o);
*q_m = coeff_into_field_element(&gate.q_m);
*q_c = coeff_into_field_element(&gate.q_c);
}
});
}
});
let q_l = Polynomial::from_values(q_l)?;
let q_r = Polynomial::from_values(q_r)?;
let q_o = Polynomial::from_values(q_o)?;
let q_m = Polynomial::from_values(q_m)?;
let q_c = Polynomial::from_values(q_c)?;
Ok((q_l, q_r, q_o, q_m, q_c))
}
/// Builds the copy-permutation mappings (sigma_1, sigma_2, sigma_3) following
/// the PLONK paper: wire positions are numbered 1..=n for a-wires, n+1..=2n for
/// b-wires and 2n+1..=3n for c-wires, and all positions that share the same
/// variable are joined into a single rotation cycle.
pub(crate) fn calculate_permutations_as_in_a_paper(&self) -> (Vec<usize>, Vec<usize>, Vec<usize>) {
assert!(self.is_finalized);
let num_gates = self.input_gates.len() + self.aux_gates.len();
let num_partitions = self.num_inputs + self.num_aux;
let num_inputs = self.num_inputs;
// in the partition number i there is a set of indexes in V = (a, b, c) such that V_j = i
let mut partitions = vec![vec![]; num_partitions + 1];
for (j, gate) in self.input_gates.iter().chain(&self.aux_gates).enumerate()
{
// a-wire of gate j occupies position j+1 (positions are 1-based)
match gate.a_wire() {
Variable(Index::Input(index)) => {
let i = *index;
partitions[i].push(j+1);
},
Variable(Index::Aux(index)) => {
// aux index 0 is the dummy variable; it takes no part in the permutation
if *index != 0 {
let i = index + num_inputs;
partitions[i].push(j+1);
}
},
}
// b-wire of gate j occupies position j + 1 + num_gates
match gate.b_wire() {
Variable(Index::Input(index)) => {
let i = *index;
partitions[i].push(j + 1 + num_gates);
},
Variable(Index::Aux(index)) => {
if *index != 0 {
let i = index + num_inputs;
partitions[i].push(j + 1 + num_gates);
}
},
}
// c-wire of gate j occupies position j + 1 + 2*num_gates
match gate.c_wire() {
Variable(Index::Input(index)) => {
let i = *index;
partitions[i].push(j + 1 + 2*num_gates);
},
Variable(Index::Aux(index)) => {
if *index != 0 {
let i = index + num_inputs;
partitions[i].push(j + 1 + 2*num_gates);
}
},
}
}
// Start each sigma column from the identity permutation over its own range.
let mut sigma_1: Vec<_> = (1..=num_gates).collect();
let mut sigma_2: Vec<_> = ((num_gates+1)..=(2*num_gates)).collect();
let mut sigma_3: Vec<_> = ((2*num_gates + 1)..=(3*num_gates)).collect();
let mut permutations = vec![vec![]; num_partitions + 1];
// Left-rotate by one element: each occurrence ends up pointing at the next
// occurrence in the cycle (and the last one wraps to the first).
fn rotate(mut vec: Vec<usize>) -> Vec<usize> {
if vec.len() > 0 {
let els: Vec<_> = vec.drain(0..1).collect();
vec.push(els[0]);
}
vec
}
// Partition 0 is skipped: no variable maps to it.
for (i, partition) in partitions.into_iter().enumerate().skip(1) {
// copy-permutation should have a cycle around the partition
let permutation = rotate(partition.clone());
permutations[i] = permutation.clone();
for (original, new) in partition.into_iter()
.zip(permutation.into_iter())
{
// Route the rotated target into whichever sigma column `original` falls in;
// the debug_assert checks each slot is still at its identity value.
if original <= num_gates {
debug_assert!(sigma_1[original - 1] == original);
sigma_1[original - 1] = new;
} else if original <= 2*num_gates {
debug_assert!(sigma_2[original - num_gates - 1] == original);
sigma_2[original - num_gates - 1] = new;
} else {
debug_assert!(sigma_3[original - 2*num_gates - 1] == original);
sigma_3[original - 2*num_gates - 1] = new;
}
}
}
(sigma_1, sigma_2, sigma_3)
}
/// Returns the identity permutation over all wire indexes: [1, 2, ..., num_gates].
fn make_s_id(&self) -> Vec<usize> {
    let total_gates = self.input_gates.len() + self.aux_gates.len();
    (1..=total_gates).collect()
}
/// Produces all nine setup polynomials of the circuit in coefficient
/// (monomial) form: the five selectors, the identity polynomial `s_id`
/// and the three copy-permutation polynomials. Requires a finalized circuit.
pub(crate) fn output_setup_polynomials(&self, worker: &Worker) -> Result<
(
Polynomial::<E::Fr, Coefficients>, // q_l
Polynomial::<E::Fr, Coefficients>, // q_r
Polynomial::<E::Fr, Coefficients>, // q_o
Polynomial::<E::Fr, Coefficients>, // q_m
Polynomial::<E::Fr, Coefficients>, // q_c
Polynomial::<E::Fr, Coefficients>, // s_id
Polynomial::<E::Fr, Coefficients>, // sigma_1
Polynomial::<E::Fr, Coefficients>, // sigma_2
Polynomial::<E::Fr, Coefficients>, // sigma_3
), SynthesisError>
{
    assert!(self.is_finalized);

    // Selector polynomials as values over the domain.
    let (q_l, q_r, q_o, q_m, q_c) = self.make_circuit_description_polynomials(&worker)?;

    // Identity and copy-permutation index vectors, lifted into the field
    // and wrapped as value-form polynomials.
    let identity = self.make_s_id();
    let (perm_1, perm_2, perm_3) = self.calculate_permutations_as_in_a_paper();

    let s_id = Polynomial::from_values(convert_to_field_elements::<E::Fr>(&identity, &worker))?;
    let sigma_1 = Polynomial::from_values(convert_to_field_elements::<E::Fr>(&perm_1, &worker))?;
    let sigma_2 = Polynomial::from_values(convert_to_field_elements::<E::Fr>(&perm_2, &worker))?;
    let sigma_3 = Polynomial::from_values(convert_to_field_elements::<E::Fr>(&perm_3, &worker))?;

    // Interpolate everything into monomial form before returning.
    Ok((
        q_l.ifft(&worker),
        q_r.ifft(&worker),
        q_o.ifft(&worker),
        q_m.ifft(&worker),
        q_c.ifft(&worker),
        s_id.ifft(&worker),
        sigma_1.ifft(&worker),
        sigma_2.ifft(&worker),
        sigma_3.ifft(&worker),
    ))
}
/// Total number of gates (public-input gates plus auxiliary gates).
/// Only valid after `finalize` has run.
pub fn num_gates(&self) -> usize {
    assert!(self.is_finalized);
    let input_count = self.input_gates.len();
    let aux_count = self.aux_gates.len();
    input_count + aux_count
}
/// Pads the circuit with empty gates so that (num_gates + 1) is a power of
/// two, which the evaluation domain requires. Idempotent: a second call is
/// a no-op.
pub fn finalize(&mut self) {
    if self.is_finalized {
        return;
    }
    let n = self.input_gates.len() + self.aux_gates.len();
    if !(n + 1).is_power_of_two() {
        // Grow the aux gates up to 2^k - 1 total gates with no-op padding.
        let target_aux_len = (n + 1).next_power_of_two() - 1 - self.input_gates.len();
        let filler = Gate::<E::Fr>::new_empty_gate(self.dummy_variable());
        self.aux_gates.resize(target_aux_len, filler);
    }
    self.is_finalized = true;
}
/// Checks the gate equation q_l*a + q_r*b + q_o*c + q_m*a*b + q_c = 0 against
/// the witness for every gate (for input gates the public input value, negated,
/// plays the role of the constant term and q_c must be zero). Returns `false`
/// on the first unsatisfied gate, printing diagnostics; `true` otherwise.
pub fn is_satisfied(mut self) -> bool {
// self.finalize();
// assert!(self.is_finalized);
// Lift a symbolic selector coefficient into a concrete field element.
fn coeff_into_field_element<F: PrimeField>(coeff: & Coeff<F>) -> F {
match coeff {
Coeff::Zero => {
F::zero()
},
Coeff::One => {
F::one()
},
Coeff::NegativeOne => {
let mut tmp = F::one();
tmp.negate();
tmp
},
Coeff::Full(c) => {
*c
},
}
}
// expect a small number of inputs
for (i, gate) in self.input_gates.iter().enumerate()
{
let q_l = coeff_into_field_element(&gate.q_l);
let q_r = coeff_into_field_element(&gate.q_r);
let q_o = coeff_into_field_element(&gate.q_o);
let q_m = coeff_into_field_element(&gate.q_m);
let q_c = coeff_into_field_element(&gate.q_c);
// Input gates must take their constant from the assignment, not from q_c.
assert!(q_c.is_zero(), "should not hardcode a constant into the input gate");
let a_value = self.get_value(*gate.a_wire()).expect("must get a variable value");
let b_value = self.get_value(*gate.b_wire()).expect("must get a variable value");
let c_value = self.get_value(*gate.c_wire()).expect("must get a variable value");
let input_value = self.input_assingments[i];
// res = q_l*a + q_r*b + q_o*c + q_m*a*b - input_value, must be zero
let mut res = input_value;
res.negate();
let mut tmp = q_l;
tmp.mul_assign(&a_value);
res.add_assign(&tmp);
let mut tmp = q_r;
tmp.mul_assign(&b_value);
res.add_assign(&tmp);
let mut tmp = q_o;
tmp.mul_assign(&c_value);
res.add_assign(&tmp);
let mut tmp = q_m;
tmp.mul_assign(&a_value);
tmp.mul_assign(&b_value);
res.add_assign(&tmp);
if !res.is_zero() {
println!("Unsatisfied at input gate {}: {:?}", i+1, gate);
println!("A value = {}, B value = {}, C value = {}", a_value, b_value, c_value);
return false;
}
}
for (i, gate) in self.aux_gates.iter().enumerate()
{
let q_l = coeff_into_field_element(&gate.q_l);
let q_r = coeff_into_field_element(&gate.q_r);
let q_o = coeff_into_field_element(&gate.q_o);
let q_m = coeff_into_field_element(&gate.q_m);
let q_c = coeff_into_field_element(&gate.q_c);
let a_value = self.get_value(*gate.a_wire()).expect("must get a variable value");
let b_value = self.get_value(*gate.b_wire()).expect("must get a variable value");
let c_value = self.get_value(*gate.c_wire()).expect("must get a variable value");
// res = q_c + q_l*a + q_r*b + q_o*c + q_m*a*b, must be zero
let mut res = q_c;
let mut tmp = q_l;
tmp.mul_assign(&a_value);
res.add_assign(&tmp);
let mut tmp = q_r;
tmp.mul_assign(&b_value);
res.add_assign(&tmp);
let mut tmp = q_o;
tmp.mul_assign(&c_value);
res.add_assign(&tmp);
let mut tmp = q_m;
tmp.mul_assign(&a_value);
tmp.mul_assign(&b_value);
res.add_assign(&tmp);
if !res.is_zero() {
println!("Unsatisfied at aux gate {}", i+1);
println!("Gate {:?}", *gate);
println!("A = {}, B = {}, C = {}", a_value, b_value, c_value);
return false;
}
}
true
}
/// Evaluates Z^{-1}(X) = (X - omega^n) / (X^(n+1) - 1) over a coset of
/// `poly_size` points, where n+1 = `vahisning_size`.
/// NOTE(review): "vahisning" is a typo for "vanishing"; the parameter name is
/// kept unchanged here.
fn calculate_inverse_vanishing_polynomial_in_a_coset(&self, worker: &Worker, poly_size:usize, vahisning_size: usize) -> Result<Polynomial::<E::Fr, Values>, SynthesisError> {
assert!(poly_size.is_power_of_two());
assert!(vahisning_size.is_power_of_two());
// update from the paper - it should not hold for the last generator, omega^(n) in original notations
// Z(X) = (X^(n+1) - 1) / (X - omega^(n)) => Z^{-1}(X) = (X - omega^(n)) / (X^(n+1) - 1)
let domain = Domain::<E::Fr>::new_for_size(vahisning_size as u64)?;
let n_domain_omega = domain.generator;
// root = -omega^(n); adding it below turns the numerator values into X - omega^(n)
let mut root = n_domain_omega.pow([(vahisning_size - 1) as u64]);
root.negate();
let multiplicative_generator = E::Fr::multiplicative_generator();
let mut negative_one = E::Fr::one();
negative_one.negate();
// Numerator starts as the constant coset shift h at every point.
let mut numerator = Polynomial::<E::Fr, Values>::from_values(vec![multiplicative_generator; poly_size])?;
// evaluate X in linear time
numerator.distribute_powers(&worker, numerator.omega);
numerator.add_constant(&worker, &root);
// numerator.add_constant(&worker, &negative_one);
// now it's a series of values in a coset
// now we should evaluate X^(n+1) - 1 in a linear time
let shift = multiplicative_generator.pow([vahisning_size as u64]);
let mut denominator = Polynomial::<E::Fr, Values>::from_values(vec![shift; poly_size])?;
// elements are h^size - 1, (hg)^size - 1, (hg^2)^size - 1, ...
denominator.distribute_powers(&worker, denominator.omega.pow([vahisning_size as u64]))
;
denominator.add_constant(&worker, &negative_one);
// Pointwise inversion of the whole denominator vector via batch inversion.
denominator.batch_inversion(&worker)?;
numerator.mul_assign(&worker, &denominator);
Ok(numerator)
}
/// Evaluates Z^{-1}(X) = (X - omega^n) / (X^(n+1) - 1) at a single `point`,
/// where n+1 = `vahisning_size` (sic — typo kept). Panics if the denominator
/// is zero, i.e. if `point` lies on the domain.
fn evaluate_inverse_vanishing_poly(&self, vahisning_size: usize, point: E::Fr) -> E::Fr {
    assert!(vahisning_size.is_power_of_two());
    // update from the paper - it should not hold for the last generator, omega^(n) in original notations
    // Z(X) = (X^(n+1) - 1) / (X - omega^(n)) => Z^{-1}(X) = (X - omega^(n)) / (X^(n+1) - 1)
    let domain = Domain::<E::Fr>::new_for_size(vahisning_size as u64).expect("should fit");
    let last_root = domain.generator.pow([(vahisning_size - 1) as u64]);

    // numerator: point - omega^(n)
    let mut result = point;
    result.sub_assign(&last_root);

    // denominator: point^(n+1) - 1, inverted
    let mut denom = point.pow([vahisning_size as u64]);
    denom.sub_assign(&E::Fr::one());
    let denom_inv = denom.inverse().expect("must exist");

    result.mul_assign(&denom_inv);
    result
}
/// Builds the Lagrange basis polynomial L_{poly_number} over a domain of
/// `poly_size` points, in coefficient form: the interpolation of the
/// indicator vector that is 1 at `poly_number` and 0 everywhere else.
fn calculate_lagrange_poly(&self, worker: &Worker, poly_size:usize, poly_number: usize) -> Result<Polynomial::<E::Fr, Coefficients>, SynthesisError> {
    assert!(poly_size.is_power_of_two());
    assert!(poly_number < poly_size);

    let mut indicator = Polynomial::<E::Fr, Values>::from_values(vec![E::Fr::zero(); poly_size])?;
    indicator.as_mut()[poly_number] = E::Fr::one();
    Ok(indicator.ifft(&worker))
}
}
// pub struct RedshiftProof<F: PrimeField, I: IopInstance<F>, FRI: FriIop<F, IopType = I>>{
// pub a_opening_value: F,
// pub b_opening_value: F,
// pub c_opening_value: F,
// pub q_l_opening_value: F,
// pub q_r_opening_value: F,
// pub q_o_opening_value: F,
// pub q_m_opening_value: F,
// pub q_c_opening_value: F,
// pub s_id_opening_value: F,
// pub sigma_1_opening_value: F,
// pub sigma_2_opening_value: F,
// pub sigma_3_opening_value: F,
// pub z_1_unshifted_opening_value: F,
// pub z_2_unshifted_opening_value: F,
// pub z_1_shifted_opening_value: F,
// pub z_2_shifted_opening_value: F,
// pub t_low_opening_value: F,
// pub t_mid_opening_value: F,
// pub t_high_opening_value: F,
// pub a_commitment: I::Commitment,
// pub b_commitment: I::Commitment,
// pub c_commitment: I::Commitment,
// pub z_1_commitment: I::Commitment,
// pub z_2_commitment: I::Commitment,
// pub t_low_commitment: I::Commitment,
// pub t_mid_commitment: I::Commitment,
// pub t_high_commitment: I::Commitment,
// pub openings_proof: FRI::Proof,
// }
use crate::plonk::fft::cooley_tukey_ntt::CTPrecomputations;
use crate::pairing::{CurveAffine, CurveProjective};
use crate::pairing::EncodedPoint;
#[derive(Debug)]
/// Commitments (affine G1 points) to the nine setup polynomials of a
/// finalized circuit, plus its gate count.
pub struct PlonkSetup<E: Engine>{
// number of gates in the circuit
pub n: usize,
// selector polynomial commitments
pub q_l: E::G1Affine,
pub q_r: E::G1Affine,
pub q_o: E::G1Affine,
pub q_m: E::G1Affine,
pub q_c: E::G1Affine,
// identity permutation polynomial commitment
pub s_id: E::G1Affine,
// copy-permutation polynomial commitments
pub sigma_1: E::G1Affine,
pub sigma_2: E::G1Affine,
pub sigma_3: E::G1Affine,
}
// #[derive(Debug)]
/// Precomputed value-form ("aux") copies of the setup polynomials, consumed
/// by `prove_with_setup_precomputed` so the prover does not have to rebuild
/// them for every proof.
pub struct PlonkSetupPrecomputation<E: Engine>{
pub q_l_aux: Polynomial<E::Fr, Values>,
pub q_r_aux: Polynomial<E::Fr, Values>,
pub q_o_aux: Polynomial<E::Fr, Values>,
pub q_m_aux: Polynomial<E::Fr, Values>,
pub q_c_aux: Polynomial<E::Fr, Values>,
pub s_id_aux: Polynomial<E::Fr, Values>,
pub sigma_1_aux: Polynomial<E::Fr, Values>,
pub sigma_2_aux: Polynomial<E::Fr, Values>,
pub sigma_3_aux: Polynomial<E::Fr, Values>,
}
/// A batch of coefficient-form polynomials to be opened at a single point,
/// together with their claimed values at that point.
struct OpeningRequest<'a, E: Engine> {
polynomials: Vec<&'a Polynomial<E::Fr, Coefficients>>,
opening_point: E::Fr,
opening_values: Vec<E::Fr>
}
use crate::multiexp::dense_multiexp;
/// Converts field elements into their bigint representations in parallel,
/// chunking the work across the worker's thread pool. The output is
/// position-for-position aligned with `scalars`.
pub(crate) fn field_elements_into_representations<E: Engine>(
    worker: &Worker,
    scalars: Vec<E::Fr>
) -> Result<Vec<<E::Fr as PrimeField>::Repr>, SynthesisError>
{
    // One default-initialized slot per scalar, overwritten below.
    let mut reprs = vec![<E::Fr as PrimeField>::Repr::default(); scalars.len()];

    worker.scope(scalars.len(), |scope, chunk_size| {
        for (src, dst) in scalars.chunks(chunk_size).zip(reprs.chunks_mut(chunk_size)) {
            scope.spawn(move |_| {
                for (scalar, slot) in src.iter().zip(dst.iter_mut()) {
                    *slot = scalar.into_repr();
                }
            });
        }
    });

    Ok(reprs)
}
impl<E: Engine> ProvingAssembly<E> {
/// Commits to one coefficient-form polynomial: converts its coefficients
/// into bigint representations and runs a dense multi-scalar multiplication
/// against `bases`.
pub(crate) fn commit_single_poly(
    poly: &Polynomial<E::Fr, Coefficients>,
    bases: &[E::G1Affine],
    worker: &Worker
) -> Result<E::G1Affine, SynthesisError> {
    let coeffs = poly.as_ref().to_owned();
    let reprs = field_elements_into_representations::<E>(&worker, coeffs)?;
    let commitment = dense_multiexp(&worker, bases, &reprs)?;
    Ok(commitment.into_affine())
}
/// Synthetic (Ruffini/Horner) division of `poly` by the linear factor
/// (X - opening_point), walking from the highest coefficient down. Only the
/// quotient is produced; the remainder (the opening value) is discarded.
/// The returned vector has the same length as `poly`; its top slot stays zero
/// because the quotient has degree one less.
fn divide_single(
poly: &[E::Fr],
opening_point: E::Fr,
) -> Vec<E::Fr> {
// we are only interested in quotient without a reminder, so we actually don't need opening value
let mut b = opening_point;
b.negate();
let mut q = vec![E::Fr::zero(); poly.len()];
let mut tmp = E::Fr::zero();
let mut found_one = false;
// `found_one` skips any leading zero coefficients before division starts.
for (q, r) in q.iter_mut().rev().skip(1).zip(poly.iter().rev()) {
if !found_one {
if r.is_zero() {
continue
} else {
found_one = true;
}
}
// next quotient coeff = current poly coeff - tmp,
// where tmp carries (-opening_point) * previous quotient coeff
let mut lead_coeff = *r;
lead_coeff.sub_assign(&tmp);
*q = lead_coeff;
tmp = lead_coeff;
tmp.mul_assign(&b);
}
q
}
/// Produces a single opening proof for a batch of polynomials at one point:
/// folds them into one aggregate using powers of a transcript challenge,
/// divides by (X - opening_point), and commits to the quotient.
fn multiopening<T: Transcript<E::Fr>>
(
    opening_request: OpeningRequest<E>,
    bases: &[E::G1Affine],
    worker: &Worker,
    transcript: &mut T
) -> Result<E::G1Affine, SynthesisError> {
    let aggregate_size = opening_request.polynomials[0].size();
    let challenge = transcript.get_challenge();

    // aggregate = sum_i challenge^i * poly_i
    let mut aggregate = Polynomial::from_coeffs(vec![E::Fr::zero(); aggregate_size])?;
    let mut power = E::Fr::one();
    for poly in opening_request.polynomials.iter() {
        aggregate.add_assign_scaled(&worker, poly, &power);
        power.mul_assign(&challenge);
    }

    // Quotient (aggregate(X) - aggregate(z)) / (X - z), then its commitment.
    let quotient_coeffs = Self::divide_single(aggregate.as_ref(), opening_request.opening_point);
    let quotient = Polynomial::from_coeffs(quotient_coeffs)?;
    Self::commit_single_poly(&quotient, bases, &worker)
}
fn prove_with_setup_precomputed<CP: CTPrecomputations<E::Fr>, CPI: CTPrecomputations<E::Fr>, T: Transcript<E::Fr> >(
self,
setup_precomp: &PlonkSetupPrecomputation<E>,
worker: &Worker,
omegas_bitreversed: &CP,
omegas_inv_bitreversed: &CPI,
bases: &[E::G1Affine]
) -> Result<(), SynthesisError> {
assert!(self.is_finalized);
let mut transcript = T::new();
let n = self.input_gates.len() + self.aux_gates.len();
// we need n+1 to be a power of two and can not have n to be power of two
let required_domain_size = n + 1;
assert!(required_domain_size.is_power_of_two());
let (w_l, w_r, w_o) = self.make_wire_assingments();
// these are 2^k - 1 size and explicitly unpadded
let w_l = Polynomial::<E::Fr, Values>::from_values_unpadded(w_l)?;
let w_r = Polynomial::<E::Fr, Values>::from_values_unpadded(w_r)?;
let w_o = Polynomial::<E::Fr, Values>::from_values_unpadded(w_o)?;
let a_poly = w_l.clone_padded_to_domain()?.ifft_using_bitreversed_ntt(&worker, omegas_inv_bitreversed, &E::Fr::one())?;
let b_poly = w_r.clone_padded_to_domain()?.ifft_using_bitreversed_ntt(&worker, omegas_inv_bitreversed, &E::Fr::one())?;
let c_poly = w_o.clone_padded_to_domain()?.ifft_using_bitreversed_ntt(&worker, omegas_inv_bitreversed, &E::Fr::one())?;
// polynomials inside of these is are values in cosets
let a_commitment_data = Self::commit_single_poly(&a_poly, bases, &worker)?;
let b_commitment_data = Self::commit_single_poly(&b_poly, bases, &worker)?;
let c_commitment_data = Self::commit_single_poly(&c_poly, bases, &worker)?;
transcript.commit_bytes(a_commitment_data.into_compressed().as_ref());
transcript.commit_bytes(b_commitment_data.into_compressed().as_ref());
transcript.commit_bytes(c_commitment_data.into_compressed().as_ref());
// TODO: Add public inputs
let beta = transcript.get_challenge();
let gamma = transcript.get_challenge();
let mut w_l_plus_gamma = w_l.clone();
w_l_plus_gamma.add_constant(&worker, &gamma);
let mut w_r_plus_gamma = w_r.clone();
w_r_plus_gamma.add_constant(&worker, &gamma);
let mut w_o_plus_gamma = w_o.clone();
w_o_plus_gamma.add_constant(&worker, &gamma);
// we take A, B, C values and form (A + beta*i + gamma), etc and calculate their grand product
let z_1 = {
let n = self.input_gates.len() + self.aux_gates.len();
let s_id_1: Vec<_> = (1..=n).collect();
let s_id_1 = convert_to_field_elements(&s_id_1, &worker);
let s_id_1 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_1)?;
let mut w_l_contribution = w_l_plus_gamma.clone();
w_l_contribution.add_assign_scaled(&worker, &s_id_1, &beta);
drop(s_id_1);
let s_id_2: Vec<_> = ((n+1)..=(2*n)).collect();
let s_id_2 = convert_to_field_elements(&s_id_2, &worker);
let s_id_2 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_2)?;
let mut w_r_contribution = w_r_plus_gamma.clone();
w_r_contribution.add_assign_scaled(&worker, &s_id_2, &beta);
drop(s_id_2);
w_l_contribution.mul_assign(&worker, &w_r_contribution);
drop(w_r_contribution);
let s_id_3: Vec<_> = ((2*n+1)..=(3*n)).collect();
let s_id_3 = convert_to_field_elements(&s_id_3, &worker);
let s_id_3 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_3)?;
let mut w_o_contribution = w_o_plus_gamma.clone();
w_o_contribution.add_assign_scaled(&worker, &s_id_3, &beta);
drop(s_id_3);
w_l_contribution.mul_assign(&worker, &w_o_contribution);
drop(w_o_contribution);
let grand_product = w_l_contribution.calculate_grand_product(&worker)?;
drop(w_l_contribution);
let values = grand_product.into_coeffs();
assert!((values.len() + 1).is_power_of_two());
let mut prepadded = Vec::with_capacity(values.len() + 1);
prepadded.push(E::Fr::one());
prepadded.extend(values);
Polynomial::<E::Fr, Values>::from_values(prepadded)?
};
let z_2 = {
let (sigma_1, sigma_2, sigma_3) = self.calculate_permutations_as_in_a_paper();
let sigma_1 = convert_to_field_elements(&sigma_1, &worker);
let sigma_1 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_1)?;
let mut w_l_contribution = w_l_plus_gamma.clone();
w_l_contribution.add_assign_scaled(&worker, &sigma_1, &beta);
drop(sigma_1);
let sigma_2 = convert_to_field_elements(&sigma_2, &worker);
let sigma_2 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_2)?;
let mut w_r_contribution = w_r_plus_gamma.clone();
w_r_contribution.add_assign_scaled(&worker, &sigma_2, &beta);
drop(sigma_2);
w_l_contribution.mul_assign(&worker, &w_r_contribution);
drop(w_r_contribution);
let sigma_3 = convert_to_field_elements(&sigma_3, &worker);
let sigma_3 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_3)?;
let mut w_o_contribution = w_o_plus_gamma.clone();
w_o_contribution.add_assign_scaled(&worker, &sigma_3, &beta);
drop(sigma_3);
w_l_contribution.mul_assign(&worker, &w_o_contribution);
drop(w_o_contribution);
let grand_product = w_l_contribution.calculate_grand_product(&worker)?;
drop(w_l_contribution);
let values = grand_product.into_coeffs();
assert!((values.len() + 1).is_power_of_two());
let mut prepadded = Vec::with_capacity(values.len() + 1);
prepadded.push(E::Fr::one());
prepadded.extend(values);
let z_2 = Polynomial::<E::Fr, Values>::from_values(prepadded)?;
z_2
};
assert!(z_2.as_ref().last().expect("must exist") == z_1.as_ref().last().expect("must exist"));
// interpolate on the main domain
let z_1 = z_1.ifft_using_bitreversed_ntt(&worker, omegas_inv_bitreversed, &E::Fr::one())?;
let z_2 = z_2.ifft_using_bitreversed_ntt(&worker, omegas_inv_bitreversed, &E::Fr::one())?;
// polynomials inside of these is are values in cosets
let z_1_commitment_data = Self::commit_single_poly(&z_1, &bases, &worker)?;
let z_2_commitment_data = Self::commit_single_poly(&z_2, &bases, &worker)?;
transcript.commit_bytes(z_1_commitment_data.into_compressed().as_ref());
transcript.commit_bytes(z_2_commitment_data.into_compressed().as_ref());
let mut z_1_shifted = z_1.clone();
z_1_shifted.distribute_powers(&worker, z_1.omega);
let mut z_2_shifted = z_2.clone();
z_2_shifted.distribute_powers(&worker, z_2.omega);
let a_coset_lde_bitreversed = a_poly.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
4,
omegas_bitreversed,
&E::Fr::multiplicative_generator()
)?;
let b_coset_lde_bitreversed = b_poly.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
4,
omegas_bitreversed,
&E::Fr::multiplicative_generator()
)?;
let c_coset_lde_bitreversed = c_poly.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
4,
omegas_bitreversed,
&E::Fr::multiplicative_generator()
)?;
let q_l_coset_lde_bitreversed = setup_precomp.q_l_aux.clone();
let q_r_coset_lde_bitreversed = setup_precomp.q_r_aux.clone();
let q_o_coset_lde_bitreversed = setup_precomp.q_o_aux.clone();
let q_m_coset_lde_bitreversed = setup_precomp.q_m_aux.clone();
let q_c_coset_lde_bitreversed = setup_precomp.q_c_aux.clone();
let s_id_coset_lde_bitreversed = setup_precomp.s_id_aux.clone();
let sigma_1_coset_lde_bitreversed = setup_precomp.sigma_1_aux.clone();
let sigma_2_coset_lde_bitreversed = setup_precomp.sigma_2_aux.clone();
let sigma_3_coset_lde_bitreversed = setup_precomp.sigma_3_aux.clone();
let (q_l, q_r, q_o, q_m, q_c, s_id, sigma_1, sigma_2, sigma_3) = self.output_setup_polynomials(&worker)?;
// we do not commit those cause those are known already
let n_fe = E::Fr::from_str(&n.to_string()).expect("must be valid field element");
let mut two_n_fe = n_fe;
two_n_fe.double();
let alpha = transcript.get_challenge();
// TODO: may be speedup this one too
let mut vanishing_poly_inverse_bitreversed = self.calculate_inverse_vanishing_polynomial_in_a_coset(&worker, q_l_coset_lde_bitreversed.size(), required_domain_size.next_power_of_two())?;
vanishing_poly_inverse_bitreversed.bitreverse_enumeration(&worker);
let mut t_1 = {
let mut t_1 = q_c_coset_lde_bitreversed;
let mut q_l_by_a = q_l_coset_lde_bitreversed;
q_l_by_a.mul_assign(&worker, &a_coset_lde_bitreversed);
t_1.add_assign(&worker, &q_l_by_a);
drop(q_l_by_a);
let mut q_r_by_b = q_r_coset_lde_bitreversed;
q_r_by_b.mul_assign(&worker, &b_coset_lde_bitreversed);
t_1.add_assign(&worker, &q_r_by_b);
drop(q_r_by_b);
let mut q_o_by_c = q_o_coset_lde_bitreversed;
q_o_by_c.mul_assign(&worker, &c_coset_lde_bitreversed);
t_1.add_assign(&worker, &q_o_by_c);
drop(q_o_by_c);
let mut q_m_by_ab = q_m_coset_lde_bitreversed;
q_m_by_ab.mul_assign(&worker, &a_coset_lde_bitreversed);
q_m_by_ab.mul_assign(&worker, &b_coset_lde_bitreversed);
t_1.add_assign(&worker, &q_m_by_ab);
drop(q_m_by_ab);
vanishing_poly_inverse_bitreversed.scale(&worker, alpha);
t_1.mul_assign(&worker, &vanishing_poly_inverse_bitreversed);
t_1
};
fn get_degree<F: PrimeField>(poly: &Polynomial<F, Coefficients>) -> usize {
let mut degree = poly.as_ref().len() - 1;
for c in poly.as_ref().iter().rev() {
if c.is_zero() {
degree -= 1;
} else {
break;
}
}
println!("Degree = {}", degree);
degree
}
let z_1_coset_lde_bitreversed = z_1.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
4,
omegas_bitreversed,
&E::Fr::multiplicative_generator()
)?;
assert!(z_1_coset_lde_bitreversed.size() == required_domain_size*4);
let z_1_shifted_coset_lde_bitreversed = z_1_shifted.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
4,
omegas_bitreversed,
&E::Fr::multiplicative_generator()
)?;
assert!(z_1_shifted_coset_lde_bitreversed.size() == required_domain_size*4);
let z_2_coset_lde_bitreversed = z_2.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
4,
omegas_bitreversed,
&E::Fr::multiplicative_generator()
)?;
assert!(z_2_coset_lde_bitreversed.size() == required_domain_size*4);
let z_2_shifted_coset_lde_bitreversed = z_2_shifted.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
4,
omegas_bitreversed,
&E::Fr::multiplicative_generator()
)?;
assert!(z_2_shifted_coset_lde_bitreversed.size() == required_domain_size*4);
// (A + beta*i + gamma)(B + beta(n+i) + gamma)(C + beta(2n+i) + gamma)*Z(k) = Z(k+1)
{
// TODO: May be optimize number of additions
let mut contrib_z_1 = z_1_coset_lde_bitreversed.clone();
let mut s_id_by_beta = s_id_coset_lde_bitreversed;
s_id_by_beta.scale(&worker, beta);
let mut n_by_beta = n_fe;
n_by_beta.mul_assign(&beta);
let mut a_perm = s_id_by_beta.clone();
a_perm.add_constant(&worker, &gamma);
a_perm.add_assign(&worker, &a_coset_lde_bitreversed);
contrib_z_1.mul_assign(&worker, &a_perm);
drop(a_perm);
s_id_by_beta.add_constant(&worker, &n_by_beta);
let mut b_perm = s_id_by_beta.clone();
b_perm.add_constant(&worker, &gamma);
b_perm.add_assign(&worker, &b_coset_lde_bitreversed);
contrib_z_1.mul_assign(&worker, &b_perm);
drop(b_perm);
s_id_by_beta.add_constant(&worker, &n_by_beta);
let mut c_perm = s_id_by_beta;
c_perm.add_constant(&worker, &gamma);
c_perm.add_assign(&worker, &c_coset_lde_bitreversed);
contrib_z_1.mul_assign(&worker, &c_perm);
drop(c_perm);
contrib_z_1.sub_assign(&worker, &z_1_shifted_coset_lde_bitreversed);
vanishing_poly_inverse_bitreversed.scale(&worker, alpha);
contrib_z_1.mul_assign(&worker, &vanishing_poly_inverse_bitreversed);
t_1.add_assign(&worker, &contrib_z_1);
}
{
// TODO: May be optimize number of additions
let mut contrib_z_2 = z_2_coset_lde_bitreversed.clone();
let mut a_perm = sigma_1_coset_lde_bitreversed;
a_perm.scale(&worker, beta);
a_perm.add_constant(&worker, &gamma);
a_perm.add_assign(&worker, &a_coset_lde_bitreversed);
contrib_z_2.mul_assign(&worker, &a_perm);
drop(a_perm);
let mut b_perm = sigma_2_coset_lde_bitreversed;
b_perm.scale(&worker, beta);
b_perm.add_constant(&worker, &gamma);
b_perm.add_assign(&worker, &b_coset_lde_bitreversed);
contrib_z_2.mul_assign(&worker, &b_perm);
drop(b_perm);
let mut c_perm = sigma_3_coset_lde_bitreversed;
c_perm.scale(&worker, beta);
c_perm.add_constant(&worker, &gamma);
c_perm.add_assign(&worker, &c_coset_lde_bitreversed);
contrib_z_2.mul_assign(&worker, &c_perm);
drop(c_perm);
contrib_z_2.sub_assign(&worker, &z_2_shifted_coset_lde_bitreversed);
vanishing_poly_inverse_bitreversed.scale(&worker, alpha);
contrib_z_2.mul_assign(&worker, &vanishing_poly_inverse_bitreversed);
t_1.add_assign(&worker, &contrib_z_2);
}
drop(a_coset_lde_bitreversed);
drop(b_coset_lde_bitreversed);
drop(c_coset_lde_bitreversed);
let l_0 = self.calculate_lagrange_poly(&worker, required_domain_size.next_power_of_two(), 0)?;
let l_n_minus_one = self.calculate_lagrange_poly(&worker, required_domain_size.next_power_of_two(), n-1)?;
{
let mut z_1_minus_z_2_shifted = z_1_shifted_coset_lde_bitreversed.clone();
z_1_minus_z_2_shifted.sub_assign(&worker, &z_2_shifted_coset_lde_bitreversed);
let l_coset_lde_bitreversed = l_n_minus_one.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
4,
omegas_bitreversed,
&E::Fr::multiplicative_generator()
)?;
z_1_minus_z_2_shifted.mul_assign(&worker, &l_coset_lde_bitreversed);
drop(l_coset_lde_bitreversed);
vanishing_poly_inverse_bitreversed.scale(&worker, alpha);
z_1_minus_z_2_shifted.mul_assign(&worker, &vanishing_poly_inverse_bitreversed);
t_1.add_assign(&worker, &z_1_minus_z_2_shifted);
}
{
let mut z_1_minus_z_2 = z_1_coset_lde_bitreversed.clone();
z_1_minus_z_2.sub_assign(&worker, &z_2_coset_lde_bitreversed);
let l_coset_lde_bitreversed = l_0.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
4,
omegas_bitreversed,
&E::Fr::multiplicative_generator()
)?;
z_1_minus_z_2.mul_assign(&worker, &l_coset_lde_bitreversed);
drop(l_coset_lde_bitreversed);
vanishing_poly_inverse_bitreversed.scale(&worker, alpha);
z_1_minus_z_2.mul_assign(&worker, &vanishing_poly_inverse_bitreversed);
t_1.add_assign(&worker, &z_1_minus_z_2);
}
drop(z_1_coset_lde_bitreversed);
drop(z_2_coset_lde_bitreversed);
drop(z_1_shifted_coset_lde_bitreversed);
drop(z_2_shifted_coset_lde_bitreversed);
t_1.bitreverse_enumeration(&worker);
let t_poly = t_1.icoset_fft_for_generator(&worker, &E::Fr::multiplicative_generator());
debug_assert!(get_degree::<E::Fr>(&t_poly) <= 3*n);
let mut t_poly_parts = t_poly.break_into_multiples(required_domain_size)?;
t_poly_parts.pop().expect("last part is irrelevant");
let t_poly_high = t_poly_parts.pop().expect("high exists");
let t_poly_mid = t_poly_parts.pop().expect("mid exists");
let t_poly_low = t_poly_parts.pop().expect("low exists");
let t_poly_high_commitment_data = Self::commit_single_poly(&t_poly_high, &bases, &worker)?;
let t_poly_mid_commitment_data = Self::commit_single_poly(&t_poly_mid, &bases, &worker)?;
let t_poly_low_commitment_data = Self::commit_single_poly(&t_poly_low, &bases, &worker)?;
transcript.commit_bytes(t_poly_low_commitment_data.into_compressed().as_ref());
transcript.commit_bytes(t_poly_mid_commitment_data.into_compressed().as_ref());
transcript.commit_bytes(t_poly_high_commitment_data.into_compressed().as_ref());
let z = transcript.get_challenge();
let a_at_z = a_poly.evaluate_at(&worker, z);
let b_at_z = b_poly.evaluate_at(&worker, z);
let c_at_z = c_poly.evaluate_at(&worker, z);
let q_l_at_z = q_l.evaluate_at(&worker, z);
let q_r_at_z = q_r.evaluate_at(&worker, z);
let q_o_at_z = q_o.evaluate_at(&worker, z);
let q_m_at_z = q_m.evaluate_at(&worker, z);
let q_c_at_z = q_c.evaluate_at(&worker, z);
let s_id_at_z = s_id.evaluate_at(&worker, z);
let sigma_1_at_z = sigma_1.evaluate_at(&worker, z);
let sigma_2_at_z = sigma_2.evaluate_at(&worker, z);
let sigma_3_at_z = sigma_3.evaluate_at(&worker, z);
let mut inverse_vanishing_at_z = self.evaluate_inverse_vanishing_poly(required_domain_size.next_power_of_two(), z);
let z_1_at_z = z_1.evaluate_at(&worker, z);
let z_2_at_z = z_2.evaluate_at(&worker, z);
let z_1_shifted_at_z = z_1_shifted.evaluate_at(&worker, z);
let z_2_shifted_at_z = z_2_shifted.evaluate_at(&worker, z);
let t_low_at_z = t_poly_low.evaluate_at(&worker, z);
let t_mid_at_z = t_poly_mid.evaluate_at(&worker, z);
let t_high_at_z = t_poly_high.evaluate_at(&worker, z);
let l_0_at_z = l_0.evaluate_at(&worker, z);
let l_n_minus_one_at_z = l_n_minus_one.evaluate_at(&worker, z);
{
transcript.commit_field_element(&a_at_z);
transcript.commit_field_element(&b_at_z);
transcript.commit_field_element(&c_at_z);
transcript.commit_field_element(&q_l_at_z);
transcript.commit_field_element(&q_r_at_z);
transcript.commit_field_element(&q_o_at_z);
transcript.commit_field_element(&q_m_at_z);
transcript.commit_field_element(&q_c_at_z);
transcript.commit_field_element(&s_id_at_z);
transcript.commit_field_element(&sigma_1_at_z);
transcript.commit_field_element(&sigma_2_at_z);
transcript.commit_field_element(&sigma_3_at_z);
transcript.commit_field_element(&t_low_at_z);
transcript.commit_field_element(&t_mid_at_z);
transcript.commit_field_element(&t_high_at_z);
transcript.commit_field_element(&z_1_at_z);
transcript.commit_field_element(&z_2_at_z);
transcript.commit_field_element(&z_1_shifted_at_z);
transcript.commit_field_element(&z_2_shifted_at_z);
}
// let aggregation_challenge = transcript.get_challenge();
let z_in_pow_of_domain_size = z.pow([required_domain_size as u64]);
// this is a sanity check
{
let mut t_1 = {
let mut res = q_c_at_z;
let mut tmp = q_l_at_z;
tmp.mul_assign(&a_at_z);
res.add_assign(&tmp);
let mut tmp = q_r_at_z;
tmp.mul_assign(&b_at_z);
res.add_assign(&tmp);
let mut tmp = q_o_at_z;
tmp.mul_assign(&c_at_z);
res.add_assign(&tmp);
let mut tmp = q_m_at_z;
tmp.mul_assign(&a_at_z);
tmp.mul_assign(&b_at_z);
res.add_assign(&tmp);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
res
};
{
let mut res = z_1_at_z;
let mut tmp = s_id_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&a_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = s_id_at_z;
tmp.add_assign(&n_fe);
tmp.mul_assign(&beta);
tmp.add_assign(&b_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = s_id_at_z;
tmp.add_assign(&two_n_fe);
tmp.mul_assign(&beta);
tmp.add_assign(&c_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
res.sub_assign(&z_1_shifted_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
{
let mut res = z_2_at_z;
let mut tmp = sigma_1_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&a_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = sigma_2_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&b_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = sigma_3_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&c_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
res.sub_assign(&z_2_shifted_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
{
let mut res = z_1_shifted_at_z;
res.sub_assign(&z_2_shifted_at_z);
res.mul_assign(&l_n_minus_one_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
{
let mut res = z_1_at_z;
res.sub_assign(&z_2_at_z);
res.mul_assign(&l_0_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
let mut t_at_z = E::Fr::zero();
t_at_z.add_assign(&t_low_at_z);
let mut tmp = z_in_pow_of_domain_size;
tmp.mul_assign(&t_mid_at_z);
t_at_z.add_assign(&tmp);
let mut tmp = z_in_pow_of_domain_size;
tmp.mul_assign(&z_in_pow_of_domain_size);
tmp.mul_assign(&t_high_at_z);
t_at_z.add_assign(&tmp);
if t_at_z != t_1 {
println!("Sanity check failed, may be due to public inputs ignored");
}
// assert_eq!(t_at_z, t_1, "sanity check failed");
}
// Compute linearization polynomial
let mut linearization_poly = q_m.clone();
let mut linearization_multiplier = alpha;
{
let mut tmp = q_l_at_z;
tmp.mul_assign(&q_r_at_z);
linearization_poly.scale(&worker, tmp);
linearization_poly.add_assign_scaled(&worker, &q_l, &q_l_at_z);
linearization_poly.add_assign_scaled(&worker, &q_r, &q_r_at_z);
linearization_poly.add_assign_scaled(&worker, &q_o, &q_o_at_z);
linearization_poly.add_assign(&worker, &q_c);
linearization_poly.scale(&worker, linearization_multiplier);
}
linearization_multiplier.mul_assign(&alpha);
{
let mut factor = linearization_multiplier;
let mut tmp = s_id_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&a_at_z);
factor.mul_assign(&tmp);
let mut tmp = s_id_at_z;
tmp.add_assign(&n_fe);
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&b_at_z);
factor.mul_assign(&tmp);
let mut tmp = s_id_at_z;
tmp.add_assign(&two_n_fe);
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&c_at_z);
factor.mul_assign(&tmp);
linearization_poly.add_assign_scaled(&worker, &z_1, &tmp);
}
linearization_multiplier.mul_assign(&alpha);
{
let mut factor = linearization_multiplier;
let mut tmp = sigma_1_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&a_at_z);
factor.mul_assign(&tmp);
let mut tmp = sigma_2_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&b_at_z);
factor.mul_assign(&tmp);
let mut tmp = sigma_3_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&c_at_z);
factor.mul_assign(&tmp);
linearization_poly.add_assign_scaled(&worker, &z_2, &tmp);
}
linearization_multiplier.mul_assign(&alpha);
linearization_multiplier.mul_assign(&alpha);
{
let mut tmp = z_1.clone();
tmp.sub_assign(&worker, &z_2);
linearization_poly.add_assign_scaled(&worker, &tmp, &linearization_multiplier);
}
let linearization_poly_at_z = linearization_poly.evaluate_at(&worker, z);
transcript.commit_field_element(&linearization_poly_at_z);
let mut z_by_omega = z;
z_by_omega.mul_assign(&z_1.omega);
let request_at_z = OpeningRequest {
polynomials: vec![
&a_poly,
&b_poly,
&c_poly,
&z_1,
&z_2,
&s_id,
&sigma_1,
&sigma_2,
&sigma_3,
&t_poly_low,
&t_poly_mid,
&t_poly_high
],
opening_point: z,
opening_values: vec![
a_at_z,
b_at_z,
c_at_z,
z_1_at_z,
z_2_at_z,
s_id_at_z,
sigma_1_at_z,
sigma_2_at_z,
sigma_3_at_z,
t_low_at_z,
t_mid_at_z,
t_high_at_z,
]
};
let request_at_z_omega = OpeningRequest {
polynomials: vec![
&z_1,
&z_2
],
opening_point: z_by_omega,
opening_values: vec![
z_1_shifted_at_z,
z_2_shifted_at_z,
]
};
let _ = Self::multiopening(request_at_z, &bases, &worker, &mut transcript);
let _ = Self::multiopening(request_at_z_omega, &bases, &worker, &mut transcript);
Ok(())
// let proof = PlonkChunkedNonhomomorphicProof::<E, S> {
// a_opening_value: a_at_z,
// b_opening_value: b_at_z,
// c_opening_value: c_at_z,
// q_l_opening_value: q_l_at_z,
// q_r_opening_value: q_r_at_z,
// q_o_opening_value: q_o_at_z,
// q_m_opening_value: q_m_at_z,
// q_c_opening_value: q_c_at_z,
// s_id_opening_value: s_id_at_z,
// sigma_1_opening_value: sigma_1_at_z,
// sigma_2_opening_value: sigma_2_at_z,
// sigma_3_opening_value: sigma_3_at_z,
// z_1_unshifted_opening_value: z_1_at_z,
// z_2_unshifted_opening_value: z_2_at_z,
// z_1_shifted_opening_value: z_1_shifted_at_z,
// z_2_shifted_opening_value: z_2_shifted_at_z,
// t_low_opening_value: t_low_at_z,
// t_mid_opening_value: t_mid_at_z,
// t_high_opening_value: t_high_at_z,
// a_commitment: a_commitment,
// b_commitment: b_commitment,
// c_commitment: c_commitment,
// z_1_commitment: z_1_commitment,
// z_2_commitment: z_2_commitment,
// t_low_commitment: t_low_commitment,
// t_mid_commitment: t_mid_commitment,
// t_high_commitment: t_high_commitment,
// openings_proof: multiopen_proof,
// };
// Ok(proof)
}
}
#[cfg(test)]
mod test {
use crate::plonk::cs::*;
use crate::pairing::Engine;
use crate::SynthesisError;
use super::*;
use super::super::generator::*;
use crate::ff::{Field, PrimeField};
/// Synthetic benchmark circuit: a Fibonacci chain producing roughly one
/// allocation and one 3-term linear constraint per step.
#[derive(Clone)]
struct BenchmarkCircuit<E: Engine>{
    // number of Fibonacci iterations (controls circuit size)
    num_steps: usize,
    // pins the engine type parameter; no runtime data
    _marker: std::marker::PhantomData<E>
}
impl<E: Engine> Circuit<E> for BenchmarkCircuit<E> {
    /// Allocates the Fibonacci sequence a=1, b=1, c=2, ... and enforces
    /// `a + b - c = 0` for every consecutive triple. Witness values are
    /// tracked alongside the allocated variables so each new `c` can be
    /// computed outside the constraint system.
    fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
        // yeah, fibonacci...
        let one = E::Fr::one();
        let mut negative_one = one;
        negative_one.negate();
        let mut two = one;
        two.double();

        let mut a = cs.alloc(|| {
            Ok(E::Fr::one())
        })?;

        let mut b = cs.alloc(|| {
            Ok(E::Fr::one())
        })?;

        // first two terms are equal: a - b = 0
        cs.enforce_zero_2((a, b), (one, negative_one))?;
        // cs.enforce_zero_2((b, CS::ONE), (one, negative_one))?;

        let mut c = cs.alloc(|| {
            Ok(two)
        })?;

        // a + b - c = 0
        cs.enforce_zero_3((a, b, c), (one, one, negative_one))?;

        // shadow witness values mirroring the allocated wires
        let mut a_value = one;
        let mut b_value = one;
        let mut c_value = two;

        for _ in 0..self.num_steps {
            // shift the window: (a, b, c) <- (b, c, b + c)
            a = b;
            b = c;

            a_value = b_value;
            b_value = c_value;
            c_value.add_assign(&a_value);

            c = cs.alloc(|| {
                Ok(c_value)
            })?;

            cs.enforce_zero_3((a, b, c), (one, one, negative_one))?;
        }

        Ok(())
    }
}
#[test]
fn test_bench_plonk_bls12() {
    // End-to-end proving benchmark over BLS12-381: builds a dummy SRS,
    // then runs setup + proving for several circuit sizes and prints timings.
    use crate::pairing::Engine;
    use crate::pairing::{CurveProjective, CurveAffine};
    use crate::pairing::bls12_381::{Bls12, Fr};
    use crate::plonk::utils::*;
    use crate::worker::Worker;
    // use crate::plonk::tester::*;

    type Transcr = Blake2sTranscript<Fr>;
    type Eng = Bls12;

    use std::time::Instant;

    use crate::plonk::fft::cooley_tukey_ntt::*;
    use crate::plonk::commitments::transparent::fri::coset_combining_fri::precomputation::*;

    // sizes just under powers of two (circuit padding rounds up to 2^k)
    let sizes: Vec<usize> = vec![(1 << 18) - 10, (1 << 19) - 10, (1 << 20) - 10, (1 << 21) - 10, (1 << 22) - 10, (1 << 23) - 10];
    let max_size = *sizes.last().unwrap();

    let worker = Worker::new();

    println!("Making bases");
    let bases = {
        use crate::pairing::Wnaf;
        // fixed "toxic waste" tau — benchmark-only dummy trusted setup
        let tau = Fr::from_str("42").unwrap();
        let powers_of_tau = vec![Fr::one(); max_size.next_power_of_two()];
        let mut powers_of_tau = Polynomial::<Fr, _>::from_coeffs(powers_of_tau).unwrap();
        powers_of_tau.distribute_powers(&worker, tau);
        let powers_of_tau = powers_of_tau.into_coeffs();

        // Compute G1 window table
        let mut g1_wnaf = Wnaf::new();
        let g1 = <Eng as Engine>::G1::one();
        let g1_wnaf = g1_wnaf.base(g1, max_size.next_power_of_two());

        let mut bases = vec![g1; max_size.next_power_of_two()];

        // Compute the H query with multiple threads
        worker.scope(bases.len(), |scope, chunk| {
            for (h, p) in bases.chunks_mut(chunk).zip(powers_of_tau.chunks(chunk))
            {
                let mut g1_wnaf = g1_wnaf.shared();
                scope.spawn(move |_| {
                    // Set values of the H query to g1^{(tau^i * t(tau)) / delta}
                    for (h, p) in h.iter_mut().zip(p.iter())
                    {
                        // Exponentiate
                        *h = g1_wnaf.scalar(p.into_repr());
                    }

                    // Batch normalize
                    <<Eng as Engine>::G1 as CurveProjective>::batch_normalization(h);
                });
            }
        });

        bases.iter().map(|el| el.into_affine()).collect::<Vec<_>>()
    };
    println!("Done making bases");

    for size in sizes.into_iter() {
        let circuit = BenchmarkCircuit::<Eng> {
            // num_steps: 1_000_000,
            num_steps: size,
            _marker: std::marker::PhantomData
        };

        // NTT precomputations for the padded domain
        let omegas_bitreversed = BitReversedOmegas::<Fr>::new_for_domain_size(size.next_power_of_two());
        let omegas_inv_bitreversed = <OmegasInvBitreversed::<Fr> as CTPrecomputations::<Fr>>::new_for_domain_size(size.next_power_of_two());

        println!("Start setup and precomputations");
        let (_, setup_precomp) = setup_with_precomputations::<Eng, _, _>(
            &circuit,
            &omegas_bitreversed,
            &bases[0..size.next_power_of_two()]
        ).unwrap();

        let mut prover = ProvingAssembly::<Eng>::new();
        circuit.synthesize(&mut prover).unwrap();
        prover.finalize();
        println!("End setup and precomputations");

        println!("Start proving");
        let start = Instant::now();
        let _ = prover.prove_with_setup_precomputed::<_, _, Transcr>(
            &setup_precomp,
            &worker,
            &omegas_bitreversed,
            &omegas_inv_bitreversed,
            &bases[0..size.next_power_of_two()]
        ).unwrap();
        println!("Proving taken {:?} for size {}", start.elapsed(), size);
    }

    // {
    //     let mut tester = TestingAssembly::<Transparent252>::new();
    //     circuit.synthesize(&mut tester).expect("must synthesize");
    //     let satisfied = tester.is_satisfied();
    //     assert!(satisfied);
    //     println!("Circuit is satisfied");
    // }

    // println!("Start setup");
    // let start = Instant::now();
    // let (setup, aux) = setup::<Transparent252, Committer, _>(&circuit, meta.clone()).unwrap();
    // println!("Setup taken {:?}", start.elapsed());

    // println!("Using circuit with N = {}", setup.n);

    // println!("Start proving");
    // let start = Instant::now();
    // let proof = prove_nonhomomorphic_chunked::<Transparent252, Committer, Blake2sTranscript::<Fr>, _>(&circuit, &aux, meta.clone()).unwrap();
    // println!("Proof taken {:?}", start.elapsed());

    // println!("Start verifying");
    // let start = Instant::now();
    // let valid = verify_nonhomomorphic_chunked::<Transparent252, Committer, Blake2sTranscript::<Fr>>(&setup, &proof, meta).unwrap();
    // println!("Verification with unnecessary precomputation taken {:?}", start.elapsed());
    // assert!(valid);
}
#[test]
fn test_bench_plonk_bn254() {
    // Same proving benchmark as `test_bench_plonk_bls12`, but over BN254.
    use crate::pairing::Engine;
    use crate::pairing::{CurveProjective, CurveAffine};
    use crate::pairing::bn256::{Bn256, Fr};
    use crate::plonk::utils::*;
    use crate::worker::Worker;
    // use crate::plonk::tester::*;

    type Transcr = Blake2sTranscript<Fr>;
    type Eng = Bn256;

    use std::time::Instant;

    use crate::plonk::fft::cooley_tukey_ntt::*;
    use crate::plonk::commitments::transparent::fri::coset_combining_fri::precomputation::*;

    // sizes just under powers of two (circuit padding rounds up to 2^k)
    let sizes: Vec<usize> = vec![(1 << 18) - 10, (1 << 19) - 10, (1 << 20) - 10, (1 << 21) - 10, (1 << 22) - 10, (1 << 23) - 10];
    let max_size = *sizes.last().unwrap();

    let worker = Worker::new();

    println!("Making bases");
    let bases = {
        use crate::pairing::Wnaf;
        // fixed "toxic waste" tau — benchmark-only dummy trusted setup
        let tau = Fr::from_str("42").unwrap();
        let powers_of_tau = vec![Fr::one(); max_size.next_power_of_two()];
        let mut powers_of_tau = Polynomial::<Fr, _>::from_coeffs(powers_of_tau).unwrap();
        powers_of_tau.distribute_powers(&worker, tau);
        let powers_of_tau = powers_of_tau.into_coeffs();

        // Compute G1 window table
        let mut g1_wnaf = Wnaf::new();
        let g1 = <Eng as Engine>::G1::one();
        let g1_wnaf = g1_wnaf.base(g1, max_size.next_power_of_two());

        let mut bases = vec![g1; max_size.next_power_of_two()];

        // Compute the H query with multiple threads
        worker.scope(bases.len(), |scope, chunk| {
            for (h, p) in bases.chunks_mut(chunk).zip(powers_of_tau.chunks(chunk))
            {
                let mut g1_wnaf = g1_wnaf.shared();
                scope.spawn(move |_| {
                    // Set values of the H query to g1^{(tau^i * t(tau)) / delta}
                    for (h, p) in h.iter_mut().zip(p.iter())
                    {
                        // Exponentiate
                        *h = g1_wnaf.scalar(p.into_repr());
                    }

                    // Batch normalize
                    <<Eng as Engine>::G1 as CurveProjective>::batch_normalization(h);
                });
            }
        });

        bases.iter().map(|el| el.into_affine()).collect::<Vec<_>>()
    };
    println!("Done making bases");

    for size in sizes.into_iter() {
        println!("Working for size {}", size);
        let circuit = BenchmarkCircuit::<Eng> {
            // num_steps: 1_000_000,
            num_steps: size,
            _marker: std::marker::PhantomData
        };

        // NTT precomputations for the padded domain
        let omegas_bitreversed = BitReversedOmegas::<Fr>::new_for_domain_size(size.next_power_of_two());
        let omegas_inv_bitreversed = <OmegasInvBitreversed::<Fr> as CTPrecomputations::<Fr>>::new_for_domain_size(size.next_power_of_two());

        println!("Start setup and precomputations");
        let (_, setup_precomp) = setup_with_precomputations::<Eng, _, _>(
            &circuit,
            &omegas_bitreversed,
            &bases[0..size.next_power_of_two()]
        ).unwrap();

        let mut prover = ProvingAssembly::<Eng>::new();
        circuit.synthesize(&mut prover).unwrap();
        prover.finalize();
        println!("End setup and precomputations");

        println!("Start proving");
        let start = Instant::now();
        let _ = prover.prove_with_setup_precomputed::<_, _, Transcr>(
            &setup_precomp,
            &worker,
            &omegas_bitreversed,
            &omegas_inv_bitreversed,
            &bases[0..size.next_power_of_two()]
        ).unwrap();
        println!("Proving taken {:?} for size {}", start.elapsed(), size);
    }

    // {
    //     let mut tester = TestingAssembly::<Transparent252>::new();
    //     circuit.synthesize(&mut tester).expect("must synthesize");
    //     let satisfied = tester.is_satisfied();
    //     assert!(satisfied);
    //     println!("Circuit is satisfied");
    // }

    // println!("Start setup");
    // let start = Instant::now();
    // let (setup, aux) = setup::<Transparent252, Committer, _>(&circuit, meta.clone()).unwrap();
    // println!("Setup taken {:?}", start.elapsed());

    // println!("Using circuit with N = {}", setup.n);

    // println!("Start proving");
    // let start = Instant::now();
    // let proof = prove_nonhomomorphic_chunked::<Transparent252, Committer, Blake2sTranscript::<Fr>, _>(&circuit, &aux, meta.clone()).unwrap();
    // println!("Proof taken {:?}", start.elapsed());

    // println!("Start verifying");
    // let start = Instant::now();
    // let valid = verify_nonhomomorphic_chunked::<Transparent252, Committer, Blake2sTranscript::<Fr>>(&setup, &proof, meta).unwrap();
    // println!("Verification with unnecessary precomputation taken {:?}", start.elapsed());
    // assert!(valid);
}
}<file_sep>/src/sonic/unhelped/permutation_structure.rs
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use crate::sonic::helped::{Proof, SxyAdvice};
use crate::sonic::helped::batch::Batch;
use crate::sonic::helped::poly::{SxEval, SyEval};
use crate::sonic::helped::Parameters;
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver, ConstraintSystem};
use crate::sonic::cs::{Circuit, Variable, Coeff};
use crate::sonic::srs::SRS;
use crate::sonic::sonic::Preprocess;
use crate::sonic::sonic::M;
use crate::sonic::sonic::PermutationSynthesizer;
use super::s2_proof::*;
use super::permutation_argument::*;
/// Per-circuit description of where each (coefficient, constraint-index)
/// pair lands on the A/B/C wires, used to build the Sonic permutation
/// argument. One entry per multiplication gate; each entry holds up to `M`
/// (coefficient, constraint number) slots.
#[derive(Clone)]
pub struct PermutationStructure<E: Engine> {
    // number of multiplication gates
    pub n: usize,
    // number of linear constraints
    pub q: usize,
    // coefficient placements for the A wires
    pub a: Vec<[Option<(Coeff<E>, usize)>; M]>,
    // coefficient placements for the B wires
    pub b: Vec<[Option<(Coeff<E>, usize)>; M]>,
    // coefficient placements for the C wires
    pub c: Vec<[Option<(Coeff<E>, usize)>; M]>,
}
/// Runs a preprocessing-only synthesis pass over `circuit` and extracts its
/// permutation structure: the A/B/C coefficient placements plus the gate
/// count `n` and linear-constraint count `q`.
pub fn create_permutation_structure<E: Engine, C: Circuit<E>>(
    circuit: &C,
) -> PermutationStructure<E>
{
    let mut backend: Preprocess<E> = Preprocess::new();

    let (a, b, c) = {
        let mut cs: PermutationSynthesizer<E, &'_ mut Preprocess<E>> = PermutationSynthesizer::new(&mut backend);

        // The first allocated input must coincide with the constraint
        // system's constant-one wire; fail loudly if it does not.
        let one = cs.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues");

        match (one, <PermutationSynthesizer<E, &'_ mut Preprocess<E>> as ConstraintSystem<E>>::ONE) {
            (Variable::A(1), Variable::A(1)) => {},
            _ => panic!("one variable is incorrect")
        }

        circuit.synthesize(&mut cs).expect("should synthesize");

        (cs.a, cs.b, cs.c)
    };

    let n = backend.n;
    let q = backend.q;

    // println!("Will have {} gates and {} linear constraints", n, q);

    PermutationStructure::<E> {
        n: n,
        q: q,
        a: a,
        b: b,
        c: c
    }
}
use rand::{Rng, Rand};
impl<E: Engine> PermutationStructure<E> {
    /// Commitment to the S2 polynomial; depends only on the gate count `n`.
    pub fn calculate_s2_commitment_value(&self, srs: &SRS<E>) -> E::G1Affine {
        S2Eval::calculate_commitment_element(self.n, srs)
    }
pub fn calculate_s2_proof(&self, x: E::Fr, y: E::Fr, srs: &SRS<E>) -> S2Proof<E> {
let s2_eval = S2Eval::new(self.n);
s2_eval.evaluate(x, y, &srs)
}
    /// Builds the "inverse" layout of permutation vectors: for each of the M
    /// slots, a coefficient vector and a permutation vector of length 3n+1,
    /// where BOTH the coefficient and the constraint index are stored at the
    /// wire's array position (A wires reversed at the front, then B, then C).
    /// Unused trailing slots are dropped and zero holes in each permutation
    /// are back-filled with unused indices to make it a full permutation.
    pub fn create_inverse_permutation_vectors(&self) -> (Vec<Vec<E::Fr>>, Vec<Vec<usize>>) {
        // we have to form non-permuted coefficients, as well as permutation structures;
        let n = self.n;

        let mut non_permuted_coeffs = vec![vec![E::Fr::zero(); 3*n+1]; M];
        let mut permutations = vec![vec![0usize; 3*n+1]; M];

        let one = E::Fr::one();
        let mut minus_one = E::Fr::one();
        minus_one.negate();

        // tracks which of the M slots received at least one nonzero entry
        let mut not_empty = [false; M];

        // go over the A-wire placements
        for (gate_index, info) in self.a.iter().enumerate() {
            let offset = n-1;
            for i in 0..M {
                // coefficients of A are placed at the offset = 0 from the beginning of the vector
                if let Some((coeff, place)) = info[i].as_ref() {
                    // place it
                    assert!(*place != 0);
                    // A wires are laid out in reverse order at the front
                    let array_position = offset - gate_index; // special for A
                    let place_coeff_into = &mut non_permuted_coeffs[i];
                    let place_permutation_into = &mut permutations[i];
                    match coeff {
                        Coeff::Zero => {
                        },
                        Coeff::One => {
                            not_empty[i] = true;
                            place_coeff_into[array_position] = one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::NegativeOne => {
                            not_empty[i] = true;
                            place_coeff_into[array_position] = minus_one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::Full(value) => {
                            not_empty[i] = true;
                            place_coeff_into[array_position] = *value;
                            place_permutation_into[array_position] = *place;
                        }
                    }
                }
            }
        }

        // B-wire placements occupy positions n+1 .. 2n (forward order)
        for (gate_index, info) in self.b.iter().enumerate() {
            let offset = n + 1;
            for i in 0..M {
                if let Some((coeff, place)) = info[i].as_ref() {
                    // place it
                    assert!(*place != 0);
                    let array_position = offset + gate_index;
                    let place_coeff_into = &mut non_permuted_coeffs[i];
                    let place_permutation_into = &mut permutations[i];
                    match coeff {
                        Coeff::Zero => {
                        },
                        Coeff::One => {
                            not_empty[i] = true;
                            place_coeff_into[array_position] = one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::NegativeOne => {
                            not_empty[i] = true;
                            place_coeff_into[array_position] = minus_one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::Full(value) => {
                            not_empty[i] = true;
                            place_coeff_into[array_position] = *value;
                            place_permutation_into[array_position] = *place;
                        }
                    }
                }
            }
        }

        // C-wire placements occupy positions 2n+1 .. 3n (forward order)
        for (gate_index, info) in self.c.iter().enumerate() {
            let offset = 2*n + 1;
            for i in 0..M {
                if let Some((coeff, place)) = info[i].as_ref() {
                    // place it
                    assert!(*place != 0);
                    let array_position = offset + gate_index;
                    let place_coeff_into = &mut non_permuted_coeffs[i];
                    let place_permutation_into = &mut permutations[i];
                    match coeff {
                        Coeff::Zero => {
                        },
                        Coeff::One => {
                            not_empty[i] = true;
                            place_coeff_into[array_position] = one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::NegativeOne => {
                            not_empty[i] = true;
                            place_coeff_into[array_position] = minus_one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::Full(value) => {
                            not_empty[i] = true;
                            place_coeff_into[array_position] = *value;
                            place_permutation_into[array_position] = *place;
                        }
                    }
                }
            }
        }

        Self::print_constraints(n, self.q, &non_permuted_coeffs, &permutations);

        // need to fill arrays with non-zero indexes just to have full permutation, even while it's just zero coefficient
        // TODO: fix

        let mut m = M;

        for i in (0..M).into_iter().rev() {
            // index n is the constant-term slot: it must stay empty
            assert!(non_permuted_coeffs[i][n].is_zero());
            assert!(permutations[i][n] == 0);
        }

        // NOTE(review): `pop()` always removes the TAIL vector; this is only
        // correct if empty slots form a suffix of the M slots — confirm.
        for i in (0..M).into_iter().rev() {
            if !not_empty[i] {
                non_permuted_coeffs.pop();
                permutations.pop();
                m -= 1;
            }
        }

        assert!(m != 0);

        // find something faster, although it's still linear
        for i in 0..m {
            // fillers[k] == k+1 if index k+1 is still unused, 0 otherwise
            let mut fillers: Vec<usize> = (1..=(3*n+1)).map(|el| el).collect();
            for (p, c) in permutations[i].iter_mut().zip(non_permuted_coeffs[i].iter()) {
                if *p == 0 {
                    assert!(c.is_zero());
                } else {
                    fillers[*p - 1] = 0;
                }
            }

            // back-fill zero holes with the remaining unused indices, in order
            let mut fill_from = 0;
            for p in permutations[i].iter_mut() {
                if *p == 0 {
                    loop {
                        if fillers[fill_from] != 0 {
                            *p = fillers[fill_from];
                            fill_from += 1;
                            break;
                        } else {
                            fill_from += 1;
                        }
                    }
                }
            }
        }

        (non_permuted_coeffs, permutations)
    }
    /// Like `create_inverse_permutation_vectors`, but coefficients are stored
    /// at `place - 1` (indexed by constraint number) while the permutation
    /// entry is stored at the wire's array position. Unused trailing slots
    /// are dropped and zero holes are back-filled to complete each permutation.
    pub fn create_permutation_vectors(&self) -> (Vec<Vec<E::Fr>>, Vec<Vec<usize>>) {
        // we have to form non-permuted coefficients, as well as permutation structures;
        let n = self.n;

        let mut non_permuted_coeffs = vec![vec![E::Fr::zero(); 3*n+1]; M];
        let mut permutations = vec![vec![0usize; 3*n+1]; M];

        let one = E::Fr::one();
        let mut minus_one = E::Fr::one();
        minus_one.negate();

        // tracks which of the M slots received at least one nonzero entry
        let mut not_empty = [false; M];

        // go over the A-wire placements
        for (gate_index, info) in self.a.iter().enumerate() {
            let offset = n-1;
            for i in 0..M {
                // coefficients of A are placed at the offset = 0 from the beginning of the vector
                if let Some((coeff, place)) = info[i].as_ref() {
                    // place it
                    assert!(*place != 0);
                    // A wires are laid out in reverse order at the front
                    let array_position = offset - gate_index; // special for A
                    // coefficient goes to the constraint-number slot
                    let coeff_position = *place - 1;
                    let place_coeff_into = &mut non_permuted_coeffs[i];
                    let place_permutation_into = &mut permutations[i];
                    match coeff {
                        Coeff::Zero => {
                        },
                        Coeff::One => {
                            not_empty[i] = true;
                            place_coeff_into[coeff_position] = one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::NegativeOne => {
                            not_empty[i] = true;
                            place_coeff_into[coeff_position] = minus_one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::Full(value) => {
                            not_empty[i] = true;
                            place_coeff_into[coeff_position] = *value;
                            place_permutation_into[array_position] = *place;
                        }
                    }
                }
            }
        }

        // B-wire placements occupy positions n+1 .. 2n (forward order)
        for (gate_index, info) in self.b.iter().enumerate() {
            let offset = n + 1;
            for i in 0..M {
                if let Some((coeff, place)) = info[i].as_ref() {
                    // place it
                    assert!(*place != 0);
                    let array_position = offset + gate_index;
                    let coeff_position = *place - 1;
                    let place_coeff_into = &mut non_permuted_coeffs[i];
                    let place_permutation_into = &mut permutations[i];
                    match coeff {
                        Coeff::Zero => {
                        },
                        Coeff::One => {
                            not_empty[i] = true;
                            place_coeff_into[coeff_position] = one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::NegativeOne => {
                            not_empty[i] = true;
                            place_coeff_into[coeff_position] = minus_one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::Full(value) => {
                            not_empty[i] = true;
                            place_coeff_into[coeff_position] = *value;
                            place_permutation_into[array_position] = *place;
                        }
                    }
                }
            }
        }

        // C-wire placements occupy positions 2n+1 .. 3n (forward order)
        for (gate_index, info) in self.c.iter().enumerate() {
            let offset = 2*n + 1;
            for i in 0..M {
                if let Some((coeff, place)) = info[i].as_ref() {
                    // place it
                    assert!(*place != 0);
                    let array_position = offset + gate_index;
                    let coeff_position = *place - 1;
                    let place_coeff_into = &mut non_permuted_coeffs[i];
                    let place_permutation_into = &mut permutations[i];
                    match coeff {
                        Coeff::Zero => {
                        },
                        Coeff::One => {
                            not_empty[i] = true;
                            place_coeff_into[coeff_position] = one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::NegativeOne => {
                            not_empty[i] = true;
                            place_coeff_into[coeff_position] = minus_one;
                            place_permutation_into[array_position] = *place;
                        },
                        Coeff::Full(value) => {
                            not_empty[i] = true;
                            place_coeff_into[coeff_position] = *value;
                            place_permutation_into[array_position] = *place;
                        }
                    }
                }
            }
        }

        // Self::print_constraints(n, self.q, &non_permuted_coeffs, &permutations);

        // need to fill arrays with non-zero indexes just to have full permutation, even while it's just zero coefficient
        // TODO: fix

        let mut m = M;

        // for i in (0..M).into_iter().rev() {
        //     // these are no constant terms
        //     assert!(non_permuted_coeffs[i][n].is_zero());
        //     assert!(permutations[i][n] == 0);
        // }

        // NOTE(review): `pop()` always removes the TAIL vector; this is only
        // correct if empty slots form a suffix of the M slots — confirm.
        for i in (0..M).into_iter().rev() {
            if !not_empty[i] {
                non_permuted_coeffs.pop();
                permutations.pop();
                m -= 1;
            }
        }

        assert!(m != 0);

        // find something faster, although it's still linear
        for i in 0..m {
            // fillers[k] == k+1 if index k+1 is still unused, 0 otherwise
            let mut fillers: Vec<usize> = (1..=(3*n+1)).map(|el| el).collect();
            for (p, _c) in permutations[i].iter_mut().zip(non_permuted_coeffs[i].iter()) {
                if *p == 0 {
                    continue;
                    // assert!(c.is_zero());
                } else {
                    fillers[*p - 1] = 0;
                }
            }

            // back-fill zero holes with the remaining unused indices, in order
            let mut fill_from = 0;
            for p in permutations[i].iter_mut() {
                if *p == 0 {
                    loop {
                        if fillers[fill_from] != 0 {
                            *p = fillers[fill_from];
                            fill_from += 1;
                            break;
                        } else {
                            fill_from += 1;
                        }
                    }
                }
            }
        }

        (non_permuted_coeffs, permutations)
    }
pub fn print_constraints(n:usize, q: usize, coeffs: &Vec<Vec<E::Fr>>, permutations: &Vec<Vec<usize>>) {
let m = coeffs.len();
for constraint_idx in 1..=q {
println!("Constraint {} (term for Y^{})", constraint_idx, constraint_idx);
let mut terms = vec![];
for p_idx in 0..m {
if let Some(variable_idx) = permutations[p_idx].iter().position(|&s| s == constraint_idx) {
let coeff = coeffs[p_idx][variable_idx];
terms.push((variable_idx, coeff));
}
}
for (var_idx, coeff) in terms.into_iter() {
if var_idx < n + 1 {
print!("{} * A({})", coeff, n - var_idx);
} else if var_idx < 2*n + 1 {
print!("{} * B({})", coeff, var_idx - n);
} else {
print!("{} * C({})", coeff, var_idx - 2*n);
}
print!("\n");
}
}
}
pub fn create_permutation_special_reference(&self, srs: &SRS<E>) -> SpecializedSRS<E>
{
let (non_permuted_coeffs, permutations) = self.create_permutation_vectors();
let specialized_srs = PermutationArgument::make_specialized_srs(
&non_permuted_coeffs,
&permutations,
&srs
);
specialized_srs
}
    /// Produces the signature of correct computation for the point (y, z),
    /// first computing (and printing) a naive evaluation of the S
    /// contribution as a cross-check.
    pub fn make_signature(&self, y: E::Fr, z: E::Fr, srs: &SRS<E>) -> SignatureOfCorrectComputation<E> {
        let (non_permuted_coeffs, permutations) = self.create_permutation_vectors();

        // naive sum over all placements: coeff[sigma_i - 1] * z^{i+1} * y^{sigma_i}
        let mut s_contrib = E::Fr::zero();
        for permutation_index in 0..permutations.len() {
            for (variable_index, sigma_i) in permutations[permutation_index].iter().enumerate() {
                let y_power = y.pow([*sigma_i as u64]);
                let x_power = z.pow([(variable_index+1) as u64]);
                let coeff = non_permuted_coeffs[permutation_index][*sigma_i - 1];

                let mut result = coeff;
                result.mul_assign(&x_power);
                result.mul_assign(&y_power);

                s_contrib.add_assign(&result);
            }
        }

        // rescale by z^{-(n+1)} * y^n to match the normalized S polynomial
        let z_n_plus_1_inv = z.pow([(self.n + 1) as u64]).inverse().unwrap();
        let y_n = y.pow([self.n as u64]);

        println!("Naive S contribution = {}", s_contrib);
        s_contrib.mul_assign(&z_n_plus_1_inv);
        s_contrib.mul_assign(&y_n);

        println!("Naive S contribution scaled = {}", s_contrib);

        // let specialized_srs = PermutationArgument::make_specialized_srs(
        //     &non_permuted_coeffs,
        //     &permutations,
        //     &srs
        // );

        let signature = PermutationArgument::make_signature(
            non_permuted_coeffs,
            permutations,
            y,
            z,
            &srs,
        );

        signature
    }
    /// Full round-trip of the permutation argument for testing: builds the
    /// specialized SRS, commits, opens s' at a random point, verifies the
    /// opening, then makes and verifies the grand-product argument.
    /// Returns the commitments, challenges, proofs, the opening point z',
    /// the number of permutation polynomials m, and the naive S contribution.
    /// NOTE: the order of `rng` draws is part of the behavior — do not reorder.
    pub fn create_permutation_arguments<R: Rng>(&self, y: E::Fr, z: E::Fr, rng: &mut R, srs: &SRS<E>)
    -> (Vec<(E::G1Affine, E::G1Affine)>, Vec<E::Fr>, PermutationProof<E>, PermutationArgumentProof<E>, E::Fr, usize, E::Fr)
    {
        // we have to form non-permuted coefficients, as well as permutation structures;
        let n = self.n;

        let (non_permuted_coeffs, permutations) = self.create_permutation_vectors();

        let m = non_permuted_coeffs.len();

        println!("Will need {} permutation polynomials", m);

        let specialized_srs = PermutationArgument::make_specialized_srs(
            &non_permuted_coeffs,
            &permutations,
            &srs
        );

        // evaluate S naively
        let mut s_contrib = E::Fr::zero();
        for permutation_index in 0..m {
            for (variable_index, sigma_i) in permutations[permutation_index].iter().enumerate() {
                let y_power = y.pow([*sigma_i as u64]);
                let x_power = z.pow([(variable_index+1) as u64]);
                let coeff = non_permuted_coeffs[permutation_index][*sigma_i - 1];

                let mut result = coeff;
                result.mul_assign(&x_power);
                result.mul_assign(&y_power);

                s_contrib.add_assign(&result);
            }
        }

        println!("Naive S contribution = {}", s_contrib);

        let mut argument = PermutationArgument::new(non_permuted_coeffs, permutations);

        let challenges = (0..m).map(|_| E::Fr::rand(rng)).collect::<Vec<_>>();

        let commitments = argument.commit(y, &srs);

        // split (S, S') commitment pairs into separate vectors
        let mut s_commitments = vec![];
        let mut s_prime_commitments = vec![];
        for (s, s_prime) in commitments.clone().into_iter() {
            s_commitments.push(s);
            // println!("S' = {}", s_prime);
            s_prime_commitments.push(s_prime);
        }

        let z_prime : E::Fr = rng.gen();

        let opening = argument.open_commitments_to_s_prime(&challenges, y, z_prime, &srs);

        let randomness = (0..2).map(|_| E::Fr::rand(rng)).collect::<Vec<_>>();

        let valid = PermutationArgument::verify_s_prime_commitment(n,
            &randomness,
            &challenges,
            &s_prime_commitments,
            &opening,
            y,
            z_prime,
            &specialized_srs,
            &srs);

        assert!(valid, "s' commitment must be valid");

        let beta : E::Fr = rng.gen();
        let gamma : E::Fr = rng.gen();

        let grand_product_challenges = (0..m).map(|_| E::Fr::rand(rng)).collect::<Vec<_>>();
        let wellformed_challenges = (0..(2*m)).map(|_| E::Fr::rand(rng)).collect::<Vec<_>>();

        let proof = argument.make_argument(
            beta,
            gamma,
            & grand_product_challenges,
            & wellformed_challenges,
            y,
            z,
            &specialized_srs, &srs);

        let valid = PermutationArgument::verify(&s_commitments, &proof, z, &srs);

        assert!(valid, "permutation argument must be valid");

        (commitments, challenges, opening, proof, z_prime, m, s_contrib)
    }
}
#[test]
fn test_simple_succinct_sonic() {
    // Cross-checks the succinct S-polynomial evaluation: the value recovered
    // from the permutation-argument signature must match both a naive
    // evaluation and the helped `SxEval` path for a tiny fixed circuit.
    use crate::pairing::ff::{Field, PrimeField};
    use crate::pairing::{Engine, CurveAffine, CurveProjective};
    use crate::pairing::bls12_381::{Bls12, Fr};
    use std::time::{Instant};
    use crate::sonic::srs::SRS;
    use crate::sonic::cs::{Circuit, ConstraintSystem, LinearCombination};

    // one multiplication gate (10 * 20 = 200) with three linear constraints
    struct MyCircuit;

    impl<E: Engine> Circuit<E> for MyCircuit {
        fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
            let (a, b, c) = cs.multiply(|| {
                Ok((
                    E::Fr::from_str("10").unwrap(),
                    E::Fr::from_str("20").unwrap(),
                    E::Fr::from_str("200").unwrap(),
                ))
            })?;

            cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("2").unwrap()), a) - b);
            cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("20").unwrap()), a) - c);
            cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("10").unwrap()), b) - c);

            // let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?;

            // cs.enforce_zero(LinearCombination::from(b) - multiplier);

            // let (a1, b1, _) = cs.multiply(|| {
            //     Ok((
            //         E::Fr::from_str("5").unwrap(),
            //         E::Fr::from_str("5").unwrap(),
            //         E::Fr::from_str("25").unwrap(),
            //     ))
            // })?;

            // cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("2").unwrap()), b1) - a);
            // cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("4").unwrap()), a1) - b);
            // cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("40").unwrap()), b1) - c);

            Ok(())
        }
    }

    let srs_x = Fr::from_str("23923").unwrap();
    let srs_alpha = Fr::from_str("23728792").unwrap();
    println!("making srs");
    let start = Instant::now();
    // let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
    let srs = SRS::<Bls12>::new(100, srs_x, srs_alpha);
    println!("done in {:?}", start.elapsed());

    {
        use rand::{XorShiftRng, SeedableRng, Rand, Rng};
        let _rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);

        use crate::sonic::sonic::Basic;
        use crate::sonic::sonic::AdaptorCircuit;
        use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs};
        use crate::sonic::helped::{MultiVerifier, get_circuit_parameters_for_succinct_sonic};
        use crate::sonic::helped::helper::{create_aggregate_on_srs};
        use crate::sonic::sonic::Permutation3;
        use crate::sonic::unhelped::permutation_structure::*;

        // fixed evaluation point (y = 1 simplifies the Y powers)
        // let z: Fr = rng.gen();
        // let y: Fr = rng.gen();
        let z: Fr = Fr::from_str("2").unwrap();
        let y: Fr = Fr::one();

        let perm_structure = create_permutation_structure::<Bls12, _>(&MyCircuit);
        let (non_permuted_coeffs, permutations) = perm_structure.create_permutation_vectors();
        println!("Non-permuted = {:?}", non_permuted_coeffs[0]);
        println!("Permutation = {:?}", permutations[0]);
        println!("N = {}, Q = {}", perm_structure.n, perm_structure.q);
        let n = perm_structure.n;

        // reference evaluation via the helped prover's SxEval
        let szy = {
            let mut tmp = SxEval::<Bls12>::new(y, n);
            Permutation3::synthesize(&mut tmp, &MyCircuit).unwrap(); // TODO
            tmp.finalize(z)
        };

        // naive double-sum evaluation of s1 from the permutation vectors
        let naive_s1 = {
            let mut res = Fr::zero();
            for j in 0..permutations.len() {
                for i in 0..non_permuted_coeffs[j].len() {
                    let sigma_i = permutations[j][i];
                    let coeff_i = non_permuted_coeffs[j][i];
                    // let coeff_sigma_i = non_permuted_coeffs[j][sigma_i - 1];

                    let y_power = y.pow([sigma_i as u64]);
                    let x_power = z.pow([(i+1) as u64]);

                    // let mut result = coeff_sigma_i;
                    let mut result = coeff_i;
                    result.mul_assign(&y_power);
                    result.mul_assign(&x_power);

                    res.add_assign(&result);
                }
            }

            res
        };

        println!("Naive s1 = {}", naive_s1);

        // perm_structure.create_permutation_arguments(y, z, rng, &srs);
        let signature = perm_structure.make_signature(y, z, &srs);
        let s2 = S2Eval::new(perm_structure.n);
        let s2 = s2.evaluate(z, y, &srs);
        let mut s2_value = s2.c_value;
        s2_value.add_assign(&s2.d_value);

        // closed-form S2 check: (y + 1/y) z + (y^2 + 1/y^2) z^2
        let mut expected_s2_value = Fr::zero();
        let y_inv = y.inverse().unwrap();
        let mut p1 = y;
        p1.add_assign(&y_inv);
        p1.mul_assign(&z);
        expected_s2_value.add_assign(&p1);

        let mut t0 = y;
        t0.square();
        let mut t1 = y_inv;
        t1.square();

        let mut p2 = t0;
        p2.add_assign(&t1);
        p2.mul_assign(&z);
        p2.mul_assign(&z);
        expected_s2_value.add_assign(&p2);

        let z_n = z.pow([n as u64]);
        let z_n_plus_1_inv = z.pow([(n + 1) as u64]).inverse().unwrap();
        let y_n = y.pow([n as u64]);

        assert!(expected_s2_value == s2_value);

        s2_value.mul_assign(&z_n);

        // normalize the signature's S1 and subtract the S2 part, then compare
        // against the helped evaluation
        let mut s1 = signature.perm_argument_proof.s_zy;
        println!("S1 = {}", s1);
        s1.mul_assign(&z_n_plus_1_inv);
        s1.mul_assign(&y_n);

        s1.sub_assign(&s2_value);

        let mut naive_s1 = naive_s1;
        naive_s1.mul_assign(&z_n_plus_1_inv);
        naive_s1.mul_assign(&y_n);
        naive_s1.sub_assign(&s2_value);

        println!("S1(?) = {}", naive_s1);

        assert_eq!(s1, szy);
    }
}
use crate::pairing::ff::{PrimeField, PrimeFieldRepr};
use blake2s_const::blake2s_const;
use crate::worker::Worker;
use super::super::utils::log2_floor;
use super::*;
/// Merkle tree used for FRI queries: each leaf commits to a coset of
/// `values_per_leaf` consecutive field elements hashed with Blake2s.
#[derive(Debug)]
pub struct FriSpecificBlake2sTree<F: PrimeField> {
    // total number of committed field elements (leafs * values_per_leaf)
    size: usize,
    // internal nodes in binary-heap order: root at index 1, index 0 unused;
    // sibling of node i is i ^ 1
    nodes: Vec<[u8; 32]>,
    // leaf-packing parameters this tree was built with
    params: FriSpecificBlake2sTreeParams,
    _marker: std::marker::PhantomData<F>
}
/// Parameters for `FriSpecificBlake2sTree`.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FriSpecificBlake2sTreeParams {
    // how many field elements are packed into one Merkle leaf;
    // must be a power of two (asserted in `create`)
    pub values_per_leaf: usize
}
// impl<F: PrimeField> FriSpecificBlake2sTree<F> {
// pub fn new() -> Self {
// Self {
// size: 0usize,
// nodes: vec![],
// }
// }
// }
use std::time::Instant;
impl<F: PrimeField> FriSpecificBlake2sTree<F> {
    /// Serialized byte length of one field element: enough whole 64-bit limbs
    /// to hold `F::NUM_BITS` bits.
    const VALUE_BYTE_SIZE: usize = (((F::NUM_BITS as usize) / 64) + 1) * 8;

    /// Writes `values` into `buffer` as little-endian raw representations,
    /// `VALUE_BYTE_SIZE` bytes per element. `buffer` must be exactly
    /// `values.len() * VALUE_BYTE_SIZE` bytes long.
    fn encode_leaf_values(values: &[F], buffer: &mut [u8]) {
        debug_assert!(buffer.len() == values.len() * Self::VALUE_BYTE_SIZE);
        for (i, value) in values.iter().enumerate() {
            let start = Self::VALUE_BYTE_SIZE * i;
            let end = start + Self::VALUE_BYTE_SIZE;
            let raw_repr = value.into_raw_repr();
            raw_repr.write_le(&mut buffer[start..end]).expect("will write");
        }
    }

    /// Inverse of `encode_leaf_values`: decodes little-endian raw
    /// representations back into field elements. Panics if a chunk is not a
    /// valid representation of `F`.
    fn decode_leaf_values(buffer: &[u8]) -> Vec<F> {
        debug_assert!(buffer.len() % Self::VALUE_BYTE_SIZE == 0);
        let num_elements = buffer.len() / Self::VALUE_BYTE_SIZE;
        let mut result = Vec::with_capacity(num_elements);
        let mut raw_repr = F::zero().into_raw_repr();
        for i in 0..num_elements {
            let start = Self::VALUE_BYTE_SIZE * i;
            let end = start + Self::VALUE_BYTE_SIZE;
            // fixed: the expect message previously said "will write" on this
            // read path, which made decode failures look like encode failures
            raw_repr.read_le(&buffer[start..end]).expect("will read");
            result.push(F::from_raw_repr(raw_repr).expect("must work"));
        }
        result
    }

    /// Encodes `values` into `scratch_space` and returns their Blake2s digest.
    /// `scratch_space` must be exactly `values.len() * VALUE_BYTE_SIZE` bytes.
    fn hash_into_leaf(values: &[F], scratch_space: &mut [u8]) -> [u8; 32] {
        Self::encode_leaf_values(values, scratch_space);
        *blake2s_const(scratch_space).as_array()
    }

    /// Builds the Merkle authentication path for `leaf_index`: the hash of the
    /// sibling leaf pair, then one sibling node per level up to (excluding)
    /// the root.
    fn make_full_path(&self, leaf_index: usize, leaf_pair_hash: [u8; 32]) -> Vec<[u8; 32]> {
        let mut nodes = &self.nodes[..];

        let mut path = vec![];
        path.push(leaf_pair_hash);

        let mut idx = leaf_index;
        idx >>= 1;

        for _ in 0..log2_floor(nodes.len() / 2) {
            // lower half of the slice holds the higher levels of the tree,
            // upper half is the current level
            let half_len = nodes.len() / 2;
            let (next_level, this_level) = nodes.split_at(half_len);
            // sibling of `idx` on the current level
            let pair_idx = idx ^ 1usize;
            let value = this_level[pair_idx];
            path.push(value);
            idx >>= 1;
            nodes = next_level;
        }

        path
    }
}
impl<F: PrimeField> IopInstance<F> for FriSpecificBlake2sTree<F> {
    type Commitment = [u8; 32];
    type Params = FriSpecificBlake2sTreeParams;
    type Query = CosetCombinedQuery<F>;

    /// Total number of committed field elements (not the number of leafs).
    fn size(&self) -> usize {
        self.size
    }

    /// Builds the tree over `values`: hashes each run of `values_per_leaf`
    /// consecutive values into a leaf, then combines nodes pairwise with
    /// Blake2s level by level. All hashing is parallelized via `Worker`.
    /// `values.len()` must be a power-of-two multiple of `values_per_leaf`.
    fn create(values: &[F], params: &Self::Params) -> Self {
        assert!(params.values_per_leaf.is_power_of_two());

        let values_per_leaf = params.values_per_leaf;
        let num_leafs = values.len() / values_per_leaf;
        assert!(num_leafs.is_power_of_two());

        // heap layout: root will end up at index 1, index 0 stays unused
        let num_nodes = num_leafs;

        let size = values.len();

        let mut nodes = vec![[0u8; 32]; num_nodes];

        let worker = Worker::new();

        let mut leaf_hashes = vec![[0u8; 32]; num_leafs];

        {
            // hash each coset of `values_per_leaf` elements into its leaf, in parallel
            worker.scope(leaf_hashes.len(), |scope, chunk| {
                for (i, lh) in leaf_hashes.chunks_mut(chunk)
                    .enumerate() {
                    scope.spawn(move |_| {
                        let base_idx = i*chunk;
                        // per-thread scratch buffer for leaf encoding
                        let mut scratch_space = vec![0u8; Self::VALUE_BYTE_SIZE * values_per_leaf];
                        for (j, lh) in lh.iter_mut().enumerate() {
                            let idx = base_idx + j;
                            let values_start = idx * values_per_leaf;
                            let values_end = values_start + values_per_leaf;
                            *lh = Self::hash_into_leaf(&values[values_start..values_end], &mut scratch_space);
                        }
                    });
                }
            });
        }

        // leafs are now encoded and hashed, so let's make a tree

        let num_levels = log2_floor(num_leafs) as usize;
        let mut nodes_for_hashing = &mut nodes[..];

        // separately hash last level, which hashes leaf hashes into first nodes
        {
            let _level = num_levels-1;
            let inputs = &mut leaf_hashes[..];
            // outputs are the upper half of the node array (the deepest internal level)
            let (_, outputs) = nodes_for_hashing.split_at_mut(nodes_for_hashing.len()/2);
            assert!(outputs.len() * 2 == inputs.len());
            assert!(outputs.len().is_power_of_two());

            worker.scope(outputs.len(), |scope, chunk| {
                for (o, i) in outputs.chunks_mut(chunk)
                    .zip(inputs.chunks(chunk*2)) {
                    scope.spawn(move |_| {
                        // each output node is Blake2s(left_child || right_child)
                        let mut hash_input = [0u8; 64];
                        for (o, i) in o.iter_mut().zip(i.chunks(2)) {
                            hash_input[0..32].copy_from_slice(&i[0]);
                            hash_input[32..64].copy_from_slice(&i[1]);
                            *o = *blake2s_const(&hash_input).as_array();
                        }
                    });
                }
            });
        }

        for _ in (0..(num_levels-1)).rev() {
            // do the trick - split: the lower half of the slice holds the
            // still-unwritten higher levels, the upper half holds the level we
            // just produced and now consume as inputs
            let (next_levels, inputs) = nodes_for_hashing.split_at_mut(nodes_for_hashing.len()/2);
            let (_, outputs) = next_levels.split_at_mut(next_levels.len() / 2);
            assert!(outputs.len() * 2 == inputs.len());
            assert!(outputs.len().is_power_of_two());

            worker.scope(outputs.len(), |scope, chunk| {
                for (o, i) in outputs.chunks_mut(chunk)
                    .zip(inputs.chunks(chunk*2)) {
                    scope.spawn(move |_| {
                        let mut hash_input = [0u8; 64];
                        for (o, i) in o.iter_mut().zip(i.chunks(2)) {
                            hash_input[0..32].copy_from_slice(&i[0]);
                            hash_input[32..64].copy_from_slice(&i[1]);
                            *o = *blake2s_const(&hash_input).as_array();
                        }
                    });
                }
            });

            nodes_for_hashing = next_levels;
        }

        Self {
            size: size,
            nodes: nodes,
            params: params.clone(),
            _marker: std::marker::PhantomData
        }
    }

    /// Merkle root of the tree (heap index 1).
    fn get_commitment(&self) -> Self::Commitment {
        self.nodes[1]
    }

    /// Produces an authenticated query for exactly one leaf. `indexes` must be
    /// the full, contiguous, leaf-aligned range of value indexes for that leaf.
    fn produce_query(&self, indexes: Vec<usize>, values: &[F]) -> Self::Query {
        // we never expect that query is mis-alligned, so check it
        debug_assert!(indexes[0] % self.params.values_per_leaf == 0);
        debug_assert!(indexes.len() == self.params.values_per_leaf);
        debug_assert!(indexes == (indexes[0]..(indexes[0]+self.params.values_per_leaf)).collect::<Vec<_>>());
        debug_assert!(*indexes.last().expect("is some") < self.size());
        debug_assert!(*indexes.last().expect("is some") < values.len());

        let query_values = Vec::from(&values[indexes[0]..(indexes[0]+self.params.values_per_leaf)]);

        let leaf_index = indexes[0] / self.params.values_per_leaf;

        // the sibling leaf's hash anchors the bottom of the authentication path
        let pair_index = leaf_index ^ 1;

        let mut scratch_space = vec![0u8; Self::VALUE_BYTE_SIZE * self.params.values_per_leaf];
        let leaf_pair_hash = Self::hash_into_leaf(&values[(pair_index*self.params.values_per_leaf)..((pair_index+1)*self.params.values_per_leaf)], &mut scratch_space);

        let path = self.make_full_path(leaf_index, leaf_pair_hash);

        CosetCombinedQuery::<F> {
            indexes: indexes,
            values: query_values,
            path: path,
        }
    }

    /// Recomputes the root from the query's leaf values and authentication
    /// path and compares it to `commitment`.
    fn verify_query(commitment: &Self::Commitment, query: &Self::Query, params: &Self::Params) -> bool {
        if query.values().len() != params.values_per_leaf {
            return false;
        }

        let mut scratch_space = vec![0u8; Self::VALUE_BYTE_SIZE * params.values_per_leaf];
        let mut hash = Self::hash_into_leaf(query.values(), &mut scratch_space);
        let mut idx = query.indexes()[0] / params.values_per_leaf;
        let mut hash_input = [0u8; 64];

        for el in query.path.iter() {
            {
                // place running hash on the side determined by the index bit,
                // sibling on the other side
                let (left, right) = hash_input.split_at_mut(32);
                if idx & 1usize == 0 {
                    left.copy_from_slice(&hash[..]);
                    right.copy_from_slice(&el[..]);
                } else {
                    right.copy_from_slice(&hash[..]);
                    left.copy_from_slice(&el[..]);
                }
            }
            hash = *blake2s_const(&hash_input).as_array();
            idx >>= 1;
        }

        &hash == commitment
    }
}
/// Two trees are considered equal when they commit to the same Merkle root;
/// the underlying values are not compared.
impl<F: PrimeField> PartialEq for FriSpecificBlake2sTree<F> {
    fn eq(&self, other: &Self) -> bool {
        let this_root = self.get_commitment();
        let other_root = other.get_commitment();

        this_root == other_root
    }
}

impl<F: PrimeField> Eq for FriSpecificBlake2sTree<F> {}
/// A single leaf opening: the queried value indexes, the leaf's values, and
/// the Merkle authentication path up to the root.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CosetCombinedQuery<F: PrimeField> {
    // contiguous, leaf-aligned value indexes covered by this query
    indexes: Vec<usize>,
    // the committed values at those indexes
    values: Vec<F>,
    // sibling hashes from the leaf pair up to (excluding) the root
    path: Vec<[u8; 32]>,
}
impl<F: PrimeField> IopQuery<F> for CosetCombinedQuery<F> {
    /// Owned copy of the queried value indexes.
    fn indexes(&self) -> Vec<usize> {
        self.indexes.to_vec()
    }

    /// Borrow of the queried leaf values.
    fn values(&self) -> &[F] {
        self.values.as_slice()
    }
}
#[test]
fn make_small_iop() {
    use crate::ff::Field;
    use crate::plonk::transparent_engine::Fr;

    const SIZE: usize = 16;
    const VALUES_PER_LEAF: usize = 4;
    const NUM_LEAFS: usize = SIZE / VALUES_PER_LEAF;

    let params = FriSpecificBlake2sTreeParams {
        values_per_leaf: VALUES_PER_LEAF
    };

    // commit to the powers of two 1, 2, 4, 8, ... in the field
    let mut element = Fr::one();
    let mut inputs = Vec::with_capacity(SIZE);
    for _ in 0..SIZE {
        inputs.push(element);
        element.double();
    }

    let tree = FriSpecificBlake2sTree::create(&inputs, &params);
    let commitment = tree.get_commitment();

    assert!(tree.size() == SIZE);
    assert!(tree.nodes.len() == NUM_LEAFS);

    // a query for every single leaf must verify against the commitment
    for leaf in 0..NUM_LEAFS {
        let start = leaf * VALUES_PER_LEAF;
        let indexes: Vec<_> = (start..(start + VALUES_PER_LEAF)).collect();
        let query = tree.produce_query(indexes, &inputs);
        let valid = FriSpecificBlake2sTree::verify_query(&commitment, &query, &params);
        assert!(valid, "invalid query for leaf index {}", leaf);
    }
}
/// Benchmark-style test: builds a tree over 2^24 field elements and verifies
/// queries against the first 128 leafs. Expensive to run; kept byte-identical
/// in behavior to the small test above apart from sizes.
#[test]
fn test_bench_large_fri_specific_iop() {
    use crate::ff::Field;
    use crate::plonk::transparent_engine::Fr;

    const SIZE: usize = 1 << (20 + 4);
    const VALUES_PER_LEAF: usize = 8;

    let params = FriSpecificBlake2sTreeParams {
        values_per_leaf: VALUES_PER_LEAF
    };

    // inputs are successive doublings starting from 1
    let mut inputs = vec![];
    let mut f = Fr::one();
    for _ in 0..SIZE {
        inputs.push(f);
        f.double();
    }

    let iop = FriSpecificBlake2sTree::create(&inputs, &params);
    let commitment = iop.get_commitment();
    let tree_size = iop.size();
    assert!(tree_size == SIZE);
    assert!(iop.nodes.len() == (SIZE / VALUES_PER_LEAF));
    // only spot-check the first 128 leafs — checking all would be prohibitive
    for i in 0..128 {
        let indexes: Vec<_> = ((i*VALUES_PER_LEAF)..(VALUES_PER_LEAF + i*VALUES_PER_LEAF)).collect();
        let query = iop.produce_query(indexes, &inputs);
        let valid = FriSpecificBlake2sTree::verify_query(&commitment, &query, &params);
        assert!(valid, "invalid query for leaf index {}", i);
    }
}
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::{SynthesisError};
use std::marker::PhantomData;
use crate::plonk::cs::gates::*;
use crate::plonk::cs::*;
use crate::worker::*;
use super::polynomials::*;
use super::domains::*;
use crate::plonk::commitments::*;
use crate::plonk::commitments::transcript::*;
use crate::plonk::utils::*;
use crate::plonk::generator::*;
/// Witness-carrying constraint system used by the prover: accumulates gates
/// together with concrete assignments for all input and auxiliary variables.
#[derive(Debug)]
struct ProvingAssembly<E: Engine> {
    // NOTE(review): `m` is initialized to 0 and never updated in this chunk — confirm intent
    m: usize,
    // number of aux gates pushed so far (input gates are counted separately)
    n: usize,
    // gates enforcing public inputs; laid out before aux gates in the trace
    input_gates: Vec<Gate<E::Fr>>,
    aux_gates: Vec<Gate<E::Fr>>,

    // counters for allocated variables; variable indexes are 1-based
    num_inputs: usize,
    num_aux: usize,

    // concrete witness values, indexed by (variable index - 1)
    input_assingments: Vec<E::Fr>,
    aux_assingments: Vec<E::Fr>,

    // NOTE(review): `inputs_map` is never written in this chunk — presumably filled elsewhere
    inputs_map: Vec<usize>,

    // set once the gate count has been padded so that total+1 is a power of two
    is_finalized: bool
}
// Gate coefficient tuples below are ordered (q_l, q_r, q_o, q_m, q_c),
// as spelled out in the comment inside `enforce_mul_2`.
impl<E: Engine> ConstraintSystem<E> for ProvingAssembly<E> {
    // allocate a variable
    // Evaluates `value` eagerly and stores it; returns a 1-based Aux index.
    fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
        where
            F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let value = value()?;

        self.num_aux += 1;
        let index = self.num_aux;
        self.aux_assingments.push(value);

        Ok(Variable(Index::Aux(index)))
    }

    // allocate an input variable
    // Also pushes a constant gate for the input; note the gate pins the input
    // wire to zero here, with the value-pinning variant left commented out.
    fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
        where
            F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let value = value()?;

        self.num_inputs += 1;
        let index = self.num_inputs;
        self.input_assingments.push(value);

        let input_var = Variable(Index::Input(index));

        let gate = Gate::<E::Fr>::new_enforce_constant_gate(input_var, Some(E::Fr::zero()), self.dummy_variable());
        // let gate = Gate::<E>::new_enforce_constant_gate(input_var, Some(value), self.dummy_variable());

        self.input_gates.push(gate);

        Ok(input_var)
    }

    // enforce variable as boolean
    fn enforce_boolean(&mut self, variable: Variable) -> Result<(), SynthesisError> {
        let gate = Gate::<E::Fr>::new_enforce_boolean_gate(variable, self.dummy_variable());
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate an abstract gate
    // `coeffs` is the full (q_l, q_r, q_o, q_m, q_c) selector tuple.
    fn new_gate(&mut self, variables: (Variable, Variable, Variable),
        coeffs:(E::Fr,E::Fr,E::Fr,E::Fr,E::Fr)) -> Result<(), SynthesisError>
    {
        let gate = Gate::<E::Fr>::new_gate(variables, coeffs);
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate a constant
    fn enforce_constant(&mut self, variable: Variable, constant: E::Fr) -> Result<(), SynthesisError>
    {
        let gate = Gate::<E::Fr>::new_enforce_constant_gate(variable, Some(constant), self.dummy_variable());
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate a multiplication gate
    fn enforce_mul_2(&mut self, variables: (Variable, Variable)) -> Result<(), SynthesisError> {
        // q_l, q_r, q_o, q_c = 0, q_m = 1
        let (v_0, v_1) = variables;
        let zero = E::Fr::zero();
        let one = E::Fr::one();

        let gate = Gate::<E::Fr>::new_gate((v_0, v_1, self.dummy_variable()), (zero, zero, zero, one, zero));
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate a multiplication gate
    fn enforce_mul_3(&mut self, variables: (Variable, Variable, Variable)) -> Result<(), SynthesisError> {
        let gate = Gate::<E::Fr>::new_multiplication_gate(variables);
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate a linear combination gate
    // Enforces c_0 * v_0 + c_1 * v_1 = 0 (only q_l and q_r are set).
    fn enforce_zero_2(&mut self, variables: (Variable, Variable), coeffs:(E::Fr, E::Fr)) -> Result<(), SynthesisError>
    {
        let (v_0, v_1) = variables;
        let (c_0, c_1) = coeffs;
        let zero = E::Fr::zero();

        let gate = Gate::<E::Fr>::new_gate((v_0, v_1, self.dummy_variable()), (c_0, c_1, zero, zero, zero));
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    // allocate a linear combination gate
    fn enforce_zero_3(&mut self, variables: (Variable, Variable, Variable), coeffs:(E::Fr, E::Fr, E::Fr)) -> Result<(), SynthesisError>
    {
        let gate = Gate::<E::Fr>::new_enforce_zero_gate(variables, coeffs);
        self.aux_gates.push(gate);
        self.n += 1;

        Ok(())
    }

    fn get_dummy_variable(&self) -> Variable {
        self.dummy_variable()
    }
}
impl<E: Engine> ProvingAssembly<E> {
    /// Reserves a slot for a gate to be filled in later via `set_gate`.
    /// Returns a 1-based index into `aux_gates`.
    fn new_empty_gate(&mut self) -> usize {
        self.n += 1;
        let index = self.n;

        self.aux_gates.push(Gate::<E::Fr>::empty());

        index
    }

    /// Overwrites a previously reserved gate slot (`index` is 1-based).
    fn set_gate(&mut self, gate: Gate<E::Fr>, index: usize) {
        self.aux_gates[index-1] = gate;
    }

    /// Creates an empty assembly with the dummy "zero" variable preallocated
    /// as `Aux(1)` and constrained to zero, so `dummy_variable()` is always valid.
    pub(crate) fn new() -> Self {
        let mut tmp = Self {
            n: 0,
            m: 0,
            input_gates: vec![],
            aux_gates: vec![],

            num_inputs: 0,
            num_aux: 0,

            input_assingments: vec![],
            aux_assingments: vec![],

            inputs_map: vec![],

            is_finalized: false,
        };

        let zero = tmp.alloc(|| Ok(E::Fr::zero())).expect("should have no issues");
        tmp.enforce_constant(zero, E::Fr::zero()).expect("should have no issues");

        // sanity check: the dummy variable must be exactly the zero variable
        match (tmp.dummy_variable(), zero) {
            (Variable(Index::Aux(1)), Variable(Index::Aux(1))) => {},
            _ => panic!("zero variable is incorrect")
        }

        tmp
    }

    // return variable that is not in a constraint formally, but has some value
    fn dummy_variable(&self) -> Variable {
        Variable(Index::Aux(1))
    }

    /// Collects the left/right/output wire value columns (f_l, f_r, f_o),
    /// one entry per gate, input gates first. Requires `finalize()` first.
    pub(crate) fn make_wire_assingments(&self) -> (Vec<E::Fr>, Vec<E::Fr>, Vec<E::Fr>) {
        assert!(self.is_finalized);
        // create a vector of gate assingments
        // if a_i = j then w_j = f_l(g^i)

        let total_num_gates = self.input_gates.len() + self.aux_gates.len();
        let mut f_l = vec![E::Fr::zero(); total_num_gates];
        let mut f_r = vec![E::Fr::zero(); total_num_gates];
        let mut f_o = vec![E::Fr::zero(); total_num_gates];

        for (i, gate) in self.input_gates.iter().chain(&self.aux_gates).enumerate()
        {
            // variable indexes are 1-based, assignment vectors are 0-based
            match gate.a_wire() {
                Variable(Index::Input(index)) => {
                    f_l[i] = self.input_assingments[index - 1];
                },
                Variable(Index::Aux(index)) => {
                    f_l[i] = self.aux_assingments[index - 1];
                },
            }

            match gate.b_wire() {
                Variable(Index::Input(index)) => {
                    f_r[i] = self.input_assingments[index - 1];
                },
                Variable(Index::Aux(index)) => {
                    f_r[i] = self.aux_assingments[index - 1];
                },
            }

            match gate.c_wire() {
                Variable(Index::Input(index)) => {
                    f_o[i] = self.input_assingments[index - 1];
                },
                Variable(Index::Aux(index)) => {
                    f_o[i] = self.aux_assingments[index - 1];
                },
            }
        }

        (f_l, f_r, f_o)
    }

    /// Builds the selector value columns (q_l, q_r, q_o, q_m, q_c), one entry
    /// per gate. Input gates are filled serially (expected to be few); aux
    /// gates are filled in parallel. Requires `finalize()` first.
    pub(crate) fn make_circuit_description_polynomials(&self, worker: &Worker) -> Result<(
        Polynomial::<E::Fr, Values>, Polynomial::<E::Fr, Values>, Polynomial::<E::Fr, Values>,
        Polynomial::<E::Fr, Values>, Polynomial::<E::Fr, Values>
    ), SynthesisError> {
        assert!(self.is_finalized);

        let total_num_gates = self.input_gates.len() + self.aux_gates.len();
        let mut q_l = vec![E::Fr::zero(); total_num_gates];
        let mut q_r = vec![E::Fr::zero(); total_num_gates];
        let mut q_o = vec![E::Fr::zero(); total_num_gates];
        let mut q_m = vec![E::Fr::zero(); total_num_gates];
        let mut q_c = vec![E::Fr::zero(); total_num_gates];

        // materialize the compact `Coeff` representation as a field element
        fn coeff_into_field_element<F: PrimeField>(coeff: & Coeff<F>) -> F {
            match coeff {
                Coeff::Zero => {
                    F::zero()
                },
                Coeff::One => {
                    F::one()
                },
                Coeff::NegativeOne => {
                    let mut tmp = F::one();
                    tmp.negate();

                    tmp
                },
                Coeff::Full(c) => {
                    *c
                },
            }
        }

        // expect a small number of inputs
        for (((((gate, q_l), q_r), q_o), q_m), q_c) in self.input_gates.iter()
                                            .zip(q_l.iter_mut())
                                            .zip(q_r.iter_mut())
                                            .zip(q_o.iter_mut())
                                            .zip(q_m.iter_mut())
                                            .zip(q_c.iter_mut())
        {
            *q_l = coeff_into_field_element(&gate.q_l);
            *q_r = coeff_into_field_element(&gate.q_r);
            *q_o = coeff_into_field_element(&gate.q_o);
            *q_m = coeff_into_field_element(&gate.q_m);
            *q_c = coeff_into_field_element(&gate.q_c);
        }

        // aux-gate selectors start right after the input-gate prefix
        let num_input_gates = self.input_gates.len();
        let q_l_aux = &mut q_l[num_input_gates..];
        let q_r_aux = &mut q_r[num_input_gates..];
        let q_o_aux = &mut q_o[num_input_gates..];
        let q_m_aux = &mut q_m[num_input_gates..];
        let q_c_aux = &mut q_c[num_input_gates..];

        debug_assert!(self.aux_gates.len() == q_l_aux.len());

        worker.scope(self.aux_gates.len(), |scope, chunk| {
            for (((((gate, q_l), q_r), q_o), q_m), q_c) in self.aux_gates.chunks(chunk)
                                                            .zip(q_l_aux.chunks_mut(chunk))
                                                            .zip(q_r_aux.chunks_mut(chunk))
                                                            .zip(q_o_aux.chunks_mut(chunk))
                                                            .zip(q_m_aux.chunks_mut(chunk))
                                                            .zip(q_c_aux.chunks_mut(chunk))
            {
                scope.spawn(move |_| {
                    for (((((gate, q_l), q_r), q_o), q_m), q_c) in gate.iter()
                                                            .zip(q_l.iter_mut())
                                                            .zip(q_r.iter_mut())
                                                            .zip(q_o.iter_mut())
                                                            .zip(q_m.iter_mut())
                                                            .zip(q_c.iter_mut())
                    {
                        *q_l = coeff_into_field_element(&gate.q_l);
                        *q_r = coeff_into_field_element(&gate.q_r);
                        *q_o = coeff_into_field_element(&gate.q_o);
                        *q_m = coeff_into_field_element(&gate.q_m);
                        *q_c = coeff_into_field_element(&gate.q_c);
                    }
                });
            }
        });

        let q_l = Polynomial::from_values(q_l)?;
        let q_r = Polynomial::from_values(q_r)?;
        let q_o = Polynomial::from_values(q_o)?;
        let q_m = Polynomial::from_values(q_m)?;
        let q_c = Polynomial::from_values(q_c)?;

        Ok((q_l, q_r, q_o, q_m, q_c))
    }

    /// Computes the copy-permutation polynomials sigma_1/2/3 over the index
    /// domain [1, 3*num_gates]: wires sharing a variable form a cycle.
    /// Requires `finalize()` first.
    pub(crate) fn calculate_permutations_as_in_a_paper(&self) -> (Vec<usize>, Vec<usize>, Vec<usize>) {
        assert!(self.is_finalized);

        let num_gates = self.input_gates.len() + self.aux_gates.len();
        let num_partitions = self.num_inputs + self.num_aux;
        let num_inputs = self.num_inputs;
        // in the partition number i there is a set of indexes in V = (a, b, c) such that V_j = i
        let mut partitions = vec![vec![]; num_partitions + 1];

        for (j, gate) in self.input_gates.iter().chain(&self.aux_gates).enumerate()
        {
            // a-wires occupy indexes [1, num_gates], b-wires the next
            // num_gates, c-wires the last num_gates
            match gate.a_wire() {
                Variable(Index::Input(index)) => {
                    let i = *index;
                    partitions[i].push(j+1);
                },
                Variable(Index::Aux(index)) => {
                    if *index != 0 {
                        let i = index + num_inputs;
                        partitions[i].push(j+1);
                    }
                },
            }

            match gate.b_wire() {
                Variable(Index::Input(index)) => {
                    let i = *index;
                    partitions[i].push(j + 1 + num_gates);
                },
                Variable(Index::Aux(index)) => {
                    if *index != 0 {
                        let i = index + num_inputs;
                        partitions[i].push(j + 1 + num_gates);
                    }
                },
            }

            match gate.c_wire() {
                Variable(Index::Input(index)) => {
                    let i = *index;
                    partitions[i].push(j + 1 + 2*num_gates);
                },
                Variable(Index::Aux(index)) => {
                    if *index != 0 {
                        let i = index + num_inputs;
                        partitions[i].push(j + 1 + 2*num_gates);
                    }
                },
            }
        }

        // start from the identity permutation and splice cycles into it
        let mut sigma_1: Vec<_> = (1..=num_gates).collect();
        let mut sigma_2: Vec<_> = ((num_gates+1)..=(2*num_gates)).collect();
        let mut sigma_3: Vec<_> = ((2*num_gates + 1)..=(3*num_gates)).collect();

        let mut permutations = vec![vec![]; num_partitions + 1];

        // rotate a partition left by one to form its cycle
        fn rotate(mut vec: Vec<usize>) -> Vec<usize> {
            if vec.len() > 0 {
                let els: Vec<_> = vec.drain(0..1).collect();
                vec.push(els[0]);
            }

            vec
        }

        for (i, partition) in partitions.into_iter().enumerate().skip(1) {
            // copy-permutation should have a cycle around the partition
            let permutation = rotate(partition.clone());
            permutations[i] = permutation.clone();

            for (original, new) in partition.into_iter()
                                    .zip(permutation.into_iter())
            {
                if original <= num_gates {
                    debug_assert!(sigma_1[original - 1] == original);
                    sigma_1[original - 1] = new;
                } else if original <= 2*num_gates {
                    debug_assert!(sigma_2[original - num_gates - 1] == original);
                    sigma_2[original - num_gates - 1] = new;
                } else {
                    debug_assert!(sigma_3[original - 2*num_gates - 1] == original);
                    sigma_3[original - 2*num_gates - 1] = new;
                }
            }
        }

        (sigma_1, sigma_2, sigma_3)
    }

    /// Identity permutation 1..=num_gates over the gate index domain.
    fn make_s_id(&self) -> Vec<usize> {
        let size = self.input_gates.len() + self.aux_gates.len();
        let result: Vec<_> = (1..=size).collect();

        result
    }

    /// Produces all setup polynomials in coefficient form: selectors
    /// (q_l..q_c), the identity permutation s_id and sigma_1/2/3.
    /// Requires `finalize()` first.
    pub(crate) fn output_setup_polynomials(&self, worker: &Worker) -> Result<
        (
            Polynomial::<E::Fr, Coefficients>, // q_l
            Polynomial::<E::Fr, Coefficients>, // q_r
            Polynomial::<E::Fr, Coefficients>, // q_o
            Polynomial::<E::Fr, Coefficients>, // q_m
            Polynomial::<E::Fr, Coefficients>, // q_c
            Polynomial::<E::Fr, Coefficients>, // s_id
            Polynomial::<E::Fr, Coefficients>, // sigma_1
            Polynomial::<E::Fr, Coefficients>, // sigma_2
            Polynomial::<E::Fr, Coefficients>, // sigma_3
        ), SynthesisError>
    {
        assert!(self.is_finalized);

        let s_id = self.make_s_id();
        let (sigma_1, sigma_2, sigma_3) = self.calculate_permutations_as_in_a_paper();

        let s_id = convert_to_field_elements::<E::Fr>(&s_id, &worker);
        let sigma_1 = convert_to_field_elements::<E::Fr>(&sigma_1, &worker);
        let sigma_2 = convert_to_field_elements::<E::Fr>(&sigma_2, &worker);
        let sigma_3 = convert_to_field_elements::<E::Fr>(&sigma_3, &worker);

        let s_id = Polynomial::from_values(s_id)?;
        let sigma_1 = Polynomial::from_values(sigma_1)?;
        let sigma_2 = Polynomial::from_values(sigma_2)?;
        let sigma_3 = Polynomial::from_values(sigma_3)?;

        let (q_l, q_r, q_o, q_m, q_c) = self.make_circuit_description_polynomials(&worker)?;

        // interpolate: values over the domain -> monomial coefficients
        let s_id = s_id.ifft(&worker);
        let sigma_1 = sigma_1.ifft(&worker);
        let sigma_2 = sigma_2.ifft(&worker);
        let sigma_3 = sigma_3.ifft(&worker);

        let q_l = q_l.ifft(&worker);
        let q_r = q_r.ifft(&worker);
        let q_o = q_o.ifft(&worker);
        let q_m = q_m.ifft(&worker);
        let q_c = q_c.ifft(&worker);

        Ok((q_l, q_r, q_o, q_m, q_c, s_id, sigma_1, sigma_2, sigma_3))
    }

    /// Total gate count (inputs + aux). Requires `finalize()` first.
    pub(crate) fn num_gates(&self) -> usize {
        assert!(self.is_finalized);

        self.input_gates.len() + self.aux_gates.len()
    }

    /// Pads the circuit with empty gates so that (total gates + 1) is a power
    /// of two, then marks the assembly finalized. Idempotent.
    fn finalize(&mut self) {
        if self.is_finalized {
            return;
        }

        let n = self.input_gates.len() + self.aux_gates.len();
        if (n+1).is_power_of_two() {
            // fixed: previously this early return did NOT set `is_finalized`,
            // so circuits already sized 2^k - 1 made every later
            // `assert!(self.is_finalized)` (num_gates, wire assignments, ...) panic
            self.is_finalized = true;
            return;
        }

        let empty_gate = Gate::<E::Fr>::new_empty_gate(self.dummy_variable());

        let new_aux_len = (n+1).next_power_of_two() - 1 - self.input_gates.len();

        // NOTE(review): `self.n` is not bumped for the padding gates; later
        // code sizes everything by the gate vector lengths — confirm intent
        self.aux_gates.resize(new_aux_len, empty_gate);

        self.is_finalized = true;
    }

    /// Evaluates 1/Z(X) over a coset of size `poly_size`, where
    /// Z(X) = (X^(n+1) - 1) / (X - omega^n) for the vanishing domain of size
    /// `vahisning_size` (the last generator power is excluded on purpose).
    fn calculate_inverse_vanishing_polynomial_in_a_coset(&self, worker: &Worker, poly_size:usize, vahisning_size: usize) -> Result<Polynomial::<E::Fr, Values>, SynthesisError> {
        assert!(poly_size.is_power_of_two());
        assert!(vahisning_size.is_power_of_two());

        // update from the paper - it should not hold for the last generator, omega^(n) in original notations

        // Z(X) = (X^(n+1) - 1) / (X - omega^(n)) => Z^{-1}(X) = (X - omega^(n)) / (X^(n+1) - 1)

        let domain = Domain::<E::Fr>::new_for_size(vahisning_size as u64)?;
        let n_domain_omega = domain.generator;
        let mut root = n_domain_omega.pow([(vahisning_size - 1) as u64]);
        root.negate();

        let multiplicative_generator = E::Fr::multiplicative_generator();

        let mut negative_one = E::Fr::one();
        negative_one.negate();

        let mut numerator = Polynomial::<E::Fr, Values>::from_values(vec![multiplicative_generator; poly_size])?;
        // evaluate X in linear time

        numerator.distribute_powers(&worker, numerator.omega);
        numerator.add_constant(&worker, &root);

        // numerator.add_constant(&worker, &negative_one);
        // now it's a series of values in a coset

        // now we should evaluate X^(n+1) - 1 in a linear time

        let shift = multiplicative_generator.pow([vahisning_size as u64]);

        let mut denominator = Polynomial::<E::Fr, Values>::from_values(vec![shift; poly_size])?;

        // elements are h^size - 1, (hg)^size - 1, (hg^2)^size - 1, ...

        denominator.distribute_powers(&worker, denominator.omega.pow([vahisning_size as u64]));
        denominator.add_constant(&worker, &negative_one);

        denominator.batch_inversion(&worker)?;

        numerator.mul_assign(&worker, &denominator);

        Ok(numerator)
    }

    /// Point evaluation of 1/Z(X) as defined above, at `point`.
    fn evaluate_inverse_vanishing_poly(&self, vahisning_size: usize, point: E::Fr) -> E::Fr {
        assert!(vahisning_size.is_power_of_two());

        // update from the paper - it should not hold for the last generator, omega^(n) in original notations

        // Z(X) = (X^(n+1) - 1) / (X - omega^(n)) => Z^{-1}(X) = (X - omega^(n)) / (X^(n+1) - 1)

        let domain = Domain::<E::Fr>::new_for_size(vahisning_size as u64).expect("should fit");
        let n_domain_omega = domain.generator;
        let root = n_domain_omega.pow([(vahisning_size - 1) as u64]);

        let mut numerator = point;
        numerator.sub_assign(&root);

        let mut denominator = point.pow([vahisning_size as u64]);
        denominator.sub_assign(&E::Fr::one());

        let denominator = denominator.inverse().expect("must exist");

        numerator.mul_assign(&denominator);

        numerator
    }

    /// Returns the `poly_number`-th Lagrange basis polynomial over a domain of
    /// size `poly_size`, in coefficient form.
    fn calculate_lagrange_poly(&self, worker: &Worker, poly_size:usize, poly_number: usize) -> Result<Polynomial::<E::Fr, Coefficients>, SynthesisError> {
        assert!(poly_size.is_power_of_two());
        assert!(poly_number < poly_size);

        let mut poly = Polynomial::<E::Fr, Values>::from_values(vec![E::Fr::zero(); poly_size])?;

        poly.as_mut()[poly_number] = E::Fr::one();

        Ok(poly.ifft(&worker))
    }
}
// for a non-homomorphic case we do not need r(x) polynomial at all, just open all the parts of t(x) at z
/// Non-homomorphic PLONK proof with a single (unsplit) quotient polynomial t.
pub struct PlonkNonhomomorphicProof<E: Engine, S: CommitmentScheme<E::Fr> >{
    // wire polynomial openings at z
    pub a_opening_value: E::Fr,
    pub b_opening_value: E::Fr,
    pub c_opening_value: E::Fr,
    // selector polynomial openings at z
    pub q_l_opening_value: E::Fr,
    pub q_r_opening_value: E::Fr,
    pub q_o_opening_value: E::Fr,
    pub q_m_opening_value: E::Fr,
    pub q_c_opening_value: E::Fr,
    // permutation polynomial openings at z
    pub s_id_opening_value: E::Fr,
    pub sigma_1_opening_value: E::Fr,
    pub sigma_2_opening_value: E::Fr,
    pub sigma_3_opening_value: E::Fr,
    // grand-product polynomial openings at z and at the shifted point
    pub z_1_unshifted_opening_value: E::Fr,
    pub z_2_unshifted_opening_value: E::Fr,
    pub z_1_shifted_opening_value: E::Fr,
    pub z_2_shifted_opening_value: E::Fr,
    // quotient polynomial opening at z
    pub t_opening_value: E::Fr,
    // prover-provided commitments
    pub a_commitment: S::Commitment,
    pub b_commitment: S::Commitment,
    pub c_commitment: S::Commitment,
    pub z_1_commitment: S::Commitment,
    pub z_2_commitment: S::Commitment,
    pub t_commitment: S::Commitment,
    // combined opening proof for all polynomials above
    pub openings_proof: S::OpeningProof,
    // pub shifted_openings_proof: S::OpeningProof,

    pub t_opening_proof: S::OpeningProof,
}
/// Non-homomorphic PLONK proof where the quotient polynomial t is split into
/// three degree-bounded chunks (low/mid/high), each committed and opened separately.
pub struct PlonkChunkedNonhomomorphicProof<E: Engine, S: CommitmentScheme<E::Fr> >{
    // wire polynomial openings at z
    pub a_opening_value: E::Fr,
    pub b_opening_value: E::Fr,
    pub c_opening_value: E::Fr,
    // selector polynomial openings at z
    pub q_l_opening_value: E::Fr,
    pub q_r_opening_value: E::Fr,
    pub q_o_opening_value: E::Fr,
    pub q_m_opening_value: E::Fr,
    pub q_c_opening_value: E::Fr,
    // permutation polynomial openings at z
    pub s_id_opening_value: E::Fr,
    pub sigma_1_opening_value: E::Fr,
    pub sigma_2_opening_value: E::Fr,
    pub sigma_3_opening_value: E::Fr,
    // grand-product polynomial openings at z and at the shifted point
    pub z_1_unshifted_opening_value: E::Fr,
    pub z_2_unshifted_opening_value: E::Fr,
    pub z_1_shifted_opening_value: E::Fr,
    pub z_2_shifted_opening_value: E::Fr,
    // quotient chunk openings at z
    pub t_low_opening_value: E::Fr,
    pub t_mid_opening_value: E::Fr,
    pub t_high_opening_value: E::Fr,
    // prover-provided commitments
    pub a_commitment: S::Commitment,
    pub b_commitment: S::Commitment,
    pub c_commitment: S::Commitment,
    pub z_1_commitment: S::Commitment,
    pub z_2_commitment: S::Commitment,
    pub t_low_commitment: S::Commitment,
    pub t_mid_commitment: S::Commitment,
    pub t_high_commitment: S::Commitment,
    // combined opening proof for all polynomials above
    pub openings_proof: S::OpeningProof,
}
use crate::plonk::commitments::transparent::StatelessTransparentCommitter;
use crate::plonk::commitments::transparent::iop::blake2s_trivial_iop::TrivialBlake2sIOP;
use crate::plonk::commitments::transparent::fri::naive_fri::naive_fri::NaiveFriIop;
// Concrete transparent instantiation used below: a Blake2s-based trivial IOP
// driving the naive FRI over the engine's scalar field.
// NOTE(review): trait bounds on type aliases are not enforced by rustc — confirm intent
type Iop<E: Engine> = TrivialBlake2sIOP<E::Fr>;
type Fri<E: Engine> = NaiveFriIop<E::Fr, Iop<E>>;
impl<E: Engine> PlonkChunkedNonhomomorphicProof<E, StatelessTransparentCommitter<E::Fr, Fri<E>, Blake2sTranscript<E::Fr>>> {
    /// Estimates the serialized proof size in bytes: opening values,
    /// prover-provided commitments, per-query Merkle paths for the combined
    /// opening, and the FRI part (intermediate roots, final coefficients,
    /// FRI query paths). Also prints per-query breakdowns.
    pub fn estimate_proof_size(&self) -> usize {
        let mut proofs_size = 0;

        // openings at z or z*omega: wires (3) + selectors (5) + permutations (4)
        // + grand products plain/shifted (4) + quotient chunks (3)
        let num_poly_oracles = 3 + 5 + 4 + 4 + 3;

        proofs_size += num_poly_oracles * std::mem::size_of::<E::Fr>();

        // commitments the prover sends: a, b, c, z_1, z_2, t_low, t_mid, t_high
        let num_prover_provided_poly_committments = 8;

        // fixed: this previously multiplied by `num_poly_oracles`, leaving
        // `num_prover_provided_poly_committments` defined but unused and
        // overcounting the commitment contribution
        proofs_size += num_prover_provided_poly_committments * std::mem::size_of_val(&self.t_low_commitment); // extra oracles

        let num_queries = self.openings_proof.1[0].len();

        let (_q_value, query) = &self.openings_proof.1[0][0];
        let query_depth_per_poly_oracle = query.path.len();
        let query_proof_element_size = std::mem::size_of_val(&query.path[0]);
        let query_element_size = std::mem::size_of_val(&query.value[0]);

        let per_poly_oracle_query_size = query_element_size + (query_proof_element_size * query_depth_per_poly_oracle);
        let per_query_size = num_poly_oracles * per_poly_oracle_query_size;

        println!("Non-FRI paths per repeated query take {} bytes", per_query_size);

        proofs_size += num_queries * per_query_size;

        // now only FRI part remains

        let fri_proof = &self.openings_proof.0;

        // intermediate oracles
        proofs_size += fri_proof.roots.len() * std::mem::size_of_val(&fri_proof.roots[0]);

        // final coefficients
        proofs_size += fri_proof.final_coefficients.len() * std::mem::size_of_val(&fri_proof.final_coefficients[0]);

        // queries
        assert_eq!(num_queries, fri_proof.queries.len());

        let mut total_queries_size_per_round = 0;

        for q in fri_proof.queries[0].iter() {
            // fixed: this loop previously measured the outer non-FRI `query`
            // on every iteration instead of the FRI query `q` it iterates over
            let query_depth = q.path.len();
            let query_proof_element_size = std::mem::size_of_val(&q.path[0]);
            let query_element_size = std::mem::size_of_val(&q.value[0]);
            let num_elements = q.value.len();

            let total = num_elements * query_element_size + query_proof_element_size * query_depth;

            total_queries_size_per_round += total;
        }

        println!("FRI path per repeated query take {} bytes", total_queries_size_per_round);

        proofs_size += total_queries_size_per_round * num_queries;

        proofs_size
    }
}
// Produces a PLONK proof for `circuit` using a non-homomorphic polynomial
// commitment scheme `S` (asserted below via `S::IS_HOMOMORPHIC == false`),
// e.g. a Merkle/FRI-based scheme, with Fiat-Shamir driven by transcript `T`.
//
// High-level flow (the ordering of transcript commits and challenge draws is
// load-bearing for soundness — do NOT reorder):
//   1. Synthesize the circuit, build wire polynomials a, b, c and commit them.
//   2. Draw challenges `beta`, `gamma`; build the two grand-product
//      polynomials z_1 (over the identity permutation s_id) and z_2 (over the
//      copy permutation sigma) and commit them.
//   3. Draw `alpha`; assemble the quotient polynomial t on a 4x coset LDE.
//   4. Draw evaluation point `z`; evaluate all polynomials at z (and at
//      z*omega for the shifted z_1, z_2), commit the evaluations, and re-check
//      the quotient identity inside the prover as a sanity check.
//   5. Open everything with one aggregated multi-opening proof, plus a
//      separate opening proof for t under `large_committer` (t has degree
//      roughly 3n, hence the 4x-sized committer).
//
// Arguments:
//   circuit    - the circuit to prove
//   setup      - NOTE(review): not referenced anywhere in this body; the setup
//                polynomials are recomputed from the assembly via
//                `output_setup_polynomials` — confirm whether this parameter
//                is intentionally unused here
//   aux        - committer precomputations for the setup polynomials
//   meta       - committer metadata for size-n commitments
//   large_meta - committer metadata for the 4n-sized commitment to t
//
// Returns the assembled `PlonkNonhomomorphicProof`, or a `SynthesisError`
// propagated from circuit synthesis / polynomial construction.
pub fn prove_nonhomomorphic<E: Engine, S: CommitmentScheme<E::Fr, Prng = T>, T: Transcript<E::Fr, Input = S::Commitment>, C: Circuit<E>>(
circuit: &C,
setup: &PlonkSetup<E, S>,
aux: &PlonkSetupAuxData<E, S>,
meta: S::Meta,
large_meta: S::Meta
) -> Result<PlonkNonhomomorphicProof<E, S>, SynthesisError> {
assert!(S::IS_HOMOMORPHIC == false);
// Synthesize the circuit into a proving assembly and pad it to final size.
let mut assembly = ProvingAssembly::<E>::new();
circuit.synthesize(&mut assembly)?;
assembly.finalize();
let num_gates = assembly.num_gates();
// Two committers: one sized for the witness/setup polynomials (degree < n),
// one sized 4x for the quotient polynomial t.
let committer = S::new_for_size(num_gates.next_power_of_two(), meta);
let large_committer = S::new_for_size(4 * num_gates.next_power_of_two(), large_meta);
let worker = Worker::new();
let mut transcript = T::new();
let n = assembly.input_gates.len() + assembly.aux_gates.len();
// we need n+1 to be a power of two and can not have n to be power of two
let required_domain_size = n + 1;
assert!(required_domain_size.is_power_of_two());
println!("Start work with polynomials");
// Wire assignments as value vectors, then interpolate (IFFT) into
// coefficient form a(X), b(X), c(X).
let (w_l, w_r, w_o) = assembly.make_wire_assingments();
let w_l = Polynomial::<E::Fr, Values>::from_values_unpadded(w_l)?;
let w_r = Polynomial::<E::Fr, Values>::from_values_unpadded(w_r)?;
let w_o = Polynomial::<E::Fr, Values>::from_values_unpadded(w_o)?;
let a_poly = w_l.clone_padded_to_domain()?.ifft(&worker);
let b_poly = w_r.clone_padded_to_domain()?.ifft(&worker);
let c_poly = w_o.clone_padded_to_domain()?.ifft(&worker);
// Round 1: commit the wire polynomials and absorb the commitments.
let (a_commitment, a_aux_data) = committer.commit_single(&a_poly);
let (b_commitment, b_aux_data) = committer.commit_single(&b_poly);
let (c_commitment, c_aux_data) = committer.commit_single(&c_poly);
transcript.commit_input(&a_commitment);
transcript.commit_input(&b_commitment);
transcript.commit_input(&c_commitment);
// TODO: Add public inputs
// Permutation-argument challenges.
let beta = transcript.get_challenge();
let gamma = transcript.get_challenge();
let mut w_l_plus_gamma = w_l.clone();
w_l_plus_gamma.add_constant(&worker, &gamma);
let mut w_r_plus_gamma = w_r.clone();
w_r_plus_gamma.add_constant(&worker, &gamma);
let mut w_o_plus_gamma = w_o.clone();
w_o_plus_gamma.add_constant(&worker, &gamma);
// z_1: running grand product of prod_i (w + beta*s_id + gamma) over the
// identity permutation s_id = 1..=3n, prepended with 1 so z_1(omega^0) = 1.
let z_1 = {
let n = assembly.input_gates.len() + assembly.aux_gates.len();
let s_id_1: Vec<_> = (1..=n).collect();
let s_id_1 = convert_to_field_elements(&s_id_1, &worker);
let s_id_1 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_1)?;
let mut w_l_contribution = w_l_plus_gamma.clone();
w_l_contribution.add_assign_scaled(&worker, &s_id_1, &beta);
drop(s_id_1);
let s_id_2: Vec<_> = ((n+1)..=(2*n)).collect();
let s_id_2 = convert_to_field_elements(&s_id_2, &worker);
let s_id_2 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_2)?;
let mut w_r_contribution = w_r_plus_gamma.clone();
w_r_contribution.add_assign_scaled(&worker, &s_id_2, &beta);
drop(s_id_2);
w_l_contribution.mul_assign(&worker, &w_r_contribution);
drop(w_r_contribution);
let s_id_3: Vec<_> = ((2*n+1)..=(3*n)).collect();
let s_id_3 = convert_to_field_elements(&s_id_3, &worker);
let s_id_3 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_3)?;
let mut w_o_contribution = w_o_plus_gamma.clone();
w_o_contribution.add_assign_scaled(&worker, &s_id_3, &beta);
drop(s_id_3);
w_l_contribution.mul_assign(&worker, &w_o_contribution);
drop(w_o_contribution);
let grand_product = w_l_contribution.calculate_grand_product(&worker)?;
drop(w_l_contribution);
let values = grand_product.into_coeffs();
assert!((values.len() + 1).is_power_of_two());
// Prepend 1: the grand product starts at one on the first domain element.
let mut prepadded = Vec::with_capacity(values.len() + 1);
prepadded.push(E::Fr::one());
prepadded.extend(values);
Polynomial::<E::Fr, Values>::from_values(prepadded)?
};
// z_2: same running grand product but over the copy permutation
// (sigma_1, sigma_2, sigma_3) instead of the identity permutation.
let z_2 = {
let (sigma_1, sigma_2, sigma_3) = assembly.calculate_permutations_as_in_a_paper();
let sigma_1 = convert_to_field_elements(&sigma_1, &worker);
let sigma_1 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_1)?;
let mut w_l_contribution = w_l_plus_gamma.clone();
w_l_contribution.add_assign_scaled(&worker, &sigma_1, &beta);
drop(sigma_1);
let sigma_2 = convert_to_field_elements(&sigma_2, &worker);
let sigma_2 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_2)?;
let mut w_r_contribution = w_r_plus_gamma.clone();
w_r_contribution.add_assign_scaled(&worker, &sigma_2, &beta);
drop(sigma_2);
w_l_contribution.mul_assign(&worker, &w_r_contribution);
drop(w_r_contribution);
let sigma_3 = convert_to_field_elements(&sigma_3, &worker);
let sigma_3 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_3)?;
let mut w_o_contribution = w_o_plus_gamma.clone();
w_o_contribution.add_assign_scaled(&worker, &sigma_3, &beta);
drop(sigma_3);
w_l_contribution.mul_assign(&worker, &w_o_contribution);
drop(w_o_contribution);
let grand_product = w_l_contribution.calculate_grand_product(&worker)?;
drop(w_l_contribution);
let values = grand_product.into_coeffs();
assert!((values.len() + 1).is_power_of_two());
let mut prepadded = Vec::with_capacity(values.len() + 1);
prepadded.push(E::Fr::one());
prepadded.extend(values);
let z_2 = Polynomial::<E::Fr, Values>::from_values(prepadded)?;
z_2
};
// Round 2: interpolate the grand products, commit, and absorb them.
let z_1 = z_1.ifft(&worker);
let z_2 = z_2.ifft(&worker);
let (z_1_commitment, z_1_aux) = committer.commit_single(&z_1);
let (z_2_commitment, z_2_aux) = committer.commit_single(&z_2);
transcript.commit_input(&z_1_commitment);
transcript.commit_input(&z_2_commitment);
// Shifted copies: distributing powers of omega over the coefficients makes
// p_shifted(X) = p(omega*X), i.e. evaluating at z yields p(z*omega).
let mut z_1_shifted = z_1.clone();
z_1_shifted.distribute_powers(&worker, z_1.omega);
let mut z_2_shifted = z_2.clone();
z_2_shifted.distribute_powers(&worker, z_2.omega);
// 4x coset LDEs of witnesses and setup polynomials, used to compute the
// quotient polynomial pointwise (t has degree up to ~3n < 4n).
let a_lde = a_poly.clone().coset_lde(&worker, 4)?;
let b_lde = b_poly.clone().coset_lde(&worker, 4)?;
let c_lde = c_poly.clone().coset_lde(&worker, 4)?;
let (q_l, q_r, q_o, q_m, q_c, s_id, sigma_1, sigma_2, sigma_3) = assembly.output_setup_polynomials(&worker)?;
let q_l_lde = q_l.clone().coset_lde(&worker, 4)?;
let q_r_lde = q_r.clone().coset_lde(&worker, 4)?;
let q_o_lde = q_o.clone().coset_lde(&worker, 4)?;
let q_m_lde = q_m.clone().coset_lde(&worker, 4)?;
let q_c_lde = q_c.clone().coset_lde(&worker, 4)?;
let s_id_lde = s_id.clone().coset_lde(&worker, 4)?;
let sigma_1_lde = sigma_1.clone().coset_lde(&worker, 4)?;
let sigma_2_lde = sigma_2.clone().coset_lde(&worker, 4)?;
let sigma_3_lde = sigma_3.clone().coset_lde(&worker, 4)?;
// we do not commit those cause those are known already
// n and 2n as field elements, used as the constant offsets of the
// identity-permutation columns (s_id + n, s_id + 2n).
let n_fe = E::Fr::from_str(&n.to_string()).expect("must be valid field element")
let mut two_n_fe = n_fe;
two_n_fe.double();
// Quotient-combination challenge.
let alpha = transcript.get_challenge();
// 1/Z_H(X) on the coset; NOTE: each constraint term below re-scales this
// accumulator by alpha, so the k-th term is effectively weighted alpha^k.
let mut vanishing_poly_inverse = assembly.calculate_inverse_vanishing_polynomial_in_a_coset(&worker, q_c_lde.size(), required_domain_size.next_power_of_two())?;
// Term 1: gate constraint (q_c + q_l*a + q_r*b + q_o*c + q_m*a*b) / Z_H.
let mut t_1 = {
let mut t_1 = q_c_lde;
let mut q_l_by_a = q_l_lde;
q_l_by_a.mul_assign(&worker, &a_lde);
t_1.add_assign(&worker, &q_l_by_a);
drop(q_l_by_a);
let mut q_r_by_b = q_r_lde;
q_r_by_b.mul_assign(&worker, &b_lde);
t_1.add_assign(&worker, &q_r_by_b);
drop(q_r_by_b);
let mut q_o_by_c = q_o_lde;
q_o_by_c.mul_assign(&worker, &c_lde);
t_1.add_assign(&worker, &q_o_by_c);
drop(q_o_by_c);
let mut q_m_by_ab = q_m_lde;
q_m_by_ab.mul_assign(&worker, &a_lde);
q_m_by_ab.mul_assign(&worker, &b_lde);
t_1.add_assign(&worker, &q_m_by_ab);
drop(q_m_by_ab);
vanishing_poly_inverse.scale(&worker, alpha);
t_1.mul_assign(&worker, &vanishing_poly_inverse);
t_1
};
let z_1_lde = z_1.clone().coset_lde(&worker, 4)?;
let z_1_shifted_lde = z_1_shifted.clone().coset_lde(&worker, 4)?;
let z_2_lde = z_2.clone().coset_lde(&worker, 4)?;
let z_2_shifted_lde = z_2_shifted.clone().coset_lde(&worker, 4)?;
// Term 2: z_1 recurrence — z_1(X) * prod(w + beta*s_id + gamma) must equal
// z_1(omega*X); the difference, divided by Z_H, is folded into t_1.
{
// TODO: May be optimize number of additions
let mut contrib_z_1 = z_1_lde.clone();
let mut s_id_by_beta = s_id_lde;
s_id_by_beta.scale(&worker, beta);
let mut n_by_beta = n_fe;
n_by_beta.mul_assign(&beta);
let mut a_perm = s_id_by_beta.clone();
a_perm.add_constant(&worker, &gamma);
a_perm.add_assign(&worker, &a_lde);
contrib_z_1.mul_assign(&worker, &a_perm);
drop(a_perm);
// Shift by beta*n to move to the second identity column (s_id + n).
s_id_by_beta.add_constant(&worker, &n_by_beta);
let mut b_perm = s_id_by_beta.clone();
b_perm.add_constant(&worker, &gamma);
b_perm.add_assign(&worker, &b_lde);
contrib_z_1.mul_assign(&worker, &b_perm);
drop(b_perm);
// Shift by another beta*n for the third column (s_id + 2n).
s_id_by_beta.add_constant(&worker, &n_by_beta);
let mut c_perm = s_id_by_beta;
c_perm.add_constant(&worker, &gamma);
c_perm.add_assign(&worker, &c_lde);
contrib_z_1.mul_assign(&worker, &c_perm);
drop(c_perm);
contrib_z_1.sub_assign(&worker, &z_1_shifted_lde);
vanishing_poly_inverse.scale(&worker, alpha);
contrib_z_1.mul_assign(&worker, &vanishing_poly_inverse);
t_1.add_assign(&worker, &contrib_z_1);
}
// Term 3: z_2 recurrence over the copy permutation sigma, same shape.
{
// TODO: May be optimize number of additions
let mut contrib_z_2 = z_2_lde.clone();
let mut a_perm = sigma_1_lde;
a_perm.scale(&worker, beta);
a_perm.add_constant(&worker, &gamma);
a_perm.add_assign(&worker, &a_lde);
contrib_z_2.mul_assign(&worker, &a_perm);
drop(a_perm);
let mut b_perm = sigma_2_lde;
b_perm.scale(&worker, beta);
b_perm.add_constant(&worker, &gamma);
b_perm.add_assign(&worker, &b_lde);
contrib_z_2.mul_assign(&worker, &b_perm);
drop(b_perm);
let mut c_perm = sigma_3_lde;
c_perm.scale(&worker, beta);
c_perm.add_constant(&worker, &gamma);
c_perm.add_assign(&worker, &c_lde);
contrib_z_2.mul_assign(&worker, &c_perm);
drop(c_perm);
contrib_z_2.sub_assign(&worker, &z_2_shifted_lde);
vanishing_poly_inverse.scale(&worker, alpha);
contrib_z_2.mul_assign(&worker, &vanishing_poly_inverse);
t_1.add_assign(&worker, &contrib_z_2);
}
drop(a_lde);
drop(b_lde);
drop(c_lde);
// Lagrange polynomials for boundary constraints: first (index 0) and last
// (index n-1) domain elements.
let l_0 = assembly.calculate_lagrange_poly(&worker, required_domain_size.next_power_of_two(), 0)?;
let l_n_minus_one = assembly.calculate_lagrange_poly(&worker, required_domain_size.next_power_of_two(), n-1)?;
// Term 4: (z_1 - z_2)(omega*X) * L_{n-1}(X) — the two grand products must
// agree on the last element.
{
let mut z_1_minus_z_2_shifted = z_1_shifted_lde.clone();
z_1_minus_z_2_shifted.sub_assign(&worker, &z_2_shifted_lde);
let l = l_n_minus_one.clone().coset_lde(&worker, 4)?;
z_1_minus_z_2_shifted.mul_assign(&worker, &l);
drop(l);
vanishing_poly_inverse.scale(&worker, alpha);
z_1_minus_z_2_shifted.mul_assign(&worker, &vanishing_poly_inverse);
t_1.add_assign(&worker, &z_1_minus_z_2_shifted);
}
// Term 5: (z_1 - z_2)(X) * L_0(X) — both start at the same value.
{
let mut z_1_minus_z_2= z_1_lde.clone();
z_1_minus_z_2.sub_assign(&worker, &z_2_lde);
let l = l_0.clone().coset_lde(&worker, 4)?;
z_1_minus_z_2.mul_assign(&worker, &l);
drop(l);
vanishing_poly_inverse.scale(&worker, alpha);
z_1_minus_z_2.mul_assign(&worker, &vanishing_poly_inverse);
t_1.add_assign(&worker, &z_1_minus_z_2);
}
// Back to coefficient form: t(X) from its coset evaluations.
let t_poly = t_1.icoset_fft(&worker);
println!("End work with polynomials");
// let degree = get_degree::<E>(&t_poly);
// assert!(degree <= 3*n);
// Debug helper for the commented-out degree assertion above; currently
// unused (its only call sites are commented out).
fn get_degree<E:Engine>(poly: &Polynomial<E::Fr, Coefficients>) -> usize {
let mut degree = poly.as_ref().len() - 1;
for c in poly.as_ref().iter().rev() {
if c.is_zero() {
degree -= 1;
} else {
break;
}
}
println!("Degree = {}", degree);
degree
}
// Round 3: commit t with the 4x-sized committer.
let (t_commitment, t_aux) = large_committer.commit_single(&t_poly);
transcript.commit_input(&t_commitment);
// Evaluation challenge.
let z = transcript.get_challenge();
// this is a sanity check
let a_at_z = a_poly.evaluate_at(&worker, z);
let b_at_z = b_poly.evaluate_at(&worker, z);
let c_at_z = c_poly.evaluate_at(&worker, z);
let q_l_at_z = q_l.evaluate_at(&worker, z);
let q_r_at_z = q_r.evaluate_at(&worker, z);
let q_o_at_z = q_o.evaluate_at(&worker, z);
let q_m_at_z = q_m.evaluate_at(&worker, z);
let q_c_at_z = q_c.evaluate_at(&worker, z);
let s_id_at_z = s_id.evaluate_at(&worker, z);
let sigma_1_at_z = sigma_1.evaluate_at(&worker, z);
let sigma_2_at_z = sigma_2.evaluate_at(&worker, z);
let sigma_3_at_z = sigma_3.evaluate_at(&worker, z);
let mut inverse_vanishing_at_z = assembly.evaluate_inverse_vanishing_poly(required_domain_size.next_power_of_two(), z);
let z_1_at_z = z_1.evaluate_at(&worker, z);
let z_2_at_z = z_2.evaluate_at(&worker, z);
// Shifted polynomials evaluated at z give z_1(z*omega), z_2(z*omega).
let z_1_shifted_at_z = z_1_shifted.evaluate_at(&worker, z);
let z_2_shifted_at_z = z_2_shifted.evaluate_at(&worker, z);
let t_at_z = t_poly.evaluate_at(&worker, z);
let l_0_at_z = l_0.evaluate_at(&worker, z);
let l_n_minus_one_at_z = l_n_minus_one.evaluate_at(&worker, z);
// Absorb all claimed evaluations into the transcript before drawing the
// opening-aggregation challenge.
{
transcript.commit_field_element(&a_at_z);
transcript.commit_field_element(&b_at_z);
transcript.commit_field_element(&c_at_z);
transcript.commit_field_element(&q_l_at_z);
transcript.commit_field_element(&q_r_at_z);
transcript.commit_field_element(&q_o_at_z);
transcript.commit_field_element(&q_m_at_z);
transcript.commit_field_element(&q_c_at_z);
transcript.commit_field_element(&s_id_at_z);
transcript.commit_field_element(&sigma_1_at_z);
transcript.commit_field_element(&sigma_2_at_z);
transcript.commit_field_element(&sigma_3_at_z);
transcript.commit_field_element(&t_at_z);
transcript.commit_field_element(&z_1_at_z);
transcript.commit_field_element(&z_2_at_z);
transcript.commit_field_element(&z_1_shifted_at_z);
transcript.commit_field_element(&z_2_shifted_at_z);
}
let aggregation_challenge = transcript.get_challenge();
// let shifted_opening_aggregation_challenge = transcript.get_challenge();
// this is a sanity check
// Re-evaluate the whole quotient identity at the single point z (mirroring
// the five LDE terms above term by term; `inverse_vanishing_at_z` is
// re-multiplied by alpha before each term, matching the alpha^k weights)
// and assert it equals the committed t(z).
{
let mut t_1 = {
let mut res = q_c_at_z;
let mut tmp = q_l_at_z;
tmp.mul_assign(&a_at_z);
res.add_assign(&tmp);
let mut tmp = q_r_at_z;
tmp.mul_assign(&b_at_z);
res.add_assign(&tmp);
let mut tmp = q_o_at_z;
tmp.mul_assign(&c_at_z);
res.add_assign(&tmp);
let mut tmp = q_m_at_z;
tmp.mul_assign(&a_at_z);
tmp.mul_assign(&b_at_z);
res.add_assign(&tmp);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
res
};
{
let mut res = z_1_at_z;
let mut tmp = s_id_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&a_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = s_id_at_z;
tmp.add_assign(&n_fe);
tmp.mul_assign(&beta);
tmp.add_assign(&b_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = s_id_at_z;
tmp.add_assign(&two_n_fe);
tmp.mul_assign(&beta);
tmp.add_assign(&c_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
res.sub_assign(&z_1_shifted_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
{
let mut res = z_2_at_z;
let mut tmp = sigma_1_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&a_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = sigma_2_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&b_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = sigma_3_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&c_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
res.sub_assign(&z_2_shifted_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
{
let mut res = z_1_shifted_at_z;
res.sub_assign(&z_2_shifted_at_z);
res.mul_assign(&l_n_minus_one_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
{
let mut res = z_1_at_z;
res.sub_assign(&z_2_at_z);
res.mul_assign(&l_0_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
assert_eq!(t_at_z, t_1, "sanity check failed");
}
// we do NOT compute linearization polynomial for non-homomorphic case
// Second opening point for the shifted grand products: z * omega.
let mut z_by_omega = z;
z_by_omega.mul_assign(&z_1.omega);
// Aggregated multi-opening: note z_1 and z_2 each appear twice — once
// opened at z (unshifted values) and once at z*omega (shifted values).
// The four parallel vectors below must stay index-aligned.
let opening_polynomials = vec![
&a_poly,
&b_poly,
&c_poly,
&q_l,
&q_r,
&q_o,
&q_m,
&q_c,
&s_id,
&sigma_1,
&sigma_2,
&sigma_3,
&z_1,
&z_2,
&z_1,
&z_2,
];
let degrees: Vec<usize> = opening_polynomials.iter().map(|el| el.size()).collect();
let precomputations = Some(vec![
a_aux_data.as_ref().expect("is some"),
b_aux_data.as_ref().expect("is some"),
c_aux_data.as_ref().expect("is some"),
aux.q_l_aux.as_ref().expect("is some"),
aux.q_r_aux.as_ref().expect("is some"),
aux.q_o_aux.as_ref().expect("is some"),
aux.q_m_aux.as_ref().expect("is some"),
aux.q_c_aux.as_ref().expect("is some"),
aux.s_id_aux.as_ref().expect("is some"),
aux.sigma_1_aux.as_ref().expect("is some"),
aux.sigma_2_aux.as_ref().expect("is some"),
aux.sigma_3_aux.as_ref().expect("is some"),
z_1_aux.as_ref().expect("is some"),
z_2_aux.as_ref().expect("is some"),
z_1_aux.as_ref().expect("is some"),
z_2_aux.as_ref().expect("is some"),
]);
let opening_values = vec![
a_at_z,
b_at_z,
c_at_z,
q_l_at_z,
q_r_at_z,
q_o_at_z,
q_m_at_z,
q_c_at_z,
s_id_at_z,
sigma_1_at_z,
sigma_2_at_z,
sigma_3_at_z,
z_1_at_z,
z_2_at_z,
z_1_shifted_at_z,
z_2_shifted_at_z
];
let opening_points = vec![
z,
z,
z,
z,
z,
z,
z,
z,
z,
z,
z,
z,
z,
z,
z_by_omega,
z_by_omega
];
let multiopen_proof = committer.open_multiple(
opening_polynomials,
degrees,
aggregation_challenge,
opening_points,
opening_values,
&precomputations,
&mut transcript
);
// t was committed with the larger committer, so it gets its own opening.
let t_opening_proof = large_committer.open_single(
&t_poly,
z,
t_at_z,
&t_aux.as_ref(),
&mut transcript
);
// let opening_polynomials = vec![
// &z_1,
// &z_2,
// ];
// let degrees: Vec<usize> = opening_polynomials.iter().map(|el| el.size()).collect();
// let precomputations = Some(vec![
// z_1_aux.as_ref().expect("is some"),
// z_2_aux.as_ref().expect("is some"),
// ]);
// let opening_values = vec![
// z_1_shifted_at_z,
// z_2_shifted_at_z
// ];
// let shifted_proof = committer.open_multiple(
// opening_polynomials,
// degrees,
// shifted_opening_aggregation_challenge,
// opening_point,
// opening_values,
// &precomputations,
// &mut transcript
// );
// Assemble the final proof from the commitments, claimed evaluations, and
// the two opening proofs.
let proof = PlonkNonhomomorphicProof::<E, S> {
a_opening_value: a_at_z,
b_opening_value: b_at_z,
c_opening_value: c_at_z,
q_l_opening_value: q_l_at_z,
q_r_opening_value: q_r_at_z,
q_o_opening_value: q_o_at_z,
q_m_opening_value: q_m_at_z,
q_c_opening_value: q_c_at_z,
s_id_opening_value: s_id_at_z,
sigma_1_opening_value: sigma_1_at_z,
sigma_2_opening_value: sigma_2_at_z,
sigma_3_opening_value: sigma_3_at_z,
z_1_unshifted_opening_value: z_1_at_z,
z_2_unshifted_opening_value: z_2_at_z,
z_1_shifted_opening_value: z_1_shifted_at_z,
z_2_shifted_opening_value: z_2_shifted_at_z,
t_opening_value: t_at_z,
a_commitment: a_commitment,
b_commitment: b_commitment,
c_commitment: c_commitment,
z_1_commitment: z_1_commitment,
z_2_commitment: z_2_commitment,
t_commitment: t_commitment,
openings_proof: multiopen_proof,
// shifted_openings_proof: shifted_proof,
t_opening_proof: t_opening_proof,
};
Ok(proof)
}
pub fn prove_nonhomomorphic_chunked<E: Engine, S: CommitmentScheme<E::Fr, Prng = T>, T: Transcript<E::Fr, Input = S::Commitment>, C: Circuit<E>>(
circuit: &C,
aux: &PlonkSetupAuxData<E, S>,
meta: S::Meta,
) -> Result<PlonkChunkedNonhomomorphicProof<E, S>, SynthesisError> {
assert!(S::IS_HOMOMORPHIC == false);
let mut assembly = ProvingAssembly::<E>::new();
circuit.synthesize(&mut assembly)?;
assembly.finalize();
let num_gates = assembly.num_gates();
let committer = S::new_for_size(num_gates.next_power_of_two(), meta);
let worker = Worker::new();
let mut transcript = T::new();
let n = assembly.input_gates.len() + assembly.aux_gates.len();
// we need n+1 to be a power of two and can not have n to be power of two
let required_domain_size = n + 1;
assert!(required_domain_size.is_power_of_two());
println!("Start work with polynomials");
let (w_l, w_r, w_o) = assembly.make_wire_assingments();
let w_l = Polynomial::<E::Fr, Values>::from_values_unpadded(w_l)?;
let w_r = Polynomial::<E::Fr, Values>::from_values_unpadded(w_r)?;
let w_o = Polynomial::<E::Fr, Values>::from_values_unpadded(w_o)?;
let a_poly = w_l.clone_padded_to_domain()?.ifft(&worker);
let b_poly = w_r.clone_padded_to_domain()?.ifft(&worker);
let c_poly = w_o.clone_padded_to_domain()?.ifft(&worker);
let (a_commitment, a_aux_data) = committer.commit_single(&a_poly);
let (b_commitment, b_aux_data) = committer.commit_single(&b_poly);
let (c_commitment, c_aux_data) = committer.commit_single(&c_poly);
transcript.commit_input(&a_commitment);
transcript.commit_input(&b_commitment);
transcript.commit_input(&c_commitment);
// TODO: Add public inputs
let beta = transcript.get_challenge();
let gamma = transcript.get_challenge();
let mut w_l_plus_gamma = w_l.clone();
w_l_plus_gamma.add_constant(&worker, &gamma);
let mut w_r_plus_gamma = w_r.clone();
w_r_plus_gamma.add_constant(&worker, &gamma);
let mut w_o_plus_gamma = w_o.clone();
w_o_plus_gamma.add_constant(&worker, &gamma);
let z_1 = {
let n = assembly.input_gates.len() + assembly.aux_gates.len();
let s_id_1: Vec<_> = (1..=n).collect();
let s_id_1 = convert_to_field_elements(&s_id_1, &worker);
let s_id_1 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_1)?;
let mut w_l_contribution = w_l_plus_gamma.clone();
w_l_contribution.add_assign_scaled(&worker, &s_id_1, &beta);
drop(s_id_1);
let s_id_2: Vec<_> = ((n+1)..=(2*n)).collect();
let s_id_2 = convert_to_field_elements(&s_id_2, &worker);
let s_id_2 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_2)?;
let mut w_r_contribution = w_r_plus_gamma.clone();
w_r_contribution.add_assign_scaled(&worker, &s_id_2, &beta);
drop(s_id_2);
w_l_contribution.mul_assign(&worker, &w_r_contribution);
drop(w_r_contribution);
let s_id_3: Vec<_> = ((2*n+1)..=(3*n)).collect();
let s_id_3 = convert_to_field_elements(&s_id_3, &worker);
let s_id_3 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_3)?;
let mut w_o_contribution = w_o_plus_gamma.clone();
w_o_contribution.add_assign_scaled(&worker, &s_id_3, &beta);
drop(s_id_3);
w_l_contribution.mul_assign(&worker, &w_o_contribution);
drop(w_o_contribution);
let grand_product = w_l_contribution.calculate_grand_product(&worker)?;
drop(w_l_contribution);
let values = grand_product.into_coeffs();
assert!((values.len() + 1).is_power_of_two());
let mut prepadded = Vec::with_capacity(values.len() + 1);
prepadded.push(E::Fr::one());
prepadded.extend(values);
Polynomial::<E::Fr, Values>::from_values(prepadded)?
};
let z_2 = {
let (sigma_1, sigma_2, sigma_3) = assembly.calculate_permutations_as_in_a_paper();
let sigma_1 = convert_to_field_elements(&sigma_1, &worker);
let sigma_1 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_1)?;
let mut w_l_contribution = w_l_plus_gamma.clone();
w_l_contribution.add_assign_scaled(&worker, &sigma_1, &beta);
drop(sigma_1);
let sigma_2 = convert_to_field_elements(&sigma_2, &worker);
let sigma_2 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_2)?;
let mut w_r_contribution = w_r_plus_gamma.clone();
w_r_contribution.add_assign_scaled(&worker, &sigma_2, &beta);
drop(sigma_2);
w_l_contribution.mul_assign(&worker, &w_r_contribution);
drop(w_r_contribution);
let sigma_3 = convert_to_field_elements(&sigma_3, &worker);
let sigma_3 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_3)?;
let mut w_o_contribution = w_o_plus_gamma.clone();
w_o_contribution.add_assign_scaled(&worker, &sigma_3, &beta);
drop(sigma_3);
w_l_contribution.mul_assign(&worker, &w_o_contribution);
drop(w_o_contribution);
let grand_product = w_l_contribution.calculate_grand_product(&worker)?;
drop(w_l_contribution);
let values = grand_product.into_coeffs();
assert!((values.len() + 1).is_power_of_two());
let mut prepadded = Vec::with_capacity(values.len() + 1);
prepadded.push(E::Fr::one());
prepadded.extend(values);
let z_2 = Polynomial::<E::Fr, Values>::from_values(prepadded)?;
z_2
};
let z_1 = z_1.ifft(&worker);
let z_2 = z_2.ifft(&worker);
let (z_1_commitment, z_1_aux) = committer.commit_single(&z_1);
let (z_2_commitment, z_2_aux) = committer.commit_single(&z_2);
transcript.commit_input(&z_1_commitment);
transcript.commit_input(&z_2_commitment);
let mut z_1_shifted = z_1.clone();
z_1_shifted.distribute_powers(&worker, z_1.omega);
let mut z_2_shifted = z_2.clone();
z_2_shifted.distribute_powers(&worker, z_2.omega);
let a_lde = a_poly.clone().coset_lde(&worker, 4)?;
let b_lde = b_poly.clone().coset_lde(&worker, 4)?;
let c_lde = c_poly.clone().coset_lde(&worker, 4)?;
let (q_l, q_r, q_o, q_m, q_c, s_id, sigma_1, sigma_2, sigma_3) = assembly.output_setup_polynomials(&worker)?;
let q_l_lde = q_l.clone().coset_lde(&worker, 4)?;
let q_r_lde = q_r.clone().coset_lde(&worker, 4)?;
let q_o_lde = q_o.clone().coset_lde(&worker, 4)?;
let q_m_lde = q_m.clone().coset_lde(&worker, 4)?;
let q_c_lde = q_c.clone().coset_lde(&worker, 4)?;
let s_id_lde = s_id.clone().coset_lde(&worker, 4)?;
let sigma_1_lde = sigma_1.clone().coset_lde(&worker, 4)?;
let sigma_2_lde = sigma_2.clone().coset_lde(&worker, 4)?;
let sigma_3_lde = sigma_3.clone().coset_lde(&worker, 4)?;
// we do not commit those cause those are known already
let n_fe = E::Fr::from_str(&n.to_string()).expect("must be valid field element");
let mut two_n_fe = n_fe;
two_n_fe.double();
let alpha = transcript.get_challenge();
let mut vanishing_poly_inverse = assembly.calculate_inverse_vanishing_polynomial_in_a_coset(&worker, q_c_lde.size(), required_domain_size.next_power_of_two())?;
let mut t_1 = {
let mut t_1 = q_c_lde;
let mut q_l_by_a = q_l_lde;
q_l_by_a.mul_assign(&worker, &a_lde);
t_1.add_assign(&worker, &q_l_by_a);
drop(q_l_by_a);
let mut q_r_by_b = q_r_lde;
q_r_by_b.mul_assign(&worker, &b_lde);
t_1.add_assign(&worker, &q_r_by_b);
drop(q_r_by_b);
let mut q_o_by_c = q_o_lde;
q_o_by_c.mul_assign(&worker, &c_lde);
t_1.add_assign(&worker, &q_o_by_c);
drop(q_o_by_c);
let mut q_m_by_ab = q_m_lde;
q_m_by_ab.mul_assign(&worker, &a_lde);
q_m_by_ab.mul_assign(&worker, &b_lde);
t_1.add_assign(&worker, &q_m_by_ab);
drop(q_m_by_ab);
vanishing_poly_inverse.scale(&worker, alpha);
t_1.mul_assign(&worker, &vanishing_poly_inverse);
t_1
};
let z_1_lde = z_1.clone().coset_lde(&worker, 4)?;
let z_1_shifted_lde = z_1_shifted.clone().coset_lde(&worker, 4)?;
let z_2_lde = z_2.clone().coset_lde(&worker, 4)?;
let z_2_shifted_lde = z_2_shifted.clone().coset_lde(&worker, 4)?;
{
// TODO: May be optimize number of additions
let mut contrib_z_1 = z_1_lde.clone();
let mut s_id_by_beta = s_id_lde;
s_id_by_beta.scale(&worker, beta);
let mut n_by_beta = n_fe;
n_by_beta.mul_assign(&beta);
let mut a_perm = s_id_by_beta.clone();
a_perm.add_constant(&worker, &gamma);
a_perm.add_assign(&worker, &a_lde);
contrib_z_1.mul_assign(&worker, &a_perm);
drop(a_perm);
s_id_by_beta.add_constant(&worker, &n_by_beta);
let mut b_perm = s_id_by_beta.clone();
b_perm.add_constant(&worker, &gamma);
b_perm.add_assign(&worker, &b_lde);
contrib_z_1.mul_assign(&worker, &b_perm);
drop(b_perm);
s_id_by_beta.add_constant(&worker, &n_by_beta);
let mut c_perm = s_id_by_beta;
c_perm.add_constant(&worker, &gamma);
c_perm.add_assign(&worker, &c_lde);
contrib_z_1.mul_assign(&worker, &c_perm);
drop(c_perm);
contrib_z_1.sub_assign(&worker, &z_1_shifted_lde);
vanishing_poly_inverse.scale(&worker, alpha);
contrib_z_1.mul_assign(&worker, &vanishing_poly_inverse);
t_1.add_assign(&worker, &contrib_z_1);
}
{
// TODO: May be optimize number of additions
let mut contrib_z_2 = z_2_lde.clone();
let mut a_perm = sigma_1_lde;
a_perm.scale(&worker, beta);
a_perm.add_constant(&worker, &gamma);
a_perm.add_assign(&worker, &a_lde);
contrib_z_2.mul_assign(&worker, &a_perm);
drop(a_perm);
let mut b_perm = sigma_2_lde;
b_perm.scale(&worker, beta);
b_perm.add_constant(&worker, &gamma);
b_perm.add_assign(&worker, &b_lde);
contrib_z_2.mul_assign(&worker, &b_perm);
drop(b_perm);
let mut c_perm = sigma_3_lde;
c_perm.scale(&worker, beta);
c_perm.add_constant(&worker, &gamma);
c_perm.add_assign(&worker, &c_lde);
contrib_z_2.mul_assign(&worker, &c_perm);
drop(c_perm);
contrib_z_2.sub_assign(&worker, &z_2_shifted_lde);
vanishing_poly_inverse.scale(&worker, alpha);
contrib_z_2.mul_assign(&worker, &vanishing_poly_inverse);
t_1.add_assign(&worker, &contrib_z_2);
}
drop(a_lde);
drop(b_lde);
drop(c_lde);
let l_0 = assembly.calculate_lagrange_poly(&worker, required_domain_size.next_power_of_two(), 0)?;
let l_n_minus_one = assembly.calculate_lagrange_poly(&worker, required_domain_size.next_power_of_two(), n-1)?;
{
let mut z_1_minus_z_2_shifted = z_1_shifted_lde.clone();
z_1_minus_z_2_shifted.sub_assign(&worker, &z_2_shifted_lde);
let l = l_n_minus_one.clone().coset_lde(&worker, 4)?;
z_1_minus_z_2_shifted.mul_assign(&worker, &l);
drop(l);
vanishing_poly_inverse.scale(&worker, alpha);
z_1_minus_z_2_shifted.mul_assign(&worker, &vanishing_poly_inverse);
t_1.add_assign(&worker, &z_1_minus_z_2_shifted);
}
{
let mut z_1_minus_z_2= z_1_lde.clone();
z_1_minus_z_2.sub_assign(&worker, &z_2_lde);
let l = l_0.clone().coset_lde(&worker, 4)?;
z_1_minus_z_2.mul_assign(&worker, &l);
drop(l);
vanishing_poly_inverse.scale(&worker, alpha);
z_1_minus_z_2.mul_assign(&worker, &vanishing_poly_inverse);
t_1.add_assign(&worker, &z_1_minus_z_2);
}
let t_poly = t_1.icoset_fft(&worker);
println!("End work with polynomials");
let mut t_poly_parts = t_poly.break_into_multiples(required_domain_size)?;
let last = t_poly_parts.pop().expect("last part is irrelevant");
for el in last.as_ref().iter() {
if !el.is_zero() {
panic!("T poly degree is too large");
}
}
let t_poly_high = t_poly_parts.pop().expect("high exists");
let t_poly_mid = t_poly_parts.pop().expect("mid exists");
let t_poly_low = t_poly_parts.pop().expect("low exists");
let (t_low_commitment, t_low_aux) = committer.commit_single(&t_poly_low);
let (t_mid_commitment, t_mid_aux) = committer.commit_single(&t_poly_mid);
let (t_high_commitment, t_high_aux) = committer.commit_single(&t_poly_high);
transcript.commit_input(&t_low_commitment);
transcript.commit_input(&t_mid_commitment);
transcript.commit_input(&t_high_commitment);
let z = transcript.get_challenge();
let a_at_z = a_poly.evaluate_at(&worker, z);
let b_at_z = b_poly.evaluate_at(&worker, z);
let c_at_z = c_poly.evaluate_at(&worker, z);
let q_l_at_z = q_l.evaluate_at(&worker, z);
let q_r_at_z = q_r.evaluate_at(&worker, z);
let q_o_at_z = q_o.evaluate_at(&worker, z);
let q_m_at_z = q_m.evaluate_at(&worker, z);
let q_c_at_z = q_c.evaluate_at(&worker, z);
let s_id_at_z = s_id.evaluate_at(&worker, z);
let sigma_1_at_z = sigma_1.evaluate_at(&worker, z);
let sigma_2_at_z = sigma_2.evaluate_at(&worker, z);
let sigma_3_at_z = sigma_3.evaluate_at(&worker, z);
let mut inverse_vanishing_at_z = assembly.evaluate_inverse_vanishing_poly(required_domain_size.next_power_of_two(), z);
let z_1_at_z = z_1.evaluate_at(&worker, z);
let z_2_at_z = z_2.evaluate_at(&worker, z);
let z_1_shifted_at_z = z_1_shifted.evaluate_at(&worker, z);
let z_2_shifted_at_z = z_2_shifted.evaluate_at(&worker, z);
let t_low_at_z = t_poly_low.evaluate_at(&worker, z);
let t_mid_at_z = t_poly_mid.evaluate_at(&worker, z);
let t_high_at_z = t_poly_high.evaluate_at(&worker, z);
let l_0_at_z = l_0.evaluate_at(&worker, z);
let l_n_minus_one_at_z = l_n_minus_one.evaluate_at(&worker, z);
{
transcript.commit_field_element(&a_at_z);
transcript.commit_field_element(&b_at_z);
transcript.commit_field_element(&c_at_z);
transcript.commit_field_element(&q_l_at_z);
transcript.commit_field_element(&q_r_at_z);
transcript.commit_field_element(&q_o_at_z);
transcript.commit_field_element(&q_m_at_z);
transcript.commit_field_element(&q_c_at_z);
transcript.commit_field_element(&s_id_at_z);
transcript.commit_field_element(&sigma_1_at_z);
transcript.commit_field_element(&sigma_2_at_z);
transcript.commit_field_element(&sigma_3_at_z);
transcript.commit_field_element(&t_low_at_z);
transcript.commit_field_element(&t_mid_at_z);
transcript.commit_field_element(&t_high_at_z);
transcript.commit_field_element(&z_1_at_z);
transcript.commit_field_element(&z_2_at_z);
transcript.commit_field_element(&z_1_shifted_at_z);
transcript.commit_field_element(&z_2_shifted_at_z);
}
let aggregation_challenge = transcript.get_challenge();
let z_in_pow_of_domain_size = z.pow([required_domain_size as u64]);
// this is a sanity check
{
let mut t_1 = {
let mut res = q_c_at_z;
let mut tmp = q_l_at_z;
tmp.mul_assign(&a_at_z);
res.add_assign(&tmp);
let mut tmp = q_r_at_z;
tmp.mul_assign(&b_at_z);
res.add_assign(&tmp);
let mut tmp = q_o_at_z;
tmp.mul_assign(&c_at_z);
res.add_assign(&tmp);
let mut tmp = q_m_at_z;
tmp.mul_assign(&a_at_z);
tmp.mul_assign(&b_at_z);
res.add_assign(&tmp);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
res
};
{
let mut res = z_1_at_z;
let mut tmp = s_id_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&a_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = s_id_at_z;
tmp.add_assign(&n_fe);
tmp.mul_assign(&beta);
tmp.add_assign(&b_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = s_id_at_z;
tmp.add_assign(&two_n_fe);
tmp.mul_assign(&beta);
tmp.add_assign(&c_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
res.sub_assign(&z_1_shifted_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
{
let mut res = z_2_at_z;
let mut tmp = sigma_1_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&a_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = sigma_2_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&b_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
let mut tmp = sigma_3_at_z;
tmp.mul_assign(&beta);
tmp.add_assign(&c_at_z);
tmp.add_assign(&gamma);
res.mul_assign(&tmp);
res.sub_assign(&z_2_shifted_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
{
let mut res = z_1_shifted_at_z;
res.sub_assign(&z_2_shifted_at_z);
res.mul_assign(&l_n_minus_one_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
{
let mut res = z_1_at_z;
res.sub_assign(&z_2_at_z);
res.mul_assign(&l_0_at_z);
inverse_vanishing_at_z.mul_assign(&alpha);
res.mul_assign(&inverse_vanishing_at_z);
t_1.add_assign(&res);
}
let mut t_at_z = E::Fr::zero();
t_at_z.add_assign(&t_low_at_z);
let mut tmp = z_in_pow_of_domain_size;
tmp.mul_assign(&t_mid_at_z);
t_at_z.add_assign(&tmp);
let mut tmp = z_in_pow_of_domain_size;
tmp.mul_assign(&z_in_pow_of_domain_size);
tmp.mul_assign(&t_high_at_z);
t_at_z.add_assign(&tmp);
assert_eq!(t_at_z, t_1, "sanity check failed");
}
// we do NOT compute linearization polynomial for non-homomorphic case
let mut z_by_omega = z;
z_by_omega.mul_assign(&z_1.omega);
let opening_polynomials = vec![
&a_poly,
&b_poly,
&c_poly,
&q_l,
&q_r,
&q_o,
&q_m,
&q_c,
&s_id,
&sigma_1,
&sigma_2,
&sigma_3,
&z_1,
&z_2,
&z_1,
&z_2,
&t_poly_low,
&t_poly_mid,
&t_poly_high
];
let degrees: Vec<usize> = opening_polynomials.iter().map(|el| el.size()).collect();
let precomputations = Some(vec![
a_aux_data.as_ref().expect("is some"),
b_aux_data.as_ref().expect("is some"),
c_aux_data.as_ref().expect("is some"),
aux.q_l_aux.as_ref().expect("is some"),
aux.q_r_aux.as_ref().expect("is some"),
aux.q_o_aux.as_ref().expect("is some"),
aux.q_m_aux.as_ref().expect("is some"),
aux.q_c_aux.as_ref().expect("is some"),
aux.s_id_aux.as_ref().expect("is some"),
aux.sigma_1_aux.as_ref().expect("is some"),
aux.sigma_2_aux.as_ref().expect("is some"),
aux.sigma_3_aux.as_ref().expect("is some"),
z_1_aux.as_ref().expect("is some"),
z_2_aux.as_ref().expect("is some"),
z_1_aux.as_ref().expect("is some"),
z_2_aux.as_ref().expect("is some"),
t_low_aux.as_ref().expect("is some"),
t_mid_aux.as_ref().expect("is some"),
t_high_aux.as_ref().expect("is some"),
]);
let opening_values = vec![
a_at_z,
b_at_z,
c_at_z,
q_l_at_z,
q_r_at_z,
q_o_at_z,
q_m_at_z,
q_c_at_z,
s_id_at_z,
sigma_1_at_z,
sigma_2_at_z,
sigma_3_at_z,
z_1_at_z,
z_2_at_z,
z_1_shifted_at_z,
z_2_shifted_at_z,
t_low_at_z,
t_mid_at_z,
t_high_at_z,
];
let opening_points = vec![
z,
z,
z,
z,
z,
z,
z,
z,
z,
z,
z,
z,
z,
z,
z_by_omega,
z_by_omega,
z,
z,
z,
];
let multiopen_proof = committer.open_multiple(
opening_polynomials,
degrees,
aggregation_challenge,
opening_points,
opening_values,
&precomputations,
&mut transcript
);
let proof = PlonkChunkedNonhomomorphicProof::<E, S> {
a_opening_value: a_at_z,
b_opening_value: b_at_z,
c_opening_value: c_at_z,
q_l_opening_value: q_l_at_z,
q_r_opening_value: q_r_at_z,
q_o_opening_value: q_o_at_z,
q_m_opening_value: q_m_at_z,
q_c_opening_value: q_c_at_z,
s_id_opening_value: s_id_at_z,
sigma_1_opening_value: sigma_1_at_z,
sigma_2_opening_value: sigma_2_at_z,
sigma_3_opening_value: sigma_3_at_z,
z_1_unshifted_opening_value: z_1_at_z,
z_2_unshifted_opening_value: z_2_at_z,
z_1_shifted_opening_value: z_1_shifted_at_z,
z_2_shifted_opening_value: z_2_shifted_at_z,
t_low_opening_value: t_low_at_z,
t_mid_opening_value: t_mid_at_z,
t_high_opening_value: t_high_at_z,
a_commitment: a_commitment,
b_commitment: b_commitment,
c_commitment: c_commitment,
z_1_commitment: z_1_commitment,
z_2_commitment: z_2_commitment,
t_low_commitment: t_low_commitment,
t_mid_commitment: t_mid_commitment,
t_high_commitment: t_high_commitment,
openings_proof: multiopen_proof,
};
Ok(proof)
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::pairing::ff::{Field, PrimeField};
    use crate::pairing::{Engine};
    use crate::{SynthesisError};
    use std::marker::PhantomData;
    use crate::plonk::cs::gates::*;
    use crate::plonk::cs::*;

    /// Minimal three-wire circuit for smoke-testing the PLONK assembly:
    /// allocates a = 10, b = 20, c = 200 and enforces
    /// 2*a - b = 0, 10*b - c = 0 and a * b = c.
    struct TestCircuit<E:Engine>{
        _marker: PhantomData<E>
    }

    impl<E: Engine> Circuit<E> for TestCircuit<E> {
        fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
            // Witness values are hard-coded; this circuit has no public inputs.
            let a = cs.alloc(|| {
                Ok(E::Fr::from_str("10").unwrap())
            })?;
            println!("A = {:?}", a);

            let b = cs.alloc(|| {
                Ok(E::Fr::from_str("20").unwrap())
            })?;
            println!("B = {:?}", b);

            let c = cs.alloc(|| {
                Ok(E::Fr::from_str("200").unwrap())
            })?;
            println!("C = {:?}", c);

            let one = E::Fr::one();
            let mut two = one;
            two.double();

            let mut negative_one = one;
            negative_one.negate();

            // 2*a - b = 0
            cs.enforce_zero_2((a, b), (two, negative_one))?;

            let ten = E::Fr::from_str("10").unwrap();
            // 10*b - c = 0
            cs.enforce_zero_2((b, c), (ten, negative_one))?;

            // a * b = c
            cs.enforce_mul_3((a, b, c))?;

            Ok(())
        }
    }

    /// Checks the grand-product permutation argument by hand: the running
    /// products over `value + beta * index + gamma` terms must coincide at the
    /// start and end when computed with identity indexes vs. sigma indexes,
    /// which holds iff sigma is a permutation of the identity indexes.
    #[test]
    fn test_trivial_circuit() {
        use crate::pairing::bn256::{Bn256, Fr};

        let mut assembly = ProvingAssembly::<Bn256>::new();

        let circuit = TestCircuit::<Bn256> {
            _marker: PhantomData
        };

        circuit.synthesize(&mut assembly).expect("must work");

        println!("{:?}", assembly);

        assembly.finalize();

        let (f_l, f_r, f_o) = assembly.make_wire_assingments();
        let (sigma_1, sigma_2, sigma_3) = assembly.calculate_permutations_as_in_a_paper();

        let num_gates = assembly.num_gates();

        // Identity permutation indexes for the three wire columns.
        let id_1: Vec<_> = (1..=num_gates).collect();
        let id_2: Vec<_> = ((num_gates+1)..=(2*num_gates)).collect();
        let id_3: Vec<_> = ((2*num_gates + 1)..=(3*num_gates)).collect();

        // Arbitrary fixed challenges for the permutation check.
        let beta = Fr::from_str("15").unwrap();
        let gamma = Fr::from_str("4").unwrap();

        // term_i = beta * index_i + gamma + value_i (the per-cell factor of
        // the grand product in the PLONK paper).
        let make_terms = |values: &[Fr], indexes: &[usize]| -> Vec<Fr> {
            values.iter().zip(indexes.iter()).map(|(el, id)| {
                let mut tmp = Fr::from_str(&id.to_string()).unwrap();
                tmp.mul_assign(&beta);
                tmp.add_assign(&gamma);
                tmp.add_assign(el);
                tmp
            }).collect()
        };

        let f_1_poly = make_terms(&f_l, &id_1);
        let g_1_poly = make_terms(&f_l, &sigma_1);
        let f_2_poly = make_terms(&f_r, &id_2);
        let g_2_poly = make_terms(&f_r, &sigma_2);
        let f_3_poly = make_terms(&f_o, &id_3);
        let g_3_poly = make_terms(&f_o, &sigma_3);

        // Element-wise product of the three per-column term vectors.
        let hadamard_3 = |x: &[Fr], y: &[Fr], z: &[Fr]| -> Vec<Fr> {
            x.iter().zip(y.iter()).zip(z.iter()).map(|((a, b), c)| {
                let mut tmp = *a;
                tmp.mul_assign(b);
                tmp.mul_assign(c);
                tmp
            }).collect()
        };

        let f_poly = hadamard_3(&f_1_poly, &f_2_poly, &f_3_poly);
        let g_poly = hadamard_3(&g_1_poly, &g_2_poly, &g_3_poly);

        // Running (prefix) products, starting from 1, with n+1 entries.
        let prefix_products = |values: &[Fr]| -> Vec<Fr> {
            let mut tmp = Fr::one();
            let mut result = vec![tmp];
            for el in values.iter() {
                tmp.mul_assign(el);
                result.push(tmp);
            }
            result
        };

        let f_prime = prefix_products(&f_poly);
        let g_prime = prefix_products(&g_poly);

        // Both start at 1 and, because sigma permutes the identity indexes,
        // the full products are equal as well.
        assert!(f_prime[0] == g_prime[0]);
        assert!(f_prime[num_gates] == g_prime[num_gates]);

        let worker = Worker::new();

        let _ = assembly.output_setup_polynomials(&worker).unwrap();
    }
}
// Per its name, the hash of Ethereum mainnet block #10,000,000. Used as a
// public, fixed seed for deriving curve points with an unknown discrete log
// (see `make_random_points_with_unknown_discrete_log`).
pub const ETH_BLOCK_10_000_000_HASH: &'static str
    = "aa20f7bde5be60603f11a45fc4923aab7552be775403fc00c2e6b805e6297dbe";
use crate::pairing::{Engine, CurveProjective};
use crate::byteorder::{BigEndian, ReadBytesExt};
/// Deterministically derives `num_points` G1 points whose discrete logarithms
/// are unknown to anyone: the points are drawn from a ChaCha RNG seeded with
/// blake2s(dst || seed), so no party chooses their exponents.
///
/// `dst` is a domain-separation tag; `seed` is the public beacon value.
pub fn make_random_points_with_unknown_discrete_log_from_seed<E: Engine>(
    dst: &[u8],
    seed: &[u8],
    num_points: usize
) -> Vec<E::G1Affine> {
    let mut result = vec![];

    use rand::{Rng, SeedableRng};
    use rand::chacha::ChaChaRng;
    // Create an RNG based on the outcome of the random beacon
    let mut rng = {
        // if we use Blake hasher
        let input: Vec<u8> = dst.iter().chain(seed.iter()).cloned().collect();
        let h = blake2s_simd::blake2s(&input);
        assert!(h.as_bytes().len() == 32);
        let mut seed = [0u32; 8];
        // NOTE(review): 32 bytes in 8-byte chunks gives only 4 chunks, and
        // `read_u32` consumes just the first 4 bytes of each chunk, so
        // seed[4..8] stay zero and half of the digest is discarded.
        // `chunks_exact(4)` would use the full digest — but changing this
        // alters every derived point, so it must stay as-is for
        // compatibility with points generated by existing deployments.
        for (i, chunk) in h.as_bytes().chunks_exact(8).enumerate() {
            seed[i] = (&chunk[..]).read_u32::<BigEndian>().expect("digest is large enough for this to work");
        }

        ChaChaRng::from_seed(&seed)
    };

    for _ in 0..num_points {
        let point: E::G1 = rng.gen();

        result.push(point.into_affine());
    }

    result
}
/// Convenience wrapper around
/// [`make_random_points_with_unknown_discrete_log_from_seed`] that fixes the
/// seed to `ETH_BLOCK_10_000_000_HASH` (decoded from hex).
pub fn make_random_points_with_unknown_discrete_log<E: Engine>(
    dst: &[u8],
    num_points: usize
) -> Vec<E::G1Affine> {
    // The constant is a known-good hex string, so decoding cannot fail.
    let seed = hex::decode(crate::constants::ETH_BLOCK_10_000_000_HASH).unwrap();

    make_random_points_with_unknown_discrete_log_from_seed::<E>(dst, &seed, num_points)
}
<file_sep>/src/sonic/tests/sonics.rs
extern crate rand;
// For randomness (during paramgen and proof generation)
use rand::{thread_rng, Rng};
// For benchmarking
use std::time::{Duration, Instant};
// Bring in some tools for using pairing-friendly curves
use crate::pairing::{
Engine
};
use crate::pairing::ff::{
Field,
};
// We're going to use the BLS12-381 pairing-friendly elliptic curve.
use crate::pairing::bls12_381::{
Bls12
};
use crate::pairing::bn256::{
Bn256
};
// We'll use these interfaces to construct our circuit.
use crate::{
Circuit,
ConstraintSystem,
SynthesisError
};
// const MIMC_ROUNDS: usize = 322;
// Number of MiMC rounds. Bumped from 322 (commented out above) to one
// million so the benchmark circuits below are large.
const MIMC_ROUNDS: usize = 1000000;
/// Reference (out-of-circuit) MiMC evaluation with cubing round function:
/// for each round constant c, (xL, xR) <- (xR + (xL + c)^3, xL).
/// Returns the final left half, which is the hash image.
fn mimc<E: Engine>(
    mut xl: E::Fr,
    mut xr: E::Fr,
    constants: &[E::Fr]
) -> E::Fr
{
    assert_eq!(constants.len(), MIMC_ROUNDS);

    for c in constants.iter() {
        // base = xL + c
        let mut base = xl;
        base.add_assign(c);

        // cubed = base^3 = base^2 * base
        let mut cubed = base;
        cubed.square();
        cubed.mul_assign(&base);

        // new xL = xR + (xL + c)^3; old xL becomes the new xR.
        cubed.add_assign(&xr);
        xr = xl;
        xl = cubed;
    }

    xl
}
/// This is our demo circuit for proving knowledge of the
/// preimage of a MiMC hash invocation. The image is exposed as a
/// public input in the last round (see `synthesize`).
#[derive(Clone)]
struct MiMCDemo<'a, E: Engine> {
    // Left half of the preimage; `None` when only synthesizing the shape.
    xl: Option<E::Fr>,
    // Right half of the preimage.
    xr: Option<E::Fr>,
    // MiMC round constants; length must equal MIMC_ROUNDS.
    constants: &'a [E::Fr]
}
/// Our demo circuit implements this `Circuit` trait which
/// is used during paramgen and proving in order to
/// synthesize the constraint system.
///
/// Each round contributes two R1CS constraints: one for the square
/// `tmp = (xL + Ci)^2` and one binding `new_xL - xR = tmp * (xL + Ci)`.
impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> {
    fn synthesize<CS: ConstraintSystem<E>>(
        self,
        cs: &mut CS
    ) -> Result<(), SynthesisError>
    {
        assert_eq!(self.constants.len(), MIMC_ROUNDS);

        // Allocate the first component of the preimage.
        let mut xl_value = self.xl;
        let mut xl = cs.alloc(|| "preimage xl", || {
            xl_value.ok_or(SynthesisError::AssignmentMissing)
        })?;

        // Allocate the second component of the preimage.
        let mut xr_value = self.xr;
        let mut xr = cs.alloc(|| "preimage xr", || {
            xr_value.ok_or(SynthesisError::AssignmentMissing)
        })?;

        for i in 0..MIMC_ROUNDS {
            // xL, xR := xR + (xL + Ci)^3, xL
            let cs = &mut cs.namespace(|| format!("round {}", i));

            // tmp = (xL + Ci)^2
            let tmp_value = xl_value.map(|mut e| {
                e.add_assign(&self.constants[i]);
                e.square();
                e
            });
            let tmp = cs.alloc(|| "tmp", || {
                tmp_value.ok_or(SynthesisError::AssignmentMissing)
            })?;

            cs.enforce(
                || "tmp = (xL + Ci)^2",
                |lc| lc + xl + (self.constants[i], CS::one()),
                |lc| lc + xl + (self.constants[i], CS::one()),
                |lc| lc + tmp
            );

            // new_xL = xR + (xL + Ci)^3
            // new_xL = xR + tmp * (xL + Ci)
            // new_xL - xR = tmp * (xL + Ci)
            let new_xl_value = xl_value.map(|mut e| {
                e.add_assign(&self.constants[i]);
                e.mul_assign(&tmp_value.unwrap());
                e.add_assign(&xr_value.unwrap());
                e
            });

            let new_xl = if i == (MIMC_ROUNDS-1) {
                // This is the last round, xL is our image and so
                // we allocate a public input.
                cs.alloc_input(|| "image", || {
                    new_xl_value.ok_or(SynthesisError::AssignmentMissing)
                })?
            } else {
                cs.alloc(|| "new_xl", || {
                    new_xl_value.ok_or(SynthesisError::AssignmentMissing)
                })?
            };

            cs.enforce(
                || "new_xL = xR + (xL + Ci)^3",
                |lc| lc + tmp,
                |lc| lc + xl + (self.constants[i], CS::one()),
                |lc| lc + new_xl - xr
            );

            // xR = xL
            xr = xl;
            xr_value = xl_value;

            // xL = new_xL
            xl = new_xl;
            xl_value = new_xl_value;
        }

        Ok(())
    }
}
/// Same MiMC preimage demo as `MiMCDemo`, but the expected image is supplied
/// as a field of the circuit and allocated as a *private* wire, so the
/// resulting circuit has no public inputs.
#[derive(Clone)]
struct MiMCDemoNoInputs<'a, E: Engine> {
    // Left half of the preimage.
    xl: Option<E::Fr>,
    // Right half of the preimage.
    xr: Option<E::Fr>,
    // Expected hash image; allocated as a private witness in the last round.
    image: Option<E::Fr>,
    // MiMC round constants; length must equal MIMC_ROUNDS.
    constants: &'a [E::Fr]
}
/// Our demo circuit implements this `Circuit` trait which
/// is used during paramgen and proving in order to
/// synthesize the constraint system.
///
/// Identical to `MiMCDemo::synthesize` except for the last round: the image
/// is allocated with `alloc` (private wire) instead of `alloc_input`, and the
/// final multiplication constraint binds it to the computed hash value.
impl<'a, E: Engine> Circuit<E> for MiMCDemoNoInputs<'a, E> {
    fn synthesize<CS: ConstraintSystem<E>>(
        self,
        cs: &mut CS
    ) -> Result<(), SynthesisError>
    {
        assert_eq!(self.constants.len(), MIMC_ROUNDS);

        // Allocate the first component of the preimage.
        let mut xl_value = self.xl;
        let mut xl = cs.alloc(|| "preimage xl", || {
            xl_value.ok_or(SynthesisError::AssignmentMissing)
        })?;

        // Allocate the second component of the preimage.
        let mut xr_value = self.xr;
        let mut xr = cs.alloc(|| "preimage xr", || {
            xr_value.ok_or(SynthesisError::AssignmentMissing)
        })?;

        for i in 0..MIMC_ROUNDS {
            // xL, xR := xR + (xL + Ci)^3, xL
            let cs = &mut cs.namespace(|| format!("round {}", i));

            // tmp = (xL + Ci)^2
            let tmp_value = xl_value.map(|mut e| {
                e.add_assign(&self.constants[i]);
                e.square();
                e
            });
            let tmp = cs.alloc(|| "tmp", || {
                tmp_value.ok_or(SynthesisError::AssignmentMissing)
            })?;

            cs.enforce(
                || "tmp = (xL + Ci)^2",
                |lc| lc + xl + (self.constants[i], CS::one()),
                |lc| lc + xl + (self.constants[i], CS::one()),
                |lc| lc + tmp
            );

            // new_xL = xR + (xL + Ci)^3
            // new_xL = xR + tmp * (xL + Ci)
            // new_xL - xR = tmp * (xL + Ci)
            let new_xl_value = xl_value.map(|mut e| {
                e.add_assign(&self.constants[i]);
                e.mul_assign(&tmp_value.unwrap());
                e.add_assign(&xr_value.unwrap());
                e
            });

            let new_xl = if i == (MIMC_ROUNDS-1) {
                // This is the last round, xL is our image and so
                // we use the image
                let image_value = self.image;
                cs.alloc(|| "image", || {
                    image_value.ok_or(SynthesisError::AssignmentMissing)
                })?
            } else {
                cs.alloc(|| "new_xl", || {
                    new_xl_value.ok_or(SynthesisError::AssignmentMissing)
                })?
            };

            cs.enforce(
                || "new_xL = xR + (xL + Ci)^3",
                |lc| lc + tmp,
                |lc| lc + xl + (self.constants[i], CS::one()),
                |lc| lc + new_xl - xr
            );

            // xR = xL
            xr = xl;
            xr_value = xl_value;

            // xL = new_xL
            xl = new_xl;
            xl_value = new_xl_value;
        }

        Ok(())
    }
}
/// End-to-end Sonic (helped) benchmark over BLS12-381 with the `Basic`
/// backend: builds a dummy SRS, creates one MiMC proof plus advice,
/// aggregates `samples` copies, and exercises all three verification modes.
#[test]
fn test_sonic_mimc() {
    use crate::pairing::ff::{Field, PrimeField};
    use crate::pairing::{Engine, CurveAffine, CurveProjective};
    use crate::pairing::bls12_381::{Bls12, Fr};
    use std::time::{Instant};
    use crate::sonic::srs::SRS;

    // Fixed toxic-waste values; fine for a test, never for production.
    let srs_x = Fr::from_str("23923").unwrap();
    let srs_alpha = Fr::from_str("23728792").unwrap();
    println!("making srs");
    let start = Instant::now();
    let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
    println!("done in {:?}", start.elapsed());

    {
        // This may not be cryptographically safe, use
        // `OsRng` (for example) in production software.
        let rng = &mut thread_rng();

        // Generate the MiMC round constants
        let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
        let samples: usize = 100;

        let xl = rng.gen();
        let xr = rng.gen();
        let image = mimc::<Bls12>(xl, xr, &constants);

        // Create an instance of our circuit (with the
        // witness)
        let circuit = MiMCDemoNoInputs {
            xl: Some(xl),
            xr: Some(xr),
            image: Some(image),
            constants: &constants
        };

        use crate::sonic::sonic::Basic;
        use crate::sonic::sonic::AdaptorCircuit;
        use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs};
        use crate::sonic::helped::MultiVerifier;
        use crate::sonic::helped::helper::{create_aggregate_on_srs};

        println!("creating proof");
        let start = Instant::now();
        let proof = create_proof_on_srs::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &srs).unwrap();
        println!("done in {:?}", start.elapsed());

        println!("creating advice");
        let start = Instant::now();
        let advice = create_advice_on_srs::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &proof, &srs).unwrap();
        println!("done in {:?}", start.elapsed());

        println!("creating aggregate for {} proofs", samples);
        let start = Instant::now();
        // Reuse the single proof `samples` times: aggregation cost depends only
        // on the count, not on proof diversity.
        let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect();
        let aggregate = create_aggregate_on_srs::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &proofs, &srs);
        println!("done in {:?}", start.elapsed());

        {
            let rng = thread_rng();
            let mut verifier = MultiVerifier::<Bls12, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
            println!("verifying 1 proof without advice");
            let start = Instant::now();
            {
                for _ in 0..1 {
                    verifier.add_proof(&proof, &[], |_, _| None);
                }
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }

        {
            let rng = thread_rng();
            let mut verifier = MultiVerifier::<Bls12, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
            println!("verifying {} proofs without advice", samples);
            let start = Instant::now();
            {
                for _ in 0..samples {
                    verifier.add_proof(&proof, &[], |_, _| None);
                }
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }

        {
            let rng = thread_rng();
            let mut verifier = MultiVerifier::<Bls12, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
            // Was hard-coded as "100"; report the actual `samples` count.
            println!("verifying {} proofs with advice", samples);
            let start = Instant::now();
            {
                for (ref proof, ref advice) in &proofs {
                    verifier.add_proof_with_advice(proof, &[], advice);
                }
                verifier.add_aggregate(&proofs, &aggregate);
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }
    }
}
/// Same flow as `test_sonic_mimc`, but driven through the `Permutation3`
/// backend instead of `Basic`.
#[test]
fn test_sonic_mimc_in_permutation_driver() {
    use crate::pairing::ff::{Field, PrimeField};
    use crate::pairing::{Engine, CurveAffine, CurveProjective};
    use crate::pairing::bls12_381::{Bls12, Fr};
    use std::time::{Instant};
    use crate::sonic::srs::SRS;

    // Fixed toxic-waste values; fine for a test, never for production.
    let srs_x = Fr::from_str("23923").unwrap();
    let srs_alpha = Fr::from_str("23728792").unwrap();
    println!("making srs");
    let start = Instant::now();
    let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
    println!("done in {:?}", start.elapsed());

    {
        // This may not be cryptographically safe, use
        // `OsRng` (for example) in production software.
        let rng = &mut thread_rng();

        // Generate the MiMC round constants
        let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
        let samples: usize = 100;

        let xl = rng.gen();
        let xr = rng.gen();
        let image = mimc::<Bls12>(xl, xr, &constants);

        // Create an instance of our circuit (with the
        // witness)
        let circuit = MiMCDemoNoInputs {
            xl: Some(xl),
            xr: Some(xr),
            image: Some(image),
            constants: &constants
        };

        use crate::sonic::sonic::Basic;
        use crate::sonic::sonic::AdaptorCircuit;
        use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs};
        use crate::sonic::helped::{MultiVerifier, get_circuit_parameters};
        use crate::sonic::helped::helper::{create_aggregate_on_srs};
        use crate::sonic::sonic::Permutation3;

        println!("creating proof");
        let start = Instant::now();
        let proof = create_proof_on_srs::<Bls12, _, Permutation3>(&AdaptorCircuit(circuit.clone()), &srs).unwrap();
        println!("done in {:?}", start.elapsed());

        println!("creating advice");
        let start = Instant::now();
        let advice = create_advice_on_srs::<Bls12, _, Permutation3>(&AdaptorCircuit(circuit.clone()), &proof, &srs).unwrap();
        println!("done in {:?}", start.elapsed());

        println!("creating aggregate for {} proofs", samples);
        let start = Instant::now();
        // Reuse the single proof `samples` times for aggregation timing.
        let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect();
        let aggregate = create_aggregate_on_srs::<Bls12, _, Permutation3>(&AdaptorCircuit(circuit.clone()), &proofs, &srs);
        println!("done in {:?}", start.elapsed());

        {
            let rng = thread_rng();
            let mut verifier = MultiVerifier::<Bls12, _, Permutation3, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
            println!("verifying 1 proof without advice");
            let start = Instant::now();
            {
                for _ in 0..1 {
                    verifier.add_proof(&proof, &[], |_, _| None);
                }
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }

        {
            let rng = thread_rng();
            let mut verifier = MultiVerifier::<Bls12, _, Permutation3, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
            println!("verifying {} proofs without advice", samples);
            let start = Instant::now();
            {
                for _ in 0..samples {
                    verifier.add_proof(&proof, &[], |_, _| None);
                }
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }

        {
            let rng = thread_rng();
            let mut verifier = MultiVerifier::<Bls12, _, Permutation3, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
            println!("verifying 100 proofs with advice");
            let start = Instant::now();
            {
                for (ref proof, ref advice) in &proofs {
                    verifier.add_proof_with_advice(proof, &[], advice);
                }
                verifier.add_aggregate(&proofs, &aggregate);
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }
    }
}
/// Succinct (unhelped) Sonic flow: builds the permutation structure and its
/// special reference string, then verifies aggregated proofs with the
/// `SuccinctMultiVerifier`. Uses a deterministic XorShift RNG so prover and
/// verifier sides see the same randomness.
#[test]
fn test_succinct_sonic_mimc() {
    use crate::pairing::ff::{Field, PrimeField};
    use crate::pairing::{Engine, CurveAffine, CurveProjective};
    use crate::pairing::bls12_381::{Bls12, Fr};
    use std::time::{Instant};
    use crate::sonic::srs::SRS;

    let srs_x = Fr::from_str("23923").unwrap();
    let srs_alpha = Fr::from_str("23728792").unwrap();
    println!("making srs");
    let start = Instant::now();
    // let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
    // Much larger SRS than the other tests — the succinct argument needs it.
    let srs = SRS::<Bls12>::dummy(40000000, srs_x, srs_alpha);
    println!("done in {:?}", start.elapsed());

    {
        use rand::{XorShiftRng, SeedableRng, Rand, Rng};
        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);

        // Generate the MiMC round constants
        // let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
        // All-ones constants keep this run deterministic.
        let constants = (0..MIMC_ROUNDS).map(|_| Fr::one()).collect::<Vec<_>>();
        let samples: usize = 100;

        let xl = rng.gen();
        let xr = rng.gen();
        let image = mimc::<Bls12>(xl, xr, &constants);

        // Create an instance of our circuit (with the
        // witness)
        let circuit = MiMCDemoNoInputs::<Bls12> {
            xl: Some(xl),
            xr: Some(xr),
            image: Some(image),
            constants: &constants
        };

        use crate::sonic::sonic::Basic;
        use crate::sonic::sonic::AdaptorCircuit;
        use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs};
        use crate::sonic::helped::{get_circuit_parameters_for_succinct_sonic, MultiVerifier};
        use crate::sonic::sonic::Permutation3;
        use crate::sonic::unhelped::permutation_structure::*;
        use crate::sonic::unhelped::SuccinctMultiVerifier;
        use crate::sonic::unhelped::{create_aggregate_on_srs};
        use crate::sonic::cs::{Circuit, ConstraintSystem, LinearCombination, Coeff};

        // Permutation structure and its dedicated reference string (s1).
        let perm_structure = create_permutation_structure::<Bls12, _>(&AdaptorCircuit(circuit.clone()));
        let s1_srs = perm_structure.create_permutation_special_reference(&srs);
        // let s2_srs = perm_structure.calculate_s2_commitment_value(&srs);

        let info = get_circuit_parameters_for_succinct_sonic::<Bls12, _>(circuit.clone()).expect("Must get circuit info");
        println!("{:?}", info);

        println!("creating proof");
        let start = Instant::now();
        let proof = create_proof_on_srs::<Bls12, _, Permutation3>(&AdaptorCircuit(circuit.clone()), &srs).unwrap();
        println!("done in {:?}", start.elapsed());

        println!("creating advice");
        let start = Instant::now();
        let advice = create_advice_on_srs::<Bls12, _, Permutation3>(&AdaptorCircuit(circuit.clone()), &proof, &srs).unwrap();
        println!("done in {:?}", start.elapsed());

        println!("creating aggregate for {} proofs", samples);
        let start = Instant::now();
        let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect();
        // Succinct aggregation additionally takes the permutation SRS.
        let aggregate = create_aggregate_on_srs::<Bls12, _, Permutation3>(&AdaptorCircuit(circuit.clone()), &proofs, &srs, &s1_srs);
        println!("done in {:?}", start.elapsed());

        // {
        //     let rng = thread_rng();
        //     let mut verifier = MultiVerifier::<Bls12, _, Permutation3, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
        //     println!("verifying 1 proof without advice");
        //     let start = Instant::now();
        //     {
        //         for _ in 0..1 {
        //             verifier.add_proof(&proof, &[], |_, _| None);
        //         }
        //         assert_eq!(verifier.check_all(), true); // TODO
        //     }
        //     println!("done in {:?}", start.elapsed());
        // }

        // {
        //     let rng = thread_rng();
        //     let mut verifier = MultiVerifier::<Bls12, _, Permutation3, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
        //     println!("verifying {} proofs without advice", samples);
        //     let start = Instant::now();
        //     {
        //         for _ in 0..samples {
        //             verifier.add_proof(&proof, &[], |_, _| None);
        //         }
        //         assert_eq!(verifier.check_all(), true); // TODO
        //     }
        //     println!("done in {:?}", start.elapsed());
        // }

        {
            use rand::{XorShiftRng, SeedableRng, Rand, Rng};
            let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
            let mut verifier = SuccinctMultiVerifier::<Bls12, _, Permutation3, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
            println!("verifying 100 proofs with succinct advice");
            let start = Instant::now();
            {
                for (ref proof, ref advice) in &proofs {
                    verifier.add_proof_with_advice(proof, &[], advice);
                }
                verifier.add_aggregate(
                    &proofs,
                    &aggregate,
                    &srs,
                );
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }
    }
}
/// Same helped-Sonic flow as `test_sonic_mimc`, but over BN256 with the
/// `MiMCDemo` circuit, whose image is a *public input* — so verification
/// passes `&[image]` instead of an empty input slice.
#[test]
fn test_inputs_into_sonic_mimc() {
    use crate::pairing::ff::{Field, PrimeField};
    use crate::pairing::{Engine, CurveAffine, CurveProjective};
    use crate::pairing::bn256::{Bn256, Fr};
    // use crate::pairing::bls12_381::{Bls12, Fr};
    use std::time::{Instant};
    use crate::sonic::srs::SRS;

    let srs_x = Fr::from_str("23923").unwrap();
    let srs_alpha = Fr::from_str("23728792").unwrap();
    println!("making srs");
    let start = Instant::now();
    let srs = SRS::<Bn256>::dummy(830564, srs_x, srs_alpha);
    println!("done in {:?}", start.elapsed());

    {
        // This may not be cryptographically safe, use
        // `OsRng` (for example) in production software.
        let rng = &mut thread_rng();

        // Generate the MiMC round constants
        let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
        let samples: usize = 100;

        let xl = rng.gen();
        let xr = rng.gen();
        let image = mimc::<Bn256>(xl, xr, &constants);

        // Create an instance of our circuit (with the
        // witness)
        let circuit = MiMCDemo {
            xl: Some(xl),
            xr: Some(xr),
            constants: &constants
        };

        use crate::sonic::sonic::Basic;
        use crate::sonic::sonic::AdaptorCircuit;
        use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs};
        use crate::sonic::helped::{MultiVerifier, get_circuit_parameters};
        use crate::sonic::helped::helper::{create_aggregate_on_srs};

        let info = get_circuit_parameters::<Bn256, _>(circuit.clone()).expect("Must get circuit info");
        println!("{:?}", info);

        println!("creating proof");
        let start = Instant::now();
        let proof = create_proof_on_srs::<Bn256, _, Basic>(&AdaptorCircuit(circuit.clone()), &srs).unwrap();
        println!("done in {:?}", start.elapsed());

        println!("creating advice");
        let start = Instant::now();
        let advice = create_advice_on_srs::<Bn256, _, Basic>(&AdaptorCircuit(circuit.clone()), &proof, &srs).unwrap();
        println!("done in {:?}", start.elapsed());

        println!("creating aggregate for {} proofs", samples);
        let start = Instant::now();
        let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect();
        let aggregate = create_aggregate_on_srs::<Bn256, _, Basic>(&AdaptorCircuit(circuit.clone()), &proofs, &srs);
        println!("done in {:?}", start.elapsed());

        {
            let rng = thread_rng();
            let mut verifier = MultiVerifier::<Bn256, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
            println!("verifying 1 proof without advice");
            let start = Instant::now();
            {
                for _ in 0..1 {
                    verifier.add_proof(&proof, &[image], |_, _| None);
                }
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }

        {
            let rng = thread_rng();
            let mut verifier = MultiVerifier::<Bn256, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
            println!("verifying {} proofs without advice", samples);
            let start = Instant::now();
            {
                for _ in 0..samples {
                    verifier.add_proof(&proof, &[image], |_, _| None);
                }
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }

        {
            let rng = thread_rng();
            let mut verifier = MultiVerifier::<Bn256, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
            println!("verifying 100 proofs with advice and aggregate");
            let start = Instant::now();
            {
                for (ref proof, ref advice) in &proofs {
                    verifier.add_proof_with_advice(proof, &[image], advice);
                }
                verifier.add_aggregate(&proofs, &aggregate);
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }
    }
}
/// Exercises the high-level parameter/prove/verify API (`generate_random_parameters`,
/// `create_proof`, `create_advice`, `create_aggregate`, `verify_proofs`,
/// `verify_aggregate`) instead of the raw `*_on_srs` entry points.
/// (The dump's mojibake `¶ms` is restored to `&params` here.)
#[test]
fn test_high_level_sonic_api() {
    use crate::pairing::bn256::{Bn256};
    use std::time::{Instant};
    use crate::sonic::helped::{
        generate_random_parameters,
        verify_aggregate,
        verify_proofs,
        create_proof,
        create_advice,
        create_aggregate,
        get_circuit_parameters
    };

    {
        // This may not be cryptographically safe, use
        // `OsRng` (for example) in production software.
        let mut rng = &mut thread_rng();

        // Generate the MiMC round constants
        let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
        let samples: usize = 100;

        let xl = rng.gen();
        let xr = rng.gen();
        let image = mimc::<Bn256>(xl, xr, &constants);

        // Create an instance of our circuit (with the
        // witness)
        let circuit = MiMCDemo {
            xl: Some(xl),
            xr: Some(xr),
            constants: &constants
        };

        let info = get_circuit_parameters::<Bn256, _>(circuit.clone()).expect("Must get circuit info");
        println!("{:?}", info);

        let params = generate_random_parameters(circuit.clone(), &mut rng).unwrap();

        println!("creating proof");
        let start = Instant::now();
        let proof = create_proof(circuit.clone(), &params).unwrap();
        println!("done in {:?}", start.elapsed());

        println!("creating advice");
        let start = Instant::now();
        let advice = create_advice(circuit.clone(), &proof, &params).unwrap();
        println!("done in {:?}", start.elapsed());

        println!("creating aggregate for {} proofs", samples);
        let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect();
        let start = Instant::now();
        let aggregate = create_aggregate::<Bn256, _>(circuit.clone(), &proofs, &params);
        println!("done in {:?}", start.elapsed());

        {
            println!("verifying 1 proof without advice");
            let rng = thread_rng();
            let start = Instant::now();
            assert_eq!(verify_proofs(&vec![proof.clone()], &vec![vec![image.clone()]], circuit.clone(), rng, &params).unwrap(), true);
            println!("done in {:?}", start.elapsed());
        }

        {
            println!("verifying {} proofs without advice", samples);
            let rng = thread_rng();
            let start = Instant::now();
            assert_eq!(verify_proofs(&vec![proof.clone(); 100], &vec![vec![image.clone()]; 100], circuit.clone(), rng, &params).unwrap(), true);
            println!("done in {:?}", start.elapsed());
        }

        {
            println!("verifying 100 proofs with advice and aggregate");
            let rng = thread_rng();
            let start = Instant::now();
            assert_eq!(verify_aggregate(&vec![(proof.clone(), advice.clone()); 100], &aggregate, &vec![vec![image.clone()]; 100], circuit.clone(), rng, &params).unwrap(), true);
            println!("done in {:?}", start.elapsed());
        }
    }
}
// #[test]
// fn test_constraints_info() {
// use crate::pairing::bn256::{Bn256};
// use std::time::{Instant};
// use crate::sonic::unhelped::padding::{constraints_info};
// {
// // This may not be cryptographically safe, use
// // `OsRng` (for example) in production software.
// let mut rng = &mut thread_rng();
// // Generate the MiMC round constants
// let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
// let xl = rng.gen();
// let xr = rng.gen();
// let image = mimc::<Bn256>(xl, xr, &constants);
// // Create an instance of our circuit (with the
// // witness)
// let circuit = MiMCDemo {
// xl: Some(xl),
// xr: Some(xr),
// constants: &constants
// };
// constraints_info::<Bn256, _>(circuit.clone());
// }
// }
// #[test]
// fn test_padding_using_mimc() {
// use crate::pairing::ff::{Field, PrimeField};
// use crate::pairing::{Engine, CurveAffine, CurveProjective};
// use crate::pairing::bls12_381::{Bls12, Fr};
// use std::time::{Instant};
// use crate::sonic::srs::SRS;
// let srs_x = Fr::from_str("23923").unwrap();
// let srs_alpha = Fr::from_str("23728792").unwrap();
// println!("making srs");
// let start = Instant::now();
// let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
// println!("done in {:?}", start.elapsed());
// {
// // This may not be cryptographically safe, use
// // `OsRng` (for example) in production software.
// let rng = &mut thread_rng();
// // Generate the MiMC round constants
// let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
// let samples: usize = 100;
// let xl = rng.gen();
// let xr = rng.gen();
// let image = mimc::<Bls12>(xl, xr, &constants);
// // Create an instance of our circuit (with the
// // witness)
// let circuit = MiMCDemoNoInputs {
// xl: Some(xl),
// xr: Some(xr),
// image: Some(image),
// constants: &constants
// };
// use crate::sonic::cs::Basic;
// use crate::sonic::sonic::AdaptorCircuit;
// use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs};
// use crate::sonic::helped::{MultiVerifier, get_circuit_parameters};
// use crate::sonic::helped::helper::{create_aggregate_on_srs};
// use crate::sonic::unhelped::padding::Padding;
// let info = get_circuit_parameters::<Bls12, _>(circuit.clone()).expect("Must get circuit info");
// println!("{:?}", info);
// println!("creating proof");
// let start = Instant::now();
// let proof = create_proof_on_srs::<Bls12, _, Padding>(&AdaptorCircuit(circuit.clone()), &srs).unwrap();
// println!("done in {:?}", start.elapsed());
// {
// let rng = thread_rng();
// let mut verifier = MultiVerifier::<Bls12, _, Padding, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
// println!("K map = {:?}", verifier.get_k_map());
// println!("verifying 1 proof without advice");
// let start = Instant::now();
// {
// for _ in 0..1 {
// verifier.add_proof(&proof, &[], |_, _| None);
// }
// assert_eq!(verifier.check_all(), true); // TODO
// }
// println!("done in {:?}", start.elapsed());
// }
// }
// }<file_sep>/src/marlin/mod.rs
use crate::pairing::Engine;
use crate::plonk::polynomials::*;
pub mod generator;
pub mod prover;
/// Marlin-style preprocessed "index" for an R1CS instance: polynomial
/// encodings of the A, B and C constraint matrices (values plus row/column
/// index polynomials over the indexing domain K) and the domain sizes.
pub struct IndexedSetup<E: Engine> {
    // Number of non-zero entries in each of the three constraint matrices.
    pub a_num_non_zero: usize,
    pub b_num_non_zero: usize,
    pub c_num_non_zero: usize,
    // |H|: size of the constraint/variable domain.
    pub domain_h_size: usize,
    // |K|: size of the non-zero-entries (indexing) domain.
    pub domain_k_size: usize,
    // Monomial-form encodings of the matrix values over K.
    pub a_matrix_poly: Polynomial<E::Fr, Coefficients>,
    pub b_matrix_poly: Polynomial<E::Fr, Coefficients>,
    pub c_matrix_poly: Polynomial<E::Fr, Coefficients>,
    // Monomial-form encodings of the row indexes of non-zero entries.
    pub a_row_poly: Polynomial<E::Fr, Coefficients>,
    pub b_row_poly: Polynomial<E::Fr, Coefficients>,
    pub c_row_poly: Polynomial<E::Fr, Coefficients>,
    // Monomial-form encodings of the column indexes of non-zero entries.
    pub a_col_poly: Polynomial<E::Fr, Coefficients>,
    pub b_col_poly: Polynomial<E::Fr, Coefficients>,
    pub c_col_poly: Polynomial<E::Fr, Coefficients>,
    // Raw (integer) row/column indexes mirroring the polynomials above.
    pub a_row_indexes: Vec<usize>,
    pub b_row_indexes: Vec<usize>,
    pub c_row_indexes: Vec<usize>,
    pub a_col_indexes: Vec<usize>,
    pub b_col_indexes: Vec<usize>,
    pub c_col_indexes: Vec<usize>,
}
/// Prover openings at the third verifier challenge `beta_3` over the
/// indexing domain K.
/// NOTE(review): only the beta_3-related part of a full Marlin proof is
/// present here — presumably the structure is still work in progress.
pub struct Proof<E: Engine> {
    // Single combined commitment opening on domain K at beta_3.
    pub opening_on_domain_k_for_beta_3: E::G1Affine,
    // Claimed evaluations of the matrix value polynomials at beta_3.
    pub a_val_on_beta_3: E::Fr,
    pub b_val_on_beta_3: E::Fr,
    pub c_val_on_beta_3: E::Fr,
    // Claimed evaluations of the row-index polynomials at beta_3.
    pub a_row_on_beta_3: E::Fr,
    pub b_row_on_beta_3: E::Fr,
    pub c_row_on_beta_3: E::Fr,
    // Claimed evaluations of the column-index polynomials at beta_3.
    pub a_col_on_beta_3: E::Fr,
    pub b_col_on_beta_3: E::Fr,
    pub c_col_on_beta_3: E::Fr,
    // Claimed evaluation of the third sumcheck polynomial f_3 at beta_3.
    pub f_3_at_beta_3: E::Fr
}
use crate::pairing::ff::PrimeField;
use crate::worker::*;
use crate::plonk::domains::*;
use crate::plonk::transparent_engine::PartialTwoBitReductionField;
use super::CTPrecomputations;
use super::log2_floor;
/// Dispatches between the serial and parallel partially-reduced Cooley-Tukey
/// NTT, based on the transform size and the available (or hinted) CPU count.
pub(crate) fn best_ct_ntt_partial_reduction<F: PartialTwoBitReductionField, P: CTPrecomputations<F>>(
    a: &mut [F],
    worker: &Worker,
    log_n: u32,
    use_cpus_hint: Option<usize>,
    precomputed_omegas: &P
)
{
    // Resolve the thread budget (as log2): honor the hint when present,
    // otherwise ask the worker for its CPU count.
    let log_cpus = match use_cpus_hint {
        Some(hint) => {
            assert!(hint <= worker.cpus);
            if hint > 0 {
                log2_floor(hint)
            } else {
                0
            }
        },
        None => worker.log_num_cpus(),
    };

    // Parallelism only pays off with more than one thread and a transform
    // strictly larger than the thread count.
    let run_serial = log_cpus == 0 || log_n <= log_cpus;
    if run_serial {
        serial_ct_ntt_partial_reduction(a, log_n, precomputed_omegas);
    } else {
        parallel_ct_ntt_partial_reduction(a, worker, log_n, log_cpus, precomputed_omegas);
    }
}
/// Serial in-place decimation-in-frequency Cooley-Tukey NTT that keeps
/// intermediate values only partially reduced (bounded multiples of the
/// modulus) between butterfly rounds, performing a complete modular
/// reduction only in the last round. Output is in bit-reversed order.
pub(crate) fn serial_ct_ntt_partial_reduction<F: PartialTwoBitReductionField, P: CTPrecomputations<F>>(
    a: &mut [F],
    log_n: u32,
    precomputed_omegas: &P
)
{
    assert_eq!(a.len(), precomputed_omegas.domain_size(), "precomputation size is invalid for ntt");
    assert_eq!(a.len(), (1<<log_n) as usize);
    // Partial reduction needs at least two spare bits in the top limb.
    assert!(64 - (F::NUM_BITS % 64) >= 2);
    let n = a.len();
    if n == 1 {
        return;
    }
    let half_n = n / 2;
    // Butterfly geometry: starts with one group spanning the whole array,
    // halving the pair distance and doubling the group count each round.
    let mut pairs_per_group = n / 2;
    let mut num_groups = 1;
    let mut distance = n / 2;
    let omegas_bit_reversed = precomputed_omegas.bit_reversed_omegas();
    {
        // special case for omega = 1
        debug_assert!(num_groups == 1);
        let idx_1 = 0;
        let idx_2 = pairs_per_group;
        for j in idx_1..idx_2 {
            let u = a[j];
            let v = a[j+distance];
            let mut tmp = u;
            tmp.sub_assign_unreduced(&v);
            a[j+distance] = tmp;
            a[j].add_assign_unreduced(&v);
            debug_assert!(a[j].overflow_factor() < 2);
            debug_assert!(a[j+distance].overflow_factor() < 2);
        }
        pairs_per_group /= 2;
        num_groups *= 2;
        distance /= 2;
    }
    // all elements are [0, 2p)
    // Middle rounds: only reduce enough to keep overflow below 4p.
    while num_groups < half_n {
        debug_assert!(num_groups > 1);
        for k in 0..num_groups {
            let idx_1 = k * pairs_per_group * 2;
            let idx_2 = idx_1 + pairs_per_group;
            // Twiddle factor for this group (bit-reversed indexing).
            let s = omegas_bit_reversed[k];
            for j in idx_1..idx_2 {
                let mut u = a[j];
                let mut v = a[j+distance];
                debug_assert!(u.overflow_factor() < 4, "factor is {} for num groups {}", u.overflow_factor(), num_groups);
                u.reduce_twice();
                debug_assert!(v.overflow_factor() < 4, "factor is {} for num groups {}", v.overflow_factor(), num_groups);
                v.mul_assign_unreduced(&s);
                debug_assert!(v.overflow_factor() < 2, "factor is {} for num groups {}", v.overflow_factor(), num_groups);
                let mut tmp_v = u;
                let mut tmp_u = u;
                tmp_u.add_assign_unreduced(&v);
                tmp_v.sub_assign_twice_unreduced(&v);
                debug_assert!(tmp_u.overflow_factor() < 4, "factor is {} for num groups {}", tmp_u.overflow_factor(), num_groups);
                debug_assert!(tmp_v.overflow_factor() < 4, "factor is {} for num groups {}", tmp_v.overflow_factor(), num_groups);
                a[j+distance] = tmp_v;
                a[j] = tmp_u;
            }
        }
        pairs_per_group /= 2;
        num_groups *= 2;
        distance /= 2;
    }
    // here we should reduce completely
    // Final round: same butterfly, but results are reduced into [0, p).
    if num_groups < n {
        debug_assert!(num_groups > 1);
        for k in 0..num_groups {
            let idx_1 = k * pairs_per_group * 2;
            let idx_2 = idx_1 + pairs_per_group;
            let s = omegas_bit_reversed[k];
            for j in idx_1..idx_2 {
                let mut u = a[j];
                let mut v = a[j+distance];
                debug_assert!(u.overflow_factor() < 4, "factor is {} for num groups {}", u.overflow_factor(), num_groups);
                u.reduce_twice();
                debug_assert!(u.overflow_factor() < 2, "factor is {} for num groups {}", u.overflow_factor(), num_groups);
                debug_assert!(v.overflow_factor() < 4, "factor is {} for num groups {}", v.overflow_factor(), num_groups);
                v.mul_assign_unreduced(&s);
                debug_assert!(v.overflow_factor() < 2, "factor is {} for num groups {}", v.overflow_factor(), num_groups);
                let mut tmp_v = u;
                let mut tmp_u = u;
                tmp_u.add_assign_unreduced(&v);
                PartialTwoBitReductionField::reduce_completely(&mut tmp_u);
                tmp_v.sub_assign_twice_unreduced(&v);
                PartialTwoBitReductionField::reduce_completely(&mut tmp_v);
                debug_assert!(tmp_u.overflow_factor() < 1, "factor is {} for num groups {}", tmp_u.overflow_factor(), num_groups);
                debug_assert!(tmp_v.overflow_factor() < 1, "factor is {} for num groups {}", tmp_v.overflow_factor(), num_groups);
                a[j+distance] = tmp_v;
                a[j] = tmp_u;
            }
        }
    }
}
/// Parallel variant of the partially-reduced Cooley-Tukey NTT.
/// Spawns `2^log_cpus` workers that all cooperate on every butterfly round of
/// the shared slice; rounds are separated by barriers so that no two threads
/// ever touch the same butterfly pair concurrently. Output is in
/// bit-reversed order.
pub(crate) fn parallel_ct_ntt_partial_reduction<F: PartialTwoBitReductionField, P: CTPrecomputations<F>>(
    a: &mut [F],
    worker: &Worker,
    log_n: u32,
    log_cpus: u32,
    precomputed_omegas: &P
)
{
    assert!(log_n >= log_cpus);
    assert_eq!(a.len(), precomputed_omegas.domain_size(), "precomputation size is invalid for ntt");
    // Partial reduction needs at least two spare bits in the top limb.
    assert!(64 - (F::NUM_BITS % 64) >= 2);
    let n = a.len();
    if n == 1 {
        return;
    }
    let half_n = n / 2;
    let pairs_per_group = n / 2;
    let num_groups = 1;
    let distance = n / 2;
    let omegas_bit_reversed = precomputed_omegas.bit_reversed_omegas();
    // Raw pointer lets every spawned thread obtain a &mut to the same slice.
    // Soundness relies on the disjoint index ranges per thread and the
    // per-round barriers below.
    let a = a as *mut [F];
    use std::sync::{Arc, Barrier};
    let num_remaining_rounds = log_n as usize;
    // TODO: later find a way to utilize all the cores in case of not power of two
    let to_spawn = (1 << log_cpus) as usize;
    // One barrier per butterfly round, shared by all spawned threads.
    let mut barriers = Vec::with_capacity(num_remaining_rounds);
    for _ in 0..num_remaining_rounds {
        let barrier = Barrier::new(to_spawn);
        barriers.push(barrier);
    }
    let barriers = Arc::new(barriers);
    worker.scope(0, |scope, _| {
        for thread_id in 0..to_spawn {
            let a = unsafe {&mut *a};
            // Each thread tracks the round geometry independently; all
            // threads advance it identically between barriers.
            let mut pairs_per_group = pairs_per_group;
            let mut num_groups = num_groups;
            let mut distance = distance;
            let barriers = barriers.clone();
            scope.spawn(move |_| {
                let mut round_id = 0;
                {
                    // special case for omega = 1
                    debug_assert!(num_groups == 1);
                    let group_start_idx = 0;
                    let group_end_idx = pairs_per_group;
                    let group_size = pairs_per_group;
                    // Split the single group's pairs across the threads.
                    let chunk = Worker::chunk_size_for_num_spawned_threads(group_size, to_spawn);
                    let start = group_start_idx + thread_id * chunk;
                    let end = if start + chunk <= group_end_idx {
                        start + chunk
                    } else {
                        group_end_idx
                    };
                    for j in start..end {
                        let u = unsafe { *a.get_unchecked(j) };
                        let v = unsafe { *a.get_unchecked(j+distance) };
                        // let u = a[j];
                        // let v = a[j+distance];
                        let mut tmp = u;
                        tmp.sub_assign_unreduced(&v);
                        unsafe {
                            *a.get_unchecked_mut(j+distance) = tmp;
                            a.get_unchecked_mut(j).add_assign_unreduced(&v);
                        };
                        // a[j+distance] = tmp;
                        // a[j].add_assign_unreduced(&v);
                    }
                    pairs_per_group /= 2;
                    num_groups *= 2;
                    distance /= 2;
                    (&barriers[round_id]).wait();
                    round_id += 1;
                }
                // if pairs per group << num cpus we use splitting in k,
                // otherwise use splitting in indexes
                while num_groups < half_n {
                    if num_groups >= to_spawn {
                        // Enough groups: give each thread whole groups.
                        // for each k we start at k*pairs*2 and end on k*pairs*2 + pairs
                        // for k+1 we start at (k+1)*pairs*2 = k*pairs*2 + pairs*2
                        // and end on (k+1)*pairs*2 + pairs = k*pairs*2 + pairs*3
                        // for k+2 we start at (k+2)*pairs*2 = k*pairs*2 + pairs*4
                        // and end on (k+2)*pairs*2 + pairs = k*pairs*2 + pairs*5
                        // so we do not overlap during the full run and do not need to sync
                        let chunk = Worker::chunk_size_for_num_spawned_threads(num_groups, to_spawn);
                        let start = thread_id * chunk;
                        let end = if start + chunk <= num_groups {
                            start + chunk
                        } else {
                            num_groups
                        };
                        for k in start..end {
                            let group_start_idx = k * pairs_per_group * 2;
                            let group_end_idx = group_start_idx + pairs_per_group;
                            let s = omegas_bit_reversed[k];
                            for j in group_start_idx..group_end_idx {
                                let mut u = unsafe { *a.get_unchecked(j) };
                                let mut v = unsafe { *a.get_unchecked(j+distance) };
                                // let mut u = a[j];
                                // let mut v = a[j+distance];
                                u.reduce_twice();
                                v.mul_assign_unreduced(&s);
                                let mut tmp_v = u;
                                let mut tmp_u = u;
                                tmp_u.add_assign_unreduced(&v);
                                tmp_v.sub_assign_twice_unreduced(&v);
                                unsafe {
                                    *a.get_unchecked_mut(j+distance) = tmp_v;
                                    *a.get_unchecked_mut(j) = tmp_u;
                                };
                                // a[j+distance] = tmp_v;
                                // a[j] = tmp_u;
                            }
                        }
                    } else {
                        // Few groups: every thread takes a slice of each group.
                        for k in 0..num_groups {
                            // for each k we start at k*pairs*2 and end on k*pairs*2 + pairs
                            // for k+1 we start at (k+1)*pairs*2 = k*pairs*2 + pairs*2
                            // and end on (k+1)*pairs*2 + pairs = k*pairs*2 + pairs*3
                            // for k+2 we start at (k+2)*pairs*2 = k*pairs*2 + pairs*4
                            // and end on (k+2)*pairs*2 + pairs = k*pairs*2 + pairs*5
                            // so we do not overlap during the full run and do not need to sync
                            let group_start_idx = k * pairs_per_group * 2;
                            let group_end_idx = group_start_idx + pairs_per_group;
                            let group_size = pairs_per_group;
                            let s = omegas_bit_reversed[k];
                            // we always split thread work in here
                            let chunk = Worker::chunk_size_for_num_spawned_threads(group_size, to_spawn);
                            let start = group_start_idx + thread_id * chunk;
                            let end = if start + chunk <= group_end_idx {
                                start + chunk
                            } else {
                                group_end_idx
                            };
                            for j in start..end {
                                let mut u = unsafe { *a.get_unchecked(j) };
                                let mut v = unsafe { *a.get_unchecked(j+distance) };
                                // let mut u = a[j];
                                // let mut v = a[j+distance];
                                u.reduce_twice();
                                v.mul_assign_unreduced(&s);
                                let mut tmp_v = u;
                                let mut tmp_u = u;
                                tmp_u.add_assign_unreduced(&v);
                                tmp_v.sub_assign_twice_unreduced(&v);
                                unsafe {
                                    *a.get_unchecked_mut(j+distance) = tmp_v;
                                    *a.get_unchecked_mut(j) = tmp_u;
                                };
                                // a[j+distance] = tmp_v;
                                // a[j] = tmp_u;
                            }
                        }
                    }
                    pairs_per_group /= 2;
                    num_groups *= 2;
                    distance /= 2;
                    // use barrier to wait for all other threads
                    (&barriers[round_id]).wait();
                    round_id += 1;
                }
                // if pairs per group << num cpus we use splitting in k,
                // otherwise use splitting in indexes
                // Final round: results are completely reduced into [0, p).
                if num_groups < n {
                    if num_groups >= to_spawn {
                        // for each k we start at k*pairs*2 and end on k*pairs*2 + pairs
                        // for k+1 we start at (k+1)*pairs*2 = k*pairs*2 + pairs*2
                        // and end on (k+1)*pairs*2 + pairs = k*pairs*2 + pairs*3
                        // for k+2 we start at (k+2)*pairs*2 = k*pairs*2 + pairs*4
                        // and end on (k+2)*pairs*2 + pairs = k*pairs*2 + pairs*5
                        // so we do not overlap during the full run and do not need to sync
                        let chunk = Worker::chunk_size_for_num_spawned_threads(num_groups, to_spawn);
                        let start = thread_id * chunk;
                        let end = if start + chunk <= num_groups {
                            start + chunk
                        } else {
                            num_groups
                        };
                        for k in start..end {
                            let group_start_idx = k * pairs_per_group * 2;
                            let group_end_idx = group_start_idx + pairs_per_group;
                            let s = omegas_bit_reversed[k];
                            for j in group_start_idx..group_end_idx {
                                let mut u = unsafe { *a.get_unchecked(j) };
                                let mut v = unsafe { *a.get_unchecked(j+distance) };
                                // let mut u = a[j];
                                // let mut v = a[j+distance];
                                u.reduce_twice();
                                v.mul_assign_unreduced(&s);
                                let mut tmp_v = u;
                                let mut tmp_u = u;
                                tmp_u.add_assign_unreduced(&v);
                                PartialTwoBitReductionField::reduce_completely(&mut tmp_u);
                                tmp_v.sub_assign_twice_unreduced(&v);
                                PartialTwoBitReductionField::reduce_completely(&mut tmp_v);
                                unsafe {
                                    *a.get_unchecked_mut(j+distance) = tmp_v;
                                    *a.get_unchecked_mut(j) = tmp_u;
                                };
                                // a[j+distance] = tmp_v;
                                // a[j] = tmp_u;
                            }
                        }
                    } else {
                        for k in 0..num_groups {
                            // for each k we start at k*pairs*2 and end on k*pairs*2 + pairs
                            // for k+1 we start at (k+1)*pairs*2 = k*pairs*2 + pairs*2
                            // and end on (k+1)*pairs*2 + pairs = k*pairs*2 + pairs*3
                            // for k+2 we start at (k+2)*pairs*2 = k*pairs*2 + pairs*4
                            // and end on (k+2)*pairs*2 + pairs = k*pairs*2 + pairs*5
                            // so we do not overlap during the full run and do not need to sync
                            let group_start_idx = k * pairs_per_group * 2;
                            let group_end_idx = group_start_idx + pairs_per_group;
                            let group_size = pairs_per_group;
                            let s = omegas_bit_reversed[k];
                            // we always split thread work in here
                            let chunk = Worker::chunk_size_for_num_spawned_threads(group_size, to_spawn);
                            let start = group_start_idx + thread_id * chunk;
                            let end = if start + chunk <= group_end_idx {
                                start + chunk
                            } else {
                                group_end_idx
                            };
                            for j in start..end {
                                let mut u = unsafe { *a.get_unchecked(j) };
                                let mut v = unsafe { *a.get_unchecked(j+distance) };
                                // let mut u = a[j];
                                // let mut v = a[j+distance];
                                u.reduce_twice();
                                v.mul_assign_unreduced(&s);
                                let mut tmp_v = u;
                                let mut tmp_u = u;
                                tmp_u.add_assign_unreduced(&v);
                                PartialTwoBitReductionField::reduce_completely(&mut tmp_u);
                                tmp_v.sub_assign_twice_unreduced(&v);
                                PartialTwoBitReductionField::reduce_completely(&mut tmp_v);
                                unsafe {
                                    *a.get_unchecked_mut(j+distance) = tmp_v;
                                    *a.get_unchecked_mut(j) = tmp_u;
                                };
                                // a[j+distance] = tmp_v;
                                // a[j] = tmp_u;
                            }
                        }
                    }
                    // use barrier to wait for all other threads
                    (&barriers[round_id]).wait();
                }
            });
        }
    });
}
#[cfg(test)]
mod test {
    use crate::plonk::fft::cooley_tukey_ntt::*;

    /// Benchmarks the serial CT NTT (with and without partial reduction)
    /// against the classic serial FFT on identical random inputs and checks
    /// that all variants produce the same result.
    #[test]
    fn test_bench_ct_serial_fft() {
        use rand::{XorShiftRng, SeedableRng, Rand, Rng};
        use crate::plonk::transparent_engine::proth::Fr;
        use crate::plonk::polynomials::*;
        use std::time::Instant;
        use super::*;
        use crate::worker::*;
        use crate::plonk::commitments::transparent::utils::*;
        use crate::plonk::fft::fft::serial_fft;
        use super::CTPrecomputations;
        use super::super::BitReversedOmegas;
        use crate::plonk::domains::Domain;
        // Small sizes in debug builds; realistic benchmark sizes in release.
        let poly_sizes = if cfg!(debug_assertions) {
            vec![10_000]
        } else {
            vec![1_000_000, 2_000_000, 4_000_000, 8_000_000]
        };
        // let poly_sizes = vec![8];
        // Debug helper: for each element of `one`, find its index in `two`.
        // NOTE(review): currently unused in this test — kept for manual
        // debugging of element permutations.
        fn check_permutation<F: PrimeField>(one: &[F], two: &[F]) -> (bool, Vec<usize>) {
            let mut permutation: Vec<usize> = (0..one.len()).collect();
            let mut valid = true;
            for (i, el) in one.iter().enumerate() {
                let mut idx = 0;
                let mut found = false;
                for (j, el2) in two.iter().enumerate() {
                    if *el == *el2 {
                        idx = j;
                        found = true;
                        break;
                    }
                }
                if !found {
                    println!("Not found for {}", i);
                    valid = false;
                    break;
                }
                permutation[i] = idx;
            }
            (valid, permutation)
        }
        // let worker = Worker::new();
        for poly_size in poly_sizes.into_iter() {
            let poly_size = poly_size as usize;
            let poly_size = poly_size.next_power_of_two();
            let precomp = BitReversedOmegas::<Fr>::new_for_domain_size(poly_size);
            // println!("{:?}", precomp.bit_reversed_omegas());
            let domain = Domain::<Fr>::new_for_size(poly_size as u64).unwrap();
            let omega = domain.generator;
            let log_n = domain.power_of_two as u32;
            // Baseline: classic serial FFT (natural output order).
            let res1 = {
                let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
                let mut coeffs = (0..poly_size).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
                let start = Instant::now();
                serial_fft(&mut coeffs, &omega, log_n);
                println!("serial FFT for size {} taken {:?}", poly_size, start.elapsed());
                coeffs
            };
            // Serial CT NTT; output is bit-reversed, so un-reverse before
            // comparing with the FFT baseline.
            let (res2, elapsed2) = {
                let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
                let mut coeffs = (0..poly_size).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
                // println!("Coeffs = {:?}", coeffs);
                let start = Instant::now();
                serial_ct_ntt(&mut coeffs, log_n, &precomp);
                let finish = start.elapsed();
                println!("serial NTT for size {} taken {:?}", poly_size, finish);
                let log_n = log_n as usize;
                for k in 0..poly_size {
                    let rk = bitreverse(k, log_n);
                    if k < rk {
                        coeffs.swap(rk, k);
                    }
                }
                (coeffs, finish)
            };
            // Serial CT NTT with partial reduction ("PRR").
            let (res3, elapsed3) = {
                let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
                let mut coeffs = (0..poly_size).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
                // println!("Coeffs = {:?}", coeffs);
                let start = Instant::now();
                serial_ct_ntt_partial_reduction(&mut coeffs, log_n, &precomp);
                let finish = start.elapsed();
                println!("serial PRR for size {} taken {:?}", poly_size, finish);
                let log_n = log_n as usize;
                for k in 0..poly_size {
                    let rk = bitreverse(k, log_n);
                    if k < rk {
                        coeffs.swap(rk, k);
                    }
                }
                (coeffs, finish)
            };
            // Second plain-NTT run, used to average its timing below.
            let (res5, elapsed5) = {
                let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
                let mut coeffs = (0..poly_size).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
                // println!("Coeffs = {:?}", coeffs);
                let start = Instant::now();
                serial_ct_ntt(&mut coeffs, log_n, &precomp);
                let finish = start.elapsed();
                println!("serial NTT for size {} taken {:?}", poly_size, finish);
                let log_n = log_n as usize;
                for k in 0..poly_size {
                    let rk = bitreverse(k, log_n);
                    if k < rk {
                        coeffs.swap(rk, k);
                    }
                }
                (coeffs, finish)
            };
            // Compare averaged plain-NTT time with the partial-reduction time.
            let ntt_time = (elapsed2 + elapsed5).div_f32(2.);
            let diff_pr = ntt_time.checked_sub(elapsed3);
            if let Some(diff) = diff_pr {
                println!("Partial reduction: speed up is {}%.", diff.as_nanos()*100/ntt_time.as_nanos());
            } else {
                println!("Partial reduction: no speed up.");
            }
            assert!(res1 == res2);
            assert!(res1 == res5);
            assert!(res1 == res3);
        }
    }

    /// Same comparison as above but for the parallel implementations,
    /// using the worker's full CPU budget.
    #[test]
    fn test_bench_ct_parallel_fft() {
        use rand::{XorShiftRng, SeedableRng, Rand, Rng};
        use crate::plonk::transparent_engine::proth::Fr;
        use crate::plonk::polynomials::*;
        use std::time::Instant;
        use super::*;
        use crate::worker::*;
        use crate::plonk::commitments::transparent::utils::*;
        use crate::plonk::fft::fft::parallel_fft;
        use super::CTPrecomputations;
        use super::super::BitReversedOmegas;
        use crate::plonk::domains::Domain;
        let poly_sizes = if cfg!(debug_assertions) {
            vec![10_000]
        } else {
            vec![2_000_000, 4_000_000, 8_000_000, 16_000_000]
        };
        // let poly_sizes = vec![1000usize];
        let worker = Worker::new();
        for poly_size in poly_sizes.into_iter() {
            let poly_size = poly_size as usize;
            let poly_size = poly_size.next_power_of_two();
            let precomp = BitReversedOmegas::<Fr>::new_for_domain_size(poly_size);
            let domain = Domain::<Fr>::new_for_size(poly_size as u64).unwrap();
            let omega = domain.generator;
            let log_n = domain.power_of_two as u32;
            // Baseline: classic parallel FFT.
            let res1 = {
                let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
                let mut coeffs = (0..poly_size).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
                let start = Instant::now();
                parallel_fft(&mut coeffs, &worker, &omega, log_n, worker.log_num_cpus());
                println!("parallel FFT for size {} taken {:?}", poly_size, start.elapsed());
                coeffs
            };
            // Parallel CT NTT; un-reverse the bit-reversed output.
            let (res2, elapsed2) = {
                let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
                let mut coeffs = (0..poly_size).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
                let start = Instant::now();
                parallel_ct_ntt(&mut coeffs, &worker, log_n, worker.log_num_cpus(), &precomp);
                let finish = start.elapsed();
                println!("parallel NTT for size {} taken {:?}", poly_size, finish);
                let log_n = log_n as usize;
                for k in 0..poly_size {
                    let rk = bitreverse(k, log_n);
                    if k < rk {
                        coeffs.swap(rk, k);
                    }
                }
                (coeffs, finish)
            };
            // Parallel CT NTT with partial reduction.
            let (res3, elapsed3) = {
                let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
                let mut coeffs = (0..poly_size).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
                let start = Instant::now();
                parallel_ct_ntt_partial_reduction(&mut coeffs, &worker, log_n, worker.log_num_cpus(), &precomp);
                let finish = start.elapsed();
                println!("parallel NTT with partial reduction for size {} taken {:?}", poly_size, finish);
                let log_n = log_n as usize;
                for k in 0..poly_size {
                    let rk = bitreverse(k, log_n);
                    if k < rk {
                        coeffs.swap(rk, k);
                    }
                }
                (coeffs, finish)
            };
            let ntt_time = elapsed2;
            let diff_pr = ntt_time.checked_sub(elapsed3);
            if let Some(diff) = diff_pr {
                println!("Partial reduction: speed up is {}%.", diff.as_nanos()*100/ntt_time.as_nanos());
            } else {
                println!("Partial reduction: no speed up.");
            }
            assert!(res1 == res2);
            assert!(res1 == res3);
        }
    }
}
<file_sep>/src/plonk/transparent_engine/proth_engine.rs
pub use super::proth::Fr;

use super::impl_macro::*;
use super::TransparentEngine;

// Instantiates the `Transparent252` engine over the Proth-prime field `Fr`
// via the shared macro, then marks it as a transparent-setup engine.
transparent_engine_impl!{Transparent252, Fr}

impl TransparentEngine for Transparent252 {}
pub mod multioracle;
pub mod setup;
pub mod prover;
pub mod simple_fri;
// pub mod poseidon_tree_hash;
pub use super::trees::binary_tree;
pub use super::trees::tree_hash;<file_sep>/src/plonk/better_better_cs/redshift/setup.rs
use crate::pairing::{Engine};
use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use crate::worker::Worker;
use crate::plonk::commitments::transparent::utils::log2_floor;
use super::*;
use super::tree_hash::*;
use super::binary_tree::{BinaryTree, BinaryTreeParams};
use crate::plonk::polynomials::*;
use super::multioracle::Multioracle;
use super::super::cs_old::*;
use crate::SynthesisError;
/// RedShift setup artifact: all setup/permutation/selector polynomials in
/// monomial form and as bit-reversed LDEs, committed in a single Merkle
/// multioracle, plus their evaluations at a fixed setup point.
pub struct SetupMultioracle<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    // Monomial-form coefficients, in the same order as `polynomial_ldes`.
    pub polynomials_in_monomial_form: Vec<Polynomial<E::Fr, Coefficients>>,
    // Evaluations of each monomial-form polynomial at `setup_point`.
    pub setup_poly_values: Vec<E::Fr>,
    pub setup_point: E::Fr,
    // Bit-reversed LDEs (factor LDE_FACTOR) of the same polynomials.
    pub polynomial_ldes: Vec<Polynomial<E::Fr, Values>>,
    // Identifiers of the plain setup polynomials (first entries above).
    pub setup_ids: Vec<PolyIdentifier>,
    // Index ranges inside the vectors above that hold permutation polynomials.
    pub permutations_ranges: Vec<std::ops::Range<usize>>,
    // Indexes of the gate-selector polynomials inside the vectors above.
    pub gate_selectors_indexes: Vec<usize>,
    // Merkle tree over all LDE leaves (the multioracle commitment).
    pub tree: BinaryTree<E, H>
}

// LDE blow-up factor used for all setup polynomials.
pub const LDE_FACTOR: usize = 16;
// Number of LDE values packed into each Merkle-tree leaf for FRI queries.
pub const FRI_VALUES_PER_LEAF: usize = 8;
impl<E: Engine, H: BinaryTreeHasher<E::Fr>> SetupMultioracle<E, H> {
    /// Consumes a finalized assembly and produces the setup multioracle:
    /// IFFTs every setup, permutation and gate-selector polynomial into
    /// monomial form, computes bit-reversed LDEs, commits them all in one
    /// Merkle tree, and evaluates them at a fixed setup point.
    /// Returns the oracle together with the raw permutation polynomials.
    pub fn from_assembly<P: PlonkConstraintSystemParams<E>, MG: MainGateEquation>(
        assembly: TrivialAssembly<E, P, MG>,
        tree_hasher: H,
        worker: &Worker
    ) -> Result<(Self, Vec<Polynomial<E::Fr, Values>>), SynthesisError> {
        use crate::plonk::fft::cooley_tukey_ntt::*;
        let size = assembly.n().next_power_of_two();
        println!("Using LDE to size {}", size * LDE_FACTOR);
        let (mut storage, permutations) = assembly.perform_setup(&worker)?;
        let gate_selectors = assembly.output_gate_selectors(&worker)?;
        let ids = assembly.sorted_setup_polynomial_ids.clone();
        // The assembly is no longer needed; free it before the heavy FFT work.
        drop(assembly);
        let mut setup_polys = vec![];
        let mut mononial_forms = vec![];
        // NTT precomputations reused by every IFFT/LDE below.
        let omegas_bitreversed = BitReversedOmegas::<E::Fr>::new_for_domain_size(size.next_power_of_two());
        let omegas_inv_bitreversed = <OmegasInvBitreversed::<E::Fr> as CTPrecomputations::<E::Fr>>::new_for_domain_size(size.next_power_of_two());
        // Plain setup polynomials, in the order given by `ids`.
        for id in ids.iter() {
            let mut setup_poly = storage.remove(&id).expect(&format!("must contain a poly for id {:?}", id));
            setup_poly.pad_to_domain()?;
            let coeffs = setup_poly.ifft_using_bitreversed_ntt(&worker, &omegas_inv_bitreversed, &E::Fr::one())?;
            mononial_forms.push(coeffs.clone());
            let lde = coeffs.bitreversed_lde_using_bitreversed_ntt(&worker, LDE_FACTOR, &omegas_bitreversed, &E::Fr::multiplicative_generator())?;
            setup_polys.push(lde);
        }
        println!("Setup LDEs completed");
        let mut permutations_ranges = vec![];
        // A single range covers all permutation polynomials appended below.
        let before = setup_polys.len();
        for mut p in permutations.iter().cloned() {
            p.pad_to_domain()?;
            let coeffs = p.ifft_using_bitreversed_ntt(&worker, &omegas_inv_bitreversed, &E::Fr::one())?;
            mononial_forms.push(coeffs.clone());
            let lde = coeffs.bitreversed_lde_using_bitreversed_ntt(&worker, LDE_FACTOR, &omegas_bitreversed, &E::Fr::multiplicative_generator())?;
            setup_polys.push(lde);
        }
        let after = setup_polys.len();
        permutations_ranges.push(before..after);
        println!("Permutations LDEs completed");
        let mut gate_selectors_indexes = vec![];
        for mut selector in gate_selectors.into_iter() {
            // Record the position of each selector before appending it.
            let before = setup_polys.len();
            gate_selectors_indexes.push(before);
            selector.pad_to_domain()?;
            let coeffs = selector.ifft_using_bitreversed_ntt(&worker, &omegas_inv_bitreversed, &E::Fr::one())?;
            mononial_forms.push(coeffs.clone());
            let lde = coeffs.bitreversed_lde_using_bitreversed_ntt(&worker, LDE_FACTOR, &omegas_bitreversed, &E::Fr::multiplicative_generator())?;
            setup_polys.push(lde);
        }
        println!("Num gate selectors: {}", gate_selectors_indexes.len());
        println!("Gate selectors LDEs completed");
        // Commit every LDE into one Merkle multioracle.
        let multioracle = Multioracle::<E, H>::new_from_polynomials(
            &setup_polys,
            tree_hasher,
            FRI_VALUES_PER_LEAF,
            &worker
        );
        let tree = multioracle.tree;
        // NOTE(review): the setup point is a hard-coded constant — presumably
        // a placeholder until a proper transcript-derived point is used;
        // confirm before production use.
        let setup_point = E::Fr::from_str("1234567890").unwrap();
        let mut setup_poly_values = vec![];
        for p in mononial_forms.iter() {
            let value = p.evaluate_at(&worker, setup_point);
            setup_poly_values.push(value);
        }
        let setup = Self {
            polynomials_in_monomial_form: mononial_forms,
            setup_poly_values,
            setup_point,
            polynomial_ldes: setup_polys,
            tree,
            setup_ids: ids,
            permutations_ranges,
            gate_selectors_indexes,
        };
        Ok((setup, permutations))
    }
}
<file_sep>/src/source.rs
use crate::pairing::{
CurveAffine,
CurveProjective,
Engine
};
use crate::pairing::ff::{
PrimeField,
Field,
PrimeFieldRepr,
ScalarEngine};
use std::sync::Arc;
use std::io;
use bit_vec::{self, BitVec};
use std::iter;
use super::SynthesisError;
/// An object that builds a source of bases.
/// Cheap to clone so each multiexp worker can create its own `Source`.
pub trait SourceBuilder<G: CurveAffine>: Send + Sync + 'static + Clone {
    type Source: Source<G>;

    /// Consumes the builder and produces a fresh source positioned at its start.
    fn new(self) -> Self::Source;
}
/// A source of bases, like an iterator.
pub trait Source<G: CurveAffine> {
    /// Parses the element from the source. Fails if the point is at infinity.
    fn add_assign_mixed(&mut self, to: &mut <G as CurveAffine>::Projective) -> Result<(), SynthesisError>;

    /// Skips `amt` elements from the source, avoiding deserialization.
    fn skip(&mut self, amt: usize) -> Result<(), SynthesisError>;
}
impl<G: CurveAffine> SourceBuilder<G> for (Arc<Vec<G>>, usize) {
    type Source = (Arc<Vec<G>>, usize);

    /// Builds the source. `self` already is the `(bases, cursor)` pair, so it
    /// is returned directly; the previous `(self.0.clone(), self.1)` performed
    /// a redundant `Arc` clone of a value that was consumed anyway.
    fn new(self) -> (Arc<Vec<G>>, usize) {
        self
    }
}
impl<G: CurveAffine> Source<G> for (Arc<Vec<G>>, usize) {
    /// Adds the base at the cursor to `to` via mixed addition and advances
    /// the cursor. Errors if the source is exhausted or the base is the
    /// point at infinity (mixed addition is undefined for the identity).
    fn add_assign_mixed(&mut self, to: &mut <G as CurveAffine>::Projective) -> Result<(), SynthesisError> {
        if self.0.len() <= self.1 {
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases when adding from source").into());
        }
        if self.0[self.1].is_zero() {
            return Err(SynthesisError::UnexpectedIdentity)
        }
        to.add_assign_mixed(&self.0[self.1]);
        self.1 += 1;
        Ok(())
    }

    /// Advances the cursor by `amt` without deserializing the skipped bases.
    /// NOTE(review): only checks that the cursor is currently in bounds;
    /// `self.1 + amt` may land past the end — subsequent calls then fail with
    /// `UnexpectedEof`. Confirm this deferred check is intended.
    fn skip(&mut self, amt: usize) -> Result<(), SynthesisError> {
        if self.0.len() <= self.1 {
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases skipping from source").into());
        }
        self.1 += amt;
        Ok(())
    }
}
/// Describes which bases of a source are actually queried, letting multiexp
/// skip the unused ones.
pub trait QueryDensity {
    /// Returns whether the base exists.
    type Iter: Iterator<Item=bool>;

    // One boolean per base: `true` if that base is queried.
    fn iter(self) -> Self::Iter;
    // Total number of bases, or `None` when the density places no bound.
    fn get_query_size(self) -> Option<usize>;
}
/// Marker density: every base is considered queried.
#[derive(Clone)]
pub struct FullDensity;

impl AsRef<FullDensity> for FullDensity {
    fn as_ref(&self) -> &FullDensity {
        self
    }
}

impl<'a> QueryDensity for &'a FullDensity {
    type Iter = iter::Repeat<bool>;

    // Yields `true` forever; callers bound iteration by the exponent count.
    fn iter(self) -> Self::Iter {
        iter::repeat(true)
    }

    // Unbounded: this density imposes no size of its own.
    fn get_query_size(self) -> Option<usize> {
        None
    }
}
/// Tracks which elements were actually used during synthesis:
/// one bit per element plus a running count of the set bits.
#[derive(Clone)]
pub struct DensityTracker {
    pub(crate) bv: BitVec,
    // Number of bits currently set in `bv`.
    total_density: usize
}
impl<'a> QueryDensity for &'a DensityTracker {
    type Iter = bit_vec::Iter<'a>;

    // One bit per tracked element, in insertion order.
    fn iter(self) -> Self::Iter {
        self.bv.iter()
    }

    fn get_query_size(self) -> Option<usize> {
        Some(self.bv.len())
    }
}
impl DensityTracker {
    /// Creates an empty tracker: no elements, zero density.
    pub fn new() -> DensityTracker {
        DensityTracker {
            bv: BitVec::new(),
            total_density: 0,
        }
    }

    /// Registers one more element, initially marked as unused.
    pub fn add_element(&mut self) {
        self.bv.push(false);
    }

    /// Grows the tracker to `to_size` elements; all new elements are unused.
    /// Panics if the tracker already holds more than `to_size` elements.
    pub fn pad(&mut self, to_size: usize) {
        let current_len = self.bv.len();
        assert!(to_size >= current_len);
        self.bv.extend(BitVec::from_elem(to_size - current_len, false));
    }

    /// Marks element `idx` as used; it is counted only on the first marking.
    /// Panics if `idx` is out of range.
    pub fn inc(&mut self, idx: usize) {
        let already_used = self.bv.get(idx).unwrap();
        if !already_used {
            self.bv.set(idx, true);
            self.total_density += 1;
        }
    }

    /// Number of distinct elements marked as used.
    pub fn get_total_density(&self) -> usize {
        self.total_density
    }
}
/// Two density trackers viewed as one concatenated density.
/// (Name kept as-is for caller compatibility, despite the typo.)
#[derive(Clone)]
pub struct DensityTrackerersChain {
    pub(crate) tracker_0: DensityTracker,
    pub(crate) tracker_1: DensityTracker,
    // Cached sum of both parts' densities, fixed at construction time.
    total_density: usize
}
impl DensityTrackerersChain {
    /// Chains two trackers; the combined density is the sum of both parts.
    pub fn new(tracker_0: DensityTracker, tracker_1: DensityTracker) -> Self {
        let combined_density = tracker_0.total_density + tracker_1.total_density;
        Self {
            tracker_0,
            tracker_1,
            total_density: combined_density,
        }
    }
}
impl<'a> QueryDensity for &'a DensityTrackerersChain {
    type Iter = std::iter::Chain<bit_vec::Iter<'a>, bit_vec::Iter<'a>>;

    // Bits of the first tracker followed by bits of the second.
    fn iter(self) -> Self::Iter {
        self.tracker_0.bv.iter().chain(&self.tracker_1.bv)
    }

    fn get_query_size(self) -> Option<usize> {
        Some(self.tracker_0.bv.len() + self.tracker_1.bv.len())
    }
}
use blake2s_simd::{Params, State};
use crate::pairing::ff::{PrimeField, PrimeFieldRepr};
use super::Prng;
lazy_static! {
    // Shared Blake2s base state (32-byte output, fixed key and
    // personalization tags), cloned whenever the PRNG state is reset.
    static ref STATELESS_PRNG_BLAKE2S_PARAMS: State = {
        Params::new()
            .hash_length(32)
            .key(b"Squeamish Ossifrage")
            .personal(b"S_Prng_F")
            .to_state()
    };
}
/// Stateless Fiat-Shamir style PRNG over Blake2s: each committed input
/// replaces the whole state, and each challenge read resets it again.
#[derive(Clone)]
pub struct StatelessBlake2sPrng<F: PrimeField> {
    state: State,
    _marker: std::marker::PhantomData<F>
}

impl<F: PrimeField> StatelessBlake2sPrng<F> {
    // Bits to mask away from the top of the 256-bit digest so the result
    // fits into the field's capacity.
    // NOTE(review): the mask below uses SHAVE_BITS % 64, i.e. only the top
    // 64-bit limb is shaved — assumes F::CAPACITY > 192; confirm for the
    // fields this is instantiated with.
    const SHAVE_BITS: u32 = 256 - F::CAPACITY;
    // const REPR_SIZE: usize = std::mem::size_of::<F::Repr>();
    // Byte size of the field representation in whole 64-bit limbs.
    // NOTE(review): for NUM_BITS divisible by 64 this is one limb larger
    // than the repr itself — confirm this is harmless for `write_be`.
    const REPR_SIZE: usize = (((F::NUM_BITS as usize)/ 64) + 1) * 8;
}
impl<F: PrimeField> Prng<F> for StatelessBlake2sPrng<F> {
    type Input = F;
    type InitializationParameters = ();

    fn new() -> Self {
        // The challenge is carved out of a 32-byte digest, so the field must
        // fit into 256 bits.
        assert!(F::NUM_BITS < 256);
        Self {
            state: STATELESS_PRNG_BLAKE2S_PARAMS.clone(),
            _marker: std::marker::PhantomData
        }
    }

    /// Replaces (not accumulates into) the state with a hash of `input`,
    /// serialized as big-endian bytes — hence "stateless".
    fn commit_input(&mut self, input: &Self::Input) {
        let mut state = STATELESS_PRNG_BLAKE2S_PARAMS.clone();
        let repr = input.into_repr();
        let mut bytes: Vec<u8> = vec![0u8; Self::REPR_SIZE];
        repr.write_be(&mut bytes[..]).expect("should write");
        state.update(&bytes[..]);
        self.state = state;
    }

    /// Finalizes the digest into a field element (masking the top bits so it
    /// is below the modulus) and resets the state for the next round.
    fn get_challenge(&mut self) -> F {
        let value = *(self.state.finalize().as_array());
        self.state = STATELESS_PRNG_BLAKE2S_PARAMS.clone();
        let mut repr = F::Repr::default();
        // Mask for the most significant limb; see SHAVE_BITS note above.
        let shaving_mask: u64 = 0xffffffffffffffff >> (Self::SHAVE_BITS % 64);
        repr.read_be(&value[..]).expect("will read");
        let last_limb_idx = repr.as_ref().len() - 1;
        repr.as_mut()[last_limb_idx] &= shaving_mask;
        let value = F::from_repr(repr).expect("in a field");
        value
    }
}
<file_sep>/src/sonic/helped/prover.rs
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::parameters::{Parameters, NUM_BLINDINGS};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::cs::{Circuit, Variable, Coeff};
use crate::sonic::srs::SRS;
use crate::sonic::sonic::{CountN, Basic};
/// Computes the `SxyAdvice` for an existing Sonic proof: a commitment to
/// s(X, y), the evaluation s(z, y), and a Kate opening at z. The challenges
/// y and z are re-derived by replaying the proof transcript; `n` is the
/// number of multiplication gates.
pub fn create_advice_on_information_and_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    proof: &Proof<E>,
    srs: &SRS<E>,
    n: usize
) -> Result<SxyAdvice<E>, SynthesisError>
{
    let z: E::Fr;
    let y: E::Fr;
    {
        // Replay the Fiat-Shamir transcript to recover the verifier
        // challenges in the same order the prover produced them.
        let mut transcript = Transcript::new(&[]);
        transcript.commit_point(&proof.r);
        y = transcript.get_challenge_scalar();
        transcript.commit_point(&proof.t);
        z = transcript.get_challenge_scalar();
    }
    let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?;
    // Re-synthesize the circuit to obtain s(X, y) as its negative- and
    // positive-power coefficient halves.
    let (s_poly_negative, s_poly_positive) = {
        let mut tmp = SxEval::new(y, n);
        S::synthesize(&mut tmp, circuit)?;
        tmp.poly()
    };
    // Compute S commitment
    let s = multiexp(
        srs.g_positive_x_alpha[0..(2 * n)]
            .iter()
            .chain_ext(srs.g_negative_x_alpha[0..(n)].iter()),
        s_poly_positive.iter().chain_ext(s_poly_negative.iter())
    ).into_affine();
    // Compute s(z, y)
    let mut szy = E::Fr::zero();
    {
        // Positive powers are evaluated at z, negative powers at 1/z.
        szy.add_assign(& evaluate_at_consequitive_powers(& s_poly_positive[..], z, z));
        szy.add_assign(& evaluate_at_consequitive_powers(& s_poly_negative[..], z_inv, z_inv));
    }
    // Naive reference implementation of the evaluation above, kept for
    // documentation purposes:
    // let mut szy = E::Fr::zero();
    // {
    //     let mut tmp = z;
    //     for &p in &s_poly_positive {
    //         let mut p = p;
    //         p.mul_assign(&tmp);
    //         szy.add_assign(&p);
    //         tmp.mul_assign(&z);
    //     }
    //     let mut tmp = z_inv;
    //     for &p in &s_poly_negative {
    //         let mut p = p;
    //         p.mul_assign(&tmp);
    //         szy.add_assign(&p);
    //         tmp.mul_assign(&z_inv);
    //     }
    // }
    // Compute kate opening
    let opening = {
        // Divide s(X, y) - s(z, y) by (X - z); the constant term is -s(z, y).
        let mut open = szy;
        open.negate();
        let poly = kate_divison(
            s_poly_negative.iter().rev().chain_ext(Some(open).iter()).chain_ext(s_poly_positive.iter()),
            z,
        );
        let negative_poly = poly[0..n].iter().rev();
        let positive_poly = poly[n..].iter();
        multiexp(
            srs.g_negative_x[1..(negative_poly.len() + 1)].iter().chain_ext(
                srs.g_positive_x[0..positive_poly.len()].iter()
            ),
            negative_poly.chain_ext(positive_poly)
        ).into_affine()
    };
    Ok(SxyAdvice {
        s,
        szy,
        opening
    })
}
/// Convenience wrapper around [`create_advice_on_information_and_srs`] that
/// takes the multiplication-gate count `n` from the verifying key stored in
/// `parameters`.
///
/// Fixes a mis-encoded `&parameters` in the original (the `&para` prefix had
/// been collapsed into an HTML pilcrow entity, which does not compile).
pub fn create_advice<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    proof: &Proof<E>,
    parameters: &Parameters<E>,
) -> Result<SxyAdvice<E>, SynthesisError>
{
    let n = parameters.vk.n;
    create_advice_on_information_and_srs::<E, C, S>(circuit, proof, &parameters.srs, n)
}
/// Same as [`create_advice`], but for a bare SRS: the number of
/// multiplication gates is counted with a preliminary synthesis pass.
pub fn create_advice_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    proof: &Proof<E>,
    srs: &SRS<E>
) -> Result<SxyAdvice<E>, SynthesisError>
{
    // annoying, but we need n to compute s(z, y), and this isn't
    // precomputed anywhere yet
    let mut counter = CountN::<S>::new();
    S::synthesize(&mut counter, circuit)?;
    let n = counter.n;

    create_advice_on_information_and_srs::<E, C, S>(circuit, proof, srs, n)
}
/// Creates a SONIC proof for `circuit` using the SRS embedded in `parameters`.
///
/// Fixes a mis-encoded `&parameters` in the original (the `&para` prefix had
/// been collapsed into an HTML pilcrow entity, which does not compile).
pub fn create_proof<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    parameters: &Parameters<E>
) -> Result<Proof<E>, SynthesisError> {
    create_proof_on_srs::<E, C, S>(circuit, &parameters.srs)
}
extern crate rand;
use self::rand::{Rand, Rng, thread_rng};
use crate::sonic::sonic::Wires;
/// Creates a SONIC proof for `circuit` directly from an SRS.
///
/// Follows the SONIC protocol: commit to r(X, 1) with blinding terms, derive
/// challenge `y`, build r(X, y) and t(X, y) = r(X, 1) * (r(X, y) + s(X, y)),
/// commit to t, derive challenge `z`, and produce the claimed evaluations
/// plus Kate openings at `z` (and at `yz` for r).
pub fn create_proof_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    srs: &SRS<E>
) -> Result<Proof<E>, SynthesisError>
{
    let mut wires = Wires::new();
    S::synthesize(&mut wires, circuit)?;
    let n = wires.a.len();
    let mut transcript = Transcript::new(&[]);
    let rng = &mut thread_rng();
    // c_{n+1}, c_{n+2}, c_{n+3}, c_{n+4}
    let blindings: Vec<E::Fr> = (0..NUM_BLINDINGS).into_iter().map(|_| E::Fr::rand(rng)).collect();
    // r is a commitment to r(X, 1)
    let r = polynomial_commitment::<E, _>(
        n,
        2*n + NUM_BLINDINGS,
        n,
        &srs,
        // Coefficients from the most negative power upward: blindings,
        // c wires, b wires, a zero at X^0, then a wires.
        blindings.iter().rev()
            .chain_ext(wires.c.iter().rev())
            .chain_ext(wires.b.iter().rev())
            .chain_ext(Some(E::Fr::zero()).iter())
            .chain_ext(wires.a.iter()),
    );
    transcript.commit_point(&r);
    let y: E::Fr = transcript.get_challenge_scalar();
    // create r(X, 1) by observation that it's just a series of coefficients.
    // Used representation is for powers X^{-2n}...X^{-n-1}, X^{-n}...X^{-1}, X^{1}...X^{n}
    // Same representation is ok for r(X, Y) too cause powers always match
    let mut rx1 = wires.b;
    rx1.extend(wires.c);
    rx1.extend(blindings.clone());
    rx1.reverse();
    rx1.push(E::Fr::zero());
    rx1.extend(wires.a);
    let mut rxy = rx1.clone();
    let y_inv = y.inverse().ok_or(SynthesisError::DivisionByZero)?;
    // y^(-2n - num blindings)
    let tmp = y_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]);
    // Scale each coefficient by the matching power of y to obtain r(X, y).
    mut_distribute_consequitive_powers(
        &mut rxy,
        tmp,
        y,
    );
    // negative powers [-1, -2n], positive [1, n]
    let (mut s_poly_negative, s_poly_positive) = {
        let mut tmp = SxEval::new(y, n);
        S::synthesize(&mut tmp, circuit)?;
        tmp.poly()
    };
    // r'(X, y) = r(X, y) + s(X, y). Note `y` - those are evaluated at the point already
    let mut rxy_prime = rxy.clone();
    {
        // extend to have powers [n+1, 2n]
        rxy_prime.resize(4 * n + 1 + NUM_BLINDINGS, E::Fr::zero());
        // Temporarily reverse the negative part so that it lines up with the
        // ascending-power layout of `rxy_prime` for the slice addition.
        s_poly_negative.reverse();
        let neg_poly_len = s_poly_negative.len();
        add_polynomials(&mut rxy_prime[(NUM_BLINDINGS+neg_poly_len)..(2 * n + NUM_BLINDINGS)], &s_poly_negative[..]);
        s_poly_negative.reverse();
        add_polynomials(&mut rxy_prime[(2 * n + 1 + NUM_BLINDINGS)..], &s_poly_positive[..])
        // // add coefficients in front of X^{-2n}...X^{-n-1}, X^{-n}...X^{-1}
        // for (r, s) in rxy_prime[NUM_BLINDINGS..(2 * n + NUM_BLINDINGS)]
        //     .iter_mut()
        //     .rev()
        //     .zip(s_poly_negative)
        // {
        //     r.add_assign(&s);
        // }
        // // add coefficients in front of X^{1}...X^{n}, X^{n+1}...X^{2*n}
        // for (r, s) in rxy_prime[(2 * n + 1 + NUM_BLINDINGS)..].iter_mut().zip(s_poly_positive) {
        //     r.add_assign(&s);
        // }
    }
    // by this point all R related polynomials are blinded and evaluated for Y variable
    // t(X, y) = r'(X, y)*r(X, 1) and will be later evaluated at z
    // contained degree in respect to X are from -4*n to 3*n including X^0
    let mut txy = multiply_polynomials::<E>(rx1.clone(), rxy_prime);
    txy[4 * n + 2 * NUM_BLINDINGS] = E::Fr::zero(); // -k(y)
    // commit to t(X, y) to later open at z
    let t = polynomial_commitment(
        srs.d,
        (4 * n) + 2*NUM_BLINDINGS,
        3 * n,
        srs,
        // skip what would be zero power
        txy[0..(4 * n) + 2*NUM_BLINDINGS].iter()
            .chain_ext(txy[(4 * n + 2*NUM_BLINDINGS + 1)..].iter()),
    );
    transcript.commit_point(&t);
    let z: E::Fr = transcript.get_challenge_scalar();
    let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?;
    // r(z, 1): start evaluation from the lowest power z^{-2n - NUM_BLINDINGS}.
    let rz = {
        let tmp = z_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]);
        evaluate_at_consequitive_powers(&rx1, tmp, z)
    };
    // rzy is evaluation of r(X, Y) at z, y
    let rzy = {
        let tmp = z_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]);
        evaluate_at_consequitive_powers(&rxy, tmp, z)
    };
    transcript.commit_scalar(&rz);
    transcript.commit_scalar(&rzy);
    // Aggregation challenge for folding r(X, 1) into the t opening.
    let r1: E::Fr = transcript.get_challenge_scalar();
    let zy_opening = {
        // r(X, 1) - r(z, y)
        // subtract constant term from R(X, 1)
        rx1[(2 * n + NUM_BLINDINGS)].sub_assign(&rzy);
        let mut point = y;
        point.mul_assign(&z);
        polynomial_commitment_opening(
            2 * n + NUM_BLINDINGS,
            n,
            &rx1,
            point,
            srs
        )
    };
    assert_eq!(rx1.len(), 3*n + NUM_BLINDINGS + 1);
    // it's an opening of t(X, y) at z
    let z_opening = {
        rx1[(2 * n + NUM_BLINDINGS)].add_assign(&rzy); // restore
        let rx1_len = rx1.len();
        // Fold r1 * r(X, 1) into t(X, y) before opening.
        mul_add_polynomials(&mut txy[(2 * n + NUM_BLINDINGS)..(2 * n + NUM_BLINDINGS + rx1_len)], &rx1[..], r1);
        // // skip powers from until reach -2n - NUM_BLINDINGS
        // for (t, &r) in txy[(2 * n + NUM_BLINDINGS)..].iter_mut().zip(rx1.iter()) {
        //     let mut r = r;
        //     r.mul_assign(&r1);
        //     t.add_assign(&r);
        // }
        let val = {
            let tmp = z_inv.pow(&[(4*n + 2*NUM_BLINDINGS) as u64]);
            evaluate_at_consequitive_powers(&txy, tmp, z)
        };
        // Subtract the evaluation so the numerator vanishes at z.
        txy[(4 * n + 2*NUM_BLINDINGS)].sub_assign(&val);
        polynomial_commitment_opening(
            4*n + 2*NUM_BLINDINGS,
            3*n,
            &txy,
            z,
            srs)
    };
    Ok(Proof {
        r, rz, rzy, t, z_opening, zy_opening
    })
}
/// Smoke test: a tiny circuit (one multiplication gate plus one linear
/// constraint), proven over a small test SRS and checked with `MultiVerifier`.
#[test]
fn my_fun_circuit_test() {
    use crate::pairing::ff::PrimeField;
    use crate::pairing::bls12_381::{Bls12, Fr};
    use super::*;
    use crate::sonic::cs::{ConstraintSystem, LinearCombination};
    use crate::sonic::sonic::Basic;
    use rand::{thread_rng};
    struct MyCircuit;
    impl<E: Engine> Circuit<E> for MyCircuit {
        fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
            // One multiplication gate with witness a = 10, b = 20, c = 200.
            let (a, b, _) = cs.multiply(|| {
                Ok((
                    E::Fr::from_str("10").unwrap(),
                    E::Fr::from_str("20").unwrap(),
                    E::Fr::from_str("200").unwrap(),
                ))
            })?;
            // Linear constraint: a + a - b = 0 (i.e. 10 + 10 = 20).
            cs.enforce_zero(LinearCombination::from(a) + a - b);
            //let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?;
            //cs.enforce_zero(LinearCombination::from(b) - multiplier);
            Ok(())
        }
    }
    // Test-only SRS with hardcoded (insecure) trapdoor values.
    let srs = SRS::<Bls12>::new(
        20,
        Fr::from_str("22222").unwrap(),
        Fr::from_str("33333333").unwrap(),
    );
    let proof = self::create_proof_on_srs::<Bls12, _, Basic>(&MyCircuit, &srs).unwrap();
    use std::time::{Instant};
    let start = Instant::now();
    let rng = thread_rng();
    let mut batch = MultiVerifier::<Bls12, _, Basic, _>::new(MyCircuit, &srs, rng).unwrap();
    for _ in 0..1 {
        // No public inputs in this circuit; no s(z, y) advice supplied.
        batch.add_proof(&proof, &[/*Fr::from_str("20").unwrap()*/], |_, _| None);
    }
    assert!(batch.check_all());
    let elapsed = start.elapsed();
    println!("time to verify: {:?}", elapsed);
}
/// Checks the polynomial commitment/opening pairing equation directly:
/// commits to a small Laurent polynomial, opens it at a random point and
/// verifies e(W, h^{alpha x}) * e(g^v * W^{-z}, h^{alpha}) * e(F, h^{-x^{d-max}}) = 1.
#[test]
fn polynomial_commitment_test() {
    use crate::pairing::ff::PrimeField;
    use crate::pairing::ff::PrimeFieldRepr;
    use crate::pairing::bls12_381::{Bls12, Fr};
    use super::*;
    use crate::sonic::cs::{ConstraintSystem, LinearCombination};
    use crate::sonic::sonic::Basic;
    use rand::{thread_rng};
    use crate::pairing::{CurveAffine};
    // Test-only SRS with hardcoded (insecure) trapdoor values.
    let srs = SRS::<Bls12>::new(
        20,
        Fr::from_str("22222").unwrap(),
        Fr::from_str("33333333").unwrap(),
    );
    let mut rng = thread_rng();
    // x^-4 + x^-3 + x^-2 + x^-1 + x + x^2
    let mut poly = vec![Fr::one(), Fr::one(), Fr::one(), Fr::one(), Fr::zero(), Fr::one(), Fr::one()];
    // make commitment to the poly
    let commitment = polynomial_commitment(2, 4, 2, &srs, poly.iter());
    let point: Fr = rng.gen();
    // Evaluation starts at power -4, hence the z^{-2}... wait: seed is z^{-2}
    // squared below — i.e. the lowest-power prefactor for the evaluation.
    let mut tmp = point.inverse().unwrap();
    tmp.square();
    let value = evaluate_at_consequitive_powers(&poly, tmp, point);
    // evaluate f(z)
    // Overwrite the (zero) constant term with -f(z) so the poly becomes f(x) - f(z).
    poly[4] = value;
    poly[4].negate();
    // f(x) - f(z)
    let opening = polynomial_commitment_opening(4, 2, poly.iter(), point, &srs);
    // e(W , hα x )e(g^{v} * W{-z} , hα ) = e(F , h^{x^{−d +max}} )
    let alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
    let alpha_precomp = srs.h_positive_x_alpha[0].prepare();
    let mut neg_x_n_minus_d_precomp = srs.h_negative_x[srs.d - 2];
    neg_x_n_minus_d_precomp.negate();
    let neg_x_n_minus_d_precomp = neg_x_n_minus_d_precomp.prepare();
    // let neg_x_n_minus_d_precomp = srs.h_negative_x[0].prepare();
    let w = opening.prepare();
    // g^{v} * W^{-z}
    let mut gv = srs.g_positive_x[0].mul(value.into_repr());
    let mut z_neg = point;
    z_neg.negate();
    let w_minus_z = opening.mul(z_neg.into_repr());
    gv.add_assign(&w_minus_z);
    let gv = gv.into_affine().prepare();
    // Product of the three pairings must be the identity in Fqk.
    assert!(Bls12::final_exponentiation(&Bls12::miller_loop(&[
        (&w, &alpha_x_precomp),
        (&gv, &alpha_precomp),
        (&commitment.prepare(), &neg_x_n_minus_d_precomp),
    ])).unwrap() == <Bls12 as Engine>::Fqk::one());
}
<file_sep>/src/plonk/better_better_cs/trees/tree_hash.rs
use crate::pairing::ff::{Field, PrimeField};
/// Hash abstraction for building binary Merkle trees over prime-field elements.
pub trait BinaryTreeHasher<F: PrimeField>: Sized + Send + Sync + Clone {
    /// Digest type produced by this hasher.
    type Output: Sized + Clone + Copy + Send + Sync + PartialEq + Eq;
    /// A placeholder digest — presumably used for empty/padded tree slots;
    /// exact semantics are up to the implementor (TODO confirm at call sites).
    fn placeholder_output() -> Self::Output;
    /// Hashes a leaf given its field-element contents.
    fn leaf_hash(&self, input: &[F]) -> Self::Output;
    /// Hashes an internal node from its two child digests; `level` identifies
    /// the node's depth in the tree.
    fn node_hash(&self, input: &[Self::Output; 2], level: usize) -> Self::Output;
}
<file_sep>/src/plonk/cs/gates.rs
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use std::ops::{Add, Sub, Neg};
pub use super::variable::{Variable, Index};
/// A gate selector coefficient with the common values 0, 1 and -1
/// special-cased, so multiplying by them avoids a full field multiplication.
pub enum Coeff<F: PrimeField> {
    Zero,
    One,
    NegativeOne,
    /// Any other field element.
    Full(F),
}
impl<F: PrimeField> std::fmt::Debug for Coeff<F> {
    /// Prints the coefficient; the special variants use fixed hex-style tags.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Coeff::Zero => write!(f, "Coeff 0x0"),
            Coeff::One => write!(f, "Coeff 0x1"),
            Coeff::NegativeOne => write!(f, "Coeff -0x1"),
            Coeff::Full(c) => write!(f, "Coeff {:?}", c),
        }
    }
}
impl<F: PrimeField> Coeff<F> {
    /// Multiplies `with` in place by this coefficient, short-circuiting the
    /// 0 / 1 / -1 special cases.
    pub fn multiply(&self, with: &mut F) {
        match self {
            Coeff::Zero => *with = F::zero(),
            Coeff::One => (),
            Coeff::NegativeOne => with.negate(),
            Coeff::Full(val) => with.mul_assign(val),
        }
    }

    /// Classifies a raw field element into the most specific `Coeff` variant.
    pub fn new(coeff: F) -> Self {
        let mut minus_one = F::one();
        minus_one.negate();

        if coeff.is_zero() {
            return Coeff::<F>::Zero;
        }
        if coeff == F::one() {
            return Coeff::<F>::One;
        }
        if coeff == minus_one {
            return Coeff::<F>::NegativeOne;
        }

        Coeff::<F>::Full(coeff)
    }
}
// `Coeff` is trivially copyable; `Clone` simply delegates to the `Copy` semantics.
impl<F: PrimeField> Copy for Coeff<F> {}
impl<F: PrimeField> Clone for Coeff<F> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<F: PrimeField> Neg for Coeff<F> {
    type Output = Coeff<F>;

    /// Negation maps 0 -> 0, 1 -> -1, -1 -> 1, and negates the wrapped value
    /// for the general case.
    fn neg(self) -> Self {
        match self {
            Coeff::Full(mut value) => {
                value.negate();
                Coeff::Full(value)
            },
            Coeff::Zero => Coeff::Zero,
            Coeff::One => Coeff::NegativeOne,
            Coeff::NegativeOne => Coeff::One,
        }
    }
}
/// A single PLONK gate: three wires (a, b, c) plus the selector coefficients
/// of the constraint q_l*a + q_r*b + q_o*c + q_m*a*b + q_c = 0.
#[derive(Copy, Clone)]
pub struct Gate<F: PrimeField> {
    a_wire: Variable,
    b_wire: Variable,
    c_wire: Variable,
    // Selectors: left, right, output, multiplication and constant terms.
    pub(crate) q_l: Coeff<F>,
    pub(crate) q_r: Coeff<F>,
    pub(crate) q_o: Coeff<F>,
    pub(crate) q_m: Coeff<F>,
    pub(crate) q_c: Coeff<F>,
}
impl<F: PrimeField> std::fmt::Debug for Gate<F> {
    // One-line dump of the three wires and all five selector coefficients.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Gate A = {:?}, B = {:?}, C = {:?}, q_l = {:?}, q_r = {:?}, q_o = {:?}, q_m = {:?}, q_c = {:?}",
            self.a_wire, self.b_wire, self.c_wire, self.q_l, self.q_r, self.q_o, self.q_m, self.q_c)
    }
}
impl<F: PrimeField> Gate<F> {
    /// All-zero gate wired to the dummy auxiliary variable 0.
    pub(crate) fn empty() -> Self {
        Self {
            a_wire: Variable(Index::Aux(0)),
            b_wire: Variable(Index::Aux(0)),
            c_wire: Variable(Index::Aux(0)),
            q_l: Coeff::<F>::Zero,
            q_r: Coeff::<F>::Zero,
            q_o: Coeff::<F>::Zero,
            q_m: Coeff::<F>::Zero,
            q_c: Coeff::<F>::Zero,
        }
    }

    /// Left input wire.
    pub(crate) fn a_wire(&self) -> &Variable {
        &self.a_wire
    }

    /// Right input wire.
    pub(crate) fn b_wire(&self) -> &Variable {
        &self.b_wire
    }

    /// Output wire.
    pub(crate) fn c_wire(&self) -> &Variable {
        &self.c_wire
    }

    /// Gate enforcing a * b - c = 0 (q_m = 1, q_o = -1).
    pub(crate) fn new_multiplication_gate(variables: (Variable, Variable, Variable)) -> Self {
        Self {
            a_wire: variables.0,
            b_wire: variables.1,
            c_wire: variables.2,
            q_l: Coeff::<F>::Zero,
            q_r: Coeff::<F>::Zero,
            q_o: Coeff::<F>::NegativeOne,
            q_m: Coeff::<F>::One,
            q_c: Coeff::<F>::Zero,
        }
    }

    /// Gate enforcing a + b - c = 0 (q_l = q_r = 1, q_o = -1).
    pub(crate) fn new_addition_gate(variables: (Variable, Variable, Variable)) -> Self {
        Self {
            a_wire: variables.0,
            b_wire: variables.1,
            c_wire: variables.2,
            q_l: Coeff::<F>::One,
            q_r: Coeff::<F>::One,
            q_o: Coeff::<F>::NegativeOne,
            q_m: Coeff::<F>::Zero,
            q_c: Coeff::<F>::Zero,
        }
    }

    /// Gate enforcing the linear combination
    /// q_l*a + q_r*b + q_o*c + constant = 0.
    pub(crate) fn new_lc_gate(variables: (Variable, Variable, Variable), coeffs: (F, F, F), constant: F) -> Self {
        let (a_coeff, b_coeff, c_coeff) = coeffs;
        Self {
            a_wire: variables.0,
            b_wire: variables.1,
            c_wire: variables.2,
            q_l: Coeff::<F>::Full(a_coeff),
            q_r: Coeff::<F>::Full(b_coeff),
            q_o: Coeff::<F>::Full(c_coeff),
            q_m: Coeff::<F>::Zero,
            q_c: Coeff::<F>::new(constant),
        }
    }

    /// Gate enforcing q_l*a + q_r*b + q_o*c = 0 (no constant term).
    pub(crate) fn new_enforce_zero_gate(variables: (Variable, Variable, Variable), coeffs: (F, F, F)) -> Self {
        let (a_coeff, b_coeff, c_coeff) = coeffs;
        Self {
            a_wire: variables.0,
            b_wire: variables.1,
            c_wire: variables.2,
            q_l: Coeff::<F>::Full(a_coeff),
            q_r: Coeff::<F>::Full(b_coeff),
            q_o: Coeff::<F>::Full(c_coeff),
            q_m: Coeff::<F>::Zero,
            q_c: Coeff::<F>::Zero,
        }
    }

    /// Booleanity gate: wires the same `variable` into both a and b and
    /// enforces a*a - a = 0, i.e. the value is 0 or 1.
    pub(crate) fn new_enforce_boolean_gate(variable: Variable, dummy_variable: Variable) -> Self {
        Self {
            a_wire: variable,
            b_wire: variable,
            c_wire: dummy_variable,
            q_l: Coeff::<F>::NegativeOne,
            q_r: Coeff::<F>::Zero,
            q_o: Coeff::<F>::Zero,
            q_m: Coeff::<F>::One,
            q_c: Coeff::<F>::Zero,
        }
    }

    /// No-op gate: every wire points at `dummy_variable`, all selectors zero.
    pub(crate) fn new_empty_gate(dummy_variable: Variable) -> Self {
        Self {
            a_wire: dummy_variable,
            b_wire: dummy_variable,
            c_wire: dummy_variable,
            q_l: Coeff::<F>::Zero,
            q_r: Coeff::<F>::Zero,
            q_o: Coeff::<F>::Zero,
            q_m: Coeff::<F>::Zero,
            q_c: Coeff::<F>::Zero,
        }
    }

    /// Gate enforcing a - constant = 0 via q_l = 1, q_c = -constant; when
    /// `constant` is `None` the constant selector is simply zero.
    pub(crate) fn new_enforce_constant_gate(variable: Variable, constant: Option<F>, dummy_variable: Variable) -> Self {
        // Reuse `Coeff::new` for the 0/1/-1 classification of -constant
        // instead of duplicating that logic inline (the previous version
        // re-implemented exactly the same comparisons).
        let q_c = match constant {
            Some(constant) => {
                let mut const_negated = constant;
                const_negated.negate();
                Coeff::new(const_negated)
            },
            None => Coeff::<F>::Zero,
        };
        Self {
            a_wire: variable,
            b_wire: dummy_variable,
            c_wire: dummy_variable,
            q_l: Coeff::<F>::One,
            q_r: Coeff::<F>::Zero,
            q_o: Coeff::<F>::Zero,
            q_m: Coeff::<F>::Zero,
            q_c,
        }
    }

    /// Fully general gate from raw selector values; each selector is
    /// classified through `Coeff::new`.
    pub(crate) fn new_gate(variables: (Variable, Variable, Variable),
        coeffs: (F, F, F, F, F)) -> Self {
        let (q_l, q_r, q_o, q_m, q_c) = coeffs;
        Self {
            a_wire: variables.0,
            b_wire: variables.1,
            c_wire: variables.2,
            q_l: Coeff::new(q_l),
            q_r: Coeff::new(q_r),
            q_o: Coeff::new(q_o),
            q_m: Coeff::new(q_m),
            q_c: Coeff::new(q_c),
        }
    }
}
<file_sep>/src/plonk/better_better_cs/mod.rs
pub mod cs;
pub mod lookup_tables;
pub mod utils;
pub mod data_structures;
pub mod setup;
pub mod proof;
pub mod verifier;
pub mod trees;
pub mod gates;
#[cfg(feature = "redshift")]
pub mod redshift;<file_sep>/src/sonic/sonic/mod.rs
mod adaptor;
mod synthesis_drivers;
mod backends;
mod constraint_systems;
pub use self::adaptor::{Adaptor, AdaptorCircuit};
pub use self::synthesis_drivers::{Basic, Nonassigning, Permutation3};
pub use self::backends::{CountNandQ, CountN, Preprocess, Wires};
pub use self::constraint_systems::{NonassigningSynthesizer, Synthesizer, PermutationSynthesizer};
pub const M: usize = 3;<file_sep>/src/plonk/prover/homomorphic.rs
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::{SynthesisError};
use std::marker::PhantomData;
use crate::plonk::cs::gates::*;
use crate::plonk::cs::*;
use crate::worker::*;
use super::polynomials::*;
use super::domains::*;
use crate::plonk::commitments::*;
use crate::plonk::commitments::transcript::*;
use crate::plonk::utils::*;
/// A PLONK proof built over a non-homomorphic commitment scheme `S`.
///
/// Stores the claimed evaluations of the setup/permutation polynomials and
/// the grand-product accumulators at the challenge point (and its shift),
/// plus the two batched opening proofs.
pub struct PlonkNonhomomorphicProof<E: Engine, S: CommitmentScheme<E::Fr> >{
    // Selector polynomial evaluations at z.
    q_l_opening_value: E::Fr,
    q_r_opening_value: E::Fr,
    q_o_opening_value: E::Fr,
    q_m_opening_value: E::Fr,
    q_c_opening_value: E::Fr,
    // Identity and copy-permutation polynomial evaluations at z.
    s_id_opening_value: E::Fr,
    sigma_1_opening_value: E::Fr,
    sigma_2_opening_value: E::Fr,
    sigma_3_opening_value: E::Fr,
    // Grand-product accumulators evaluated at the shifted point z*omega.
    z_1_shifted_opening_value: E::Fr,
    z_2_shifted_opening_value: E::Fr,
    // Linearization polynomial evaluation at z.
    r_opening_value: E::Fr,
    unshifted_openings_proof: S::OpeningProof,
    shifted_openings_proof: S::OpeningProof,
}
/// Produces a PLONK proof for `circuit` using a non-homomorphic commitment
/// scheme `S` and Fiat-Shamir transcript `T`.
///
/// NOTE(review): this function is visibly unfinished — see the note at the
/// very end: the opening values list is cut off mid-statement (missing `;`)
/// and the function returns `Ok(())` instead of building the declared
/// `PlonkNonhomomorphicProof`. As written it does not compile.
pub fn prove_nonhomomorphic<E: Engine, S: CommitmentScheme<E::Fr>, T: Transcript<E::Fr, Input = S::Commitment>, C: Circuit<E>>(circuit: &C, committer: &S) -> Result<PlonkNonhomomorphicProof<E, S>, SynthesisError> {
    let mut assembly = ProvingAssembly::<E>::new();
    circuit.synthesize(&mut assembly)?;
    assembly.finalize();
    let worker = Worker::new();
    let mut transcript = T::new();
    let n = assembly.input_gates.len() + assembly.aux_gates.len();
    // we need n+1 to be a power of two and can not have n to be power of two
    let required_domain_size = n + 1;
    assert!(required_domain_size.is_power_of_two());
    // Wire value assignments, then interpolated wire polynomials a(X), b(X), c(X).
    let (w_l, w_r, w_o) = assembly.make_wire_assingments();
    let w_l = Polynomial::<E::Fr, Values>::from_values_unpadded(w_l)?;
    let w_r = Polynomial::<E::Fr, Values>::from_values_unpadded(w_r)?;
    let w_o = Polynomial::<E::Fr, Values>::from_values_unpadded(w_o)?;
    let a_poly = w_l.clone_padded_to_domain()?.ifft(&worker);
    let b_poly = w_r.clone_padded_to_domain()?.ifft(&worker);
    let c_poly = w_o.clone_padded_to_domain()?.ifft(&worker);
    let (a_commitment, a_aux_data) = committer.commit_single(&a_poly);
    let (b_commitment, b_aux_data) = committer.commit_single(&b_poly);
    let (c_commitment, c_aux_data) = committer.commit_single(&c_poly);
    transcript.commit_input(&a_commitment);
    transcript.commit_input(&b_commitment);
    transcript.commit_input(&c_commitment);
    // TODO: Add public inputs
    println!("Committed A, B and C polys");
    // Permutation-argument challenges.
    let beta = transcript.get_challenge();
    let gamma = transcript.get_challenge();
    let mut w_l_plus_gamma = w_l.clone();
    w_l_plus_gamma.add_constant(&worker, &gamma);
    let mut w_r_plus_gamma = w_r.clone();
    w_r_plus_gamma.add_constant(&worker, &gamma);
    let mut w_o_plus_gamma = w_o.clone();
    w_o_plus_gamma.add_constant(&worker, &gamma);
    // Grand product z_1 over the identity permutation s_id = 1..3n.
    let z_1 = {
        let n = assembly.input_gates.len() + assembly.aux_gates.len();
        let s_id_1: Vec<_> = (1..=n).collect();
        let s_id_1 = convert_to_field_elements(&s_id_1, &worker);
        let s_id_1 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_1)?;
        let mut w_l_contribution = w_l_plus_gamma.clone();
        w_l_contribution.add_assign_scaled(&worker, &s_id_1, &beta);
        drop(s_id_1);
        let s_id_2: Vec<_> = ((n+1)..=(2*n)).collect();
        let s_id_2 = convert_to_field_elements(&s_id_2, &worker);
        let s_id_2 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_2)?;
        let mut w_r_contribution = w_r_plus_gamma.clone();
        w_r_contribution.add_assign_scaled(&worker, &s_id_2, &beta);
        drop(s_id_2);
        w_l_contribution.mul_assign(&worker, &w_r_contribution);
        drop(w_r_contribution);
        let s_id_3: Vec<_> = ((2*n+1)..=(3*n)).collect();
        let s_id_3 = convert_to_field_elements(&s_id_3, &worker);
        let s_id_3 = Polynomial::<E::Fr, Values>::from_values_unpadded(s_id_3)?;
        let mut w_o_contribution = w_o_plus_gamma.clone();
        w_o_contribution.add_assign_scaled(&worker, &s_id_3, &beta);
        drop(s_id_3);
        w_l_contribution.mul_assign(&worker, &w_o_contribution);
        drop(w_o_contribution);
        let grand_product = w_l_contribution.calculate_grand_product(&worker)?;
        // let grand_product_serial = w_l_contribution.calculate_grand_product_serial()?;
        // assert!(grand_product == grand_product_serial);
        drop(w_l_contribution);
        // Prepend 1 so the accumulator starts from the identity.
        let values = grand_product.into_coeffs();
        assert!((values.len() + 1).is_power_of_two());
        let mut prepadded = Vec::with_capacity(values.len() + 1);
        prepadded.push(E::Fr::one());
        prepadded.extend(values);
        Polynomial::<E::Fr, Values>::from_values(prepadded)?
    };
    // Grand product z_2 over the copy permutation sigma.
    let z_2 = {
        let (sigma_1, sigma_2, sigma_3) = assembly.calculate_permutations_as_in_a_paper();
        let sigma_1 = convert_to_field_elements(&sigma_1, &worker);
        let sigma_1 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_1)?;
        let mut w_l_contribution = w_l_plus_gamma.clone();
        w_l_contribution.add_assign_scaled(&worker, &sigma_1, &beta);
        drop(sigma_1);
        let sigma_2 = convert_to_field_elements(&sigma_2, &worker);
        let sigma_2 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_2)?;
        let mut w_r_contribution = w_r_plus_gamma.clone();
        w_r_contribution.add_assign_scaled(&worker, &sigma_2, &beta);
        drop(sigma_2);
        w_l_contribution.mul_assign(&worker, &w_r_contribution);
        drop(w_r_contribution);
        let sigma_3 = convert_to_field_elements(&sigma_3, &worker);
        let sigma_3 = Polynomial::<E::Fr, Values>::from_values_unpadded(sigma_3)?;
        let mut w_o_contribution = w_o_plus_gamma.clone();
        w_o_contribution.add_assign_scaled(&worker, &sigma_3, &beta);
        drop(sigma_3);
        w_l_contribution.mul_assign(&worker, &w_o_contribution);
        drop(w_o_contribution);
        let grand_product = w_l_contribution.calculate_grand_product(&worker)?;
        // let grand_product_serial = w_l_contribution.calculate_grand_product_serial()?;
        // assert!(grand_product == grand_product_serial);
        drop(w_l_contribution);
        let values = grand_product.into_coeffs();
        assert!((values.len() + 1).is_power_of_two());
        let mut prepadded = Vec::with_capacity(values.len() + 1);
        prepadded.push(E::Fr::one());
        prepadded.extend(values);
        let z_2 = Polynomial::<E::Fr, Values>::from_values(prepadded)?;
        z_2
    };
    let z_1 = z_1.ifft(&worker);
    let z_2 = z_2.ifft(&worker);
    let (z_1_commitment, z_1_aux) = committer.commit_single(&z_1);
    let (z_2_commitment, z_2_aux) = committer.commit_single(&z_2);
    transcript.commit_input(&z_1_commitment);
    transcript.commit_input(&z_2_commitment);
    // z_i(X * omega): multiplying each coefficient by omega^k shifts the
    // evaluation domain by one step.
    let mut z_1_shifted = z_1.clone();
    z_1_shifted.distribute_powers(&worker, z_1.omega);
    let mut z_2_shifted = z_2.clone();
    z_2_shifted.distribute_powers(&worker, z_2.omega);
    // Low-degree extensions on a 4x coset for pointwise quotient arithmetic.
    let a_lde = a_poly.clone().coset_lde(&worker, 4)?;
    let b_lde = b_poly.clone().coset_lde(&worker, 4)?;
    let c_lde = c_poly.clone().coset_lde(&worker, 4)?;
    let (q_l, q_r, q_o, q_m, q_c, s_id, sigma_1, sigma_2, sigma_3) = assembly.output_setup_polynomials(&worker)?;
    let q_l_lde = q_l.clone().coset_lde(&worker, 4)?;
    let q_r_lde = q_r.clone().coset_lde(&worker, 4)?;
    let q_o_lde = q_o.clone().coset_lde(&worker, 4)?;
    let q_m_lde = q_m.clone().coset_lde(&worker, 4)?;
    let q_c_lde = q_c.clone().coset_lde(&worker, 4)?;
    let s_id_lde = s_id.clone().coset_lde(&worker, 4)?;
    let sigma_1_lde = sigma_1.clone().coset_lde(&worker, 4)?;
    let sigma_2_lde = sigma_2.clone().coset_lde(&worker, 4)?;
    let sigma_3_lde = sigma_3.clone().coset_lde(&worker, 4)?;
    let n_fe = E::Fr::from_str(&n.to_string()).expect("must be valid field element");
    let mut two_n_fe = n_fe;
    two_n_fe.double();
    // Quotient-aggregation challenge; each constraint below is weighted by a
    // successive power of alpha (via repeated scaling of the inverse vanishing poly).
    let alpha = transcript.get_challenge();
    let mut vanishing_poly_inverse = assembly.calculate_inverse_vanishing_polynomial_in_a_coset(&worker, q_c_lde.size(), required_domain_size.next_power_of_two())?;
    // Gate-constraint contribution to the quotient t(X).
    let mut t_1 = {
        let mut t_1 = q_c_lde;
        let mut q_l_by_a = q_l_lde;
        q_l_by_a.mul_assign(&worker, &a_lde);
        t_1.add_assign(&worker, &q_l_by_a);
        drop(q_l_by_a);
        let mut q_r_by_b = q_r_lde;
        q_r_by_b.mul_assign(&worker, &b_lde);
        t_1.add_assign(&worker, &q_r_by_b);
        drop(q_r_by_b);
        let mut q_o_by_c = q_o_lde;
        q_o_by_c.mul_assign(&worker, &c_lde);
        t_1.add_assign(&worker, &q_o_by_c);
        drop(q_o_by_c);
        let mut q_m_by_ab = q_m_lde;
        q_m_by_ab.mul_assign(&worker, &a_lde);
        q_m_by_ab.mul_assign(&worker, &b_lde);
        t_1.add_assign(&worker, &q_m_by_ab);
        drop(q_m_by_ab);
        vanishing_poly_inverse.scale(&worker, alpha);
        t_1.mul_assign(&worker, &vanishing_poly_inverse);
        t_1
    };
    let z_1_lde = z_1.clone().coset_lde(&worker, 4)?;
    let z_1_shifted_lde = z_1_shifted.clone().coset_lde(&worker, 4)?;
    let z_2_lde = z_2.clone().coset_lde(&worker, 4)?;
    let z_2_shifted_lde = z_2_shifted.clone().coset_lde(&worker, 4)?;
    // Recurrence for z_1 (identity permutation): z_1(X) * prod(...) - z_1(X*omega) must vanish.
    {
        // TODO: May be optimize number of additions
        let mut contrib_z_1 = z_1_lde.clone();
        let mut s_id_by_beta = s_id_lde;
        s_id_by_beta.scale(&worker, beta);
        let mut n_by_beta = n_fe;
        n_by_beta.mul_assign(&beta);
        let mut a_perm = s_id_by_beta.clone();
        a_perm.add_constant(&worker, &gamma);
        a_perm.add_assign(&worker, &a_lde);
        contrib_z_1.mul_assign(&worker, &a_perm);
        drop(a_perm);
        // Shift the identity indices by n (then 2n) for the b and c columns.
        s_id_by_beta.add_constant(&worker, &n_by_beta);
        let mut b_perm = s_id_by_beta.clone();
        b_perm.add_constant(&worker, &gamma);
        b_perm.add_assign(&worker, &b_lde);
        contrib_z_1.mul_assign(&worker, &b_perm);
        drop(b_perm);
        s_id_by_beta.add_constant(&worker, &n_by_beta);
        let mut c_perm = s_id_by_beta;
        c_perm.add_constant(&worker, &gamma);
        c_perm.add_assign(&worker, &c_lde);
        contrib_z_1.mul_assign(&worker, &c_perm);
        drop(c_perm);
        contrib_z_1.sub_assign(&worker, &z_1_shifted_lde);
        vanishing_poly_inverse.scale(&worker, alpha);
        contrib_z_1.mul_assign(&worker, &vanishing_poly_inverse);
        t_1.add_assign(&worker, &contrib_z_1);
    }
    // Recurrence for z_2 (copy permutation sigma), same shape as above.
    {
        // TODO: May be optimize number of additions
        let mut contrib_z_2 = z_2_lde.clone();
        let mut a_perm = sigma_1_lde;
        a_perm.scale(&worker, beta);
        a_perm.add_constant(&worker, &gamma);
        a_perm.add_assign(&worker, &a_lde);
        contrib_z_2.mul_assign(&worker, &a_perm);
        drop(a_perm);
        let mut b_perm = sigma_2_lde;
        b_perm.scale(&worker, beta);
        b_perm.add_constant(&worker, &gamma);
        b_perm.add_assign(&worker, &b_lde);
        contrib_z_2.mul_assign(&worker, &b_perm);
        drop(b_perm);
        let mut c_perm = sigma_3_lde;
        c_perm.scale(&worker, beta);
        c_perm.add_constant(&worker, &gamma);
        c_perm.add_assign(&worker, &c_lde);
        contrib_z_2.mul_assign(&worker, &c_perm);
        drop(c_perm);
        contrib_z_2.sub_assign(&worker, &z_2_shifted_lde);
        vanishing_poly_inverse.scale(&worker, alpha);
        contrib_z_2.mul_assign(&worker, &vanishing_poly_inverse);
        t_1.add_assign(&worker, &contrib_z_2);
    }
    drop(a_lde);
    drop(b_lde);
    drop(c_lde);
    let l_0 = assembly.calculate_lagrange_poly(&worker, required_domain_size.next_power_of_two(), 0)?;
    let l_n_minus_one = assembly.calculate_lagrange_poly(&worker, required_domain_size.next_power_of_two(), n-1)?;
    // Boundary constraint: z_1 and z_2 agree at the last domain point.
    {
        let mut z_1_minus_z_2_shifted = z_1_shifted_lde.clone();
        z_1_minus_z_2_shifted.sub_assign(&worker, &z_2_shifted_lde);
        let l = l_n_minus_one.clone().coset_lde(&worker, 4)?;
        z_1_minus_z_2_shifted.mul_assign(&worker, &l);
        drop(l);
        vanishing_poly_inverse.scale(&worker, alpha);
        z_1_minus_z_2_shifted.mul_assign(&worker, &vanishing_poly_inverse);
        t_1.add_assign(&worker, &z_1_minus_z_2_shifted);
    }
    // Boundary constraint: z_1 and z_2 agree at the first domain point.
    {
        let mut z_1_minus_z_2= z_1_lde.clone();
        z_1_minus_z_2.sub_assign(&worker, &z_2_lde);
        let l = l_0.clone().coset_lde(&worker, 4)?;
        z_1_minus_z_2.mul_assign(&worker, &l);
        drop(l);
        vanishing_poly_inverse.scale(&worker, alpha);
        z_1_minus_z_2.mul_assign(&worker, &vanishing_poly_inverse);
        t_1.add_assign(&worker, &z_1_minus_z_2);
    }
    let t_poly = t_1.icoset_fft(&worker);
    let degree = get_degree::<E>(&t_poly);
    assert!(degree <= 3*n);
    // Trailing-zero scan of the coefficient vector (debug helper).
    fn get_degree<E:Engine>(poly: &Polynomial<E::Fr, Coefficients>) -> usize {
        let mut degree = poly.as_ref().len() - 1;
        for c in poly.as_ref().iter().rev() {
            if c.is_zero() {
                degree -= 1;
            } else {
                break;
            }
        }
        println!("Degree = {}", degree);
        degree
    }
    let (t_commitment, t_2_aux) = committer.commit_single(&t_poly);
    transcript.commit_input(&t_commitment);
    // Evaluation challenge.
    let z = transcript.get_challenge();
    // this is a sanity check
    let a_at_z = a_poly.evaluate_at(&worker, z);
    let b_at_z = b_poly.evaluate_at(&worker, z);
    let c_at_z = c_poly.evaluate_at(&worker, z);
    let q_l_at_z = q_l.evaluate_at(&worker, z);
    let q_r_at_z = q_r.evaluate_at(&worker, z);
    let q_o_at_z = q_o.evaluate_at(&worker, z);
    let q_m_at_z = q_m.evaluate_at(&worker, z);
    let q_c_at_z = q_c.evaluate_at(&worker, z);
    let s_id_at_z = s_id.evaluate_at(&worker, z);
    let sigma_1_at_z = sigma_1.evaluate_at(&worker, z);
    let sigma_2_at_z = sigma_2.evaluate_at(&worker, z);
    let sigma_3_at_z = sigma_3.evaluate_at(&worker, z);
    // `inverse_vanishing_at_z` gets multiplied by alpha once per constraint
    // below, mirroring the coset computation; keep an unscaled copy too.
    let mut inverse_vanishing_at_z = assembly.evaluate_inverse_vanishing_poly(required_domain_size.next_power_of_two(), z);
    let inverse_vanishing_at_z_no_alphas = inverse_vanishing_at_z;
    let z_1_at_z = z_1.evaluate_at(&worker, z);
    let z_2_at_z = z_2.evaluate_at(&worker, z);
    let z_1_shifted_at_z = z_1_shifted.evaluate_at(&worker, z);
    let z_2_shifted_at_z = z_2_shifted.evaluate_at(&worker, z);
    let l_0_at_z = l_0.evaluate_at(&worker, z);
    let l_n_minus_one_at_z = l_n_minus_one.evaluate_at(&worker, z);
    let t_at_z = t_poly.evaluate_at(&worker, z);
    {
        transcript.commit_field_element(&a_at_z);
        transcript.commit_field_element(&b_at_z);
        transcript.commit_field_element(&c_at_z);
        transcript.commit_field_element(&q_l_at_z);
        transcript.commit_field_element(&q_r_at_z);
        transcript.commit_field_element(&q_o_at_z);
        transcript.commit_field_element(&q_m_at_z);
        transcript.commit_field_element(&q_c_at_z);
        transcript.commit_field_element(&s_id_at_z);
        transcript.commit_field_element(&sigma_1_at_z);
        transcript.commit_field_element(&sigma_2_at_z);
        transcript.commit_field_element(&sigma_3_at_z);
        transcript.commit_field_element(&t_at_z);
        transcript.commit_field_element(&z_1_shifted_at_z);
        transcript.commit_field_element(&z_2_shifted_at_z);
    }
    // NOTE(review): these two challenges are drawn but never used below —
    // the batched-opening step they were meant for was never written.
    let unshifted_opening_aggregation_challenge = transcript.get_challenge();
    let shifted_opening_aggregation_challenge = transcript.get_challenge();
    // this is a sanity check
    {
        let mut t_1 = {
            let mut res = q_c_at_z;
            let mut tmp = q_l_at_z;
            tmp.mul_assign(&a_at_z);
            res.add_assign(&tmp);
            let mut tmp = q_r_at_z;
            tmp.mul_assign(&b_at_z);
            res.add_assign(&tmp);
            let mut tmp = q_o_at_z;
            tmp.mul_assign(&c_at_z);
            res.add_assign(&tmp);
            let mut tmp = q_m_at_z;
            tmp.mul_assign(&a_at_z);
            tmp.mul_assign(&b_at_z);
            res.add_assign(&tmp);
            inverse_vanishing_at_z.mul_assign(&alpha);
            res.mul_assign(&inverse_vanishing_at_z);
            res
        };
        {
            let mut res = z_1_at_z;
            let mut tmp = s_id_at_z;
            tmp.mul_assign(&beta);
            tmp.add_assign(&a_at_z);
            tmp.add_assign(&gamma);
            res.mul_assign(&tmp);
            let mut tmp = s_id_at_z;
            tmp.add_assign(&n_fe);
            tmp.mul_assign(&beta);
            tmp.add_assign(&b_at_z);
            tmp.add_assign(&gamma);
            res.mul_assign(&tmp);
            let mut tmp = s_id_at_z;
            tmp.add_assign(&two_n_fe);
            tmp.mul_assign(&beta);
            tmp.add_assign(&c_at_z);
            tmp.add_assign(&gamma);
            res.mul_assign(&tmp);
            res.sub_assign(&z_1_shifted_at_z);
            inverse_vanishing_at_z.mul_assign(&alpha);
            res.mul_assign(&inverse_vanishing_at_z);
            t_1.add_assign(&res);
        }
        {
            let mut res = z_2_at_z;
            let mut tmp = sigma_1_at_z;
            tmp.mul_assign(&beta);
            tmp.add_assign(&a_at_z);
            tmp.add_assign(&gamma);
            res.mul_assign(&tmp);
            let mut tmp = sigma_2_at_z;
            tmp.mul_assign(&beta);
            tmp.add_assign(&b_at_z);
            tmp.add_assign(&gamma);
            res.mul_assign(&tmp);
            let mut tmp = sigma_3_at_z;
            tmp.mul_assign(&beta);
            tmp.add_assign(&c_at_z);
            tmp.add_assign(&gamma);
            res.mul_assign(&tmp);
            res.sub_assign(&z_2_shifted_at_z);
            inverse_vanishing_at_z.mul_assign(&alpha);
            res.mul_assign(&inverse_vanishing_at_z);
            t_1.add_assign(&res);
        }
        {
            let mut res = z_1_shifted_at_z;
            res.sub_assign(&z_2_shifted_at_z);
            res.mul_assign(&l_n_minus_one_at_z);
            inverse_vanishing_at_z.mul_assign(&alpha);
            res.mul_assign(&inverse_vanishing_at_z);
            t_1.add_assign(&res);
        }
        {
            let mut res = z_1_at_z;
            res.sub_assign(&z_2_at_z);
            res.mul_assign(&l_0_at_z);
            inverse_vanishing_at_z.mul_assign(&alpha);
            res.mul_assign(&inverse_vanishing_at_z);
            t_1.add_assign(&res);
        }
        assert_eq!(t_at_z, t_1);
    }
    // now compute linearization polynomial
    let mut r_1 = {
        let mut res = q_c;
        res.add_assign_scaled(&worker, &q_l, &a_at_z);
        res.add_assign_scaled(&worker, &q_r, &b_at_z);
        res.add_assign_scaled(&worker, &q_o, &c_at_z);
        let mut a_by_b_at_z = a_at_z;
        a_by_b_at_z.mul_assign(&b_at_z);
        res.add_assign_scaled(&worker, &q_m, &a_by_b_at_z);
        res.scale(&worker, alpha);
        res
    };
    // z_1 term, weighted by alpha^2.
    {
        let mut factor = alpha;
        factor.square();
        let mut tmp = s_id_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&a_at_z);
        tmp.add_assign(&gamma);
        factor.mul_assign(&tmp);
        let mut tmp = s_id_at_z;
        tmp.add_assign(&n_fe);
        tmp.mul_assign(&beta);
        tmp.add_assign(&b_at_z);
        tmp.add_assign(&gamma);
        factor.mul_assign(&tmp);
        let mut tmp = s_id_at_z;
        tmp.add_assign(&two_n_fe);
        tmp.mul_assign(&beta);
        tmp.add_assign(&c_at_z);
        tmp.add_assign(&gamma);
        factor.mul_assign(&tmp);
        r_1.add_assign_scaled(&worker, &z_1, &factor);
    }
    // z_2 term, weighted by alpha^3.
    {
        let mut factor = alpha;
        factor.square();
        factor.mul_assign(&alpha);
        let mut tmp = sigma_1_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&a_at_z);
        tmp.add_assign(&gamma);
        factor.mul_assign(&tmp);
        let mut tmp = sigma_2_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&b_at_z);
        tmp.add_assign(&gamma);
        factor.mul_assign(&tmp);
        let mut tmp = sigma_3_at_z;
        tmp.mul_assign(&beta);
        tmp.add_assign(&c_at_z);
        tmp.add_assign(&gamma);
        factor.mul_assign(&tmp);
        r_1.add_assign_scaled(&worker, &z_2, &factor);
    }
    // (z_1 - z_2) * L_0 term, weighted by alpha^5.
    {
        let mut factor = alpha;
        factor.square();
        factor.square();
        factor.mul_assign(&alpha);
        factor.mul_assign(&l_0_at_z);
        let mut tmp = z_1;
        tmp.sub_assign(&worker, &z_2);
        r_1.add_assign_scaled(&worker, &tmp, &factor);
    }
    let (r_commitment, r_aux_data) = committer.commit_single(&r_1);
    let r_at_z = r_1.evaluate_at(&worker, z);
    // another sanity check
    {
        // Reconstruct t(z) from r(z) and the shifted-evaluation terms that
        // were pulled out of the linearization.
        let reevaluated_at_at_z = {
            let mut numerator = r_at_z;
            let mut tmp = alpha;
            tmp.square();
            tmp.mul_assign(&z_1_shifted_at_z);
            numerator.sub_assign(&tmp);
            let mut tmp = alpha;
            tmp.square();
            tmp.mul_assign(&alpha);
            tmp.mul_assign(&z_2_shifted_at_z);
            numerator.sub_assign(&tmp);
            let mut z_1_shifted_minus_z_2_shifted = z_1_shifted_at_z;
            z_1_shifted_minus_z_2_shifted.sub_assign(&z_2_shifted_at_z);
            let mut tmp = alpha;
            tmp.square();
            tmp.square();
            tmp.mul_assign(&l_n_minus_one_at_z);
            tmp.mul_assign(&z_1_shifted_minus_z_2_shifted);
            numerator.add_assign(&tmp);
            numerator.mul_assign(&inverse_vanishing_at_z_no_alphas);
            numerator
        };
        assert_eq!(t_at_z, reevaluated_at_at_z);
    }
    // follow the order from the paper
    // NOTE(review): unfinished from here — the statement below is missing a
    // terminating `;`, the list of opening values is truncated, and `Ok(())`
    // does not match the declared `PlonkNonhomomorphicProof` return type.
    // The opening proofs are never produced. This does not compile as written.
    let unshifted_opening_values = vec![t_at_z, r_at_z, a_at_z, b_at_z, c_at_z, ]
    Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::{SynthesisError};
use std::marker::PhantomData;
use crate::plonk::cs::gates::*;
use crate::plonk::cs::*;
// Minimal 3-wire circuit used by the tests below: witnesses a=10, b=20, c=200
// with constraints 2a - b = 0, 10b - c = 0 and a * b = c.
struct TestCircuit<E:Engine>{
_marker: PhantomData<E>
}
impl<E: Engine> Circuit<E> for TestCircuit<E> {
fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
let a = cs.alloc(|| {
Ok(E::Fr::from_str("10").unwrap())
})?;
println!("A = {:?}", a);
let b = cs.alloc(|| {
Ok(E::Fr::from_str("20").unwrap())
})?;
println!("B = {:?}", b);
let c = cs.alloc(|| {
Ok(E::Fr::from_str("200").unwrap())
})?;
println!("C = {:?}", c);
let one = E::Fr::one();
let mut two = one;
two.double();
let mut negative_one = one;
negative_one.negate();
// 2*a - b = 0
cs.enforce_zero_2((a, b), (two, negative_one))?;
let ten = E::Fr::from_str("10").unwrap();
// 10*b - c = 0
cs.enforce_zero_2((b, c), (ten, negative_one))?;
// a * b = c
cs.enforce_mul_3((a, b, c))?;
Ok(())
}
}
// Synthesizes the trivial circuit and manually re-derives the permutation
// grand-product argument: the running products over f (identity ordering)
// and g (sigma ordering) must agree at the start and at the end.
#[test]
fn test_trivial_circuit() {
use crate::pairing::bn256::{Bn256, Fr};
let mut assembly = GeneratorAssembly::<Bn256>::new();
let circuit = TestCircuit::<Bn256> {
_marker: PhantomData
};
circuit.synthesize(&mut assembly).expect("must work");
println!("{:?}", assembly);
assembly.finalize();
let (f_l, f_r, f_o) = assembly.make_wire_assingments();
let (sigma_1, sigma_2, sigma_3) = assembly.calculate_permutations_as_in_a_paper();
let num_gates = assembly.num_gates();
// Identity labels: 1..=n for left wires, n+1..=2n for right, 2n+1..=3n for output.
let id_1: Vec<_> = (1..=num_gates).collect();
let id_2: Vec<_> = ((num_gates+1)..=(2*num_gates)).collect();
let id_3: Vec<_> = ((2*num_gates + 1)..=(3*num_gates)).collect();
// Arbitrary fixed challenges for the manual check.
let beta = Fr::from_str("15").unwrap();
let gamma = Fr::from_str("4").unwrap();
// f/g terms per wire column: value + beta*label + gamma (labels are
// identity indices for f and sigma-permuted indices for g).
let mut f_1_poly = vec![];
let mut g_1_poly = vec![];
for (i, el) in f_l.iter().enumerate() {
let mut tmp = Fr::from_str(&id_1[i].to_string()).unwrap();
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&el);
f_1_poly.push(tmp);
}
for (i, el) in f_l.iter().enumerate() {
let mut tmp = Fr::from_str(&sigma_1[i].to_string()).unwrap();
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&el);
g_1_poly.push(tmp);
}
let mut f_2_poly = vec![];
let mut g_2_poly = vec![];
for (i, el) in f_r.iter().enumerate() {
let mut tmp = Fr::from_str(&id_2[i].to_string()).unwrap();
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&el);
f_2_poly.push(tmp);
}
for (i, el) in f_r.iter().enumerate() {
let mut tmp = Fr::from_str(&sigma_2[i].to_string()).unwrap();
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&el);
g_2_poly.push(tmp);
}
let mut f_3_poly = vec![];
let mut g_3_poly = vec![];
for (i, el) in f_o.iter().enumerate() {
let mut tmp = Fr::from_str(&id_3[i].to_string()).unwrap();
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&el);
f_3_poly.push(tmp);
}
for (i, el) in f_o.iter().enumerate() {
let mut tmp = Fr::from_str(&sigma_3[i].to_string()).unwrap();
tmp.mul_assign(&beta);
tmp.add_assign(&gamma);
tmp.add_assign(&el);
g_3_poly.push(tmp);
}
// Per-gate products across the three columns.
let mut f_poly = vec![];
let mut g_poly = vec![];
for i in 0..f_1_poly.len() {
let mut tmp = f_1_poly[i];
tmp.mul_assign(&f_2_poly[i]);
tmp.mul_assign(&f_3_poly[i]);
f_poly.push(tmp);
}
for i in 0..g_1_poly.len() {
let mut tmp = g_1_poly[i];
tmp.mul_assign(&g_2_poly[i]);
tmp.mul_assign(&g_3_poly[i]);
g_poly.push(tmp);
}
// Running (grand) products, with an initial 1.
let mut tmp = Fr::one();
let mut f_prime = vec![tmp];
for el in f_poly.iter() {
tmp.mul_assign(&el);
f_prime.push(tmp);
}
let mut tmp = Fr::one();
let mut g_prime = vec![tmp];
for el in g_poly.iter() {
tmp.mul_assign(&el);
g_prime.push(tmp);
}
// The permutation argument holds iff both running products start equal
// and end equal.
assert!(f_prime[0] == g_prime[0]);
assert!(f_prime[num_gates] == g_prime[num_gates]);
let worker = Worker::new();
let _ = assembly.output_setup_polynomials(&worker).unwrap();
let _ = assembly.generate_proof().unwrap();
}
// Checks that the fast coset LDE agrees with the naive pad-then-coset-FFT path.
#[test]
fn test_coset_lde() {
use crate::pairing::bn256::{Bn256, Fr};
let worker = Worker::new();
let coeffs: Vec<_> = (0..4).collect();
let coeffs = convert_to_field_elements(&coeffs, &worker);
let coeffs = Polynomial::<Fr, _>::from_coeffs(coeffs).unwrap();
let mut expanded = coeffs.clone();
expanded.pad_to_size(16).unwrap();
// Naive: zero-pad to the target size, then a single coset FFT.
let naive = expanded.coset_fft(&worker);
// Fast: low-degree extension by factor 4 directly.
let fast = coeffs.coset_lde(&worker, 4).unwrap();
assert!(naive == fast);
}
}<file_sep>/src/sonic/unhelped/s2_proof.rs
use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use crate::pairing::{Engine, CurveProjective, CurveAffine};
use std::marker::PhantomData;
use crate::sonic::srs::SRS;
use crate::sonic::util::*;
/// Evaluator for the "trivial" s_2 part of the succinct Sonic signature of
/// correct computation; `n` is the number of multiplication gates.
#[derive(Clone)]
pub struct S2Eval<E: Engine> {
n: usize,
_marker: PhantomData<E>
}
/// Proof that the committed element `o` opens to `c_value` at x*y and to
/// `d_value` at x*y^{-1}, with the corresponding KZG-style openings.
#[derive(Clone)]
pub struct S2Proof<E: Engine> {
o: E::G1Affine,
pub c_value: E::Fr,
pub d_value: E::Fr,
pub c_opening: E::G1Affine,
pub d_opening: E::G1Affine
}
impl<E: Engine> S2Eval<E> {
/// Commitment to the all-ones polynomial of degree `n` in the alpha-shifted
/// positive powers of the SRS: sum of g^{alpha * x^i} for i = 1..=n.
pub fn calculate_commitment_element(n: usize, srs: &SRS<E>) -> E::G1Affine {
// TODO: parallelize
let mut o = E::G1::zero();
for i in 0..n {
o.add_assign_mixed(&srs.g_positive_x_alpha[i]);
}
o.into_affine()
}
pub fn new(n: usize) -> Self {
S2Eval {
n: n,
_marker: PhantomData
}
}
/// Evaluates the all-ones polynomial at the points x*y and x*y^{-1} and
/// produces openings for both, together with the reference commitment.
pub fn evaluate(&self, x: E::Fr, y: E::Fr, srs: &SRS<E>) -> S2Proof<E> {
// create a reference element first
let o = Self::calculate_commitment_element(self.n, &srs);
// poly = [c0, 1, 1, ..., 1]; c0 is patched below so the shifted
// polynomial vanishes at the opening point.
let mut poly = vec![E::Fr::one(); self.n+1];
let (c, c_opening) = {
let mut point = y;
point.mul_assign(&x);
// val = sum_{i=1}^{n} (x*y)^i
let val = evaluate_at_consequitive_powers(&poly[1..], point, point);
poly[0] = val;
poly[0].negate();
let opening = polynomial_commitment_opening(0, self.n, poly.iter(), point, &srs);
(val, opening)
};
let (d, d_opening) = {
// Same construction at the second point x * y^{-1}.
// NOTE(review): y.inverse().unwrap() panics if y == 0 — presumably
// y is a nonzero verifier challenge; confirm at call sites.
let mut point = y.inverse().unwrap();
point.mul_assign(&x);
let val = evaluate_at_consequitive_powers(&poly[1..], point, point);
poly[0] = val;
poly[0].negate();
let opening = polynomial_commitment_opening(0, self.n, poly.iter(), point, &srs);
(val, opening)
};
S2Proof {
o: o,
c_value: c,
d_value: d,
c_opening: c_opening,
d_opening: d_opening
}
}
/// Verifies both openings of the proof with two pairing checks; returns
/// false as soon as either product of pairings differs from one.
pub fn verify(x: E::Fr, y: E::Fr, proof: &S2Proof<E>, srs: &SRS<E>) -> bool {
// First check: e(C, h^{alpha*x}) * e(C^{-x*y} * g^c, h^{alpha}) * e(O, h^{-1}) == 1
let alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
let alpha_precomp = srs.h_positive_x_alpha[0].prepare();
let mut h_prep = srs.h_positive_x[0];
h_prep.negate();
let h_prep = h_prep.prepare();
let mut minus_xy = x;
minus_xy.mul_assign(&y);
minus_xy.negate();
let mut h_alpha_term = proof.c_opening.mul(minus_xy.into_repr());
let g_in_c = E::G1Affine::one().mul(proof.c_value);
h_alpha_term.add_assign(&g_in_c);
let h_alpha_term = h_alpha_term.into_affine();
let valid = E::final_exponentiation(&E::miller_loop(&[
(&proof.c_opening.prepare(), &alpha_x_precomp),
(&h_alpha_term.prepare(), &alpha_precomp),
(&proof.o.prepare(), &h_prep),
])).unwrap() == E::Fqk::one();
if !valid {
return false;
}
// Second check: same structure at the point x * y^{-1} against d.
let mut minus_x_y_inv = x;
minus_x_y_inv.mul_assign(&y.inverse().unwrap());
minus_x_y_inv.negate();
let mut h_alpha_term = proof.d_opening.mul(minus_x_y_inv.into_repr());
let g_in_d = E::G1Affine::one().mul(proof.d_value);
h_alpha_term.add_assign(&g_in_d);
let h_alpha_term = h_alpha_term.into_affine();
let valid = E::final_exponentiation(&E::miller_loop(&[
(&proof.d_opening.prepare(), &alpha_x_precomp),
(&h_alpha_term.prepare(), &alpha_precomp),
(&proof.o.prepare(), &h_prep),
])).unwrap() == E::Fqk::one();
if !valid {
return false;
}
true
}
}
// End-to-end round trip of the S2 proof: build a dummy SRS, evaluate at
// random challenges (x, y), and verify the two pairing checks pass.
#[test]
fn test_s2_proof() {
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine, CurveAffine, CurveProjective};
use crate::pairing::bls12_381::{Bls12, Fr};
use std::time::{Instant};
use crate::sonic::srs::SRS;
use crate::sonic::cs::{Circuit, ConstraintSystem, LinearCombination};
// Fixed "toxic waste" for the deterministic dummy SRS.
let srs_x = Fr::from_str("23923").unwrap();
let srs_alpha = Fr::from_str("23728792").unwrap();
println!("making srs");
let start = Instant::now();
let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
println!("done in {:?}", start.elapsed());
{
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
// Seeded RNG keeps the test deterministic.
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let x: Fr = rng.gen();
let y: Fr = rng.gen();
let proof = S2Eval::new(1024);
let proof = proof.evaluate(x, y, &srs);
let valid = S2Eval::verify(x, y, &proof, &srs);
assert!(valid);
}
}<file_sep>/src/plonk/plonk/mod.rs
pub mod prover;
pub mod generator;<file_sep>/src/sonic/cs/mod.rs
use crate::pairing::ff::{Field};
use crate::pairing::{Engine};
use crate::{SynthesisError};
use std::marker::PhantomData;
mod lc;
pub use self::lc::{Coeff, Variable, LinearCombination};
/// A circuit that can synthesize itself into any Sonic constraint system.
pub trait Circuit<E: Engine> {
fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError>;
}
/// The Sonic constraint-system interface: variable allocation, linear
/// constraints and multiplication gates.
pub trait ConstraintSystem<E: Engine> {
/// The distinguished variable fixed to the value one.
const ONE: Variable;
/// Allocates a private (witness) variable; `value` is only invoked when
/// an assignment is actually needed.
fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>;
/// Allocates a public-input variable.
fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>;
/// Enforces that the given linear combination evaluates to zero.
fn enforce_zero(&mut self, lc: LinearCombination<E>);
/// Creates a multiplication gate a * b = c and returns the three wires.
fn multiply<F>(&mut self, values: F) -> Result<(Variable, Variable, Variable), SynthesisError>
where
F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError>;
// TODO: get rid of this
/// Best-effort lookup of a variable's assignment; the default knows nothing.
fn get_value(&self, _var: Variable) -> Result<E::Fr, ()> {
Err(())
}
}
/// This is a backend for the `SynthesisDriver` to relay information about
/// the concrete circuit. One backend might just collect basic information
/// about the circuit for verification, while another actually constructs
/// a witness.
pub trait Backend<E: Engine> {
/// Handle used to address a linear constraint (e.g. a power of Y).
type LinearConstraintIndex;
/// Get the value of a variable. Can return None if we don't know.
fn get_var(&self, _variable: Variable) -> Option<E::Fr> { None }
/// Set the value of a variable. Might error if this backend expects to know it.
fn set_var<F>(&mut self, _variable: Variable, _value: F) -> Result<(), SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError> { Ok(()) }
/// Create a new multiplication gate.
fn new_multiplication_gate(&mut self) { }
/// Create a new linear constraint, returning the power of Y for caching purposes.
fn new_linear_constraint(&mut self) -> Self::LinearConstraintIndex;
/// Insert a term into a linear constraint. TODO: bad name of function
fn insert_coefficient(&mut self, _var: Variable, _coeff: Coeff<E>, _y: &Self::LinearConstraintIndex) { }
/// Compute a `LinearConstraintIndex` from `q`.
fn get_for_q(&self, q: usize) -> Self::LinearConstraintIndex;
/// Mark y^{_index} as the power of y corresponding to the public input
/// coefficient for the next public input, in the k(Y) polynomial.
fn new_k_power(&mut self, _index: usize) { }
}
/// This is an abstraction which synthesizes circuits.
pub trait SynthesisDriver {
fn synthesize<E: Engine, C: Circuit<E>, B: Backend<E>>(backend: B, circuit: &C) -> Result<(), SynthesisError>;
}<file_sep>/src/sonic/helped/poly.rs
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use crate::sonic::cs::{Backend};
use crate::sonic::cs::{Coeff, Variable, LinearCombination};
use crate::sonic::util::*;
/*
s(X, Y) = \sum\limits_{i=1}^N u_i(Y) X^{-i}
+ \sum\limits_{i=1}^N v_i(Y) X^{i}
+ \sum\limits_{i=1}^N w_i(Y) X^{i+N}
where
u_i(Y) = \sum\limits_{q=1}^Q Y^{q+N} u_{i,q}
v_i(Y) = \sum\limits_{q=1}^Q Y^{q+N} v_{i,q}
w_i(Y) = -Y^i + -Y^{-i} + \sum\limits_{q=1}^Q Y^{q+N} w_{i,q}
*/
/// Accumulator that evaluates s(X, y) at a fixed y while the circuit is
/// replayed through the `Backend` impl below; per-wire coefficients of the
/// X powers are collected in `u`, `v`, `w`.
#[derive(Clone)]
pub struct SxEval<E: Engine> {
y: E::Fr,
// current value of y^{q+N}
yqn: E::Fr,
// x^{-i} (\sum\limits_{q=1}^Q y^{q+N} u_{q,i})
u: Vec<E::Fr>,
// x^{i} (\sum\limits_{q=1}^Q y^{q+N} v_{q,i})
v: Vec<E::Fr>,
// x^{i+N} (-y^i -y^{-i} + \sum\limits_{q=1}^Q y^{q+N} w_{q,i})
w: Vec<E::Fr>,
max_n: usize,
}
impl<E: Engine> SxEval<E> {
/// Starts an evaluation at `y` for `n` multiplication gates. `yqn` is
/// seeded with y^N; each new linear constraint multiplies it by y once,
/// so constraint q sees y^{q+N}.
pub fn new(y: E::Fr, n: usize) -> Self {
let y_inv = y.inverse().unwrap(); // TODO
let yqn = y.pow(&[n as u64]);
let u = vec![E::Fr::zero(); n];
let v = vec![E::Fr::zero(); n];
// Pre-fill w with the structural terms -y^i - y^{-i}.
let mut minus_one = E::Fr::one();
minus_one.negate();
let mut w = vec![minus_one; n];
let mut w_neg = vec![minus_one; n];
mut_distribute_consequitive_powers(&mut w[..], y, y);
mut_distribute_consequitive_powers(&mut w_neg[..], y_inv, y_inv);
add_polynomials(&mut w[..], &w_neg[..]);
// Naive reference implementation of the loop above, kept for clarity:
// let mut w = vec![E::Fr::zero(); n];
// let mut tmp1 = y;
// let mut tmp2 = y_inv;
// for w in &mut w {
// let mut new = tmp1;
// new.add_assign(&tmp2);
// new.negate();
// *w = new;
// tmp1.mul_assign(&y);
// tmp2.mul_assign(&y_inv);
// }
SxEval {
y,
yqn,
u,
v,
w,
max_n: n
}
}
/// Returns (negative-power coefficients, positive-power coefficients) of
/// s(X, y) as a polynomial in X; `w` is appended after `v`.
pub fn poly(mut self) -> (Vec<E::Fr>, Vec<E::Fr>) {
self.v.extend(self.w);
(self.u, self.v)
}
/// Evaluates the accumulated s(X, y) at `x`: u over x^{-i}, v over x^{i},
/// w over x^{i+N}.
pub fn finalize(self, x: E::Fr) -> E::Fr {
let x_inv = x.inverse().unwrap(); // TODO
let mut acc = E::Fr::zero();
let tmp = x_inv;
acc.add_assign(&evaluate_at_consequitive_powers(& self.u[..], tmp, tmp));
let tmp = x;
acc.add_assign(&evaluate_at_consequitive_powers(& self.v[..], tmp, tmp));
// w starts at x^{N+1} and steps by x.
let tmp = x.pow(&[(self.v.len()+1) as u64]);
acc.add_assign(&evaluate_at_consequitive_powers(& self.w[..], tmp, x));
// Naive reference implementation, kept for clarity:
// let mut tmp = x_inv;
// for mut u in self.u {
// u.mul_assign(&tmp);
// acc.add_assign(&u);
// tmp.mul_assign(&x_inv);
// }
// let mut tmp = x;
// for mut v in self.v {
// v.mul_assign(&tmp);
// acc.add_assign(&v);
// tmp.mul_assign(&x);
// }
// for mut w in self.w {
// w.mul_assign(&tmp);
// acc.add_assign(&w);
// tmp.mul_assign(&x);
// }
acc
}
}
/// Replays circuit synthesis into the SxEval accumulator: each coefficient
/// insertion adds coeff * y^{q+N} into the slot of the touched wire.
impl<'a, E: Engine> Backend<E> for &'a mut SxEval<E> {
// Constraints are indexed by their power of y.
type LinearConstraintIndex = E::Fr;
fn new_linear_constraint(&mut self) -> E::Fr {
// Advance y^{q+N} -> y^{(q+1)+N}.
self.yqn.mul_assign(&self.y);
self.yqn
}
fn get_for_q(&self, q: usize) -> Self::LinearConstraintIndex {
self.y.pow(&[(self.max_n + q) as u64])
}
fn insert_coefficient(&mut self, var: Variable, coeff: Coeff<E>, y: &E::Fr) {
// Pick the accumulator slot for the touched wire (1-based indices).
let acc = match var {
Variable::A(index) => {
&mut self.u[index - 1]
}
Variable::B(index) => {
&mut self.v[index - 1]
}
Variable::C(index) => {
&mut self.w[index - 1]
}
};
// Fold coeff * y^{q+N} into the slot; Zero/One/NegativeOne avoid a mul.
match coeff {
Coeff::Zero => { },
Coeff::One => {
acc.add_assign(&y);
},
Coeff::NegativeOne => {
acc.sub_assign(&y);
},
Coeff::Full(mut val) => {
val.mul_assign(&y);
acc.add_assign(&val);
}
}
}
}
/*
s(X, Y) = \sum\limits_{i=1}^N \sum\limits_{q=1}^Q Y^{q+N} u_{i,q} X^{-i}
+ \sum\limits_{i=1}^N \sum\limits_{q=1}^Q Y^{q+N} v_{i,q} X^{i}
+ \sum\limits_{i=1}^N \sum\limits_{q=1}^Q Y^{q+N} w_{i,q} X^{i+N}
- \sum\limits_{i=1}^N Y^i X^{i+N}
- \sum\limits_{i=1}^N Y^{-i} X^{i+N}
*/
/// Accumulator that evaluates s(x, Y) at a fixed x while the circuit is
/// replayed; collects coefficients of positive and negative powers of Y.
pub struct SyEval<E: Engine> {
max_n: usize,
// running count of linear constraints seen so far
current_q: usize,
// x^{-1}, ..., x^{-N}
a: Vec<E::Fr>,
// x^1, ..., x^{N}
b: Vec<E::Fr>,
// x^{N+1}, ..., x^{2*N}
c: Vec<E::Fr>,
// coeffs for y^1, ..., y^{N+Q}
positive_coeffs: Vec<E::Fr>,
// coeffs for y^{-1}, y^{-2}, ..., y^{-N}
negative_coeffs: Vec<E::Fr>,
}
impl<E: Engine> SyEval<E> {
/// Precomputes the cached powers of x and seeds both coefficient vectors
/// with the structural terms -x^{i+N} (for Y^i and Y^{-i}).
pub fn new(x: E::Fr, n: usize, q: usize) -> Self {
let xinv = x.inverse().unwrap();
let mut a = vec![E::Fr::one(); n];
let mut b = vec![E::Fr::one(); n];
mut_distribute_consequitive_powers(&mut a[..], xinv, xinv);
mut_distribute_consequitive_powers(&mut b[..], x, x);
let mut c = vec![E::Fr::one(); n];
mut_distribute_consequitive_powers(&mut c[..], x.pow(&[(n+1) as u64]), x);
let mut minus_one = E::Fr::one();
minus_one.negate();
// positive_coeffs[i] = -x^{i+1+N}; the same values seed negative_coeffs.
let mut positive_coeffs = vec![minus_one; n];
mut_distribute_consequitive_powers(&mut positive_coeffs[..], x.pow(&[(n+1) as u64]), x);
let negative_coeffs = positive_coeffs.clone();
// Extend positive side with zero slots for the Q constraint powers.
positive_coeffs.resize(n + q, E::Fr::zero());
// Naive reference implementation, kept for clarity:
// let mut tmp = E::Fr::one();
// let mut a = vec![E::Fr::zero(); n];
// for a in &mut a {
// tmp.mul_assign(&xinv); // tmp = x^{-i}
// *a = tmp;
// }
// let mut tmp = E::Fr::one();
// let mut b = vec![E::Fr::zero(); n];
// for b in &mut b {
// tmp.mul_assign(&x); // tmp = x^{i}
// *b = tmp;
// }
// let mut positive_coeffs = vec![E::Fr::zero(); n + q];
// let mut negative_coeffs = vec![E::Fr::zero(); n];
// let mut c = vec![E::Fr::zero(); n];
// for ((c, positive_coeff), negative_coeff) in c.iter_mut().zip(&mut positive_coeffs).zip(&mut negative_coeffs) {
// tmp.mul_assign(&x); // tmp = x^{i+N}
// *c = tmp;
// // - \sum\limits_{i=1}^N Y^i X^{i+N}
// let mut tmp = tmp;
// tmp.negate();
// *positive_coeff = tmp;
// // - \sum\limits_{i=1}^N Y^{-i} X^{i+N}
// *negative_coeff = tmp;
// }
SyEval {
a,
b,
c,
positive_coeffs,
negative_coeffs,
current_q: 0,
max_n: n,
}
}
/// Returns (negative-power coefficients, positive-power coefficients) of
/// s(x, Y) as a polynomial in Y.
pub fn poly(self) -> (Vec<E::Fr>, Vec<E::Fr>) {
(self.negative_coeffs, self.positive_coeffs)
}
/// Evaluates the accumulated s(x, Y) at `y`.
pub fn finalize(self, y: E::Fr) -> E::Fr {
let mut acc = E::Fr::zero();
let yinv = y.inverse().unwrap(); // TODO
let positive_powers_contrib = evaluate_at_consequitive_powers(& self.positive_coeffs[..], y, y);
let negative_powers_contrib = evaluate_at_consequitive_powers(& self.negative_coeffs[..], yinv, yinv);
acc.add_assign(&positive_powers_contrib);
acc.add_assign(&negative_powers_contrib);
// Naive reference implementation, kept for clarity:
// let mut tmp = y;
// for mut coeff in self.positive_coeffs {
// coeff.mul_assign(&tmp);
// acc.add_assign(&coeff);
// tmp.mul_assign(&y);
// }
// let mut tmp = yinv;
// for mut coeff in self.negative_coeffs {
// coeff.mul_assign(&tmp);
// acc.add_assign(&coeff);
// tmp.mul_assign(&yinv);
// }
acc
}
}
impl<'a, E: Engine> Backend<E> for &'a mut SyEval<E> {
type LinearConstraintIndex = usize;
fn new_linear_constraint(&mut self) -> usize {
self.current_q += 1;
self.current_q
}
fn get_for_q(&self, q: usize) -> Self::LinearConstraintIndex {
q
}
fn insert_coefficient(&mut self, var: Variable, coeff: Coeff<E>, q: &usize) {
match var {
Variable::A(index) => {
let index = index - 1;
// Y^{q+N} += X^{-i} * coeff
let mut tmp = self.a[index];
coeff.multiply(&mut tmp);
let yindex = *q + self.max_n;
self.positive_coeffs[yindex - 1].add_assign(&tmp);
}
Variable::B(index) => {
let index = index - 1;
// Y^{q+N} += X^{i} * coeff
let mut tmp = self.b[index];
coeff.multiply(&mut tmp);
let yindex = *q + self.max_n;
self.positive_coeffs[yindex - 1].add_assign(&tmp);
}
Variable::C(index) => {
let index = index - 1;
// Y^{q+N} += X^{i+N} * coeff
let mut tmp = self.c[index];
coeff.multiply(&mut tmp);
let yindex = *q + self.max_n;
self.positive_coeffs[yindex - 1].add_assign(&tmp);
}
};
}
}<file_sep>/src/sonic/sonic/synthesis_drivers.rs
use std::marker::PhantomData;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::pairing::{Engine};
use crate::sonic::cs::{Variable, Circuit, ConstraintSystem, LinearCombination};
use crate::SynthesisError;
use crate::pairing::ff::{Field};
use super::constraint_systems::{NonassigningSynthesizer, Synthesizer, PermutationSynthesizer};
pub struct Basic;
impl SynthesisDriver for Basic {
fn synthesize<E: Engine, C: Circuit<E>, B: Backend<E>>(backend: B, circuit: &C) -> Result<(), SynthesisError> {
let mut tmp: Synthesizer<E, B> = Synthesizer::new(backend);
let one = tmp.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues");
match (one, <Synthesizer<E, B> as ConstraintSystem<E>>::ONE) {
(Variable::A(1), Variable::A(1)) => {},
_ => panic!("one variable is incorrect")
}
circuit.synthesize(&mut tmp)?;
Ok(())
}
}
pub struct Nonassigning;
impl SynthesisDriver for Nonassigning {
fn synthesize<E: Engine, C: Circuit<E>, B: Backend<E>>(backend: B, circuit: &C) -> Result<(), SynthesisError> {
let mut tmp: NonassigningSynthesizer<E, B> = NonassigningSynthesizer::new(backend);
let one = tmp.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues");
match (one, <NonassigningSynthesizer<E, B> as ConstraintSystem<E>>::ONE) {
(Variable::A(1), Variable::A(1)) => {},
_ => panic!("one variable is incorrect")
}
circuit.synthesize(&mut tmp)?;
Ok(())
}
}
/*
In order to use the fully succinct version of Sonic, the resulting s(X, Y) polynomial
must be in a more "trivial" form
s(X, Y) = X^{-N - 1} Y^N s_1(X, Y) - X^N s_2(X, Y)
where
s_1(X, Y) = \sum\limits_{i=1}^N u'_i(Y) X^{-i + N + 1}
+ \sum\limits_{i=1}^N v'_i(Y) X^{i + N + 1}
+ \sum\limits_{i=1}^N w'_i(Y) X^{i + 2N + 1}
s_2(X, Y) = \sum\limits_{i=1}^N (Y^i + Y^{-i}) X^i
u'_i(Y) = \sum\limits_{q=1}^Q Y^q u_{q,i}
v'_i(Y) = \sum\limits_{q=1}^Q Y^q v_{q,i}
w'_i(Y) = \sum\limits_{q=1}^Q Y^q w_{q,i}
such that s_1(X, Y) can be expressed as the sum of M permutation polynomials.
It is trivial for the verifier to evaluate s_2(X, Y), since polynomials of the form
x + x^2 + x^3 + ... can be evaluated with a logarithmic number of field operations.
In order to get s_1(X, Y) into the form needed, each constituent permutation polynomial
is effectively of the form
s_j(X, Y) = \sum\limits_{i=1}^{3N+1} c_i X^i Y^\sigma_j(i)
where \sigma_j(i) defines the permutation. The X^i corresponds to the wire, and the
Y^\sigma_j(i) corresponds to the index of the linear constraint.
This effectively means that within each polynomial there can be only one particular
X^i term, and so wires can only appear in M different linear combinations. Further,
because there is only ever a particular Y^i term in each M permutation polynomial,
linear combinations can have only M wires.
In order to synthesize a constraint system into a form that supports this wonky
arrangement, we need M>=3. The general goal is to treat each permutation polynomial
as a "slot" and, when constructing linear constraints, keep track of which slots are
"occupied" by wires, either with respect to the wires themselves or with respect to
the linear combination as it is being assembled.
If the linear combination has more than M terms, then we need to recursively
construct ephemeral wires to hold the values of the remaining terms, and relate those
wires to those terms in new linear combinations.
Once our linear combinations are small enough to fit the terms into the M slots,
we eagerly shove the terms in. The easy case is when a slot is available for both
the wire and the linear combination. The remaining cases can be addressed generally
by imagining that the wire has no available slots. We will create a new ephemeral
wire that holds the same value as the original wire and use this wire to insert the
linear combination. Then, we'll swap one of the terms from another slot into the new
ephemeral wire, freeing a slot in the original wire. Then, we trivially have that the
new wire and old wire have distinct slots free (since M>=3) and so we can now force
that they become equal.
In terms of actually implementing this, things can get tricky. We don't want to end
up in a circumstance where we are infinitely recursing, which can happen depending on
the order we create linear combinations for the ephemeral variables.
*/
pub struct Permutation3;
impl SynthesisDriver for Permutation3 {
fn synthesize<E: Engine, C: Circuit<E>, B: Backend<E>>(backend: B, circuit: &C) -> Result<(), SynthesisError> {
let mut tmp: PermutationSynthesizer<E, B> = PermutationSynthesizer::new(backend);
let one = tmp.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues");
match (one, <PermutationSynthesizer<E, B> as ConstraintSystem<E>>::ONE) {
(Variable::A(1), Variable::A(1)) => {},
_ => panic!("one variable is incorrect")
}
circuit.synthesize(&mut tmp)?;
Ok(())
}
}
<file_sep>/src/plonk/domains/mod.rs
use crate::pairing::ff::PrimeField;
use crate::SynthesisError;
use crate::worker::Worker;
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Domain<F: PrimeField> {
pub size: u64,
pub power_of_two: u64,
pub generator: F,
}
impl<F: PrimeField> Domain<F> {
pub fn new_for_size(size: u64) -> Result<Self, SynthesisError> {
let size = size.next_power_of_two();
let mut power_of_two = 0;
let mut k = size;
while k != 1 {
k >>= 1;
power_of_two += 1;
}
let max_power_of_two = F::S as u64;
if power_of_two > max_power_of_two {
return Err(SynthesisError::PolynomialDegreeTooLarge);
}
let mut generator = F::root_of_unity();
for _ in power_of_two..max_power_of_two {
generator.square()
}
Ok(Self {
size: size,
power_of_two: power_of_two,
generator: generator
})
}
pub fn coset_for_natural_index_and_size(natural_index: usize, domain_size: usize) -> Vec<usize> {
assert!(domain_size > 1);
assert!(domain_size.is_power_of_two());
let natural_pair_index = (natural_index + (domain_size / 2)) % domain_size;
let mut coset = vec![natural_index, natural_pair_index];
coset.sort();
coset
}
pub fn index_and_size_for_next_domain(natural_index: usize, domain_size: usize) -> (usize, usize) {
// maps coset index into element of the next domain
// if index < current_size / 2 -> return index
// else -> return index - current_size / 2
assert!(domain_size > 1);
assert!(domain_size.is_power_of_two());
let next_size = domain_size / 2;
let next_index = if natural_index < next_size {
natural_index
} else {
natural_index - next_size
};
(next_index, next_size)
}
}
pub(crate) fn materialize_domain_elements_with_natural_enumeration<F: PrimeField>(
domain: &Domain<F>,
worker: &Worker
) -> Vec<F> {
let mut values = vec![F::zero(); domain.size as usize];
let generator = domain.generator;
worker.scope(values.len(), |scope, chunk| {
for (i, values) in values.chunks_mut(chunk).enumerate()
{
scope.spawn(move |_| {
let mut current_power = generator.pow(&[(i*chunk) as u64]);
for p in values {
*p = current_power;
current_power.mul_assign(&generator);
}
});
}
});
values
}<file_sep>/src/tests/mod.rs
use crate::pairing::{
Engine
};
use crate::pairing::ff:: {
Field,
PrimeField,
};
pub mod dummy_engine;
use self::dummy_engine::*;
use std::marker::PhantomData;
use crate::{
Circuit,
ConstraintSystem,
SynthesisError
};
#[derive(Clone)]
pub(crate) struct XORDemo<E: Engine> {
pub(crate) a: Option<bool>,
pub(crate) b: Option<bool>,
pub(crate) _marker: PhantomData<E>
}
impl<E: Engine> Circuit<E> for XORDemo<E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
let a_var = cs.alloc(|| "a", || {
if self.a.is_some() {
if self.a.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "a_boolean_constraint",
|lc| lc + CS::one() - a_var,
|lc| lc + a_var,
|lc| lc
);
let b_var = cs.alloc(|| "b", || {
if self.b.is_some() {
if self.b.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "b_boolean_constraint",
|lc| lc + CS::one() - b_var,
|lc| lc + b_var,
|lc| lc
);
let c_var = cs.alloc_input(|| "c", || {
if self.a.is_some() && self.b.is_some() {
if self.a.unwrap() ^ self.b.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "c_xor_constraint",
|lc| lc + a_var + a_var,
|lc| lc + b_var,
|lc| lc + a_var + b_var - c_var
);
Ok(())
}
}
#[derive(Clone)]
pub(crate) struct TranspilationTester<E: Engine> {
pub(crate) a: Option<E::Fr>,
pub(crate) b: Option<E::Fr>,
}
impl<E: Engine> Circuit<E> for TranspilationTester<E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
let a_var = cs.alloc(|| "a", || {
if let Some(a_value) = self.a {
Ok(a_value)
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "a is zero",
|lc| lc + a_var,
|lc| lc + CS::one(),
|lc| lc
);
let b_var = cs.alloc(|| "b", || {
if let Some(b_value) = self.b {
Ok(b_value)
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "b is one",
|lc| lc + b_var,
|lc| lc + CS::one(),
|lc| lc + CS::one()
);
let c_var = cs.alloc_input(|| "c", || {
if let Some(a_value) = self.a {
Ok(a_value)
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "a is equal to c",
|lc| lc + a_var,
|lc| lc + CS::one(),
|lc| lc + c_var
);
Ok(())
}
}
#[cfg(feature = "plonk")]
#[test]
fn transpile_xor() {
use crate::pairing::bn256::Bn256;
use crate::plonk::adaptor::alternative::Transpiler;
let c = XORDemo::<Bn256> {
a: None,
b: None,
_marker: PhantomData
};
let mut transpiler = Transpiler::new();
c.synthesize(&mut transpiler).unwrap();
}
#[cfg(feature = "plonk")]
#[test]
fn transpile_test_circuit() {
use crate::pairing::bn256::{Bn256, Fr};
use crate::plonk::adaptor::alternative::*;
use crate::plonk::plonk::prover::*;
let c = TranspilationTester::<Bn256> {
a: Some(Fr::zero()),
b: Some(Fr::one()),
};
let mut transpiler = Transpiler::new();
c.clone().synthesize(&mut transpiler).unwrap();
let hints = transpiler.into_hints();
let adapted_curcuit = AdaptorCircuit::new(c.clone(), &hints);
use crate::plonk::cs::Circuit as PlonkCircuit;
let mut prover = ProvingAssembly::<Bn256>::new();
adapted_curcuit.synthesize(&mut prover).unwrap();
prover.finalize();
println!("Checking if is satisfied");
assert!(prover.is_satisfied());
}
<file_sep>/src/sonic/transcript/mod.rs
use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use crate::pairing::{CurveAffine, CurveProjective, Engine};
use std::io;
mod hasher;
use self::hasher::{Hasher, Keccak256Hasher, BlakeHasher};
#[derive(Clone)]
pub struct Transcript {
transcriptor: RollingHashTranscript<Keccak256Hasher>
}
impl Transcript {
pub fn new(personalization: &[u8]) -> Self {
Self {
transcriptor: RollingHashTranscript::new(personalization)
}
}
}
impl TranscriptProtocol for Transcript {
fn commit_point<G: CurveAffine>(&mut self, point: &G) {
self.transcriptor.commit_point(point);
}
fn commit_scalar<F: PrimeField>(&mut self, scalar: &F) {
self.transcriptor.commit_scalar(scalar);
}
fn get_challenge_scalar<F: PrimeField>(&mut self) -> F {
self.transcriptor.get_challenge_scalar()
}
}
use std::marker::PhantomData;
#[derive(Clone)]
pub struct RollingHashTranscript<H: Hasher> {
buffer: Vec<u8>,
last_finalized_value: Vec<u8>,
repeated_request_nonce: u32,
_marker: PhantomData<H>
}
impl<H: Hasher> RollingHashTranscript<H> {
pub fn new(personalization: &[u8]) -> Self {
let mut h = H::new(personalization);
let buffer = h.finalize();
Self {
buffer: buffer,
last_finalized_value: vec![],
repeated_request_nonce: 0u32,
_marker: PhantomData
}
}
pub fn commit_bytes(&mut self, personalization: &[u8], bytes: &[u8]) {
let mut h = H::new(&[]);
h.update(&self.buffer);
h.update(personalization);
h.update(bytes);
self.buffer = h.finalize();
}
pub fn get_challenge_bytes(&mut self, nonce: &[u8]) -> Vec<u8> {
let challenge_bytes = &self.buffer;
let mut h = H::new(&[]);
h.update(challenge_bytes);
h.update(nonce);
let challenge_bytes = h.finalize();
challenge_bytes
}
}
pub trait TranscriptProtocol {
fn commit_point<G: CurveAffine>(&mut self, point: &G);
fn commit_scalar<F: PrimeField>(&mut self, scalar: &F);
fn get_challenge_scalar<F: PrimeField>(&mut self) -> F;
}
impl<H:Hasher> TranscriptProtocol for RollingHashTranscript<H> {
    /// Absorbs the uncompressed encoding of `point` and resets the challenge
    /// retry nonce, since the state has changed.
    fn commit_point<G: CurveAffine>(&mut self, point: &G) {
        self.commit_bytes(b"point", point.into_uncompressed().as_ref());
        // self.commit_bytes(b"point", point.into_compressed().as_ref());
        self.repeated_request_nonce = 0u32;
    }
    /// Absorbs the big-endian representation of `scalar` and resets the
    /// challenge retry nonce.
    fn commit_scalar<F: PrimeField>(&mut self, scalar: &F) {
        let mut v = vec![];
        scalar.into_repr().write_be(&mut v).unwrap();
        // scalar.into_repr().write_le(&mut v).unwrap();
        self.commit_bytes(b"scalar", &v);
        self.repeated_request_nonce = 0u32;
    }
    /// Derives a challenge scalar via rejection sampling: hash the state with
    /// an increasing 4-byte big-endian nonce until the digest decodes to a
    /// valid field element. The nonce is persisted so repeated calls without
    /// intervening commitments produce a fresh challenge each time.
    fn get_challenge_scalar<F: PrimeField>(&mut self) -> F {
        use byteorder::ByteOrder;
        let mut nonce = self.repeated_request_nonce;
        loop {
            let mut nonce_bytes = vec![0u8; 4];
            byteorder::BigEndian::write_u32(&mut nonce_bytes, nonce);
            let mut repr: F::Repr = Default::default();
            let challenge_bytes = self.get_challenge_bytes(&nonce_bytes);
            repr.read_be(&challenge_bytes[..]).unwrap();
            // `from_repr` fails when the digest is >= the field modulus;
            // in that case try the next nonce.
            if let Ok(result) = F::from_repr(repr) {
                // println!("Got a challenge {} for nonce = {}", result, nonce);
                self.repeated_request_nonce = nonce + 1u32;
                return result;
            }
            // Give up after exhausting the full u32 nonce space (in practice
            // a valid element is found after a couple of attempts).
            if nonce == (0xffffffff as u32) {
                panic!("can not make challenge scalar");
            }
            nonce += 1;
        }
    }
}
// struct TranscriptReader<'a, H:Hasher>(&'a mut Transcript<H>);
// impl<'a, H:Hasher> io::Read for TranscriptReader<'a, H: Hasher> {
// fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
// self.0.challenge_bytes(b"read", buf);
// Ok(buf.len())
// }
// }<file_sep>/src/sonic/helped/adapted_helper.rs
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use rand::{Rand, Rng};
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::parameters::{Parameters};
use super::helper::{Aggregate};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::{Circuit};
use crate::sonic::sonic::AdaptorCircuit;
use crate::sonic::srs::SRS;
use crate::sonic::sonic::Nonassigning;
use super::helper::create_aggregate as create_aggregate_sonic_circuit;
/// Creates an aggregate over a set of (proof, advice) pairs for a
/// Bellman-style circuit: the circuit is first adapted to the Sonic
/// constraint system, then the circuit-level aggregation routine is invoked
/// with the `Nonassigning` synthesis driver.
pub fn create_aggregate<E: Engine, C: Circuit<E> + Clone>(
    circuit: C,
    inputs: &[(Proof<E>, SxyAdvice<E>)],
    params: &Parameters<E>,
) -> Aggregate<E>
{
    create_aggregate_sonic_circuit::<_, _, Nonassigning>(&AdaptorCircuit(circuit), inputs, params)
}
<file_sep>/src/sonic/unhelped/verifier.rs
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use rand::{Rand, Rng};
use crate::sonic::helped::{Proof, SxyAdvice};
use crate::sonic::helped::batch::Batch;
use crate::sonic::helped::poly::{SxEval, SyEval};
use crate::sonic::helped::helper::Aggregate;
use crate::sonic::helped::parameters::{Parameters};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::cs::{Circuit, Variable, Coeff};
use crate::sonic::srs::SRS;
use crate::sonic::sonic::Preprocess;
use super::s2_proof::{S2Proof, S2Eval};
use super::aggregate::SuccinctAggregate;
use super::permutation_structure::create_permutation_structure;
use super::permutation_argument::{
PermutationArgumentProof,
PermutationProof,
PermutationArgument,
SpecializedSRS
};
/// Verifier that checks multiple Sonic proofs together with a succinct
/// aggregate, accumulating all openings into a single batched pairing check.
pub struct SuccinctMultiVerifier<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng> {
    circuit: C,
    // Specialized SRS elements (p_1..p_4) used by the S1 / permutation part of s(X, Y)
    s1_special_reference: SpecializedSRS<E>,
    // Precomputed commitment used by the S2 part of s(X, Y)
    s2_special_reference: E::G1Affine,
    // Accumulator for batched opening/commitment checks
    pub(crate) batch: Batch<E>,
    // Maps input index to its constraint number (exponents of k(Y))
    k_map: Vec<usize>,
    // Number of multiplication gates
    n: usize,
    // Number of linear constraints
    q: usize,
    // Randomness used to weight the batched checks
    randomness_source: R,
    _marker: PhantomData<(E, S)>
}
impl<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng> SuccinctMultiVerifier<E, C, S, R> {
    /// Builds a verifier for `circuit` over `srs`.
    ///
    /// This constructor consumes the randomness source because it's later
    /// used internally to weight the batched pairing checks.
    pub fn new(circuit: C, srs: &SRS<E>, rng: R) -> Result<Self, SynthesisError> {
        // Run a synthesis pass only to count gates/constraints and build the input map.
        let (n, q, k_map) = {
            let mut preprocess = Preprocess::new();
            S::synthesize(&mut preprocess, &circuit)?;
            (preprocess.n, preprocess.q, preprocess.k_map)
        };
        // also calculate special reference for s1
        let permutation_structure = create_permutation_structure(&circuit);
        let s2_special_reference = permutation_structure.calculate_s2_commitment_value(&srs);
        let s1_special_reference = permutation_structure.create_permutation_special_reference(&srs);
        Ok(SuccinctMultiVerifier {
            circuit,
            s1_special_reference,
            s2_special_reference,
            batch: Batch::new(srs, n),
            k_map: k_map,
            n: n,
            q: q,
            randomness_source: rng,
            _marker: PhantomData
        })
    }

    /// Verifies a `SuccinctAggregate` over `proofs`, pushing every opening and
    /// commitment into the internal batch. Challenges (y, z, w, ...) are
    /// re-derived via Fiat-Shamir, so the transcript order below must match
    /// the prover exactly.
    ///
    /// NOTE(review): the pairing-equation comments below are transcribed from
    /// the accompanying write-up; exponent details (e.g. the `(yz)^{n+1}` term
    /// in `c_j`) should be confirmed against it.
    pub fn add_aggregate(
        &mut self,
        proofs: &[(Proof<E>, SxyAdvice<E>)],
        aggregate: &SuccinctAggregate<E>,
        srs: &SRS<E>
    )
    {
        let mut transcript = Transcript::new(&[]);
        let mut y_values: Vec<E::Fr> = Vec::with_capacity(proofs.len());
        for &(ref proof, ref sxyadvice) in proofs {
            {
                // Each proof's y challenge comes from its own fresh transcript,
                // mirroring how it was derived at proving time.
                let mut transcript = Transcript::new(&[]);
                transcript.commit_point(&proof.r);
                y_values.push(transcript.get_challenge_scalar());
            }
            transcript.commit_point(&sxyadvice.s);
        }
        let z: E::Fr = transcript.get_challenge_scalar();
        transcript.commit_point(&aggregate.c);
        let w: E::Fr = transcript.get_challenge_scalar();
        // Reconstruct s(z, w) from the succinct S1/S2 sub-proofs while
        // simultaneously queueing their consistency checks.
        let szw = {
            // prover will supply s1 and s2, need to calculate
            // s(z, w) = X^-(N+1) * Y^N * s1 - X^N * s2
            let x_n = z.pow(&[self.n as u64]);
            let mut x_n_plus_1 = x_n;
            x_n_plus_1.mul_assign(&z);
            let x_n_plus_1_inv = x_n_plus_1.inverse().unwrap();
            let y_n = w.pow(&[self.n as u64]);
            // simultaneously add components to the batch verifier
            // this is s2 contribution itself
            let s2_proof = &aggregate.s2_proof;
            let mut s2_part = s2_proof.c_value;
            s2_part.add_assign(&s2_proof.d_value);
            s2_part.mul_assign(&x_n);
            // add terms for S2 for verification
            {
                let random: E::Fr = self.randomness_source.gen();
                // e(C,hαx)e(C−yz,hα) = e(O,h)e(g−c,hα) that is
                // e(C,hαx)e(C^−yz,hα)*e(O,-h)e(g^c,hα) = 1
                let mut xy = z;
                xy.mul_assign(&w);
                self.batch.add_opening(s2_proof.c_opening, random, xy);
                self.batch.add_opening_value(random, s2_proof.c_value);
                self.batch.add_commitment(self.s2_special_reference, random);
            }
            {
                let random: E::Fr = self.randomness_source.gen();
                // e(D,hαx)e(D−y−1z,hα) = e(O,h)e(g−d,hα) that is
                // e(D,hαx)e(D^−y-1z,hα)*e(O,-h)e(g^d,hα) = 1
                let mut y_inv_by_x = z;
                y_inv_by_x.mul_assign(&w.inverse().unwrap());
                self.batch.add_opening(s2_proof.d_opening, random, y_inv_by_x);
                self.batch.add_opening_value(random, s2_proof.d_value);
                self.batch.add_commitment(self.s2_special_reference, random);
            }
            // now work with s1 part
            let mut s1_part = aggregate.signature.perm_argument_proof.s_zy;
            s1_part.mul_assign(&x_n_plus_1_inv);
            s1_part.mul_assign(&y_n);
            let mut szw = s1_part;
            szw.sub_assign(&s2_part);
            // verify commitments for s' and s
            {
                let mut transcript = Transcript::new(&[]);
                // let s_commitments = &aggregate.signature.s_commitments;
                // let s_prime_commitments = &aggregate.signature.s_prime_commitments;
                let mut challenges = vec![];
                // Commit all S/S' pairs first, then draw one challenge per pair.
                for (s, s_prime) in aggregate.signature.s_commitments.iter()
                    .zip(aggregate.signature.s_prime_commitments.iter()) {
                    transcript.commit_point(s);
                    transcript.commit_point(s_prime);
                }
                for _ in 0..aggregate.signature.s_commitments.len() {
                    let challenge = transcript.get_challenge_scalar();
                    challenges.push(challenge);
                }
                let z_prime: E::Fr = transcript.get_challenge_scalar();
                // we expect M permutation proofs, add them all into verification
                // using batching with random challenges and extra randomness for pairing equation
                {
                    // e(E,hαx)e(E−z′,hα) = e(Mj=1Sj′rj,h)e(g−v,hα)
                    let perm_proof = &aggregate.signature.perm_proof;
                    let s_r = multiexp(
                        aggregate.signature.s_prime_commitments.iter(),
                        challenges.iter()
                    ).into_affine();
                    let p2_r = multiexp(
                        self.s1_special_reference.p_2.iter(),
                        challenges.iter()
                    ).into_affine();
                    let value = perm_proof.v_zy;
                    let random: E::Fr = self.randomness_source.gen();
                    self.batch.add_opening(perm_proof.e_opening, random, z_prime);
                    self.batch.add_opening_value(random, value);
                    self.batch.add_commitment(s_r, random);
                    // e(F,hαx)e(F−yz′,hα) = e(Mj=1P2jrj,h)e(g−v,hα)
                    let mut y_z_prime = z_prime;
                    y_z_prime.mul_assign(&w);
                    let random: E::Fr = self.randomness_source.gen();
                    self.batch.add_opening(perm_proof.f_opening, random, y_z_prime);
                    self.batch.add_opening_value(random, value);
                    self.batch.add_commitment(p2_r, random);
                }
                // now we can actually take an opening of S commitments and
                {
                    // e(I,hαx)e(I−z,hα) = e(Mj=1 Sj,h)e(g−s,hα)
                    let value = aggregate.signature.perm_argument_proof.s_zy;
                    let mut s_commitment = E::G1::zero();
                    for s in aggregate.signature.s_commitments.iter() {
                        s_commitment.add_assign_mixed(s);
                    }
                    let random: E::Fr = self.randomness_source.gen();
                    self.batch.add_opening(aggregate.signature.perm_argument_proof.s_opening, random, z);
                    self.batch.add_opening_value(random, value);
                    self.batch.add_commitment(s_commitment.into_affine(), random);
                }
                // TODO: Add grand product argument!
                // for each of the grand product arguments create a corresponding commitment
                // from already known elements
                let mut betas = vec![];
                let mut gammas = vec![];
                let mut a_commitments = vec![];
                let mut b_commitments = vec![];
                for _ in 0..aggregate.signature.s_commitments.len() {
                    let beta: E::Fr = transcript.get_challenge_scalar();
                    let gamma: E::Fr = transcript.get_challenge_scalar();
                    betas.push(beta);
                    gammas.push(gamma);
                }
                let mut wellformedness_argument_commitments = vec![];
                use crate::pairing::CurveAffine;
                use crate::pairing::ff::PrimeField;
                // Build the A_j/B_j commitments for the grand product argument
                // from the S/S' commitments and the specialized SRS elements.
                for (j, (((s, s_prime), beta), gamma)) in aggregate.signature.s_commitments.iter()
                    .zip(aggregate.signature.s_prime_commitments.iter())
                    .zip(betas.iter())
                    .zip(gammas.iter())
                    .enumerate()
                {
                    // Sj(P4j)β(P1j)γ
                    let mut a = s.into_projective();
                    a.add_assign(&self.s1_special_reference.p_4[j].mul(beta.into_repr()));
                    a.add_assign(&self.s1_special_reference.p_1.mul(gamma.into_repr()));
                    let a = a.into_affine();
                    // Sj′(P3j)β(P1j)γ
                    let mut b = s_prime.into_projective();
                    b.add_assign(&self.s1_special_reference.p_3.mul(beta.into_repr()));
                    b.add_assign(&self.s1_special_reference.p_1.mul(gamma.into_repr()));
                    let b = b.into_affine();
                    a_commitments.push(a);
                    b_commitments.push(b);
                    wellformedness_argument_commitments.push(a);
                    wellformedness_argument_commitments.push(b);
                }
                // commitments to individual grand products are assembled, now check first part of a grand
                // product argument
                // Now perform an actual check
                {
                    let randomness: Vec<E::Fr> = (0..aggregate.signature.s_commitments.len()).map(|_| self.randomness_source.gen()).collect();
                    // e(Dj,hαx)e(D−yz,hα) = e(Aj,h)e(Bj,hxn+1)e(g−aj ,hα)
                    let g = srs.g_positive_x[0];
                    let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
                    let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare();
                    let mut h_x_n_plus_one_precomp = srs.h_positive_x[self.n+1];
                    h_x_n_plus_one_precomp.negate();
                    let h_x_n_plus_one_precomp = h_x_n_plus_one_precomp.prepare();
                    let mut h_prep = srs.h_positive_x[0];
                    h_prep.negate();
                    let h_prep = h_prep.prepare();
                    // Random linear combinations of the A_j / B_j commitments.
                    let a = multiexp(
                        a_commitments.iter(),
                        randomness.iter(),
                    ).into_affine();
                    let a = a.prepare();
                    let b = multiexp(
                        b_commitments.iter(),
                        randomness.iter(),
                    ).into_affine();
                    let b = b.prepare();
                    let mut yz_neg = w;
                    yz_neg.mul_assign(&z);
                    yz_neg.negate();
                    let mut ops = vec![];
                    let mut value = E::Fr::zero();
                    // Fold opening values with the same random weights.
                    for (el, r) in aggregate.signature.grand_product_signature.grand_product_openings.iter().zip(randomness.iter()) {
                        let (v, o) = el;
                        ops.push(o.clone());
                        let mut val = *v;
                        val.mul_assign(&r);
                        value.add_assign(&val);
                    }
                    let value = g.mul(value.into_repr()).into_affine().prepare();
                    let openings = multiexp(
                        ops.iter(),
                        randomness.iter(),
                    ).into_affine();
                    let openings_zy = openings.mul(yz_neg.into_repr()).into_affine().prepare();
                    let openings = openings.prepare();
                    // e(Dj,hαx)e(D−yz,hα) = e(Aj,h)e(Bj,hxn+1)e(g−aj ,hα)
                    let valid = E::final_exponentiation(&E::miller_loop(&[
                        (&openings, &h_alpha_x_precomp),
                        (&openings_zy, &h_alpha_precomp),
                        (&a, &h_prep),
                        (&b, &h_x_n_plus_one_precomp),
                        (&value, &h_alpha_precomp)
                    ])).unwrap() == E::Fqk::one();
                    // TODO: propagate a failure instead of asserting
                    assert!(valid, "grand product arguments must be valid for individual commitments");
                }
                // Now the second part of the grand product argument
                {
                    let mut grand_product_challenges = vec![];
                    for _ in 0..aggregate.signature.grand_product_signature.c_commitments.len() {
                        let c: E::Fr = transcript.get_challenge_scalar();
                        grand_product_challenges.push(c);
                    }
                    // first re-calculate cj and t(z,y)
                    let mut yz = w;
                    yz.mul_assign(&z);
                    let z_inv = z.inverse().unwrap();
                    let mut t_zy = E::Fr::zero();
                    let mut commitments_points = vec![];
                    let mut rc_vec = vec![];
                    let mut ry_vec = vec![];
                    // in grand product arguments n is not a number of gates, but 3n+1 - number of variables + 1
                    let three_n_plus_1 = 3*self.n + 1;
                    for ((r, commitment), (a, _)) in grand_product_challenges.iter()
                        .zip(aggregate.signature.grand_product_signature.c_commitments.iter())
                        .zip(aggregate.signature.grand_product_signature.grand_product_openings.iter())
                    {
                        let (c, v) = commitment;
                        commitments_points.push(*c);
                        // cj = ((aj + vj(yz)n+1)y + zn+2 + zn+1y − z2n+2y)z−1
                        let mut c_zy = yz.pow([(three_n_plus_1 + 1) as u64]);
                        c_zy.mul_assign(v);
                        c_zy.add_assign(a);
                        c_zy.mul_assign(&w);
                        let mut z_n_plus_1 = z.pow([(three_n_plus_1 + 1) as u64]);
                        let mut z_n_plus_2 = z_n_plus_1;
                        z_n_plus_2.mul_assign(&z);
                        let mut z_2n_plus_2 = z_n_plus_1;
                        z_2n_plus_2.square();
                        z_2n_plus_2.mul_assign(&w);
                        z_n_plus_1.mul_assign(&w);
                        c_zy.add_assign(&z_n_plus_1);
                        c_zy.add_assign(&z_n_plus_2);
                        c_zy.sub_assign(&z_2n_plus_2);
                        c_zy.mul_assign(&z_inv);
                        let mut rc = c_zy;
                        rc.mul_assign(&r);
                        rc_vec.push(rc);
                        let mut ry = w;
                        ry.mul_assign(&r);
                        ry_vec.push(ry);
                        let mut val = rc;
                        val.sub_assign(&r);
                        t_zy.add_assign(&val);
                    }
                    t_zy.add_assign(&aggregate.signature.grand_product_signature.proof.e_zinv);
                    t_zy.sub_assign(&aggregate.signature.grand_product_signature.proof.f_y);
                    // t(z, y) is now calculated
                    let c_rc = multiexp(
                        commitments_points.iter(),
                        rc_vec.iter(),
                    ).into_affine();
                    let c_ry = multiexp(
                        commitments_points.iter(),
                        ry_vec.iter(),
                    ).into_affine();
                    // e(E,h^alphax)e(E^-z^-1,h^alpha) = e(\sumCj^(rj*cj),h)e(g^-e,h^alpha)
                    {
                        let random: E::Fr = self.randomness_source.gen();
                        self.batch.add_opening(aggregate.signature.grand_product_signature.proof.e_opening, random, z_inv);
                        self.batch.add_opening_value(random, aggregate.signature.grand_product_signature.proof.e_zinv);
                        self.batch.add_commitment(c_rc, random);
                    }
                    // e(F,h^alphax)e(F^-y,h) = e(\sumCj^(rj&y),h)e(g^-f,h^alpha)
                    {
                        let random: E::Fr = self.randomness_source.gen();
                        self.batch.add_opening(aggregate.signature.grand_product_signature.proof.f_opening, random, w);
                        self.batch.add_opening_value(random, aggregate.signature.grand_product_signature.proof.f_y);
                        self.batch.add_commitment(c_ry, random);
                    }
                    // e(T′,hαx)e(T′−z,hα) = e(T,h)e(g−t(z,y),hα)
                    {
                        let random: E::Fr = self.randomness_source.gen();
                        self.batch.add_opening(aggregate.signature.grand_product_signature.proof.t_opening, random, z);
                        self.batch.add_opening_value(random, t_zy);
                        self.batch.add_commitment(aggregate.signature.grand_product_signature.t_commitment, random);
                    }
                }
                // finally check the wellformedness arguments
                {
                    let mut wellformedness_challenges = vec![];
                    for _ in 0..wellformedness_argument_commitments.len() {
                        let c: E::Fr = transcript.get_challenge_scalar();
                        wellformedness_challenges.push(c);
                    }
                    let d = srs.d;
                    let n = 3*self.n + 1; // same as for grand products
                    let alpha_x_d_precomp = srs.h_positive_x_alpha[d].prepare();
                    // TODO: not strictly required
                    assert!(n < d);
                    let d_minus_n = d - n;
                    let alpha_x_n_minus_d_precomp = srs.h_negative_x_alpha[d_minus_n].prepare();
                    let mut h_prep = srs.h_positive_x[0];
                    h_prep.negate();
                    let h_prep = h_prep.prepare();
                    let a = multiexp(
                        wellformedness_argument_commitments.iter(),
                        wellformedness_challenges.iter(),
                    ).into_affine();
                    // Two independent random weights combine the L and R halves
                    // of the wellformedness proof into one pairing check.
                    let r1: E::Fr = self.randomness_source.gen();
                    let r2: E::Fr = self.randomness_source.gen();
                    let mut r = r1;
                    r.add_assign(&r2);
                    let l_r1 = aggregate.signature.grand_product_signature.wellformedness_signature.proof.l.mul(r1.into_repr()).into_affine();
                    let r_r2 = aggregate.signature.grand_product_signature.wellformedness_signature.proof.r.mul(r2.into_repr()).into_affine();
                    let a_r = a.mul(r.into_repr()).into_affine();
                    let valid = E::final_exponentiation(&E::miller_loop(&[
                        (&a_r.prepare(), &h_prep),
                        (&l_r1.prepare(), &alpha_x_d_precomp),
                        (&r_r2.prepare(), &alpha_x_n_minus_d_precomp)
                    ])).unwrap() == E::Fqk::one();
                    assert!(valid, "wellformedness argument must be valid");
                }
            }
            szw
        };
        // Open the aggregate commitment C at w to the reconstructed s(z, w).
        {
            let random: E::Fr = self.randomness_source.gen();
            self.batch.add_opening(aggregate.opening, random, w);
            self.batch.add_commitment(aggregate.c, random);
            self.batch.add_opening_value(szw, random);
        }
        // Open C at each proof's y challenge.
        for ((opening, value), &y) in aggregate.c_openings.iter().zip(y_values.iter()) {
            let random: E::Fr = self.randomness_source.gen();
            self.batch.add_opening(*opening, random, y);
            self.batch.add_commitment(aggregate.c, random);
            self.batch.add_opening_value(*value, random);
        }
        // Finally, batch the openings of the per-proof S commitments at z.
        let random: E::Fr = self.randomness_source.gen();
        let mut expected_value = E::Fr::zero();
        for ((_, advice), c_opening) in proofs.iter().zip(aggregate.c_openings.iter()) {
            let mut r: E::Fr = transcript.get_challenge_scalar();
            // expected value of the later opening
            {
                let mut tmp = c_opening.1;
                tmp.mul_assign(&r);
                expected_value.add_assign(&tmp);
            }
            r.mul_assign(&random);
            self.batch.add_commitment(advice.s, r);
        }
        self.batch.add_opening_value(expected_value, random);
        self.batch.add_opening(aggregate.s_opening, random, z);
    }

    /// Adds a proof together with its `SxyAdvice`, queueing the opening of
    /// `advice.s` at the proof's z challenge.
    ///
    /// Caller must ensure to add the aggregate after adding a proof.
    pub fn add_proof_with_advice(
        &mut self,
        proof: &Proof<E>,
        inputs: &[E::Fr],
        advice: &SxyAdvice<E>,
    )
    {
        let mut z = None;
        // Capture the z challenge derived inside add_proof via the closure.
        self.add_proof(proof, inputs, |_z, _y| {
            z = Some(_z);
            Some(advice.szy)
        });
        let z = z.unwrap();
        // We need to open up SxyAdvice.s at z using SxyAdvice.opening
        let mut transcript = Transcript::new(&[]);
        transcript.commit_point(&advice.opening);
        transcript.commit_point(&advice.s);
        transcript.commit_scalar(&advice.szy);
        let random: E::Fr = self.randomness_source.gen();
        self.batch.add_opening(advice.opening, random, z);
        self.batch.add_commitment(advice.s, random);
        self.batch.add_opening_value(advice.szy, random);
    }

    /// Adds one proof to the batch. `sxy` may supply a precomputed s(z, y);
    /// when it returns `None`, s(z, y) is evaluated here by re-synthesizing
    /// the circuit (the non-succinct path).
    pub fn add_proof<F>(
        &mut self,
        proof: &Proof<E>,
        inputs: &[E::Fr],
        sxy: F
    )
        where F: FnOnce(E::Fr, E::Fr) -> Option<E::Fr>
    {
        // Re-derive the y, z, r1 challenges exactly as the prover did.
        let mut transcript = Transcript::new(&[]);
        transcript.commit_point(&proof.r);
        let y: E::Fr = transcript.get_challenge_scalar();
        transcript.commit_point(&proof.t);
        let z: E::Fr = transcript.get_challenge_scalar();
        transcript.commit_scalar(&proof.rz);
        transcript.commit_scalar(&proof.rzy);
        let r1: E::Fr = transcript.get_challenge_scalar();
        transcript.commit_point(&proof.z_opening);
        transcript.commit_point(&proof.zy_opening);
        // First, the easy one. Let's open up proof.r at zy, using proof.zy_opening
        // as the evidence and proof.rzy as the opening.
        {
            let random: E::Fr = self.randomness_source.gen();
            let mut zy = z;
            zy.mul_assign(&y);
            self.batch.add_opening(proof.zy_opening, random, zy);
            self.batch.add_commitment_max_n(proof.r, random);
            self.batch.add_opening_value(proof.rzy, random);
        }
        // Now we need to compute t(z, y) with what we have. Let's compute k(y).
        // The chained iterator prepends the constant-one input.
        let mut ky = E::Fr::zero();
        for (exp, input) in self.k_map.iter().zip(Some(E::Fr::one()).iter().chain(inputs.iter())) {
            let mut term = y.pow(&[(*exp + self.n) as u64]);
            term.mul_assign(input);
            ky.add_assign(&term);
        }
        // Compute s(z, y)
        let szy = sxy(z, y).unwrap_or_else(|| {
            let mut tmp = SxEval::new(y, self.n);
            S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO
            tmp.finalize(z)
            // let mut tmp = SyEval::new(z, self.n, self.q);
            // S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO
            // tmp.finalize(y)
        });
        // Finally, compute t(z, y)
        // t(z, y) = (r(z, y) + s(z,y))*r(z, 1) - k(y)
        let mut tzy = proof.rzy;
        tzy.add_assign(&szy);
        tzy.mul_assign(&proof.rz);
        tzy.sub_assign(&ky);
        // We open these both at the same time by keeping their commitments
        // linearly independent (using r1).
        {
            let mut random: E::Fr = self.randomness_source.gen();
            self.batch.add_opening(proof.z_opening, random, z);
            self.batch.add_opening_value(tzy, random);
            self.batch.add_commitment(proof.t, random);
            random.mul_assign(&r1);
            self.batch.add_opening_value(proof.rz, random);
            self.batch.add_commitment_max_n(proof.r, random);
        }
    }

    /// Returns a copy of the input-to-constraint map k(Y) exponents.
    pub fn get_k_map(&self) -> Vec<usize> {
        return self.k_map.clone();
    }

    /// Returns the number of multiplication gates.
    pub fn get_n(&self) -> usize {
        return self.n;
    }

    /// Returns the number of linear constraints.
    pub fn get_q(&self) -> usize {
        return self.q;
    }

    /// Consumes the verifier and performs the single batched pairing check
    /// over everything accumulated so far.
    pub fn check_all(self) -> bool {
        self.batch.check_all()
    }
}
// /// Check multiple proofs without aggregation. Verifier's work is
// /// not succinct due to `S(X, Y)` evaluation
// pub fn verify_proofs<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng>(
// proofs: &[Proof<E>],
// inputs: &[Vec<E::Fr>],
// circuit: C,
// rng: R,
// params: &Parameters<E>,
// ) -> Result<bool, SynthesisError> {
// verify_proofs_on_srs::<E, C, S, R>(proofs, inputs, circuit, rng, ¶ms.srs)
// }
// /// Check multiple proofs without aggregation. Verifier's work is
// /// not succinct due to `S(X, Y)` evaluation
// pub fn verify_proofs_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng>(
// proofs: &[Proof<E>],
// inputs: &[Vec<E::Fr>],
// circuit: C,
// rng: R,
// srs: &SRS<E>,
// ) -> Result<bool, SynthesisError> {
// let mut verifier = MultiVerifier::<E, C, S, R>::new(circuit, srs, rng)?;
// let expected_inputs_size = verifier.get_k_map().len() - 1;
// for (proof, inputs) in proofs.iter().zip(inputs.iter()) {
// if inputs.len() != expected_inputs_size {
// return Err(SynthesisError::Unsatisfiable);
// }
// verifier.add_proof(proof, &inputs, |_, _| None);
// }
// Ok(verifier.check_all())
// }
// /// Check multiple proofs with aggregation. Verifier's work is
// /// not succinct due to `S(X, Y)` evaluation
// pub fn verify_aggregate<E: Engine, C: Circuit<E>, S: SynthesisDriver,R: Rng>(
// proofs: &[(Proof<E>, SxyAdvice<E>)],
// aggregate: &Aggregate<E>,
// inputs: &[Vec<E::Fr>],
// circuit: C,
// rng: R,
// params: &Parameters<E>,
// ) -> Result<bool, SynthesisError> {
// verify_aggregate_on_srs::<E, C, S, R>(proofs, aggregate, inputs, circuit, rng, ¶ms.srs)
// }
// /// Check multiple proofs with aggregation. Verifier's work is
// /// not succinct due to `S(X, Y)` evaluation
// pub fn verify_aggregate_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng>(
// proofs: &[(Proof<E>, SxyAdvice<E>)],
// aggregate: &Aggregate<E>,
// inputs: &[Vec<E::Fr>],
// circuit: C,
// rng: R,
// srs: &SRS<E>,
// ) -> Result<bool, SynthesisError> {
// let mut verifier = MultiVerifier::<E, C, S, R>::new(circuit, srs, rng)?;
// let expected_inputs_size = verifier.get_k_map().len() - 1;
// for ((proof, advice), inputs) in proofs.iter().zip(inputs.iter()) {
// if inputs.len() != expected_inputs_size {
// return Err(SynthesisError::Unsatisfiable);
// }
// verifier.add_proof_with_advice(proof, &inputs, &advice);
// }
// verifier.add_aggregate(proofs, aggregate);
// Ok(verifier.check_all())
// }
<file_sep>/src/gm17/generator.rs
use super::super::verbose_flag;
use rand::Rng;
use std::sync::Arc;
use crate::pairing::{
Engine,
Wnaf,
CurveProjective,
CurveAffine
};
use crate::pairing::ff::{
PrimeField,
Field
};
use super::{
Parameters,
VerifyingKey
};
use crate::{
SynthesisError,
Circuit,
ConstraintSystem,
LinearCombination,
Variable,
Index
};
use crate::domain::{
EvaluationDomain,
Scalar
};
use crate::worker::{
Worker
};
// /// Generates a random common reference string for
// /// a circuit.
// pub fn generate_random_parameters<E, C, R>(
// circuit: C,
// rng: &mut R
// ) -> Result<Parameters<E>, SynthesisError>
// where E: Engine, C: Circuit<E>, R: Rng
// {
// let g1 = rng.gen();
// let g2 = rng.gen();
// let alpha = rng.gen();
// let beta = rng.gen();
// let gamma = rng.gen();
// let delta = rng.gen();
// let tau = rng.gen();
// generate_parameters::<E, C>(
// circuit,
// g1,
// g2,
// alpha,
// beta,
// gamma,
// delta,
// tau
// )
// }
/// This is our assembly structure that we'll use to synthesize the
/// circuit into a SAP. Square arithmetic problem is different from QAP in a form:
/// it's A*A - C = 0 instead of A*B - C = 0
struct KeypairAssembly<E: Engine> {
    // Number of public inputs allocated (includes the implicit "one" input)
    num_inputs: usize,
    // Number of auxiliary (witness) variables, including SAP reduction variables
    num_aux: usize,
    // Total SAP constraints (each original R1CS constraint expands to two)
    num_constraints: usize,
    // Auxiliary variables that came from the original R1CS (not from the reduction)
    num_r1cs_aux: usize,
    // Constraints in the original R1CS before the SAP expansion
    num_r1cs_constraints: usize,
    // Per-input A-polynomial entries: (coefficient, constraint index)
    at_inputs: Vec<Vec<(E::Fr, usize)>>,
    // Per-input C-polynomial entries
    ct_inputs: Vec<Vec<(E::Fr, usize)>>,
    // Per-aux-variable A-polynomial entries
    at_aux: Vec<Vec<(E::Fr, usize)>>,
    // Per-aux-variable C-polynomial entries
    ct_aux: Vec<Vec<(E::Fr, usize)>>
}
impl<E: Engine> ConstraintSystem<E> for KeypairAssembly<E> {
    type Root = Self;

    /// Allocates a new auxiliary variable; only bookkeeping is performed
    /// (name and assignment closures are ignored during keypair generation).
    fn alloc<F, A, AR>(
        &mut self,
        _: A,
        _: F
    ) -> Result<Variable, SynthesisError>
        where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
    {
        // There is no assignment, so we don't even invoke the
        // function for obtaining one.
        let index = self.num_aux;
        self.num_aux += 1;
        self.num_r1cs_aux += 1;
        self.at_aux.push(vec![]);
        self.ct_aux.push(vec![]);
        Ok(Variable(Index::Aux(index)))
    }

    /// Allocates a new public input variable; again bookkeeping only.
    fn alloc_input<F, A, AR>(
        &mut self,
        _: A,
        _: F
    ) -> Result<Variable, SynthesisError>
        where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
    {
        // There is no assignment, so we don't even invoke the
        // function for obtaining one.
        let index = self.num_inputs;
        self.num_inputs += 1;
        self.at_inputs.push(vec![]);
        self.ct_inputs.push(vec![]);
        Ok(Variable(Index::Input(index)))
    }

    /// Records one R1CS constraint a*b = c as TWO SAP (square) constraints,
    /// allocating one fresh auxiliary variable y in the process.
    fn enforce<A, AR, LA, LB, LC>(
        &mut self,
        _: A,
        a: LA,
        b: LB,
        c: LC
    )
        where A: FnOnce() -> AR, AR: Into<String>,
              LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
              LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
              LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
    {
        use std::ops::{Add, Sub};
        // this is where reduction happens. First we need to re-arrange initial constraints
        // from the form <a,x>*<b,x> = <c,x> to an artificial
        // <a - b,x> * <a - b,x> = y
        // <a + b,x> * <a + b,x> = 4*<c,x> + y
        // Multiplies a coefficient by 4 via two doublings.
        fn quadruple<E: Engine>(
            coeff: E::Fr
        ) -> E::Fr {
            let mut tmp = coeff;
            tmp.double();
            tmp.double();
            tmp
        }
        // Scatters a linear combination's terms into the per-variable
        // (coefficient, constraint index) tables.
        fn eval<E: Engine>(
            l: LinearCombination<E>,
            inputs: &mut [Vec<(E::Fr, usize)>],
            aux: &mut [Vec<(E::Fr, usize)>],
            this_constraint: usize
        )
        {
            for (index, coeff) in l.0 {
                match index {
                    Variable(Index::Input(id)) => inputs[id].push((coeff, this_constraint)),
                    Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint))
                }
            }
        }
        // <a - b,x> * <a - b,x> = x_i
        let i = self.num_constraints;
        let y = self.alloc(
            || format!("SAP reduction y_{}", i),
            || Ok(E::Fr::one())
        ).expect("must allocate SAP reduction variable");
        // alloc() bumped num_r1cs_aux, but y is a reduction artifact,
        // not an original R1CS variable — undo the increment.
        self.num_r1cs_aux -= 1;
        let lc_a = a(LinearCombination::zero());
        let lc_b = b(LinearCombination::zero());
        let lc_c = c(LinearCombination::zero());
        // First SAP constraint: <a - b, x>^2 = y
        let lc_a_minus_b = lc_a.clone().sub(&lc_b);
        let mut lc_y: LinearCombination<E> = LinearCombination::zero();
        lc_y = lc_y.add(y);
        eval(lc_a_minus_b, &mut self.at_inputs, &mut self.at_aux, self.num_constraints);
        eval(lc_y, &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints);
        self.num_constraints += 1;
        // <a + b,x> * <a + b,x> = 4*<c,x> + y
        let lc_a_plus_b = lc_a.add(&lc_b);
        let mut lc_c_quadrupled: LinearCombination<E> = LinearCombination::zero();
        for s in &lc_c.0 {
            let tmp = quadruple::<E>(s.1);
            lc_c_quadrupled = lc_c_quadrupled + (tmp, s.0);
        }
        lc_c_quadrupled = lc_c_quadrupled.add(y);
        eval(lc_a_plus_b, &mut self.at_inputs, &mut self.at_aux, self.num_constraints);
        eval(lc_c_quadrupled, &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints);
        self.num_constraints += 1;
        // One original R1CS constraint fully processed.
        self.num_r1cs_constraints += 1;
    }

    fn push_namespace<NR, N>(&mut self, _: N)
        where NR: Into<String>, N: FnOnce() -> NR
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn pop_namespace(&mut self)
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }
}
/// Create parameters for a circuit, given some toxic waste.
pub fn generate_parameters<E, C>(
circuit: C,
g1: E::G1,
g2: E::G2,
alpha: E::Fr,
beta: E::Fr,
gamma: E::Fr,
// delta: E::Fr,
tau: E::Fr
) -> Result<(), SynthesisError>
// Result<Parameters<E>, SynthesisError>
where E: Engine, C: Circuit<E>
{
let verbose = verbose_flag();
let mut assembly = KeypairAssembly {
num_inputs: 0,
num_aux: 0,
num_constraints: 0,
num_r1cs_aux: 0,
num_r1cs_constraints: 0,
at_inputs: vec![],
ct_inputs: vec![],
at_aux: vec![],
ct_aux: vec![]
};
// Allocate the "one" input variable
let input_0 = assembly.alloc_input(|| "", || Ok(E::Fr::one()))?;
// Synthesize the circuit.
circuit.synthesize(&mut assembly)?;
let num_inputs_without_identity = assembly.num_inputs - 1;
// inputs must be constrained manually in SAP style,
// so input 0 (identity) is constrained as 1*1=1
{
use std::ops::{Add, Sub};
fn eval_lc<E: Engine>(
l: LinearCombination<E>,
inputs: &mut [Vec<(E::Fr, usize)>],
aux: &mut [Vec<(E::Fr, usize)>],
this_constraint: usize
)
{
for (index, coeff) in l.0 {
match index {
Variable(Index::Input(id)) => inputs[id].push((coeff, this_constraint)),
Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint))
}
}
}
let mut lc_input_0_a: LinearCombination<E> = LinearCombination::zero();
lc_input_0_a = lc_input_0_a.add(input_0.clone());
eval_lc(lc_input_0_a, &mut assembly.at_inputs, &mut assembly.at_aux, assembly.num_constraints);
assembly.num_constraints += 1;
}
let num_constraints_before_inputs_constraining = assembly.num_constraints;
let num_aux_before_inputs_constraining = assembly.num_aux;
// Other inputs are constrained as x_i * 1 = x_i where
// 1 is actually input number 0 (identity)
for i in 1..assembly.num_inputs {
assembly.enforce(|| "",
|lc| lc + Variable(Index::Input(i)),
|lc| lc + Variable(Index::Input(0)),
|lc| lc + Variable(Index::Input(i)),
);
}
// check that each input generates 2 constraints
assert_eq!(num_inputs_without_identity * 2 +
num_constraints_before_inputs_constraining,
assembly.num_constraints,
"each input must produce two extra constraints");
// and that it creates one extra variable
assert_eq!(num_inputs_without_identity +
num_aux_before_inputs_constraining,
assembly.num_aux,
"each input must generate an extra variable");
assert_eq!(assembly.num_inputs + assembly.num_r1cs_constraints + assembly.num_r1cs_aux,
assembly.num_inputs + assembly.num_aux,
"each constraint in principle adds one variable");
if verbose {eprintln!("Constraint system size is {}", assembly.num_constraints)};
// Create bases for blind evaluation of polynomials at tau
let powers_of_tau = vec![Scalar::<E>(E::Fr::zero()); assembly.num_constraints];
let mut domain = EvaluationDomain::from_coeffs(powers_of_tau)?;
// Compute G1 window table
let mut g1_wnaf = Wnaf::new();
let g1_wnaf = g1_wnaf.base(g1, {
2*(assembly.num_inputs + assembly.num_r1cs_constraints + assembly.num_r1cs_aux)
+ assembly.num_r1cs_constraints + assembly.num_r1cs_aux
+ 2*(assembly.num_inputs + assembly.num_r1cs_constraints)
});
// Compute gamma*G2 window table
let mut g2_wnaf = Wnaf::new();
// let gamma_g2 = g2.into_affine().mul(gamma.into_repr());
let g2_wnaf = g2_wnaf.base(g2, {
// B query
assembly.num_inputs + assembly.num_aux
// alternatively expressed as
// assembly.num_inputs + assembly.num_r1cs_constraints + assembly.num_r1cs_aux
});
let worker = Worker::new();
// let z_at_tau = {
// // Compute powers of tau
// if verbose {eprintln!("computing powers of tau...")};
// let start = std::time::Instant::now();
// {
// let domain = domain.as_mut();
// worker.scope(domain.len(), |scope, chunk| {
// for (i, subdomain) in domain.chunks_mut(chunk).enumerate()
// {
// scope.spawn(move || {
// let mut current_power = tau.pow(&[(i*chunk) as u64]);
// for p in subdomain {
// p.0 = current_power;
// current_power.mul_assign(&tau);
// }
// });
// }
// });
// }
// if verbose {eprintln!("powers of tau stage 1 done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};
// // z_at_tau = t(x)
// let z_at_tau = domain.z(&tau);
// z_at_tau
// };
let domain_length = domain.as_ref().len();
if verbose {eprintln!("Domain length is {} ", domain_length)};
// G1^{gamma^2 * Z(t) * t^i} for 0 <= i < 2^m - 1 for 2^m domains
let mut gamma2_z_t_g1 = vec![E::G1::zero(); domain.as_ref().len() - 1];
let mut z_at_tau = E::Fr::zero();
{
// Compute powers of tau
if verbose {eprintln!("computing powers of tau...")};
let start = std::time::Instant::now();
{
let domain = domain.as_mut();
worker.scope(domain.len(), |scope, chunk| {
for (i, subdomain) in domain.chunks_mut(chunk).enumerate()
{
scope.spawn(move |_| {
let mut current_power = tau.pow(&[(i*chunk) as u64]);
for p in subdomain {
p.0 = current_power;
current_power.mul_assign(&tau);
}
});
}
});
}
if verbose {eprintln!("powers of tau stage 1 done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};
// z_at_tau = t(x)
z_at_tau = domain.z(&tau);
let mut gamma2_z_t = z_at_tau;
gamma2_z_t.mul_assign(&gamma);
gamma2_z_t.mul_assign(&gamma);
if verbose {eprintln!("computing the `G1^(gamma^2 * Z(t) * t^i)` query with multiple threads...")};
let start = std::time::Instant::now();
// Compute the H query with multiple threads
worker.scope(gamma2_z_t_g1.len(), |scope, chunk| {
for (gamma2_z_t_g1, p) in gamma2_z_t_g1.chunks_mut(chunk).zip(domain.as_ref().chunks(chunk))
{
let mut g1_wnaf = g1_wnaf.shared();
scope.spawn(move |_| {
// Set values of the H query to g1^{(tau^i * t(tau)) / delta}
for (gamma2_z_t_g1, p) in gamma2_z_t_g1.iter_mut().zip(p.iter())
{
// Compute final exponent
let mut exp = p.0;
exp.mul_assign(&gamma2_z_t);
// Exponentiate
*gamma2_z_t_g1 = g1_wnaf.scalar(exp.into_repr());
}
// Batch normalize
E::G1::batch_normalization(gamma2_z_t_g1);
});
}
});
if verbose {eprintln!("computing the `G1^(gamma^2 * Z(t) * t^i)` query done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};
}
// G1^{gamma * A_i(t)} for 0 <= i <= num_variables
let mut a_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux];
// G2^{gamma * A_i(t)} for 0 <= i <= num_variables
let mut a_g2 = vec![E::G2::zero(); assembly.num_inputs + assembly.num_aux];
// G1^{gamma^2 * C_i(t) + (alpha + beta) * gamma * A_i(t)}
// for num_inputs + 1 < i <= num_variables
let mut c_1_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux];
// G1^{2 * gamma^2 * Z(t) * A_i(t)} for 0 <= i <= num_variables
let mut c_2_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux];
// G1^{gamma * Z(t)}
let mut gamma_zt = gamma;
gamma_zt.mul_assign(&z_at_tau);
let gamma_z = g1.into_affine().mul(gamma.into_repr());
// G2^{gamma * Z(t)}
let gamma_z_g2 = g2.into_affine().mul(gamma.into_repr());
let mut ab_gamma = alpha;
ab_gamma.add_assign(&beta);
ab_gamma.mul_assign(&gamma);
// G1^{(alpha + beta) * gamma * Z(t)}
let ab_gamma_z_g1 = g1.into_affine().mul(ab_gamma.into_repr());
let mut gamma2_z2 = gamma;
gamma2_z2.mul_assign(&z_at_tau);
gamma2_z2.square();
// G1^{gamma^2 * Z(t)^2}
let gamma2_z2_g1 = g1.into_affine().mul(gamma2_z2.into_repr());
// G^{gamma^2 * Z(t) * t^i} for 0 <= i < 2^m - 1 for 2^m domains
let mut gamma2_z_t = vec![E::G1::zero(); domain.as_ref().len() - 1];
if verbose {eprintln!("using inverse FFT to convert to intepolation coefficients...")};
let start = std::time::Instant::now();
// Use inverse FFT to convert to intepolation coefficients
domain.ifft(&worker);
let powers_of_tau = domain.into_coeffs();
// domain is now a set of scalars
if verbose {eprintln!("powers of tau evaluation in radix2 domain in {} s", start.elapsed().as_millis() as f64 / 1000.0)};
if verbose {eprintln!("evaluating polynomials...")};
let start = std::time::Instant::now();
// overall strategy:
// a_g1, a_g2, c_1_g1, c_2_g1 should be combined together by computing
// ab = (alpha + beta)
// g_2 = gamma^2
// t0 = gamma*A_i(t)
// t1 = g_2*C_t(t)
// a_g1 = t0*G1
// a_g2 = t0*G2
// c_1_g1 = (t1 + ab*t0)*G1
// c_2_g1 = (2*gamma*z_at_tau*t0)*G1
// Evaluates the SAP polynomials A_i and C_i at the toxic point tau and
// exponentiates the results into the `a_g1`/`a_g2`/`c_1_g1`/`c_2_g1` queries,
// chunked across worker threads. Per chunk:
//   t0     = gamma * A_i(tau)
//   t1     = gamma^2 * C_i(tau)
//   a_g1   = t0 * G1
//   a_g2   = t0 * G2
//   c_1_g1 = (t1 + (alpha + beta) * t0) * G1
//   c_2_g1 = (2 * gamma * Z(tau) * t0) * G1
// Note: a_g1/a_g2 are left at the identity when t0 == 0, while c_1_g1/c_2_g1
// are always written (even when their exponent is zero).
fn eval_stage_1<E: Engine>(
    // wNAF window tables for fixed-base exponentiation in G1/G2
    g1_wnaf: &Wnaf<usize, &[E::G1], &mut Vec<i64>>,
    g2_wnaf: &Wnaf<usize, &[E::G2], &mut Vec<i64>>,
    // powers of tau coefficients (tau^index lives at powers_of_tau[index].0)
    powers_of_tau: &[Scalar<E>],
    // SAP polynomials, each entry a sparse list of (coefficient, tau-power index)
    at: &[Vec<(E::Fr, usize)>],
    ct: &[Vec<(E::Fr, usize)>],
    // Resulting evaluated SAP polynomials (written in place, then batch-normalized)
    a_g1: &mut [E::G1],
    a_g2: &mut [E::G2],
    c_1_g1: &mut [E::G1],
    c_2_g1: &mut [E::G1],
    // Trapdoors (toxic waste of the setup ceremony)
    alpha: &E::Fr,
    beta: &E::Fr,
    gamma: &E::Fr,
    z_at_tau: &E::Fr,
    // Worker thread pool
    worker: &Worker
)
{
    // Sanity check: all output slices and both polynomial lists must describe
    // the same set of variables.
    assert_eq!(a_g1.len(), at.len());
    assert_eq!(a_g1.len(), ct.len());
    assert_eq!(a_g1.len(), a_g2.len());
    assert_eq!(a_g1.len(), c_1_g1.len());
    assert_eq!(a_g1.len(), c_2_g1.len());
    // compute loop-invariant trapdoor combinations once
    let mut ab = *alpha;
    ab.add_assign(&beta);
    let mut gamma2 = *gamma;
    gamma2.square();
    // Evaluate polynomials in multiple threads; each thread owns one aligned
    // chunk of every output slice plus the matching chunk of at/ct.
    worker.scope(a_g1.len(), |scope, chunk| {
        for (((((a_g1, a_g2), c_1_g1), c_2_g1), at), ct) in a_g1.chunks_mut(chunk)
            .zip(a_g2.chunks_mut(chunk))
            .zip(c_1_g1.chunks_mut(chunk))
            .zip(c_2_g1.chunks_mut(chunk))
            .zip(at.chunks(chunk))
            .zip(ct.chunks(chunk))
        {
            // per-thread shared view into the window tables
            let mut g1_wnaf = g1_wnaf.shared();
            let mut g2_wnaf = g2_wnaf.shared();
            scope.spawn(move |_| {
                for (((((a_g1, a_g2), c_1_g1), c_2_g1), at), ct) in a_g1.iter_mut()
                    .zip(a_g2.iter_mut())
                    .zip(c_1_g1.iter_mut())
                    .zip(c_2_g1.iter_mut())
                    .zip(at.iter())
                    .zip(ct.iter())
                {
                    // Evaluates a sparse polynomial (given as (coeff, power)
                    // pairs) at tau using the precomputed tau powers.
                    fn eval_at_tau<E: Engine>(
                        powers_of_tau: &[Scalar<E>],
                        p: &[(E::Fr, usize)]
                    ) -> E::Fr
                    {
                        let mut acc = E::Fr::zero();
                        for &(ref coeff, index) in p {
                            let mut n = powers_of_tau[index].0;
                            n.mul_assign(coeff);
                            acc.add_assign(&n);
                        }
                        acc
                    }
                    // Evaluate SAP polynomials at tau
                    // t0 = gamma*A_i(t)
                    let mut t0 = eval_at_tau(powers_of_tau, at);
                    t0.mul_assign(&gamma);
                    // t1 = gamma^2*C_t(t)
                    let mut t1 = eval_at_tau(powers_of_tau, ct);
                    t1.mul_assign(&gamma2);
                    // a_g1 = t0*G1
                    // a_g2 = t0*G2
                    // c_1_g1 = (t1 + ab*t0)*G1
                    // c_2_g1 = (2*gamma*z_at_tau*t0)*G1
                    // Compute a_g1 and a_g2 (skip the exponentiation entirely
                    // when the exponent is zero: the slot stays at identity)
                    if !t0.is_zero() {
                        *a_g1 = g1_wnaf.scalar(t0.into_repr());
                        *a_g2 = g2_wnaf.scalar(t0.into_repr());
                    }
                    let mut c_1_g1_factor = t0;
                    c_1_g1_factor.mul_assign(&ab);
                    c_1_g1_factor.add_assign(&t1);
                    // (2*gamma*z_at_tau*t0) computed in place, reusing t0
                    t0.mul_assign(&z_at_tau);
                    t0.mul_assign(&gamma);
                    t0.double();
                    *c_1_g1 = g1_wnaf.scalar(c_1_g1_factor.into_repr());
                    *c_2_g1 = g1_wnaf.scalar(t0.into_repr());
                }
                // Batch normalize this thread's chunk back to affine-friendly form
                E::G1::batch_normalization(a_g1);
                E::G2::batch_normalization(a_g2);
                E::G1::batch_normalization(c_1_g1);
                E::G1::batch_normalization(c_2_g1);
            });
        };
    });
}
// Evaluate for inputs.
eval_stage_1(
&g1_wnaf,
&g2_wnaf,
&powers_of_tau,
&assembly.at_inputs,
&assembly.ct_inputs,
&mut a_g1[0..assembly.num_inputs],
&mut a_g2[0..assembly.num_inputs],
&mut c_1_g1[0..assembly.num_inputs],
&mut c_2_g1[0..assembly.num_inputs],
&alpha,
&beta,
&gamma,
&z_at_tau,
&worker
);
// Evaluate for inputs.
eval_stage_1(
&g1_wnaf,
&g2_wnaf,
&powers_of_tau,
&assembly.at_aux,
&assembly.ct_aux,
&mut a_g1[assembly.num_inputs..],
&mut a_g2[assembly.num_inputs..],
&mut c_1_g1[assembly.num_inputs..],
&mut c_2_g1[assembly.num_inputs..],
&alpha,
&beta,
&gamma,
&z_at_tau,
&worker
);
// for _ in 0..assembly.num_inputs {
// c_1_g1.remove(0);
// }
if verbose {eprintln!("evaluating polynomials done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};
// // Don't allow any elements be unconstrained, so that
// // the L query is always fully dense.
// for e in l.iter() {
// if e.is_zero() {
// return Err(SynthesisError::UnconstrainedVariable);
// }
// }
// let g1 = g1.into_affine();
// let g2 = g2.into_affine();
// let vk = VerifyingKey::<E> {
// alpha_g1: g1.mul(alpha).into_affine(),
// beta_g1: g1.mul(beta).into_affine(),
// beta_g2: g2.mul(beta).into_affine(),
// gamma_g2: g2.mul(gamma).into_affine(),
// delta_g1: g1.mul(delta).into_affine(),
// delta_g2: g2.mul(delta).into_affine(),
// ic: ic.into_iter().map(|e| e.into_affine()).collect()
// };
println!("Has generated {} points", a_g1.len());
Ok(())
// Ok(Parameters {
// vk: vk,
// h: Arc::new(h.into_iter().map(|e| e.into_affine()).collect()),
// l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()),
// // Filter points at infinity away from A/B queries
// a: Arc::new(a.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
// b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
// b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect())
// })
}
<file_sep>/src/plonk/commitments/transparent/mod.rs
use crate::pairing::ff::PrimeField;
use crate::plonk::polynomials::*;
use crate::worker::Worker;
use super::CommitmentScheme;
pub mod precomputations;
pub mod iop;
pub mod fri;
pub mod iop_compiler;
pub mod utils;
use self::precomputations::PrecomputedInvOmegas;
use crate::plonk::domains::*;
use crate::plonk::commitments::transcript::Prng;
use crate::plonk::commitments::transparent::fri::*;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::commitments::transcript::*;
use crate::plonk::fft::cooley_tukey_ntt::{CTPrecomputations, BitReversedOmegas};
// Transparent (trusted-setup-free) polynomial commitment scheme built on FRI.
// "Stateless": the committer keeps no transcript of its own and uses an
// externally supplied transcript `T` for all Fiat-Shamir operations.
pub struct StatelessTransparentCommitter<
    F: PrimeField,
    FRI: FriIop<F>,
    T: Transcript<F, Input = < < < <FRI as FriIop<F> >::IopType as IOP<F> >::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput >
>{
    // number of coefficients of the committed polynomials (checked in `precompute`)
    max_degree_plus_one: usize,
    // low-degree-extension blowup factor (must be a power of two)
    lde_factor: usize,
    // FRI folding stops once this many coefficients remain (power of two)
    output_coeffs_at_degree_plus_one: usize,
    // number of spot-check queries into the LDE oracle
    num_queries: usize,
    // thread pool used by all FFT/LDE work
    worker: Worker,
    // inverse omegas for the LDE-sized domain (consumed by FRI)
    precomputed_inverse_omegas: PrecomputedInvOmegas<F>,
    // bit-reversed omegas for the base domain (consumed by the NTT-based LDE)
    precomputed_bitreversed_omegas: BitReversedOmegas<F>,
    fri_params: <FRI as FriIop<F>>::Params,
    _marker_fri: std::marker::PhantomData<FRI>,
    _marker_t: std::marker::PhantomData<T>
}
impl<
    F: PrimeField,
    FRI: FriIop<F>,
    T: Transcript<F, Input = < < < <FRI as FriIop<F> >::IopType as IOP<F> >::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput >
> std::fmt::Debug for StatelessTransparentCommitter<F, FRI, T> {
    // Prints a fixed human-readable tag (plus newline); the precomputation
    // tables held by the committer are large and deliberately not rendered.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
        f.write_str("Stateless transparent committer\n")
    }
}
// Construction-time configuration for `StatelessTransparentCommitter`;
// consumed by `CommitmentScheme::new_for_size` as the `Meta` associated type.
#[derive(Debug)]
pub struct TransparentCommitterParameters<F: PrimeField, FRI: FriIop<F>>{
    // LDE blowup factor; must be a power of two
    pub lde_factor: usize,
    // number of spot-check queries per opening
    pub num_queries: usize,
    // FRI folding stops at this many coefficients; must be a power of two
    pub output_coeffs_at_degree_plus_one: usize,
    pub fri_params: <FRI as FriIop<F>>::Params,
}
impl<F: PrimeField, FRI: FriIop<F>> Clone for TransparentCommitterParameters<F, FRI> {
    // Manual impl: a derive would place `Clone` bounds on `F` and `FRI`
    // themselves, which are not needed — only `FRI::Params` must be cloned.
    fn clone(&self) -> Self {
        let TransparentCommitterParameters {
            lde_factor,
            num_queries,
            output_coeffs_at_degree_plus_one,
            fri_params,
        } = self;
        Self {
            lde_factor: *lde_factor,
            num_queries: *num_queries,
            output_coeffs_at_degree_plus_one: *output_coeffs_at_degree_plus_one,
            fri_params: fri_params.clone(),
        }
    }
}
use std::time::Instant;
impl<
    F: PrimeField,
    FRI: FriIop<F>,
    T: Transcript<F, Input = < < < <FRI as FriIop<F> >::IopType as IOP<F> >::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput >
> CommitmentScheme<F> for StatelessTransparentCommitter<F, FRI, T> {
    // A commitment is the Merkle root of the IOP tree over the LDE values.
    type Commitment = < < < <FRI as FriIop<F> >::IopType as IOP<F> >::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput;
    // An opening proof is the FRI proof for the aggregated quotient plus, per
    // committed polynomial, one (value, Merkle query) pair per domain index.
    type OpeningProof = (FRI::Proof, Vec<Vec< (F, < < FRI as FriIop<F> >::IopType as IOP<F> >::Query) > >);
    // Cached per-polynomial data: its LDE values and the oracle built over them.
    type IntermediateData = (Polynomial<F, Values>, < FRI as FriIop<F> >::IopType);
    type Meta = TransparentCommitterParameters<F, FRI>;
    type Prng = T;
    const REQUIRES_PRECOMPUTATION: bool = true;
    const IS_HOMOMORPHIC: bool = false;

    // Builds a committer for polynomials of `max_degree_plus_one` coefficients,
    // precomputing omega tables for both the base and the LDE-sized domains.
    fn new_for_size(max_degree_plus_one: usize, meta: Self::Meta) -> Self {
        let base_size = max_degree_plus_one.next_power_of_two();
        assert!(meta.lde_factor.is_power_of_two());
        assert!(meta.output_coeffs_at_degree_plus_one.is_power_of_two());
        let lde_domain_size = base_size*meta.lde_factor;
        let base_domain = Domain::<F>::new_for_size(base_size as u64).expect("domain of large enough size should exist");
        let lde_domain = Domain::<F>::new_for_size(lde_domain_size as u64).expect("domain of large enough size should exist");
        let worker = Worker::new();
        let omegas_inv_precomp = PrecomputedInvOmegas::<F>::new_for_domain(&lde_domain, &worker);
        let omegas_bitrev_precomp = BitReversedOmegas::<F>::new_for_domain(&base_domain, &worker);
        StatelessTransparentCommitter::<F, FRI, T> {
            max_degree_plus_one: max_degree_plus_one,
            lde_factor: meta.lde_factor,
            output_coeffs_at_degree_plus_one: meta.output_coeffs_at_degree_plus_one,
            num_queries: meta.num_queries,
            worker: worker,
            precomputed_inverse_omegas: omegas_inv_precomp,
            precomputed_bitreversed_omegas: omegas_bitrev_precomp,
            fri_params: meta.fri_params,
            _marker_fri: std::marker::PhantomData,
            _marker_t: std::marker::PhantomData
        }
    }

    // Computes the LDE of `poly` and builds the query oracle over it, so that
    // later openings can skip this work.
    // NOTE(review): this uses `lde_using_bitreversed_ntt` while `commit_single`
    // uses the bitreversed variant followed by `bitreverse_enumeration`;
    // confirm both paths yield the same value ordering.
    fn precompute(&self, poly: &Polynomial<F, Coefficients>) -> Option<Self::IntermediateData> {
        assert!(poly.size() == self.max_degree_plus_one);
        let original_poly_lde = poly.clone().lde_using_bitreversed_ntt(&self.worker, self.lde_factor, &self.precomputed_bitreversed_omegas).expect("must make an LDE");
        // let original_poly_lde = poly.clone().lde(&self.worker, self.lde_factor).expect("must make an LDE");
        let original_tree = < < FRI as FriIop<F> >::IopType as IOP<F> >::create(&original_poly_lde.as_ref());
        Some((original_poly_lde, original_tree))
    }

    // Commits to a single polynomial: computes its (natural-order) LDE, builds
    // the oracle tree, and returns the tree root plus the cached aux data.
    fn commit_single(&self, poly: &Polynomial<F, Coefficients>) -> (Self::Commitment, Option<Self::IntermediateData>) {
        println!("Start commit single");
        let start = Instant::now();
        // let mut original_poly_lde = poly.clone().lde_using_bitreversed_ntt(&self.worker, self.lde_factor, &self.precomputed_bitreversed_omegas).expect("must make an LDE");
        let mut original_poly_lde = poly.clone().bitreversed_lde_using_bitreversed_ntt(
            &self.worker,
            self.lde_factor,
            &self.precomputed_bitreversed_omegas,
            &F::one(),
        ).expect("must make an LDE");
        // values come out bit-reversed; restore natural enumeration
        original_poly_lde.bitreverse_enumeration(&self.worker);
        // let original_poly_lde = poly.clone().lde(&self.worker, self.lde_factor).expect("must make an LDE");
        let original_tree = < < FRI as FriIop<F> >::IopType as IOP<F> >::create(&original_poly_lde.as_ref());
        let commitment = original_tree.get_root();
        println!("Done in {:?} for max degree {}", start.elapsed(), poly.size());
        println!("Done commit single");
        (commitment, Some((original_poly_lde, original_tree)))
    }

    // Not implemented for this committer; see `open_multiple` for the
    // aggregated opening path.
    fn commit_multiple(&self, polynomials: Vec<&Polynomial<F, Coefficients>>, degrees: Vec<usize>, aggregation_coefficient: F) -> (Self::Commitment, Option<Vec<Self::IntermediateData>>) {
        unimplemented!()
    }

    // Opens a single polynomial at `at_point`: FRI-proves low degree of the
    // Kate quotient q(X) = (p(X) - p(z)) / (X - z), then answers transcript-
    // derived spot-check queries against the committed LDE oracle.
    fn open_single(
        &self,
        poly: &Polynomial<F, Coefficients>,
        at_point: F,
        _opening_value: F,
        data: &Option<&Self::IntermediateData>,
        prng: &mut Self::Prng
    ) -> Self::OpeningProof {
        println!("Start open single");
        let start = Instant::now();
        // do not need to to the subtraction cause last coefficient is never used by division
        let division_result = {
            let division_result = kate_divison_with_same_return_size(poly.as_ref(), at_point);
            division_result
        };
        let q_poly = Polynomial::<F, Coefficients>::from_coeffs(division_result).expect("must be small enough");
        let q_poly_lde = q_poly.lde_using_bitreversed_ntt(&self.worker, self.lde_factor, &self.precomputed_bitreversed_omegas).expect("must make an LDE");
        // let q_poly_lde = q_poly.lde(&self.worker, self.lde_factor).expect("must make an LDE");
        let lde_size = q_poly_lde.size();
        let fri_proto = FRI::proof_from_lde(
            &q_poly_lde,
            self.lde_factor,
            self.output_coeffs_at_degree_plus_one,
            &self.precomputed_inverse_omegas,
            &self.worker,
            prng,
            &self.fri_params
        ).expect("FRI must succeed");
        // absorb the final FRI coefficients before drawing query challenges,
        // so the verifier can re-derive the same indices
        for c in fri_proto.get_final_coefficients().iter() {
            prng.commit_field_element(&c);
        }
        let mut used_queries: Vec<usize> = vec![];
        let mut domain_indexes = vec![];
        // draw query indices, rejecting any whose coset overlaps an earlier one
        // even while this is conditional, it can be changed to unconditional given large enough field
        while domain_indexes.len() < self.num_queries {
            let domain_idx = bytes_to_challenge_index(prng.get_challenge_bytes(), lde_size);
            let coset_index_values = < < <FRI as FriIop<F> >::IopType as IOP<F> >::Combiner as CosetCombiner<F>>::get_coset_for_natural_index(domain_idx, lde_size);
            let mut can_use = true;
            for v in coset_index_values.iter() {
                if used_queries.contains(&v) {
                    can_use = false;
                    break
                }
            }
            if can_use {
                domain_indexes.push(domain_idx);
                used_queries.extend(coset_index_values);
            }
        }
        let q_poly_fri_proof = FRI::prototype_into_proof(fri_proto, &q_poly_lde, domain_indexes.clone(), &self.fri_params).expect("must generate a proper proof");
        let mut original_poly_queries = vec![];
        // fall back to computing the LDE/oracle on the fly when the caller
        // supplied no cached aux data
        let precomputations = if data.is_some() {
            None
        } else {
            self.precompute(&poly)
        };
        let (original_poly_lde, original_poly_lde_oracle) = if let Some((lde, oracle)) = data.as_ref() {
            (lde, oracle)
        } else if let Some((lde, oracle)) = precomputations.as_ref() {
            (lde, oracle)
        } else {
            unreachable!("precomputations are required for transparent polynomial commitment");
        };
        for idx in domain_indexes.into_iter() {
            let original_value = < < <FRI as FriIop<F> >::IopType as IOP<F> >::Combiner as CosetCombiner<F>>::get_for_natural_index(original_poly_lde.as_ref(), idx);
            let original_poly_query = original_poly_lde_oracle.query(idx, original_poly_lde.as_ref());
            original_poly_queries.push((*original_value, original_poly_query));
        }
        println!("Done in {:?} for max degree {}", start.elapsed(), poly.size());
        println!("Done open single");
        (q_poly_fri_proof, vec![original_poly_queries])
    }

    // Opens several polynomials at their respective points with one FRI proof:
    // the per-polynomial Kate quotients are aggregated with powers of
    // `aggregation_coefficient` into a single quotient polynomial.
    fn open_multiple(
        &self,
        polynomials: Vec<&Polynomial<F, Coefficients>>,
        degrees: Vec<usize>,
        aggregation_coefficient: F,
        at_points: Vec<F>,
        opening_values: Vec<F>,
        data: &Option<Vec<&Self::IntermediateData>>,
        prng: &mut Self::Prng
    ) -> Self::OpeningProof {
        println!("Start open multiple");
        let start = Instant::now();
        assert!(at_points.len() == opening_values.len());
        assert!(at_points.len() == polynomials.len());
        let max_degree = *degrees.iter().max().expect("MAX element exists");
        let min_degree = *degrees.iter().min().expect("MIN element exists");
        // aggregation is only sound for polynomials of comparable degree
        assert!(f64::from(max_degree as u32) / f64::from(min_degree as u32) < 2.0, "polynomials should not have too large degree difference");
        // compute all Kate quotients in parallel
        let mut division_results = vec![vec![]; polynomials.len()];
        self.worker.scope(polynomials.len(), |scope, chunk| {
            for ((p, q), at) in polynomials.chunks(chunk)
                .zip(division_results.chunks_mut(chunk))
                .zip(at_points.chunks(chunk))
            {
                scope.spawn(move |_| {
                    for ((p, q), at) in p.iter().zip(q.iter_mut()).zip(at.iter()) {
                        let division_result = kate_divison_with_same_return_size(p.as_ref(), *at);
                        *q = division_result;
                    }
                });
            }
        });
        // aggregate starting using the first coefficient of 1
        let mut q_poly: Option<Polynomial::<F, Coefficients>> = None;
        let mut alpha = F::one();
        for q in division_results.into_iter() {
            if let Some(q_poly) = q_poly.as_mut() {
                let q = Polynomial::<F, Coefficients>::from_coeffs(q).expect("must be small enough");
                q_poly.add_assign_scaled(&self.worker, &q, &alpha);
            } else {
                let q = Polynomial::<F, Coefficients>::from_coeffs(q).expect("must be small enough");
                q_poly = Some(q);
            }
            alpha.mul_assign(&aggregation_coefficient);
        }
        let q_poly = q_poly.expect("now it's aggregated");
        // let q_poly_lde = q_poly.lde(&self.worker, self.lde_factor).expect("must make an LDE");
        let mut q_poly_lde = q_poly.bitreversed_lde_using_bitreversed_ntt(
            &self.worker,
            self.lde_factor,
            &self.precomputed_bitreversed_omegas,
            &F::one(),
        ).expect("must make an LDE");
        // values come out bit-reversed; restore natural enumeration
        q_poly_lde.bitreverse_enumeration(&self.worker);
        let lde_size = q_poly_lde.size();
        let fri_proto = FRI::proof_from_lde(
            &q_poly_lde,
            self.lde_factor,
            self.output_coeffs_at_degree_plus_one,
            &self.precomputed_inverse_omegas,
            &self.worker,
            prng,
            &self.fri_params
        ).expect("FRI must succeed");
        // absorb final FRI coefficients before drawing query challenges
        for c in fri_proto.get_final_coefficients().iter() {
            prng.commit_field_element(&c);
        }
        let mut used_queries: Vec<usize> = vec![];
        let mut domain_indexes = vec![];
        // draw query indices, rejecting coset collisions (same scheme as
        // `open_single`/`verify_single`)
        // even while this is conditional, it can be changed to unconditional given large enough field
        while domain_indexes.len() < self.num_queries {
            let domain_idx = bytes_to_challenge_index(prng.get_challenge_bytes(), lde_size);
            let coset_index_values = < < <FRI as FriIop<F> >::IopType as IOP<F> >::Combiner as CosetCombiner<F>>::get_coset_for_natural_index(domain_idx, lde_size);
            let mut can_use = true;
            for v in coset_index_values.iter() {
                if used_queries.contains(&v) {
                    can_use = false;
                    break
                }
            }
            if can_use {
                domain_indexes.push(domain_idx);
                used_queries.extend(coset_index_values);
            }
        }
        let q_poly_fri_proof = FRI::prototype_into_proof(
            fri_proto,
            &q_poly_lde,
            domain_indexes.clone(),
            &self.fri_params
        ).expect("must generate a proper proof");
        // recompute aux data for every polynomial when none was supplied
        let precomputations = if data.is_some() {
            None
        } else {
            let mut result = Vec::with_capacity(polynomials.len());
            for poly in polynomials.iter() {
                let p = self.precompute(&poly).expect("aux data is computed");
                result.push(p);
            }
            Some(result)
        };
        // unify the caller-supplied and locally-computed aux data behind one
        // reference; `prec_may_be` keeps the local Vec alive long enough
        let mut prec_may_be = None;
        let data = if data.is_some() {
            data.as_ref()
        } else {
            prec_may_be = Some(precomputations.as_ref().expect("is some").iter().map(|el| el).collect::<Vec<_>>());
            prec_may_be.as_ref()
        }.expect("there is aux data in full");
        // answer every query index against every polynomial's oracle
        let mut queries = vec![];
        for (original_poly_lde, original_poly_lde_oracle) in data.iter() {
            let mut original_poly_queries = vec![];
            for idx in domain_indexes.clone().into_iter() {
                let original_value = < < <FRI as FriIop<F> >::IopType as IOP<F> >::Combiner as CosetCombiner<F>>::get_for_natural_index(original_poly_lde.as_ref(), idx);
                let original_poly_query = original_poly_lde_oracle.query(idx, original_poly_lde.as_ref());
                original_poly_queries.push((*original_value, original_poly_query));
            }
            queries.push(original_poly_queries);
        }
        // let mut opened_values = vec![];
        // for idx in domain_indexes.clone().into_iter() {
        //     let value = < < <FRI as FriIop<F> >::IopType as IOP<F> >::Combiner as CosetCombiner<F>>::get_for_natural_index(q_poly_lde.as_ref(), idx);
        //     opened_values.push(value);
        // }
        // println!("Will open poly at indexes {:?} for values {:?}", domain_indexes, opened_values);
        println!("Done in {:?} for max degree {}", start.elapsed(), max_degree);
        println!("Done open multiple");
        (q_poly_fri_proof, queries)
    }

    // Verifies a single opening: re-derives the query indices from the
    // transcript, simulates the expected quotient values
    // (p(x) - claimed) / (x - z) from the queried oracle values, checks each
    // Merkle query against the commitment, then runs FRI verification.
    fn verify_single(&self, commitment: &Self::Commitment, at_point: F, claimed_value: F, proof: &Self::OpeningProof, prng: &mut Self::Prng) -> bool {
        let (q_poly_fri_proof, original_poly_queries_vec) = proof;
        assert!(original_poly_queries_vec.len() == 1);
        let original_poly_queries = &original_poly_queries_vec[0];
        let lde_size = self.max_degree_plus_one.next_power_of_two() * self.lde_factor;
        let lde_domain = Domain::<F>::new_for_size(lde_size as u64).expect("large enough domain must exist");
        // first get FRI challenges
        let fri_challenges = FRI::get_fri_challenges(q_poly_fri_proof, prng, &self.fri_params);
        // mirror the prover's transcript: absorb final coefficients before
        // drawing query challenges
        for c in q_poly_fri_proof.get_final_coefficients().iter() {
            prng.commit_field_element(&c);
        }
        // then make expected query locations (same rejection scheme as prover)
        let mut used_queries: Vec<usize> = vec![];
        let mut domain_indexes = vec![];
        // even while this is conditional, it can be changed to unconditional given large enough field
        while domain_indexes.len() < self.num_queries {
            let domain_idx = bytes_to_challenge_index(prng.get_challenge_bytes(), lde_size);
            let coset_index_values = < < <FRI as FriIop<F> >::IopType as IOP<F> >::Combiner as CosetCombiner<F>>::get_coset_for_natural_index(domain_idx, lde_size);
            let mut can_use = true;
            for v in coset_index_values.iter() {
                if used_queries.contains(&v) {
                    can_use = false;
                    break
                }
            }
            if can_use {
                domain_indexes.push(domain_idx);
                used_queries.extend(coset_index_values);
            }
        }
        // now simulate expected values of the quotient polynomial
        let mut simulated_q_poly_values = vec![];
        for (domain_idx, original_poly_query) in domain_indexes.clone().into_iter()
            .zip(original_poly_queries.iter()) {
            let x = lde_domain.generator.pow(&[domain_idx as u64]);
            // the claimed opened value must match the Merkle leaf
            assert!(original_poly_query.1.value() == original_poly_query.0);
            let mut num = original_poly_query.0;
            num.sub_assign(&claimed_value);
            let mut den = x;
            den.sub_assign(&at_point);
            let den_inversed = den.inverse().expect("denominator is unlikely to be zero in large enough field");
            let mut value_at_x = num;
            value_at_x.mul_assign(&den_inversed);
            let is_in_commitment = < <FRI as FriIop<F> >::IopType as IOP<F> >::verify_query(&original_poly_query.1, commitment);
            if !is_in_commitment {
                return false;
            }
            simulated_q_poly_values.push(value_at_x);
        }
        let valid = FRI::verify_proof_with_challenges(
            q_poly_fri_proof,
            domain_indexes,
            &simulated_q_poly_values,
            &fri_challenges,
            &self.fri_params
        ).expect("fri verification should work");
        valid
    }

    // Verifies an aggregated multi-polynomial opening produced by
    // `open_multiple`: per-polynomial quotient values are simulated and summed
    // with the same powers of `aggregation_coefficient` the prover used,
    // then checked against the single FRI proof.
    #[track_caller]
    fn verify_multiple_openings(
        &self,
        commitments: Vec<&Self::Commitment>,
        at_points: Vec<F>,
        claimed_values: &Vec<F>,
        aggregation_coefficient: F,
        proof: &Self::OpeningProof,
        prng: &mut Self::Prng
    ) -> bool {
        let (q_poly_fri_proof, original_poly_queries_vec) = proof;
        let lde_size = self.max_degree_plus_one.next_power_of_two() * self.lde_factor;
        let lde_domain = Domain::<F>::new_for_size(lde_size as u64).expect("large enough domain must exist");
        // first get FRI challenges
        let fri_challenges = FRI::get_fri_challenges(q_poly_fri_proof, prng, &self.fri_params);
        // mirror the prover's transcript before drawing query challenges
        for c in q_poly_fri_proof.get_final_coefficients().iter() {
            prng.commit_field_element(&c);
        }
        // then make expected query locations (same rejection scheme as prover)
        let mut used_queries: Vec<usize> = vec![];
        let mut domain_indexes = vec![];
        // even while this is conditional, it can be changed to unconditional given large enough field
        while domain_indexes.len() < self.num_queries {
            let domain_idx = bytes_to_challenge_index(prng.get_challenge_bytes(), lde_size);
            let coset_index_values = < < <FRI as FriIop<F> >::IopType as IOP<F> >::Combiner as CosetCombiner<F>>::get_coset_for_natural_index(domain_idx, lde_size);
            let mut can_use = true;
            for v in coset_index_values.iter() {
                if used_queries.contains(&v) {
                    can_use = false;
                    break
                }
            }
            if can_use {
                domain_indexes.push(domain_idx);
                used_queries.extend(coset_index_values);
            }
        }
        // now simulate expected values
        let mut simulated_q_poly_values = vec![F::zero(); domain_indexes.len()];
        assert!(original_poly_queries_vec.len() == claimed_values.len());
        // accumulate value of the q poly over subpolys
        // we know claimed values of every polynomial at opening point,
        // so we can compute something like (poly_1(x) - poly_1(z))/(x-z) + alpha * () + ...
        // structure of original_poly_queries_vec is vector over [vector of queries into the same poly]
        let mut alpha = F::one();
        for subpoly_index in 0..original_poly_queries_vec.len() {
            let queries_to_the_same_poly = &original_poly_queries_vec[subpoly_index];
            let claimed_value = claimed_values[subpoly_index];
            let subpoly_commitment = commitments[subpoly_index];
            let opening_at = &at_points[subpoly_index];
            assert_eq!(queries_to_the_same_poly.len(), domain_indexes.len());
            let mut simulated_q_poly_subvalues = vec![];
            for (domain_idx, original_poly_query) in domain_indexes.clone().into_iter()
                .zip(queries_to_the_same_poly.iter()) {
                let x = lde_domain.generator.pow(&[domain_idx as u64]);
                // the claimed opened value must match the Merkle leaf
                assert!(original_poly_query.1.value() == original_poly_query.0);
                let mut num = original_poly_query.0;
                num.sub_assign(&claimed_value);
                let mut den = x;
                den.sub_assign(&opening_at);
                let den_inversed = den.inverse().expect("denominator is unlikely to be zero in large enough field");
                let mut value_at_x = num;
                value_at_x.mul_assign(&den_inversed);
                let is_in_commitment = < <FRI as FriIop<F> >::IopType as IOP<F> >::verify_query(&original_poly_query.1, subpoly_commitment);
                if !is_in_commitment {
                    println!("Not in the root for subpoly {} out of {}", subpoly_index, original_poly_queries_vec.len());
                    return false;
                }
                simulated_q_poly_subvalues.push(value_at_x);
            }
            // in simulated_q_poly_values now there are values of this polynomial for all the queries,
            // now we need to sum them up with a proper coefficients starting with 0
            assert_eq!(simulated_q_poly_values.len(), simulated_q_poly_subvalues.len());
            for (a, s) in simulated_q_poly_values.iter_mut().zip(simulated_q_poly_subvalues.into_iter()) {
                let mut tmp = s;
                tmp.mul_assign(&alpha);
                a.add_assign(&tmp);
            }
            alpha.mul_assign(&aggregation_coefficient);
        }
        // println!("Will open poly at indexes {:?} for simulated values {:?}", domain_indexes, simulated_q_poly_values);
        let now = std::time::Instant::now();
        let valid = FRI::verify_proof_with_challenges(
            q_poly_fri_proof,
            domain_indexes,
            &simulated_q_poly_values,
            &fri_challenges,
            &self.fri_params
        ).expect("fri verification should work");
        println!("FRI part taken {:?}", now.elapsed());
        valid
    }
}
// use single threaded Kate division for now
// Synthetic division of a(X) by (X - b), returning the quotient padded with a
// trailing zero so the output length equals the input length (the constant
// remainder a(b) is intentionally dropped). Processing runs from the leading
// coefficient down; leading zeros are skipped via `found_one`. The recurrence
// is q_{i-1} = a_i + b * q_i; `b` is negated once so the loop can reuse
// `sub_assign` (r - tmp with tmp = (-b) * q_i equals r + b * q_i).
fn kate_divison_with_same_return_size<F: PrimeField>(a: &[F], mut b: F) -> Vec<F>
{
    b.negate();
    let mut q = vec![F::zero(); a.len()];
    let mut tmp = F::zero();
    let mut found_one = false;
    // skip(1) leaves the highest output slot at zero: the quotient has one
    // fewer coefficient than the input
    for (q, r) in q.iter_mut().rev().skip(1).zip(a.iter().rev()) {
        if !found_one {
            if r.is_zero() {
                continue
            } else {
                found_one = true;
            }
        }
        let mut lead_coeff = *r;
        lead_coeff.sub_assign(&tmp);
        *q = lead_coeff;
        tmp = lead_coeff;
        tmp.mul_assign(&b);
    }
    q
}
// this one is not ZK cause will expose values not from LDE, but from the original domain too
// Maps a transcript challenge to a query index in [0, lde_size): interprets
// the LAST 8 bytes of `bytes` as a big-endian u64 and reduces it modulo the
// domain size. Uses stdlib `u64::from_be_bytes` instead of the `byteorder`
// crate — behavior is identical. Panics if `bytes` is shorter than 8 bytes.
// NOTE(review): the modulo introduces a slight bias when `lde_size` does not
// divide 2^64; present in the original as well, and negligible for
// power-of-two domains far below 2^64.
fn bytes_to_challenge_index<S: AsRef<[u8]>>(bytes: S, lde_size: usize) -> usize {
    let as_ref = bytes.as_ref();
    let mut tail = [0u8; 8];
    tail.copy_from_slice(&as_ref[(as_ref.len() - 8)..]);
    let natural_x_index = u64::from_be_bytes(tail) as usize;
    natural_x_index % lde_size
}
// End-to-end commit/open/verify round trips for the transparent committer
// over BN256's scalar field with a trivial Blake2s IOP and naive FRI.
#[cfg(test)]
mod test {
    use super::*;
    use crate::pairing::ff::{Field, PrimeField};
    use crate::{SynthesisError};
    use std::marker::PhantomData;
    use crate::plonk::utils::*;
    use crate::plonk::commitments::transparent::fri::*;
    use crate::plonk::commitments::transparent::iop::*;
    use crate::plonk::commitments::transcript::*;
    use crate::plonk::commitments::transparent::fri::naive_fri::naive_fri::*;
    use crate::plonk::commitments::transparent::iop::blake2s_trivial_iop::*;
    use crate::plonk::commitments::*;

    // Small smoke test: degree-15 monomial, folded all the way down to a
    // single FRI coefficient, only 2 queries.
    #[test]
    fn test_small_transparent_commitment() {
        use crate::pairing::bn256::{Bn256, Fr};
        const SIZE:usize = 16;
        let worker = Worker::new();
        // let coeffs: Vec<_> = (0..SIZE).collect();
        // let coeffs: Vec<_> = vec![1, 1, 0, 0, 0, 0, 0, 0];
        let coeffs: Vec<_> = vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1];
        let coeffs = convert_to_field_elements(&coeffs, &worker);
        let poly = Polynomial::<Fr, _>::from_coeffs(coeffs).unwrap();
        let mut transcript = Blake2sTranscript::<Fr>::new();
        type Iop = TrivialBlake2sIOP<Fr>;
        type Fri = NaiveFriIop<Fr, Iop>;
        type Committer = StatelessTransparentCommitter<Fr, Fri, Blake2sTranscript<Fr>>;
        let meta = TransparentCommitterParameters {
            lde_factor: 16,
            num_queries: 2,
            output_coeffs_at_degree_plus_one: 1,
            fri_params: ()
        };
        let committer = <Committer as CommitmentScheme<Fr>>::new_for_size(SIZE, meta);
        let (commitment, aux_data) = committer.commit_single(&poly);
        let open_at = Fr::from_str("123").unwrap();
        let expected_at_z = poly.evaluate_at(&worker, open_at);
        let proof = committer.open_single(&poly, open_at, expected_at_z, &aux_data.as_ref(), &mut transcript);
        // verifier re-derives challenges from a fresh transcript
        let mut transcript = Blake2sTranscript::<Fr>::new();
        let valid = committer.verify_single(&commitment, open_at, expected_at_z, &proof, &mut transcript);
        assert!(valid);
    }

    // Larger run (2^20 coefficients) that also reports commit/open/verify
    // wall-clock timings.
    #[test]
    fn test_large_transparent_commitment() {
        use std::time::Instant;
        use crate::pairing::bn256::{Bn256, Fr};
        let worker = Worker::new();
        const SIZE:usize = 1 << 20;
        // const SIZE:usize = 1 << 10;
        let coeffs: Vec<_> = (0..SIZE).collect();
        let coeffs = convert_to_field_elements(&coeffs, &worker);
        let poly = Polynomial::<Fr, _>::from_coeffs(coeffs).unwrap();
        let mut transcript = Blake2sTranscript::<Fr>::new();
        type Iop = TrivialBlake2sIOP<Fr>;
        type Fri = NaiveFriIop<Fr, Iop>;
        type Committer = StatelessTransparentCommitter<Fr, Fri, Blake2sTranscript<Fr>>;
        let meta = TransparentCommitterParameters {
            lde_factor: 16,
            num_queries: 6, // ~100 bits of security
            output_coeffs_at_degree_plus_one: 16,
            fri_params: ()
        };
        let committer = <Committer as CommitmentScheme<Fr>>::new_for_size(SIZE, meta);
        let now = Instant::now();
        let (commitment, aux_data) = committer.commit_single(&poly);
        println!("Commitment taken {:?}", now.elapsed());
        let open_at = Fr::from_str("123").unwrap();
        let expected_at_z = poly.evaluate_at(&worker, open_at);
        let now = Instant::now();
        let proof = committer.open_single(&poly, open_at, expected_at_z, &aux_data.as_ref(), &mut transcript);
        println!("Opening taken {:?}", now.elapsed());
        // verifier re-derives challenges from a fresh transcript
        let mut transcript = Blake2sTranscript::<Fr>::new();
        let now = Instant::now();
        let valid = committer.verify_single(&commitment, open_at, expected_at_z, &proof, &mut transcript);
        println!("Verification taken {:?}", now.elapsed());
        assert!(valid);
    }
}
use crate::ff::*;
use super::{PartialReductionField, PartialTwoBitReductionField};
// Raw 256-bit integer representation: four u64 limbs, least significant first.
#[derive(Copy, Clone, PartialEq, Eq, Default, Hash, ::serde::Serialize, ::serde::Deserialize)]
pub struct FrRepr(pub [u64; 4usize]);
// Field element over the Proth-form modulus below. The `R`/`R2` constants
// suggest the internal value is kept in Montgomery form — TODO confirm
// against the arithmetic impls later in this file.
#[derive(Hash, ::serde::Serialize, ::serde::Deserialize)]
pub struct Fr(FrRepr);
// Modulus in little-endian limbs: q = K * 2^192 + 1 with
// K = 0x0800_0000_0000_0011 = 2^59 + 17 (a Proth-form prime).
// const MODULUS: FrRepr = FrRepr([1u64, 0u64, 0u64, 576460752303423505u64]);
const MODULUS: FrRepr = FrRepr([1u64, 0u64, 0u64, 0x0800_0000_0000_0011]);
// 2*q — the bound used by partially-reduced arithmetic
// (see PartialReductionField / PartialTwoBitReductionField imports above).
const MODULUS_TWICE: FrRepr = FrRepr([2u64, 0u64, 0u64, 0x1000_0000_0000_0022]);
const MODULUS_BITS: u32 = 252u32;
// 256 - MODULUS_BITS: top bits to mask off when sampling a 256-bit repr
const REPR_SHAVE_BITS: u32 = 4u32;
// two-adicity: q - 1 = K * 2^192, so 2^192 divides the multiplicative group order
const S: u32 = 192u32;
const C: u64 = 1u64;
// 0800 0000 0000 0011
const K: u64 = 576460752303423505u64;
const K_U128: u128 = 576460752303423505u128;
// NOTE(review): presumably a precomputed reciprocal for Barrett-style
// reduction modulo q — confirm against the reduction routine that consumes it.
const NU: [u64; 5] = [
    0x0000028c81fffbff,
    0xfffffffeccf00000,
    0x0000000000907fff,
    0xffffffffffffbc00,
    0x1f
];
// R = 2^256 mod q (Montgomery radix) — TODO confirm
const R: FrRepr = FrRepr([
    18446744073709551585u64,
    18446744073709551615u64,
    18446744073709551615u64,
    576460752303422960u64,
]);
// R2 = R^2 mod q, used to convert into Montgomery form — TODO confirm
const R2: FrRepr = FrRepr([
    18446741271209837569u64,
    5151653887u64,
    18446744073700081664u64,
    576413109808302096u64,
]);
// Multiplicative generator of the field (stored in the same internal form)
const GENERATOR: FrRepr = FrRepr([
    18446744073709551521u64,
    18446744073709551615u64,
    18446744073709551615u64,
    576460752303421872u64,
]);
// 2^S-th primitive root of unity used by the FFT domains
const ROOT_OF_UNITY: FrRepr = FrRepr([
    4685640052668284376u64,
    12298664652803292137u64,
    735711535595279732u64,
    514024103053294630u64,
]);
// const INV: u64 = 18446744073709551615u64;
/// Adds the incoming carry to `a`; stores the outgoing carry (0 or 1) back
/// into `carry` and returns the low 64 bits of the sum.
#[inline(always)]
fn add_carry(a: u64, carry: &mut u64) -> u64 {
    let (sum, overflowed) = a.overflowing_add(*carry);
    *carry = overflowed as u64;
    sum
}
/// Subtracts the incoming borrow from `a`; stores the outgoing borrow (0 or 1)
/// back into `borrow` and returns the low 64 bits of the difference.
#[inline(always)]
fn sub_borrow(a: u64, borrow: &mut u64) -> u64 {
    let (diff, underflowed) = a.overflowing_sub(*borrow);
    *borrow = underflowed as u64;
    diff
}
impl ::std::fmt::Debug for FrRepr {
    /// Renders the 256-bit value as a single big-endian hex literal ("0x…").
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        write!(f, "0x")?;
        // Most-significant limb first, each zero-padded to 16 hex digits.
        for idx in (0..4).rev() {
            write!(f, "{:016x}", self.0[idx])?;
        }
        Ok(())
    }
}

impl ::rand::Rand for FrRepr {
    /// Fills all four limbs with uniformly random bits (NOT reduced mod q).
    #[inline(always)]
    fn rand<R: ::rand::Rng>(rng: &mut R) -> Self {
        let limbs: [u64; 4] = rng.gen();
        FrRepr(limbs)
    }
}

impl ::std::fmt::Display for FrRepr {
    /// Same rendering as `Debug`: big-endian hex with a "0x" prefix.
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        write!(f, "0x")?;
        for idx in (0..4).rev() {
            write!(f, "{:016x}", self.0[idx])?;
        }
        Ok(())
    }
}
impl AsRef<[u64]> for FrRepr {
#[inline(always)]
fn as_ref(&self) -> &[u64] {
&self.0
}
}
impl AsMut<[u64]> for FrRepr {
#[inline(always)]
fn as_mut(&mut self) -> &mut [u64] {
&mut self.0
}
}
impl From<u64> for FrRepr {
#[inline(always)]
fn from(val: u64) -> FrRepr {
use std::default::Default;
let mut repr = Self::default();
repr.0[0] = val;
repr
}
}
impl Ord for FrRepr {
    /// Lexicographic comparison from the most significant limb downward,
    /// i.e. ordinary integer comparison of the 256-bit values.
    #[inline(always)]
    fn cmp(&self, other: &FrRepr) -> ::std::cmp::Ordering {
        for idx in (0..4).rev() {
            let (lhs, rhs) = (self.0[idx], other.0[idx]);
            if lhs != rhs {
                return lhs.cmp(&rhs);
            }
        }
        ::std::cmp::Ordering::Equal
    }
}

impl PartialOrd for FrRepr {
    #[inline(always)]
    fn partial_cmp(&self, other: &FrRepr) -> Option<::std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl crate::ff::PrimeFieldRepr for FrRepr {
    /// True when the least-significant bit is set.
    #[inline(always)]
    fn is_odd(&self) -> bool {
        self.0[0] & 1 == 1
    }
    #[inline(always)]
    fn is_even(&self) -> bool {
        !self.is_odd()
    }
    /// True when all four limbs are zero.
    #[inline(always)]
    fn is_zero(&self) -> bool {
        self.0.iter().all(|&e| e == 0)
    }
    /// Logical right shift by `n` bits; shifts in zeros and saturates to
    /// zero for n >= 256.
    #[inline(always)]
    fn shr(&mut self, mut n: u32) {
        if n as usize >= 64 * 4usize {
            *self = Self::from(0);
            return;
        }
        // Move whole 64-bit limbs first (each pass shifts right by one limb).
        while n >= 64 {
            let mut t = 0;
            for i in self.0.iter_mut().rev() {
                ::std::mem::swap(&mut t, i);
            }
            n -= 64;
        }
        // Then shift the remaining < 64 bits, carrying bits between limbs.
        if n > 0 {
            let mut t = 0;
            for i in self.0.iter_mut().rev() {
                let t2 = *i << (64 - n);
                *i >>= n;
                *i |= t;
                t = t2;
            }
        }
    }
    /// Divide by two in place (single-bit right shift).
    #[inline(always)]
    fn div2(&mut self) {
        let mut t = 0;
        for i in self.0.iter_mut().rev() {
            let t2 = *i << 63;
            *i >>= 1;
            *i |= t;
            t = t2;
        }
    }
    /// Multiply by two in place (single-bit left shift); the top bit is lost.
    #[inline(always)]
    fn mul2(&mut self) {
        let mut last = 0;
        for i in &mut self.0 {
            let tmp = *i >> 63;
            *i <<= 1;
            *i |= last;
            last = tmp;
        }
    }
    /// Logical left shift by `n` bits; saturates to zero for n >= 256.
    #[inline(always)]
    fn shl(&mut self, mut n: u32) {
        if n as usize >= 64 * 4usize {
            *self = Self::from(0);
            return;
        }
        while n >= 64 {
            let mut t = 0;
            for i in &mut self.0 {
                ::std::mem::swap(&mut t, i);
            }
            n -= 64;
        }
        if n > 0 {
            let mut t = 0;
            for i in &mut self.0 {
                let t2 = *i >> (64 - n);
                *i <<= n;
                *i |= t;
                t = t2;
            }
        }
    }
    /// Position of the highest set bit plus one (0 for zero).
    #[inline(always)]
    fn num_bits(&self) -> u32 {
        let mut ret = (4usize as u32) * 64;
        for i in self.0.iter().rev() {
            let leading = i.leading_zeros();
            ret -= leading;
            // Stop at the first non-zero limb; its leading zeros are final.
            if leading != 64 {
                break;
            }
        }
        ret
    }
    /// Wrapping 256-bit addition; any carry out of the top limb is discarded.
    #[inline(always)]
    fn add_nocarry(&mut self, other: &FrRepr) {
        let mut carry = 0;
        for (a, b) in self.0.iter_mut().zip(other.0.iter()) {
            *a = crate::ff::adc(*a, *b, &mut carry);
        }
    }
    /// Wrapping 256-bit subtraction; any borrow out of the top limb is discarded.
    #[inline(always)]
    fn sub_noborrow(&mut self, other: &FrRepr) {
        let mut borrow = 0;
        for (a, b) in self.0.iter_mut().zip(other.0.iter()) {
            *a = crate::ff::sbb(*a, *b, &mut borrow);
        }
    }
}
impl FrRepr {
    /// Adds MODULUS in place without tracking overflow.
    /// Exploits the limb layout [1, 0, 0, K]: adding limb 0 (= 1) is done by
    /// seeding the carry chain with it, limbs 1–2 only propagate the carry,
    /// and only the top limb needs a real addition.
    #[inline(always)]
    fn add_modulus_nocarry(&mut self) {
        let mut carry = MODULUS.0[0usize];
        self.0[0] = add_carry(self.0[0], &mut carry);
        self.0[1] = add_carry(self.0[1], &mut carry);
        self.0[2] = add_carry(self.0[2], &mut carry);
        self.0[3] = crate::ff::adc(self.0[3], MODULUS.0[3usize], &mut carry);
    }
    /// Subtracts MODULUS in place without tracking underflow; same limb-layout
    /// trick as `add_modulus_nocarry`, with the borrow seeded by limb 0.
    #[inline(always)]
    fn sub_modulus_noborrow(&mut self) {
        let mut borrow = MODULUS.0[0usize];
        // sub one, so just sub borrow
        self.0[0] = sub_borrow(self.0[0], &mut borrow);
        // sub borrow
        self.0[1] = sub_borrow(self.0[1], &mut borrow);
        // sub borrow
        self.0[2] = sub_borrow(self.0[2], &mut borrow);
        self.0[3] = crate::ff::sbb(self.0[3], MODULUS.0[3usize], &mut borrow);
    }
    /// Adds 2*MODULUS in place (limb layout [2, 0, 0, 2K]); used by the
    /// partial two-bit reduction path.
    #[inline(always)]
    fn add_modulus_twice_nocarry(&mut self) {
        let mut carry = MODULUS_TWICE.0[0];
        self.0[0] = add_carry(self.0[0], &mut carry);
        self.0[1] = add_carry(self.0[1], &mut carry);
        self.0[2] = add_carry(self.0[2], &mut carry);
        self.0[3] = crate::ff::adc(self.0[3], MODULUS_TWICE.0[3usize], &mut carry);
    }
    /// Subtracts 2*MODULUS in place without tracking underflow.
    #[inline(always)]
    fn sub_modulus_twice_noborrow(&mut self) {
        let mut borrow = MODULUS_TWICE.0[0];
        // sub one, so just sub borrow
        self.0[0] = sub_borrow(self.0[0], &mut borrow);
        // sub borrow
        self.0[1] = sub_borrow(self.0[1], &mut borrow);
        // sub borrow
        self.0[2] = sub_borrow(self.0[2], &mut borrow);
        self.0[3] = crate::ff::sbb(self.0[3], MODULUS_TWICE.0[3usize], &mut borrow);
    }
}
impl ::std::marker::Copy for Fr {}
impl ::std::clone::Clone for Fr {
fn clone(&self) -> Fr {
*self
}
}
impl ::std::cmp::PartialEq for Fr {
fn eq(&self, other: &Fr) -> bool {
self.0 == other.0
}
}
impl ::std::cmp::Eq for Fr {}
impl ::std::fmt::Debug for Fr {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "{}({:?})", "Fr", self.into_repr())
}
}
impl Ord for Fr {
#[inline(always)]
fn cmp(&self, other: &Fr) -> ::std::cmp::Ordering {
self.into_repr().cmp(&other.into_repr())
}
}
impl PartialOrd for Fr {
#[inline(always)]
fn partial_cmp(&self, other: &Fr) -> Option<::std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl ::std::fmt::Display for Fr {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "{}({:?})", "Fr", self.into_repr())
}
}
impl ::rand::Rand for Fr {
    /// Samples uniformly by rejection: draw 256 random bits, mask off the top
    /// REPR_SHAVE_BITS, and retry until the result lies below the modulus.
    fn rand<R: ::rand::Rng>(rng: &mut R) -> Self {
        let mask = 0xffffffffffffffff >> REPR_SHAVE_BITS;
        loop {
            let mut repr = FrRepr::rand(rng);
            repr.as_mut()[3usize] &= mask;
            let candidate = Fr(repr);
            if candidate.is_valid() {
                return candidate;
            }
        }
    }
}

impl From<Fr> for FrRepr {
    /// Converts into the canonical (non-Montgomery) representation.
    fn from(e: Fr) -> FrRepr {
        e.into_repr()
    }
}
impl crate::ff::PrimeField for Fr {
    type Repr = FrRepr;
    /// Converts a canonical representation into a field element.
    /// Multiplying by R2 = R^2 performs the conversion into Montgomery form.
    fn from_repr(r: FrRepr) -> Result<Fr, crate::ff::PrimeFieldDecodingError> {
        let mut r = Fr(r);
        if r.is_valid() {
            r.mul_assign(&Fr(R2));
            Ok(r)
        } else {
            Err(crate::ff::PrimeFieldDecodingError::NotInField(format!("{}", r.0)))
        }
    }
    /// Interprets `r` as an already-Montgomery-form element (no conversion).
    fn from_raw_repr(r: FrRepr) -> Result<Self, crate::ff::PrimeFieldDecodingError> {
        let r = Fr(r);
        if r.is_valid() {
            Ok(r)
        } else {
            Err(crate::ff::PrimeFieldDecodingError::NotInField(format!("{}", r.0)))
        }
    }
    /// Converts out of Montgomery form: reducing (limbs, 0, 0, 0, 0) divides
    /// by R, yielding the canonical value.
    fn into_repr(&self) -> FrRepr {
        let mut r = *self;
        r.mont_reduce(
            (self.0).0[0usize],
            (self.0).0[1usize],
            (self.0).0[2usize],
            (self.0).0[3usize],
            0,
            0,
            0,
            0,
        );
        r.0
    }
    /// Returns the raw Montgomery-form limbs without conversion.
    fn into_raw_repr(&self) -> FrRepr {
        let r = *self;
        r.0
    }
    fn char() -> FrRepr {
        MODULUS
    }
    const NUM_BITS: u32 = MODULUS_BITS;
    const CAPACITY: u32 = Self::NUM_BITS - 1;
    fn multiplicative_generator() -> Self {
        Fr(GENERATOR)
    }
    const S: u32 = S;
    fn root_of_unity() -> Self {
        Fr(ROOT_OF_UNITY)
    }
}
impl crate::ff::Field for Fr {
    #[inline]
    fn zero() -> Self {
        Fr(FrRepr::default())
    }
    /// One in Montgomery form is R.
    #[inline]
    fn one() -> Self {
        Fr(R)
    }
    #[inline]
    fn is_zero(&self) -> bool {
        self.0.is_zero()
    }
    /// Addition: wrapping limb add followed by a single conditional
    /// subtraction of the modulus.
    #[inline]
    fn add_assign(&mut self, other: &Fr) {
        self.0.add_nocarry(&other.0);
        self.reduce();
    }
    #[inline]
    fn double(&mut self) {
        self.0.mul2();
        self.reduce();
    }
    /// Subtraction: if the result would underflow, add the modulus first.
    #[inline]
    fn sub_assign(&mut self, other: &Fr) {
        if other.0 > self.0 {
            self.0.add_modulus_nocarry();
            // self.0.add_nocarry(&MODULUS);
        }
        self.0.sub_noborrow(&other.0);
    }
    /// Additive negation: q - self (zero stays zero).
    #[inline]
    fn negate(&mut self) {
        if !self.is_zero() {
            let mut tmp = MODULUS;
            tmp.sub_noborrow(&self.0);
            self.0 = tmp;
        }
    }
    /// Multiplicative inverse via the binary extended Euclidean algorithm.
    /// Starting b = R2 keeps the result in Montgomery form. Returns None
    /// for zero.
    fn inverse(&self) -> Option<Self> {
        if self.is_zero() {
            None
        } else {
            let one = FrRepr::from(1);
            let mut u = self.0;
            let mut v = MODULUS;
            let mut b = Fr(R2);
            let mut c = Self::zero();
            while u != one && v != one {
                // Halve u while it is even, adjusting b to stay congruent.
                while u.is_even() {
                    u.div2();
                    if b.0.is_even() {
                        b.0.div2();
                    } else {
                        b.0.add_modulus_nocarry();
                        // b.0.add_nocarry(&MODULUS);
                        b.0.div2();
                    }
                }
                while v.is_even() {
                    v.div2();
                    if c.0.is_even() {
                        c.0.div2();
                    } else {
                        c.0.add_modulus_nocarry();
                        // c.0.add_nocarry(&MODULUS);
                        c.0.div2();
                    }
                }
                // Subtract the smaller from the larger, mirroring on b/c.
                if v < u {
                    u.sub_noborrow(&v);
                    b.sub_assign(&c);
                } else {
                    v.sub_noborrow(&u);
                    c.sub_assign(&b);
                }
            }
            if u == one {
                Some(b)
            } else {
                Some(c)
            }
        }
    }
    /// Frobenius is the identity on a prime field.
    #[inline(always)]
    fn frobenius_map(&mut self, _: usize) {}
    /// Schoolbook 4x4 limb multiplication producing an 8-limb product,
    /// followed by Montgomery reduction back to 4 limbs.
    #[inline]
    fn mul_assign(&mut self, other: &Fr) {
        let mut carry = 0;
        let r0 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (other.0).0[0usize], &mut carry);
        let r1 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (other.0).0[1usize], &mut carry);
        let r2 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (other.0).0[2usize], &mut carry);
        let r3 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (other.0).0[3usize], &mut carry);
        let r4 = carry;
        let mut carry = 0;
        let r1 =
            crate::ff::mac_with_carry(r1, (self.0).0[1usize], (other.0).0[0usize], &mut carry);
        let r2 =
            crate::ff::mac_with_carry(r2, (self.0).0[1usize], (other.0).0[1usize], &mut carry);
        let r3 =
            crate::ff::mac_with_carry(r3, (self.0).0[1usize], (other.0).0[2usize], &mut carry);
        let r4 =
            crate::ff::mac_with_carry(r4, (self.0).0[1usize], (other.0).0[3usize], &mut carry);
        let r5 = carry;
        let mut carry = 0;
        let r2 =
            crate::ff::mac_with_carry(r2, (self.0).0[2usize], (other.0).0[0usize], &mut carry);
        let r3 =
            crate::ff::mac_with_carry(r3, (self.0).0[2usize], (other.0).0[1usize], &mut carry);
        let r4 =
            crate::ff::mac_with_carry(r4, (self.0).0[2usize], (other.0).0[2usize], &mut carry);
        let r5 =
            crate::ff::mac_with_carry(r5, (self.0).0[2usize], (other.0).0[3usize], &mut carry);
        let r6 = carry;
        let mut carry = 0;
        let r3 =
            crate::ff::mac_with_carry(r3, (self.0).0[3usize], (other.0).0[0usize], &mut carry);
        let r4 =
            crate::ff::mac_with_carry(r4, (self.0).0[3usize], (other.0).0[1usize], &mut carry);
        let r5 =
            crate::ff::mac_with_carry(r5, (self.0).0[3usize], (other.0).0[2usize], &mut carry);
        let r6 =
            crate::ff::mac_with_carry(r6, (self.0).0[3usize], (other.0).0[3usize], &mut carry);
        let r7 = carry;
        self.mont_reduce(r0, r1, r2, r3, r4, r5, r6, r7);
    }
    /// Optimized squaring: compute the off-diagonal products once, double
    /// them with a shift, then add the diagonal squares; finish with
    /// Montgomery reduction.
    #[inline]
    fn square(&mut self) {
        let mut carry = 0;
        let r1 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (self.0).0[1usize], &mut carry);
        let r2 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (self.0).0[2usize], &mut carry);
        let r3 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (self.0).0[3usize], &mut carry);
        let r4 = carry;
        let mut carry = 0;
        let r3 =
            crate::ff::mac_with_carry(r3, (self.0).0[1usize], (self.0).0[2usize], &mut carry);
        let r4 =
            crate::ff::mac_with_carry(r4, (self.0).0[1usize], (self.0).0[3usize], &mut carry);
        let r5 = carry;
        let mut carry = 0;
        let r5 =
            crate::ff::mac_with_carry(r5, (self.0).0[2usize], (self.0).0[3usize], &mut carry);
        let r6 = carry;
        // Double the cross terms (multiply the partial product by two).
        let r7 = r6 >> 63;
        let r6 = (r6 << 1) | (r5 >> 63);
        let r5 = (r5 << 1) | (r4 >> 63);
        let r4 = (r4 << 1) | (r3 >> 63);
        let r3 = (r3 << 1) | (r2 >> 63);
        let r2 = (r2 << 1) | (r1 >> 63);
        let r1 = r1 << 1;
        // Add the diagonal terms a_i^2.
        let mut carry = 0;
        let r0 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (self.0).0[0usize], &mut carry);
        let r1 = crate::ff::adc(r1, 0, &mut carry);
        let r2 =
            crate::ff::mac_with_carry(r2, (self.0).0[1usize], (self.0).0[1usize], &mut carry);
        let r3 = crate::ff::adc(r3, 0, &mut carry);
        let r4 =
            crate::ff::mac_with_carry(r4, (self.0).0[2usize], (self.0).0[2usize], &mut carry);
        let r5 = crate::ff::adc(r5, 0, &mut carry);
        let r6 =
            crate::ff::mac_with_carry(r6, (self.0).0[3usize], (self.0).0[3usize], &mut carry);
        let r7 = crate::ff::adc(r7, 0, &mut carry);
        self.mont_reduce(r0, r1, r2, r3, r4, r5, r6, r7);
    }
}
impl std::default::Default for Fr {
fn default() -> Self {
Self::zero()
}
}
impl Fr {
    /// True when the representation is canonical, i.e. strictly below q.
    #[inline(always)]
    fn is_valid(&self) -> bool {
        self.0 < MODULUS
    }
    /// True when the representation is strictly below 2q (partially reduced).
    #[inline(always)]
    fn is_below_modulus_twice(&self) -> bool {
        self.0 < MODULUS_TWICE
    }
    /// Conditionally subtracts q once; correct only if the value is < 2q.
    #[inline(always)]
    fn reduce(&mut self) {
        if !self.is_valid() {
            self.0.sub_modulus_noborrow();
            // self.0.sub_noborrow(&MODULUS);
        }
    }
    /// Montgomery reduction of an 8-limb product (r0..r7) down to 4 limbs,
    /// WITHOUT the final conditional subtraction (result may exceed q).
    /// Because q ≡ 1 (mod 2^64), INV = -1 mod 2^64 and the per-round
    /// Montgomery factor is simply k = -r_i = !r_i + 1; the modulus limb
    /// layout [1, 0, 0, K] reduces each round to carries plus one mac.
    #[inline(always)]
    fn mont_reduce_unreduced(
        &mut self,
        r0: u64,
        mut r1: u64,
        mut r2: u64,
        mut r3: u64,
        mut r4: u64,
        mut r5: u64,
        mut r6: u64,
        mut r7: u64,
    ) {
        let k = (!r0).wrapping_add(1);
        let mut carry = 0;
        // r0 + k*1 == 0 mod 2^64 by construction; only the carry matters.
        crate::ff::adc(r0, k, &mut carry);
        r1 = add_carry(r1, &mut carry);
        r2 = add_carry(r2, &mut carry);
        r3 = crate::ff::mac_with_carry(r3, k, MODULUS.0[3usize], &mut carry);
        r4 = add_carry(r4, &mut carry);
        let carry2 = carry;
        let k = (!r1).wrapping_add(1);
        let mut carry = 0;
        crate::ff::adc(r1, k, &mut carry);
        r2 = add_carry(r2, &mut carry);
        r3 = add_carry(r3, &mut carry);
        r4 = crate::ff::mac_with_carry(r4, k, MODULUS.0[3usize], &mut carry);
        r5 = crate::ff::adc(r5, carry2, &mut carry);
        let carry2 = carry;
        let k = (!r2).wrapping_add(1);
        let mut carry = 0;
        crate::ff::adc(r2, k, &mut carry);
        r3 = add_carry(r3, &mut carry);
        r4 = add_carry(r4, &mut carry);
        r5 = crate::ff::mac_with_carry(r5, k, MODULUS.0[3usize], &mut carry);
        r6 = crate::ff::adc(r6, carry2, &mut carry);
        let carry2 = carry;
        let k = (!r3).wrapping_add(1);
        let mut carry = 0;
        crate::ff::adc(r3, k, &mut carry);
        r4 = add_carry(r4, &mut carry);
        r5 = add_carry(r5, &mut carry);
        r6 = crate::ff::mac_with_carry(r6, k, MODULUS.0[3usize], &mut carry);
        r7 = crate::ff::adc(r7, carry2, &mut carry);
        // The reduced result lives in the upper four limbs.
        (self.0).0[0usize] = r4;
        (self.0).0[1usize] = r5;
        (self.0).0[2usize] = r6;
        (self.0).0[3usize] = r7;
    }
    /// Full Montgomery reduction: unreduced pass plus one conditional
    /// subtraction of q.
    #[inline(always)]
    fn mont_reduce(&mut self,
        r0: u64,
        r1: u64,
        r2: u64,
        r3: u64,
        r4: u64,
        r5: u64,
        r6: u64,
        r7: u64,
    ) {
        self.mont_reduce_unreduced(r0, r1, r2, r3, r4, r5, r6, r7);
        self.reduce();
    }
    /// Serializes the canonical value as big-endian hex (64 characters).
    pub fn to_hex(&self) -> String {
        let mut buf: Vec<u8> = Vec::with_capacity(32);
        self.into_repr().write_be(&mut buf).unwrap();
        crate::ff::hex::encode(&buf)
    }
    /// Parses a big-endian hex string (optionally "0x"-prefixed) into a
    /// field element; the hex must have an even number of digits.
    /// NOTE(review): `resize(32, 0)` pads short inputs with zeros but also
    /// silently TRUNCATES inputs longer than 32 bytes, dropping the most
    /// significant bytes — confirm whether oversized input should error.
    pub fn from_hex(value: &str) -> Result<Fr, String> {
        let value = if value.starts_with("0x") { &value[2..] } else { value };
        if value.len() % 2 != 0 {return Err(format!("hex length must be even for full byte encoding: {}", value))}
        let mut buf = crate::ff::hex::decode(&value).map_err(|_| format!("could not decode hex: {}", value))?;
        // Convert big-endian bytes to the little-endian order read_le expects.
        buf.reverse();
        buf.resize(32, 0);
        let mut repr = FrRepr::default();
        repr.read_le(&buf[..]).map_err(|e| format!("could not read {}: {}", value, &e))?;
        Fr::from_repr(repr).map_err(|e| format!("could not convert into prime field: {}: {}", value, &e))
    }
}
impl crate::ff::SqrtField for Fr {
    /// Euler's criterion: self^((q-1)/2) is 1 for residues, -1 for
    /// non-residues, 0 for zero. The limbs below encode (q-1)/2 = K * 2^191.
    fn legendre(&self) -> crate::ff::LegendreSymbol {
        let s = self.pow([0u64, 0u64, 9223372036854775808u64, 288230376151711752u64]);
        if s == Self::zero() {
            crate::ff::LegendreSymbol::Zero
        } else if s == Self::one() {
            crate::ff::LegendreSymbol::QuadraticResidue
        } else {
            crate::ff::LegendreSymbol::QuadraticNonResidue
        }
    }
    /// Tonelli–Shanks square root. Writing q - 1 = 2^S * K (S = 192, K odd),
    /// the loop starts from r = self^((K+1)/2) and t = self^K and repeatedly
    /// corrects r by powers of the 2^S-th root of unity until t == 1.
    fn sqrt(&self) -> Option<Self> {
        match self.legendre() {
            crate::ff::LegendreSymbol::Zero => Some(*self),
            crate::ff::LegendreSymbol::QuadraticNonResidue => None,
            crate::ff::LegendreSymbol::QuadraticResidue => {
                let mut c = Fr(ROOT_OF_UNITY);
                // (K+1)/2 = 288230376151711753
                let mut r = self.pow([288230376151711753u64, 0u64, 0u64, 0u64]);
                // K = 576460752303423505
                let mut t = self.pow([576460752303423505u64, 0u64, 0u64, 0u64]);
                let mut m = S;
                while t != Self::one() {
                    // Find the least i with t^(2^i) == 1.
                    let mut i = 1;
                    {
                        let mut t2i = t;
                        t2i.square();
                        loop {
                            if t2i == Self::one() {
                                break;
                            }
                            t2i.square();
                            i += 1;
                        }
                    }
                    // b = c^(2^(m - i - 1)); update r, c, t accordingly.
                    for _ in 0..(m - i - 1) {
                        c.square();
                    }
                    r.mul_assign(&c);
                    c.square();
                    t.mul_assign(&c);
                    m = i;
                }
                Some(r)
            }
        }
    }
}
impl PartialReductionField for Fr {
    /// Addition WITHOUT the conditional modulus subtraction; the caller is
    /// responsible for reducing later.
    #[inline(always)]
    fn add_assign_unreduced(&mut self, other: &Fr) {
        self.0.add_nocarry(&other.0);
    }
    /// Subtraction that unconditionally adds q first (instead of comparing),
    /// leaving a possibly-unreduced result.
    #[inline(always)]
    fn sub_assign_unreduced(&mut self, other: &Self) {
        self.0.add_modulus_nocarry();
        self.0.sub_noborrow(&other.0);
    }
    /// Same schoolbook multiply as `Field::mul_assign`, but finishing with
    /// the UNREDUCED Montgomery pass (no final conditional subtraction).
    #[inline]
    fn mul_assign_unreduced(&mut self, other: &Fr) {
        let mut carry = 0;
        let r0 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (other.0).0[0usize], &mut carry);
        let r1 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (other.0).0[1usize], &mut carry);
        let r2 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (other.0).0[2usize], &mut carry);
        let r3 =
            crate::ff::mac_with_carry(0, (self.0).0[0usize], (other.0).0[3usize], &mut carry);
        let r4 = carry;
        let mut carry = 0;
        let r1 =
            crate::ff::mac_with_carry(r1, (self.0).0[1usize], (other.0).0[0usize], &mut carry);
        let r2 =
            crate::ff::mac_with_carry(r2, (self.0).0[1usize], (other.0).0[1usize], &mut carry);
        let r3 =
            crate::ff::mac_with_carry(r3, (self.0).0[1usize], (other.0).0[2usize], &mut carry);
        let r4 =
            crate::ff::mac_with_carry(r4, (self.0).0[1usize], (other.0).0[3usize], &mut carry);
        let r5 = carry;
        let mut carry = 0;
        let r2 =
            crate::ff::mac_with_carry(r2, (self.0).0[2usize], (other.0).0[0usize], &mut carry);
        let r3 =
            crate::ff::mac_with_carry(r3, (self.0).0[2usize], (other.0).0[1usize], &mut carry);
        let r4 =
            crate::ff::mac_with_carry(r4, (self.0).0[2usize], (other.0).0[2usize], &mut carry);
        let r5 =
            crate::ff::mac_with_carry(r5, (self.0).0[2usize], (other.0).0[3usize], &mut carry);
        let r6 = carry;
        let mut carry = 0;
        let r3 =
            crate::ff::mac_with_carry(r3, (self.0).0[3usize], (other.0).0[0usize], &mut carry);
        let r4 =
            crate::ff::mac_with_carry(r4, (self.0).0[3usize], (other.0).0[1usize], &mut carry);
        let r5 =
            crate::ff::mac_with_carry(r5, (self.0).0[3usize], (other.0).0[2usize], &mut carry);
        let r6 =
            crate::ff::mac_with_carry(r6, (self.0).0[3usize], (other.0).0[3usize], &mut carry);
        let r7 = carry;
        self.mont_reduce_unreduced(r0, r1, r2, r3, r4, r5, r6, r7);
    }
    /// Single conditional subtraction of q.
    #[inline(always)]
    fn reduce_once(&mut self) {
        self.reduce();
    }
    /// For this implementation one subtraction suffices for full reduction.
    #[inline(always)]
    fn reduce_completely(&mut self) {
        self.reduce_once();
    }
    /// Diagnostic: how many times q must be subtracted to reach canonical form.
    fn overflow_factor(&self) -> usize {
        let mut factor = 0usize;
        let mut this = *self;
        while !this.is_valid() {
            this.0.sub_modulus_noborrow();
            factor += 1;
        }
        factor
    }
}
impl PartialTwoBitReductionField for Fr {
    /// Subtraction that unconditionally adds 2q first, allowing the operand
    /// to be only partially reduced (below 2q) without underflow.
    #[inline(always)]
    fn sub_assign_twice_unreduced(&mut self, other: &Self) {
        self.0.add_modulus_twice_nocarry();
        self.0.sub_noborrow(&other.0);
    }
    /// Conditionally subtracts 2q once, bringing the value below 2q.
    #[inline(always)]
    fn reduce_twice(&mut self) {
        if !self.is_below_modulus_twice() {
            self.0.sub_modulus_twice_noborrow();
        }
    }
    /// Full reduction to canonical form: first below 2q, then below q.
    #[inline(always)]
    fn reduce_completely(&mut self) {
        self.reduce_twice();
        self.reduce_once();
    }
}
#!/bin/sh
cargo test --release -- --ignored --nocapture test_long_naive_division<file_sep>/src/sonic/transcript/hasher.rs
extern crate tiny_keccak;
extern crate blake2_rfc;
use self::tiny_keccak::Keccak;
use self::blake2_rfc::blake2s::{Blake2s, blake2s};
/// Minimal hashing abstraction used by the transcript: absorb bytes, then
/// squeeze out a digest.
pub trait Hasher {
    /// Creates a hasher pre-seeded with a personalization / domain tag.
    fn new(personalization: &[u8]) -> Self;
    /// Absorbs additional input bytes.
    fn update(&mut self, data: &[u8]);
    /// Returns the digest of everything absorbed so far and resets the
    /// internal state to a fresh (un-personalized) hasher.
    fn finalize(&mut self) -> Vec<u8>;
}
#[derive(Clone)]
pub struct BlakeHasher {
h: Blake2s
}
impl Hasher for BlakeHasher {
fn new(personalization: &[u8]) -> Self {
let mut h = Blake2s::new(32);
h.update(personalization);
Self {
h: h
}
}
fn update(&mut self, data: &[u8]) {
self.h.update(data);
}
fn finalize(&mut self) -> Vec<u8> {
use std::mem;
let new_h = Blake2s::new(32);
let h = std::mem::replace(&mut self.h, new_h);
let result = h.finalize();
result.as_ref().to_vec().clone()
}
}
#[derive(Clone)]
pub struct Keccak256Hasher {
h: Keccak
}
impl Hasher for Keccak256Hasher {
fn new(personalization: &[u8]) -> Self {
let mut h = Keccak::new_keccak256();
h.update(personalization);
Self {
h: h
}
}
fn update(&mut self, data: &[u8]) {
self.h.update(data);
}
fn finalize(&mut self) -> Vec<u8> {
use std::mem;
let new_h = Keccak::new_keccak256();
let h = std::mem::replace(&mut self.h, new_h);
let mut res: [u8; 32] = [0; 32];
h.finalize(&mut res);
res[..].to_vec()
}
}<file_sep>/src/plonk/better_cs/mod.rs
// Module layout for the "better CS" PLONK constraint-system implementation.
pub mod cs;
pub mod test_assembly;
pub mod adaptor;
pub mod generator;
pub mod keys;
pub mod prover;
pub mod verifier;
pub mod one_shot_test_assembly;
pub mod fma_adaptor;
pub(crate) mod utils;
// Default low-degree-extension blowup factor shared by prover/verifier.
const LDE_FACTOR: usize = 4;
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::parameters::{Parameters, NUM_BLINDINGS};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::cs::{Circuit, Variable, Coeff};
use crate::sonic::srs::SRS;
use crate::sonic::sonic::{CountN, Basic};
/// Produces the s(z, y) "advice" for an existing proof: a commitment to
/// s(X, y), its claimed evaluation at z, and a Kate opening proving it.
/// `n` is the number of multiplication gates in the circuit.
pub fn create_advice_on_information_and_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    proof: &Proof<E>,
    srs: &SRS<E>,
    n: usize
) -> Result<SxyAdvice<E>, SynthesisError>
{
    // Re-derive the Fiat-Shamir challenges y and z from the proof, in the
    // same commitment order the prover used.
    let z: E::Fr;
    let y: E::Fr;
    {
        let mut transcript = Transcript::new(&[]);
        transcript.commit_point(&proof.r);
        y = transcript.get_challenge_scalar();
        transcript.commit_point(&proof.t);
        z = transcript.get_challenge_scalar();
    }
    let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?;
    // Evaluate s(X, y) coefficient-wise (negative and positive X powers)
    // by re-synthesizing the circuit into the SxEval backend.
    let (s_poly_negative, s_poly_positive) = {
        let mut tmp = SxEval::new(y, n);
        S::synthesize(&mut tmp, circuit)?;
        tmp.poly()
    };
    // Compute S commitment
    let s = multiexp(
        srs.g_positive_x_alpha[0..(2 * n)]
            .iter()
            .chain_ext(srs.g_negative_x_alpha[0..(n)].iter()),
        s_poly_positive.iter().chain_ext(s_poly_negative.iter())
    ).into_affine();
    // Compute s(z, y)
    let mut szy = E::Fr::zero();
    {
        szy.add_assign(& evaluate_at_consequitive_powers(& s_poly_positive[..], z, z));
        szy.add_assign(& evaluate_at_consequitive_powers(& s_poly_negative[..], z_inv, z_inv));
    }
    // Compute kate opening
    let opening = {
        let mut open = szy;
        open.negate();
        // Divide s(X, y) - s(z, y) by (X - z); the remainder vanishes by
        // construction, and -s(z, y) sits at the X^0 slot.
        let poly = kate_divison(
            s_poly_negative.iter().rev().chain_ext(Some(open).iter()).chain_ext(s_poly_positive.iter()),
            z,
        );
        let negative_poly = poly[0..n].iter().rev();
        let positive_poly = poly[n..].iter();
        multiexp(
            srs.g_negative_x[1..(negative_poly.len() + 1)].iter().chain_ext(
                srs.g_positive_x[0..positive_poly.len()].iter()
            ),
            negative_poly.chain_ext(positive_poly)
        ).into_affine()
    };
    Ok(SxyAdvice {
        s,
        szy,
        opening
    })
}
/// Convenience wrapper around [`create_advice_on_information_and_srs`] that
/// reads the gate count `n` from the verification key inside `parameters`.
pub fn create_advice<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    proof: &Proof<E>,
    parameters: &Parameters<E>,
) -> Result<SxyAdvice<E>, SynthesisError>
{
    create_advice_on_information_and_srs::<E, C, S>(circuit, proof, &parameters.srs, parameters.vk.n)
}
/// Like [`create_advice`], but for when only an SRS is available: the gate
/// count `n` is recovered by a counting synthesis pass over the circuit.
pub fn create_advice_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    proof: &Proof<E>,
    srs: &SRS<E>
) -> Result<SxyAdvice<E>, SynthesisError>
{
    // n isn't precomputed anywhere yet, so run a dry synthesis that only counts.
    let counter = {
        let mut backend = CountN::<S>::new();
        S::synthesize(&mut backend, circuit)?;
        backend
    };
    create_advice_on_information_and_srs::<E, C, S>(circuit, proof, srs, counter.n)
}
/// Creates a proof using the SRS stored inside `parameters`; thin delegate
/// to [`create_proof_on_srs`].
pub fn create_proof<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    parameters: &Parameters<E>
) -> Result<Proof<E>, SynthesisError> {
    create_proof_on_srs::<E, C, S>(circuit, &parameters.srs)
}
extern crate rand;
use self::rand::{Rand, Rng, thread_rng};
use crate::sonic::sonic::Wires;
/// Creates a Sonic proof for `circuit` against the given SRS.
/// Follows the r/t commitment + Fiat-Shamir challenge schedule; exact
/// coefficient layouts are documented inline since all polynomial slicing
/// depends on them.
pub fn create_proof_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
    srs: &SRS<E>
) -> Result<Proof<E>, SynthesisError>
{
    // Collect wire assignments (a, b, c) by synthesizing the circuit.
    let mut wires = Wires::new();
    S::synthesize(&mut wires, circuit)?;
    let n = wires.a.len();
    let mut transcript = Transcript::new(&[]);
    let rng = &mut thread_rng();
    // c_{n+1}, c_{n+2}, c_{n+3}, c_{n+4}
    let blindings: Vec<E::Fr> = (0..NUM_BLINDINGS).into_iter().map(|_| E::Fr::rand(rng)).collect();
    // r is a commitment to r(X, 1)
    let r = polynomial_commitment::<E, _>(
        n,
        2*n + NUM_BLINDINGS,
        n,
        &srs,
        blindings.iter().rev()
            .chain_ext(wires.c.iter().rev())
            .chain_ext(wires.b.iter().rev())
            .chain_ext(Some(E::Fr::zero()).iter())
            .chain_ext(wires.a.iter()),
    );
    transcript.commit_point(&r);
    let y: E::Fr = transcript.get_challenge_scalar();
    // create r(X, 1) by observation that it's just a series of coefficients.
    // Used representation is for powers X^{-2n}...X^{-n-1}, X^{-n}...X^{-1}, X^{1}...X^{n}
    // Same representation is ok for r(X, Y) too cause powers always match
    let mut rx1 = wires.b;
    rx1.extend(wires.c);
    rx1.extend(blindings.clone());
    rx1.reverse();
    rx1.push(E::Fr::zero());
    rx1.extend(wires.a);
    let mut rxy = rx1.clone();
    let y_inv = y.inverse().ok_or(SynthesisError::DivisionByZero)?;
    // y^(-2n - num blindings)
    let tmp = y_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]);
    // Scale coefficient i by y^{i - 2n - NUM_BLINDINGS} to turn r(X, 1) into r(X, y).
    mut_distribute_consequitive_powers(
        &mut rxy,
        tmp,
        y,
    );
    // negative powers [-1, -2n], positive [1, n]
    let (mut s_poly_negative, s_poly_positive) = {
        let mut tmp = SxEval::new(y, n);
        S::synthesize(&mut tmp, circuit)?;
        tmp.poly()
    };
    // r'(X, y) = r(X, y) + s(X, y). Note `y` - those are evaluated at the point already
    let mut rxy_prime = rxy.clone();
    {
        // extend to have powers [n+1, 2n]
        rxy_prime.resize(4 * n + 1 + NUM_BLINDINGS, E::Fr::zero());
        // s_poly_negative is stored for powers [-1, -2n]; reverse to align it
        // with the ascending-power slice of rxy_prime, then restore.
        s_poly_negative.reverse();
        let neg_poly_len = s_poly_negative.len();
        add_polynomials(&mut rxy_prime[(NUM_BLINDINGS+neg_poly_len)..(2 * n + NUM_BLINDINGS)], &s_poly_negative[..]);
        s_poly_negative.reverse();
        add_polynomials(&mut rxy_prime[(2 * n + 1 + NUM_BLINDINGS)..], &s_poly_positive[..])
        // // add coefficients in front of X^{-2n}...X^{-n-1}, X^{-n}...X^{-1}
        // for (r, s) in rxy_prime[NUM_BLINDINGS..(2 * n + NUM_BLINDINGS)]
        //     .iter_mut()
        //     .rev()
        //     .zip(s_poly_negative)
        // {
        //     r.add_assign(&s);
        // }
        // // add coefficients in front of X^{1}...X^{n}, X^{n+1}...X^{2*n}
        // for (r, s) in rxy_prime[(2 * n + 1 + NUM_BLINDINGS)..].iter_mut().zip(s_poly_positive) {
        //     r.add_assign(&s);
        // }
    }
    // by this point all R related polynomials are blinded and evaluated for Y variable
    // t(X, y) = r'(X, y)*r(X, 1) and will be later evaluated at z
    // contained degree in respect to X are from -4*n to 3*n including X^0
    let mut txy = multiply_polynomials::<E>(rx1.clone(), rxy_prime);
    txy[4 * n + 2 * NUM_BLINDINGS] = E::Fr::zero(); // -k(y)
    // commit to t(X, y) to later open at z
    let t = polynomial_commitment(
        srs.d,
        (4 * n) + 2*NUM_BLINDINGS,
        3 * n,
        srs,
        // skip what would be zero power
        txy[0..(4 * n) + 2*NUM_BLINDINGS].iter()
            .chain_ext(txy[(4 * n + 2*NUM_BLINDINGS + 1)..].iter()),
    );
    transcript.commit_point(&t);
    let z: E::Fr = transcript.get_challenge_scalar();
    let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?;
    // rz = r(z, 1): evaluate the Laurent coefficients starting at power -(2n + NUM_BLINDINGS).
    let rz = {
        let tmp = z_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]);
        evaluate_at_consequitive_powers(&rx1, tmp, z)
    };
    // rzy is evaluation of r(X, Y) at z, y
    let rzy = {
        let tmp = z_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]);
        evaluate_at_consequitive_powers(&rxy, tmp, z)
    };
    transcript.commit_scalar(&rz);
    transcript.commit_scalar(&rzy);
    // r1 batches the two openings below into one linear combination.
    let r1: E::Fr = transcript.get_challenge_scalar();
    let zy_opening = {
        // r(X, 1) - r(z, y)
        // subtract constant term from R(X, 1)
        rx1[(2 * n + NUM_BLINDINGS)].sub_assign(&rzy);
        let mut point = y;
        point.mul_assign(&z);
        polynomial_commitment_opening(
            2 * n + NUM_BLINDINGS,
            n,
            &rx1,
            point,
            srs
        )
    };
    assert_eq!(rx1.len(), 3*n + NUM_BLINDINGS + 1);
    // it's an opening of t(X, y) at z
    let z_opening = {
        rx1[(2 * n + NUM_BLINDINGS)].add_assign(&rzy); // restore
        let rx1_len = rx1.len();
        // Fold r1 * r(X, 1) into t(X, y) before opening (batched opening).
        mul_add_polynomials(&mut txy[(2 * n + NUM_BLINDINGS)..(2 * n + NUM_BLINDINGS + rx1_len)], &rx1[..], r1);
        // // skip powers from until reach -2n - NUM_BLINDINGS
        // for (t, &r) in txy[(2 * n + NUM_BLINDINGS)..].iter_mut().zip(rx1.iter()) {
        //     let mut r = r;
        //     r.mul_assign(&r1);
        //     t.add_assign(&r);
        // }
        let val = {
            let tmp = z_inv.pow(&[(4*n + 2*NUM_BLINDINGS) as u64]);
            evaluate_at_consequitive_powers(&txy, tmp, z)
        };
        // Shift by -value-at-z so the opened polynomial vanishes at z.
        txy[(4 * n + 2*NUM_BLINDINGS)].sub_assign(&val);
        polynomial_commitment_opening(
            4*n + 2*NUM_BLINDINGS,
            3*n,
            &txy,
            z,
            srs)
    };
    Ok(Proof {
        r, rz, rzy, t, z_opening, zy_opening
    })
}
<file_sep>/src/plonk/commitments/transparent/fri/mod.rs
use crate::pairing::ff::PrimeField;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::polynomials::*;
use crate::worker::*;
use crate::SynthesisError;
use crate::plonk::commitments::transparent::utils::log2_floor;
use crate::plonk::commitments::transcript::Prng;
pub mod naive_fri;
pub mod coset_combining_fri;
/// Prover-side intermediate FRI data: per-round oracle roots plus the final
/// low-degree polynomial, produced before query indexes are known.
pub trait FriProofPrototype<F: PrimeField, I: IOP<F>> {
    /// Merkle-style roots of every intermediate FRI oracle.
    fn get_roots(&self) -> Vec< < <I::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput>;
    /// Root of the last committed oracle.
    fn get_final_root(&self) -> < <I::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput;
    /// Coefficients of the final (fully folded) low-degree polynomial.
    fn get_final_coefficients(&self) -> Vec<F>;
}

/// Finished FRI proof as sent to the verifier.
pub trait FriProof<F: PrimeField, I: IOP<F>> {
    fn get_final_coefficients(&self) -> &[F];
}

/// Precomputed twiddle factors (bit-reversed and natural order) for a fixed
/// domain size, shared across FRI folding rounds.
pub trait FriPrecomputations<F: PrimeField> {
    fn new_for_domain_size(size: usize) -> Self;
    fn omegas_inv_bitreversed(&self) -> &[F];
    fn omegas_inv_ref(&self) -> &[F];
    fn domain_size(&self) -> usize;
}

/// The FRI protocol interface: build a proof from LDE values, finalize it
/// for concrete query positions, and verify against derived challenges.
pub trait FriIop<F: PrimeField> {
    const DEGREE: usize;
    type IopType: IOP<F>;
    type ProofPrototype: FriProofPrototype<F, Self::IopType>;
    type Proof: FriProof<F, Self::IopType>;
    type Params: Clone + std::fmt::Debug;
    /// Runs the commit phase over the LDE of a polynomial, drawing folding
    /// challenges from `prng`.
    fn proof_from_lde<P: Prng<F, Input = < < <Self::IopType as IOP<F>>::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput >,
        C: FriPrecomputations<F>
    >(
        lde_values: &Polynomial<F, Values>,
        lde_factor: usize,
        output_coeffs_at_degree_plus_one: usize,
        precomputations: &C,
        worker: &Worker,
        prng: &mut P,
        params: &Self::Params
    ) -> Result<Self::ProofPrototype, SynthesisError>;
    /// Turns a prototype into a final proof by adding openings at the given
    /// natural-order query positions.
    fn prototype_into_proof(
        prototype: Self::ProofPrototype,
        iop_values: &Polynomial<F, Values>,
        natural_first_element_indexes: Vec<usize>,
        params: &Self::Params
    ) -> Result<Self::Proof, SynthesisError>;
    // // will write roots to prng values
    // fn verify_proof_with_transcript<P: Prng<F, Input = < < <Self::IopType as IOP<F>>::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput > >(
    //     proof: &Self::Proof,
    //     natural_element_indexes: Vec<usize>,
    //     expected_values: &[F],
    //     prng: &mut P
    // ) -> Result<bool, SynthesisError>;
    /// Re-derives the folding challenges from the proof's roots via `prng`.
    fn get_fri_challenges<P: Prng<F, Input = < < <Self::IopType as IOP<F>>::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput > >(
        proof: &Self::Proof,
        prng: &mut P,
        params: &Self::Params
    ) -> Vec<F>;
    /// Checks the proof against externally supplied challenges and values.
    fn verify_proof_with_challenges(
        proof: &Self::Proof,
        natural_element_indexes: Vec<usize>,
        expected_value: &[F],
        fri_challenges: &[F],
        params: &Self::Params
    ) -> Result<bool, SynthesisError>;
}
// Hash abstraction used by the commitment trees.
pub mod tree_hash;
// Binary Merkle tree built on top of `tree_hash`.
pub mod binary_tree;
#!/bin/sh
# cargo test --release -- --ignored --nocapture test_multiexp_performance_on_large_data
RUSTFLAGS="-C target-cpu=native -C target_feature=+bmi2,+adx,+sse4.1" cargo +nightly test --release --features "asm" -- --ignored --nocapture test_large_data_different_multiexps
<file_sep>/src/sonic/unhelped/padding.rs
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use crate::sonic::cs::{Backend};
use crate::sonic::cs::{Coeff, Variable, LinearCombination};
use crate::sonic::util::*;
use crate::sonic::util::*;
use crate::sonic::cs::{SynthesisDriver};
use crate::Circuit as BellmanCircuit;
use crate::sonic::sonic::AdaptorCircuit;
use crate::sonic::cs::Circuit;
use crate::sonic::cs::ConstraintSystem;
use crate::sonic::cs::Nonassigning;
use crate::SynthesisError;
/*
s_1(X, Y) = \sum\limits_{i=1}^N u_i(Y) X^{N + 1 - i}
+ \sum\limits_{i=1}^N v_i(Y) X^{N + 1 + i}
+ \sum\limits_{i=1}^N w_i(Y) X^{2N + 1 + i}
where
u_i(Y) = \sum\limits_{q=1}^Q Y^{q} u_{i,q}
v_i(Y) = \sum\limits_{q=1}^Q Y^{q} v_{i,q}
w_i(Y) = \sum\limits_{q=1}^Q Y^{q} w_{i,q}
s_1(X, Y) = \sum\limits_{i=1}^(3N + 1) [u_{N + 1 - i}(Y), v_{i - N - 1}(Y), w_{i - 2N - 1}(Y)] X^{i}
where [] means concatenation
if we open up both sums a little it would look like
// q = 1,
Y * ( X * u_{N, 1} + X^{N + 1} * v_{1, 1} + X^{2N + 1} * w{1, 1}) = Y * (k_0 * X + k_1 * X^{N + 1} + k_2 * X^{2N + 1})
and for the permutation argument there should exist another term over Y that has the same structure, but with coefficients permuted, e.g.
Y^{p_1} * (k_1 * X + k_2 * X^{N + 1} + k_0 * X^{2N + 1}) and Y^{p_2} * (k_2 * X + k_0 * X^{N + 1} + k_1 * X^{2N + 1})
that would result in a sum
X * (k_0 * Y + k_1 * Y^{p_1} + k_2 * Y^{p_2})
+ X^{N + 1} * (k_1 * Y + k_2 * Y^{p_1} + k_0 * Y^{p_2})
+ X^{2N + 1} * (k_2 * Y + k_0 * Y^{p_1} + k_1 * Y^{p_2})
and permutations would look like
[k_0, k_1, k_2]
[1 , p_1, p_2]
[k_0, k_1, k_2]
[p_2, 1 , p_1]
[k_0, k_1, k_2]
[p_1, p_2, 1 ]
that would naively mean that k_0 should appear in constraint number 1 for variable number 1
constraint number p_1 for variable number N + 1
constraint number p_2 for variable number 2N + 1
restructuring strategy:
where u_{i, q} is a coefficient in a linear constraint for an A type variable number i
that corresponds to the qth multiplication gate
to make s_1 representable as a permutation we first must synthesize all the normal constraints,
then make what would look like a cyclic shift + expansion
- imagine that there were originally N variables
- variable A(i) in linear constraint number q had a coefficient of u{i, q}
- add a variable B(i+n) that would have a number
*/
/// Debugging backend that renders each linear constraint as human-readable
/// strings instead of assigning field values; used by `create_constraints_info`.
pub struct Debugging<E> {
    // number of linear constraints synthesized so far (rows are indexed constraint_num - 1)
    constraint_num: usize,
    // stringified A-variable terms, one entry per linear constraint
    u: Vec<String>,
    // stringified B-variable terms, one entry per linear constraint
    v: Vec<String>,
    // stringified C-variable terms, one entry per linear constraint
    w: Vec<String>,
    _marker: std::marker::PhantomData<E>
}
impl<'a, E: Engine> Backend<E> for &'a mut Debugging<E> {
    /// Open a fresh (empty) row in each of the U, V, W term tables.
    fn new_linear_constraint(&mut self) {
        self.constraint_num += 1;
        self.u.push(String::new());
        self.v.push(String::new());
        self.w.push(String::new());
    }

    /// Append a human-readable term for `var` scaled by `coeff` to the
    /// accumulator string of the current (most recent) linear constraint.
    fn insert_coefficient(&mut self, var: Variable, coeff: Coeff<E>) {
        let one = E::Fr::one();
        let mut minus_one = one;
        minus_one.negate();

        let row = self.constraint_num - 1;
        // pick the accumulator and display label matching the wire kind
        let (acc, label, index) = match var {
            Variable::A(i) => (&mut self.u[row], "A", i),
            Variable::B(i) => (&mut self.v[row], "B", i),
            Variable::C(i) => (&mut self.w[row], "C", i),
        };

        match coeff {
            Coeff::Zero => { },
            Coeff::One => {
                acc.push_str(&format!(" + {}{}", label, index));
            },
            Coeff::NegativeOne => {
                acc.push_str(&format!(" - {}{}", label, index));
            },
            Coeff::Full(val) => {
                // render +1 / -1 compactly, everything else as an explicit factor
                if val == one {
                    acc.push_str(&format!(" + {}{}", label, index));
                } else if val == minus_one {
                    acc.push_str(&format!(" - {}{}", label, index));
                } else {
                    acc.push_str(&format!(" + {}*{}{}", val, label, index));
                }
            }
        }
    }
}
/// Synthesis driver that pads every linear constraint with two "rotated"
/// copies (A->B->C->A and A->C->B->A over dummy gates) so the s_1 polynomial
/// becomes a permutation.
pub struct Padding;
impl SynthesisDriver for Padding {
    /// Synthesize `circuit` into `backend`, emitting for every linear
    /// constraint two extra "rotated" copies (A->B->C->A and A->C->B->A)
    /// over freshly allocated dummy gates so that the resulting S polynomial
    /// is a permutation.
    ///
    /// Fix versus the previous revision: the debug `println!`s reported the
    /// mapped index AFTER `expected_new_index` was incremented, so every
    /// "Xi -> Yj" log line was off by one; they now print the index that was
    /// actually inserted into the allocation map.
    fn synthesize<E: Engine, C: Circuit<E>, B: Backend<E>>(backend: B, circuit: &C) -> Result<(), SynthesisError> {
        struct Synthesizer<E: Engine, B: Backend<E>> {
            backend: B,
            // index of a multiplication gate whose A slot is filled but whose
            // B/C slots are still free for the next allocation
            current_variable: Option<usize>,
            _marker: PhantomData<E>,
            // number of linear constraints synthesized so far
            q: usize,
            // number of multiplication gates synthesized so far
            n: usize,
        }

        impl<E: Engine, B: Backend<E>>Synthesizer<E, B> {
            // Complete a half-filled multiplication gate by assigning B = 1
            // and C = A * 1, so subsequent bookkeeping can operate on whole
            // gates only.
            fn purge_current_var(&mut self) {
                match self.current_variable.take() {
                    Some(index) => {
                        let var_a = Variable::A(index);
                        let var_b = Variable::B(index);
                        let var_c = Variable::C(index);

                        let mut product = None;

                        let value_a = self.backend.get_var(var_a);

                        self.backend.set_var(var_b, || {
                            let value_b = E::Fr::one();
                            product = Some(value_a.ok_or(SynthesisError::AssignmentMissing)?);
                            product.as_mut().map(|product| product.mul_assign(&value_b));

                            Ok(value_b)
                        }).expect("should exist by now");

                        self.backend.set_var(var_c, || {
                            product.ok_or(SynthesisError::AssignmentMissing)
                        }).expect("should exist by now");

                        self.current_variable = None;
                    },
                    _ => {}
                }
            }

            // Allocate the constant-one variable as the very first gate
            // (1 * 1 = 1) and pin it with the constraint A1 + B1 - C1 = 0.
            fn alloc_one(&mut self) -> Variable {
                self.n += 1;
                let index = self.n;
                assert_eq!(index, 1);
                self.backend.new_multiplication_gate();

                let var_a = Variable::A(1);
                let var_b = Variable::B(1);
                let var_c = Variable::C(1);

                self.backend.set_var(var_a, || {
                    Ok(E::Fr::one())
                }).expect("should exist by now");

                self.backend.set_var(var_b, || {
                    Ok(E::Fr::one())
                }).expect("should exist by now");

                self.backend.set_var(var_c, || {
                    Ok(E::Fr::one())
                }).expect("should exist by now");

                self.q += 1;
                self.backend.new_linear_constraint();
                self.backend.insert_coefficient(var_a, Coeff::One);
                self.backend.insert_coefficient(var_b, Coeff::One);
                self.backend.insert_coefficient(var_c, Coeff::NegativeOne);
                self.backend.new_k_power(self.q);

                var_a
            }
        }

        impl<E: Engine, B: Backend<E>> ConstraintSystem<E> for Synthesizer<E, B> {
            const ONE: Variable = Variable::A(1);

            // Allocate a witness variable, packing two allocations per
            // multiplication gate: the first fills the A slot, the second
            // fills B (and C receives their product).
            fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
            where
                F: FnOnce() -> Result<E::Fr, SynthesisError>
            {
                match self.current_variable.take() {
                    Some(index) => {
                        let var_a = Variable::A(index);
                        let var_b = Variable::B(index);
                        let var_c = Variable::C(index);

                        let mut product = None;

                        let value_a = self.backend.get_var(var_a);

                        self.backend.set_var(var_b, || {
                            let value_b = value()?;
                            product = Some(value_a.ok_or(SynthesisError::AssignmentMissing)?);
                            product.as_mut().map(|product| product.mul_assign(&value_b));

                            Ok(value_b)
                        })?;

                        self.backend.set_var(var_c, || {
                            product.ok_or(SynthesisError::AssignmentMissing)
                        })?;

                        self.current_variable = None;

                        Ok(var_b)
                    },
                    None => {
                        self.n += 1;
                        let index = self.n;
                        self.backend.new_multiplication_gate();

                        let var_a = Variable::A(index);

                        self.backend.set_var(var_a, value)?;

                        self.current_variable = Some(index);

                        Ok(var_a)
                    }
                }
            }

            // TODO: allocate input without spawning extra constraints
            fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
            where
                F: FnOnce() -> Result<E::Fr, SynthesisError>
            {
                let input_var = self.alloc(value)?;

                // `enforce_zero` below emits three constraints in this driver
                // (the original plus its two rotated copies), so expose the
                // k powers of all three for this public input
                self.enforce_zero(LinearCombination::zero() + input_var);
                self.backend.new_k_power(self.q-2);
                self.backend.new_k_power(self.q-1);
                self.backend.new_k_power(self.q);

                Ok(input_var)
            }

            fn enforce_zero(&mut self, lc: LinearCombination<E>)
            {
                // first synthesize the constraint exactly as given
                self.q += 1;
                self.backend.new_linear_constraint();

                for (var, coeff) in lc.as_ref() {
                    self.backend.insert_coefficient(*var, *coeff);
                }

                // now we need to "rotate" a linear constraint by allocating more dummy variables, so ensuring
                // that if for some q (index of LC) there is a coefficient C in front of a variable A(i) (that will result in a term ~ C*Y^{q}*X^{i})
                // then there will be some other q' where there is a coefficient C in front of the variable B(i)
                // (that will result in a term ~ C*Y^{q'}*X^{i+N}) and another q'' with C in front of C(i)
                // (that will result in a term ~ C*Y^{q''}*X^{i+2N}), so S polynomial is indeed a permutation

                // allocate at max 1 variable to later work with whole gates directly
                self.purge_current_var();

                use std::collections::HashMap;

                // A -> B, B -> C, C -> A
                {
                    self.q += 1;
                    self.backend.new_linear_constraint();

                    let mut allocation_map = HashMap::with_capacity(lc.as_ref().len());
                    let mut expected_new_index = self.n + 1;

                    // determine size of the map; the constant wires (gate 1's
                    // A1/B1/C1) are remapped in place and never duplicated
                    for (var, _) in lc.as_ref() {
                        match var {
                            Variable::A(index) => {
                                if allocation_map.get(index).is_none() && *index != 1 {
                                    allocation_map.insert(*index, expected_new_index);
                                    // print the index actually assigned (was off by one)
                                    println!("A{} -> B{}", index, expected_new_index);
                                    expected_new_index += 1;
                                }
                            },
                            Variable::B(index) => {
                                if allocation_map.get(index).is_none() && *index != 2 {
                                    allocation_map.insert(*index, expected_new_index);
                                    println!("B{} -> C{}", index, expected_new_index);
                                    expected_new_index += 1;
                                }
                            },
                            Variable::C(index) => {
                                if allocation_map.get(index).is_none() && *index != 3 {
                                    allocation_map.insert(*index, expected_new_index);
                                    println!("C{} -> A{}", index, expected_new_index);
                                    expected_new_index += 1;
                                }
                            }
                        }
                    }

                    for _ in 0..allocation_map.len() {
                        self.backend.new_multiplication_gate();
                        self.n += 1;
                    }

                    // copy the wire assignments of each original gate into the
                    // dummy gate, rotated one slot: A -> B, B -> C, C -> A
                    for (index, new_index) in allocation_map.iter() {
                        let var_a = Variable::A(*new_index);
                        let var_b = Variable::B(*new_index);
                        let var_c = Variable::C(*new_index);

                        let b_val = self.backend.get_var(Variable::A(*index));
                        let c_val = self.backend.get_var(Variable::B(*index));
                        let a_val = self.backend.get_var(Variable::C(*index));

                        self.backend.set_var(var_a, || {
                            let value = a_val.ok_or(SynthesisError::AssignmentMissing)?;

                            Ok(value)
                        }).expect("should exist by now");

                        self.backend.set_var(var_b, || {
                            let value = b_val.ok_or(SynthesisError::AssignmentMissing)?;

                            Ok(value)
                        }).expect("should exist by now");

                        self.backend.set_var(var_c, || {
                            let value = c_val.ok_or(SynthesisError::AssignmentMissing)?;

                            Ok(value)
                        }).expect("should exist by now");
                    }

                    // re-emit the coefficients against the rotated wires:
                    // A -> B, B -> C, C -> A
                    for (var, coeff) in lc.as_ref() {
                        let new_var = match var {
                            Variable::A(index) => {
                                let var = if *index == 1 {
                                    Variable::B(2)
                                } else {
                                    let new_index = allocation_map.get(index).unwrap();
                                    Variable::B(*new_index)
                                };

                                var
                            },
                            Variable::B(index) => {
                                let var = if *index == 2 {
                                    Variable::C(3)
                                } else {
                                    let new_index = allocation_map.get(index).unwrap();
                                    Variable::C(*new_index)
                                };

                                var
                            },
                            Variable::C(index) => {
                                let var = if *index == 3 {
                                    Variable::A(1)
                                } else {
                                    let new_index = allocation_map.get(index).unwrap();
                                    Variable::A(*new_index)
                                };

                                var
                            }
                        };

                        self.backend.insert_coefficient(new_var, *coeff);
                    }
                }

                // A -> C, B -> A, C -> B
                {
                    self.q += 1;
                    self.backend.new_linear_constraint();

                    let mut allocation_map = HashMap::with_capacity(lc.as_ref().len());
                    let mut expected_new_index = self.n + 1;

                    // determine size of the map
                    for (var, _) in lc.as_ref() {
                        match var {
                            Variable::A(index) => {
                                if allocation_map.get(index).is_none() && *index != 1 {
                                    allocation_map.insert(*index, expected_new_index);
                                    // print the index actually assigned (was off by one)
                                    println!("A{} -> C{}", index, expected_new_index);
                                    expected_new_index += 1;
                                }
                            },
                            Variable::B(index) => {
                                if allocation_map.get(index).is_none() && *index != 2 {
                                    allocation_map.insert(*index, expected_new_index);
                                    println!("B{} -> A{}", index, expected_new_index);
                                    expected_new_index += 1;
                                }
                            },
                            Variable::C(index) => {
                                if allocation_map.get(index).is_none() && *index != 3 {
                                    allocation_map.insert(*index, expected_new_index);
                                    println!("C{} -> B{}", index, expected_new_index);
                                    expected_new_index += 1;
                                }
                            }
                        }
                    }

                    for _ in 0..allocation_map.len() {
                        self.backend.new_multiplication_gate();
                        self.n += 1;
                    }

                    // copy the wire assignments rotated the other way:
                    // A -> C, B -> A, C -> B
                    for (index, new_index) in allocation_map.iter() {
                        let var_a = Variable::A(*new_index);
                        let var_b = Variable::B(*new_index);
                        let var_c = Variable::C(*new_index);

                        let b_val = self.backend.get_var(Variable::C(*index));
                        let c_val = self.backend.get_var(Variable::A(*index));
                        let a_val = self.backend.get_var(Variable::B(*index));

                        self.backend.set_var(var_a, || {
                            let value = a_val.ok_or(SynthesisError::AssignmentMissing)?;

                            Ok(value)
                        }).expect("should exist by now");

                        self.backend.set_var(var_b, || {
                            let value = b_val.ok_or(SynthesisError::AssignmentMissing)?;

                            Ok(value)
                        }).expect("should exist by now");

                        self.backend.set_var(var_c, || {
                            let value = c_val.ok_or(SynthesisError::AssignmentMissing)?;

                            Ok(value)
                        }).expect("should exist by now");
                    }

                    // A -> C, B -> A, C -> B
                    for (var, coeff) in lc.as_ref() {
                        let new_var = match var {
                            Variable::A(index) => {
                                let var = if *index == 1 {
                                    Variable::C(3)
                                } else {
                                    let new_index = allocation_map.get(index).unwrap();
                                    Variable::C(*new_index)
                                };

                                var
                            },
                            Variable::B(index) => {
                                let var = if *index == 2 {
                                    Variable::A(1)
                                } else {
                                    let new_index = allocation_map.get(index).unwrap();
                                    Variable::A(*new_index)
                                };

                                var
                            },
                            Variable::C(index) => {
                                let var = if *index == 3 {
                                    Variable::B(2)
                                } else {
                                    let new_index = allocation_map.get(index).unwrap();
                                    Variable::B(*new_index)
                                };

                                var
                            }
                        };

                        self.backend.insert_coefficient(new_var, *coeff);
                    }
                }
            }

            // Allocate a full multiplication gate a * b = c with all three
            // values supplied by the caller.
            fn multiply<F>(&mut self, values: F) -> Result<(Variable, Variable, Variable), SynthesisError>
            where
                F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError>
            {
                self.n += 1;
                let index = self.n;
                self.backend.new_multiplication_gate();

                let a = Variable::A(index);
                let b = Variable::B(index);
                let c = Variable::C(index);

                let mut b_val = None;
                let mut c_val = None;

                self.backend.set_var(a, || {
                    let (a, b, c) = values()?;

                    b_val = Some(b);
                    c_val = Some(c);

                    Ok(a)
                })?;

                self.backend.set_var(b, || {
                    b_val.ok_or(SynthesisError::AssignmentMissing)
                })?;

                self.backend.set_var(c, || {
                    c_val.ok_or(SynthesisError::AssignmentMissing)
                })?;

                Ok((a, b, c))
            }

            fn get_value(&self, var: Variable) -> Result<E::Fr, ()> {
                self.backend.get_var(var).ok_or(())
            }
        }

        let mut tmp: Synthesizer<E, B> = Synthesizer {
            backend: backend,
            current_variable: None,
            _marker: PhantomData,
            q: 0,
            n: 0,
        };

        // the constant-one input must be allocated first so that it lands
        // in gate 1 and matches `ONE`
        let one = tmp.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues");

        match (one, <Synthesizer<E, B> as ConstraintSystem<E>>::ONE) {
            (Variable::A(1), Variable::A(1)) => {},
            _ => panic!("one variable is incorrect")
        }

        circuit.synthesize(&mut tmp)?;

        println!("Done synthesizing, N = {}, Q = {}", tmp.n, tmp.q);

        Ok(())
    }
}
/// Dump the constraint system of a Bellman circuit using the plain
/// (non-padding) `Nonassigning` driver.
pub fn constraints_info<E: Engine, C: BellmanCircuit<E> + Clone>(
    circuit: C,
)
{
    // wrap the Bellman circuit in the sonic adaptor, then print its constraints
    create_constraints_info::<_, _, Nonassigning>(&AdaptorCircuit(circuit))
}
/// Dump the constraint system of a Bellman circuit using the `Padding`
/// driver, which adds the rotated permutation constraints.
pub fn constraints_padding_info<E: Engine, C: BellmanCircuit<E> + Clone>(
    circuit: C,
)
{
    // wrap the Bellman circuit in the sonic adaptor, then print its constraints
    create_constraints_info::<_, _, Padding>(&AdaptorCircuit(circuit))
}
/// Synthesize `circuit` through driver `S` into the string-building
/// `Debugging` backend and print every linear constraint as
/// `Constraint i: 0 = <A terms><B terms><C terms>`.
pub fn create_constraints_info<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
    circuit: &C,
)
{
    let mut backend = Debugging::<E> {
        constraint_num: 0,
        u: Vec::new(),
        v: Vec::new(),
        w: Vec::new(),
        _marker: std::marker::PhantomData
    };

    S::synthesize(&mut backend, circuit).unwrap();

    // row i holds the stringified A/B/C parts of linear constraint i
    let rows = backend.u.iter()
        .zip(backend.v.iter())
        .zip(backend.w.iter());

    for (i, ((u, v), w)) in rows.enumerate() {
        println!("Constraint {}: 0 = {}{}{}", i, u, v, w);
    }
}
#[test]
fn my_fun_circuit_test() {
    use crate::pairing::ff::PrimeField;
    use crate::pairing::bls12_381::{Bls12, Fr};

    // Minimal circuit: one multiplication gate (10 * 20 = 200) plus two
    // linear constraints; used to compare the plain and padded drivers.
    struct MyCircuit;

    impl<E: Engine> Circuit<E> for MyCircuit {
        fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
            let (a, b, _) = cs.multiply(|| {
                Ok((
                    E::Fr::from_str("10").unwrap(),
                    E::Fr::from_str("20").unwrap(),
                    E::Fr::from_str("200").unwrap(),
                ))
            })?;

            // a + a - b = 0, i.e. b = 2a
            cs.enforce_zero(LinearCombination::from(a) + a - b);

            let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?;

            // b - multiplier = 0, tying b to the public input
            cs.enforce_zero(LinearCombination::from(b) - multiplier);

            Ok(())
        }
    }

    // dump constraints once without padding and once with it, for eyeballing
    create_constraints_info::<Bls12, _, Nonassigning>(&MyCircuit);
    println!("---------------");
    create_constraints_info::<Bls12, _, Padding>(&MyCircuit);
}
use crate::SynthesisError;
use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr, ScalarEngine};
use crate::pairing::{CurveAffine, CurveProjective, Engine};
use super::srs::SRS;
/// Iterator extension providing `chain_ext`, a `chain` variant whose result
/// type (`Chain`, below) also carries `ExactSizeIterator` and
/// `DoubleEndedIterator` implementations.
pub trait ChainExt: Iterator {
    /// Chain `self` with `other`, yielding all of `self`'s items first.
    fn chain_ext<U>(self, other: U) -> Chain<Self, U::IntoIter>
    where
        Self: Sized,
        U: IntoIterator<Item = Self::Item>,
    {
        Chain {
            t: self,
            u: other.into_iter(),
        }
    }
}

// blanket implementation: every iterator gets `chain_ext`
impl<I: Iterator> ChainExt for I {}
/// Two iterators glued together: yields everything from `t`, then
/// everything from `u`. Created by `ChainExt::chain_ext`.
#[derive(Clone)]
pub struct Chain<T, U> {
    t: T,
    u: U,
}
impl<T, U> Iterator for Chain<T, U>
where
    T: Iterator,
    U: Iterator<Item = T::Item>,
{
    type Item = T::Item;

    /// Drain `t` first; once it is exhausted, continue with `u`.
    fn next(&mut self) -> Option<T::Item> {
        self.t.next().or_else(|| self.u.next())
    }
}
impl<T, U> ExactSizeIterator for Chain<T, U>
where
    T: Iterator,
    U: Iterator<Item = T::Item>,
    T: ExactSizeIterator,
    U: ExactSizeIterator,
{
    // the chained length is simply the sum of both halves
    fn len(&self) -> usize {
        self.t.len() + self.u.len()
    }
}
impl<T, U> DoubleEndedIterator for Chain<T, U>
where
    T: Iterator,
    U: Iterator<Item = T::Item>,
    T: DoubleEndedIterator,
    U: DoubleEndedIterator,
{
    /// Mirror of `next`: drain `u` from the back first, then `t`.
    fn next_back(&mut self) -> Option<T::Item> {
        self.u.next_back().or_else(|| self.t.next_back())
    }
}
/// Commit to a Laurent polynomial with coefficients `s` spanning powers
/// `-largest_negative_power ..= largest_positive_power`, using the alpha
/// bases of the SRS. NOTE(review): the slicing shifts the committed powers
/// by `d - max`; confirm the intended shift against the Sonic construction.
pub fn polynomial_commitment<
        'a,
        E: Engine,
        IS: IntoIterator<Item = &'a E::Fr>,
    >(
    max: usize,
    largest_negative_power: usize,
    largest_positive_power: usize,
    srs: &'a SRS<E>,
    s: IS,
) -> E::G1Affine
where
    IS::IntoIter: ExactSizeIterator,
{
    // smallest power is d - max - largest_negative_power; It should either be 0 for use of positive powers only,
    // of we should use part of the negative powers
    let d = srs.d;
    assert!(max >= largest_positive_power);
    // use both positive and negative powers for commitment
    if d < max + largest_negative_power + 1 {
        let min_power = largest_negative_power + max - d;
        let max_power = d + largest_positive_power - max;
        // need to use negative powers to make a proper commitment:
        // negative bases are reversed so indices line up with the coefficients
        return multiexp(
            srs.g_negative_x_alpha[0..min_power].iter().rev()
            .chain_ext(srs.g_positive_x_alpha[..max_power].iter()),
            s
        ).into_affine();
    } else {
        // the whole polynomial fits into the positive alpha powers of the SRS
        return multiexp(
            srs.g_positive_x_alpha[(srs.d - max - largest_negative_power - 1)..].iter(),
            s
        ).into_affine();
    }
}
/// Produce an opening (witness) for a polynomial commitment at `point` by
/// dividing by `(x - point)` and committing to the quotient.
/// For now this function MUST take a polynomial in a form f(x) - f(z)
/// so that the division is exact.
pub fn polynomial_commitment_opening<
    'a,
    E: Engine,
    I: IntoIterator<Item = &'a E::Fr>
>(
    largest_negative_power: usize,
    _largest_positive_power: usize,
    polynomial_coefficients: I,
    point: E::Fr,
    srs: &'a SRS<E>,
) -> E::G1Affine
    where I::IntoIter: DoubleEndedIterator + ExactSizeIterator,
{
    // let poly = parallel_kate_divison::<E, _>(polynomial_coefficients, point);

    // use std::time::Instant;
    // let start = Instant::now();

    // exact division of (f(x) - f(z)) by (x - z)
    let poly = kate_divison(
        polynomial_coefficients,
        point,
    );

    // println!("Kate division of size {} taken {:?}", poly.len(), start.elapsed());

    // the first `largest_negative_power` quotient coefficients belong to
    // negative powers of x; reverse them so they pair up with the negative
    // SRS bases in increasing |power| order
    let negative_poly = poly[0..largest_negative_power].iter().rev();
    let positive_poly = poly[largest_negative_power..].iter();
    multiexp(
        srs.g_negative_x[1..(negative_poly.len() + 1)].iter().chain_ext(
            srs.g_positive_x[0..positive_poly.len()].iter()
        ),
        negative_poly.chain_ext(positive_poly)
    ).into_affine()
}
extern crate crossbeam;
use self::crossbeam::channel::{unbounded};
/// Evaluate `sum_i coeffs[i] * first_power * base^{i}` in parallel: each
/// worker chunk computes a partial sum which is then folded into the result.
pub fn evaluate_at_consequitive_powers<'a, F: Field> (
    coeffs: &[F],
    first_power: F,
    base: F
) -> F
{
    use crate::worker::Worker;

    let (tx, rx) = unbounded();

    let worker = Worker::new();

    worker.scope(coeffs.len(), |scope, chunk_size| {
        for (chunk_idx, chunk) in coeffs.chunks(chunk_size).enumerate() {
            let tx = tx.clone();
            scope.spawn(move |_| {
                // power of `base` at the first coefficient of this chunk
                let mut power = base.pow(&[(chunk_idx * chunk_size) as u64]);
                power.mul_assign(&first_power);

                let mut partial = F::zero();
                for coeff in chunk {
                    let mut term = *coeff;
                    term.mul_assign(&power);
                    partial.add_assign(&term);
                    power.mul_assign(&base);
                }

                tx.send(partial).expect("must send");
            });
        }
    });
    drop(tx);

    // once the scope has ended every cloned sender is gone, so this
    // iterator drains all buffered partial sums and then terminates
    let mut result = F::zero();
    for partial in rx.iter() {
        result.add_assign(&partial);
    }

    result
}
/// Same as `evaluate_at_consequitive_powers`, but also scales each
/// coefficient in place by its power of the base as a side effect.
pub fn mut_evaluate_at_consequitive_powers<'a, F: Field> (
    coeffs: &mut [F],
    first_power: F,
    base: F
) -> F
{
    use crate::worker::Worker;

    let (tx, rx) = unbounded();

    let worker = Worker::new();

    worker.scope(coeffs.len(), |scope, chunk_size| {
        for (chunk_idx, chunk) in coeffs.chunks_mut(chunk_size).enumerate() {
            let tx = tx.clone();
            scope.spawn(move |_| {
                // power of `base` at the first coefficient of this chunk
                let mut power = base.pow(&[(chunk_idx * chunk_size) as u64]);
                power.mul_assign(&first_power);

                let mut partial = F::zero();
                for coeff in chunk {
                    // scale the coefficient in place, then accumulate it
                    coeff.mul_assign(&power);
                    partial.add_assign(coeff);
                    power.mul_assign(&base);
                }

                tx.send(partial).expect("must send");
            });
        }
    });
    drop(tx);

    // all senders are gone once the scope ends, so this drains every
    // partial sum and then stops
    let mut result = F::zero();
    for partial in rx.iter() {
        result.add_assign(&partial);
    }

    result
}
/// Scale each coefficient in place by `first_power * base^{i}`, where `i`
/// is the coefficient's position in the slice; work is split across the
/// worker pool.
pub fn mut_distribute_consequitive_powers<'a, F: Field> (
    coeffs: &mut [F],
    first_power: F,
    base: F
)
{
    use crate::worker::Worker;

    let worker = Worker::new();

    worker.scope(coeffs.len(), |scope, chunk_size| {
        for (chunk_idx, chunk) in coeffs.chunks_mut(chunk_size).enumerate() {
            scope.spawn(move |_| {
                // power of `base` at the first coefficient of this chunk
                let mut power = base.pow(&[(chunk_idx * chunk_size) as u64]);
                power.mul_assign(&first_power);

                for coeff in chunk {
                    coeff.mul_assign(&power);
                    power.mul_assign(&base);
                }
            });
        }
    });
}
// pub fn multiexp<
// 'a,
// G: CurveAffine,
// IB: IntoIterator<Item = &'a G>,
// IS: IntoIterator<Item = &'a G::Scalar>,
// >(
// g: IB,
// s: IS,
// ) -> G::Projective
// where
// IB::IntoIter: ExactSizeIterator + Clone,
// IS::IntoIter: ExactSizeIterator,
// {
// use crate::worker::Worker;
// use crate::multiexp::dense_multiexp;
// use std::time::Instant;
// let start = Instant::now();
// let s: Vec<<G::Scalar as PrimeField>::Repr> = s.into_iter().map(|e| e.into_repr()).collect::<Vec<_>>();
// let g: Vec<G> = g.into_iter().map(|e| *e).collect::<Vec<_>>();
// println!("Multiexp collecting taken {:?}", start.elapsed());
// assert_eq!(s.len(), g.len(), "scalars and exponents must have the same length");
// let start = Instant::now();
// let pool = Worker::new();
// println!("Multiexp pool creation taken {:?}", start.elapsed());
// let start = Instant::now();
// let result = dense_multiexp(
// &pool,
// &g,
// &s
// ).unwrap();
// println!("Multiexp taken {:?}", start.elapsed());
// result
// }
/// Parallel multiexponentiation: computes `sum_i s_i * g_i` over the
/// crate's worker pool. Panics if the two iterators differ in length.
pub fn multiexp<
    'a,
    G: CurveAffine,
    IB: IntoIterator<Item = &'a G>,
    IS: IntoIterator<Item = &'a G::Scalar>,
>(
    g: IB,
    s: IS,
) -> G::Projective
where
    IB::IntoIter: ExactSizeIterator + Clone,
    IS::IntoIter: ExactSizeIterator,
{
    use crate::worker::Worker;
    use crate::multiexp::multiexp;
    use crate::source::FullDensity;
    use futures::Future;
    use std::sync::Arc;

    // materialize scalars in repr form and bases by value, as required by
    // the crate-level multiexp routine
    let s: Vec<<G::Scalar as PrimeField>::Repr> = s.into_iter().map(|e| e.into_repr()).collect::<Vec<_>>();
    let g: Vec<G> = g.into_iter().map(|e| *e).collect::<Vec<_>>();

    assert_eq!(s.len(), g.len(), "scalars and exponents must have the same length");

    let pool = Worker::new();

    // use std::time::Instant;
    // let start = Instant::now();

    let result = multiexp(
        &pool,
        (Arc::new(g), 0),
        FullDensity,
        Arc::new(s)
    ).wait().unwrap();

    // println!("Multiexp taken {:?}", start.elapsed());

    result
}
/// Single-threaded bucketed (Pippenger-style) multiexponentiation:
/// computes `sum_i s_i * g_i`. Panics if the iterators differ in length.
pub fn multiexp_serial<
    'a,
    G: CurveAffine,
    IB: IntoIterator<Item = &'a G>,
    IS: IntoIterator<Item = &'a G::Scalar>,
>(
    g: IB,
    s: IS,
) -> G::Projective
where
    IB::IntoIter: ExactSizeIterator + Clone,
    IS::IntoIter: ExactSizeIterator,
{
    let g = g.into_iter();
    let s = s.into_iter();
    assert_eq!(g.len(), s.len());

    // window width in bits: ln(n) heuristic, with a floor of 3 for tiny inputs
    let c = if s.len() < 32 {
        3u32
    } else {
        (f64::from(s.len() as u32)).ln().ceil() as u32
    };

    // Convert all of the scalars into representations
    let mut s = s.map(|s| s.into_repr()).collect::<Vec<_>>();

    let mut windows = vec![];
    let mut buckets = vec![];

    let mask = (1u64 << c) - 1u64;
    let mut cur = 0;
    let num_bits = <G::Engine as ScalarEngine>::Fr::NUM_BITS;
    while cur <= num_bits {
        let mut acc = G::Projective::zero();

        buckets.truncate(0);
        buckets.resize((1 << c) - 1, G::Projective::zero());

        let g = g.clone();

        // place each base into the bucket addressed by the current c-bit
        // window of its scalar; scalars are shifted in place, so the window
        // is always the low c bits
        for (s, g) in s.iter_mut().zip(g) {
            let index = (s.as_ref()[0] & mask) as usize;

            if index != 0 {
                buckets[index - 1].add_assign_mixed(g);
            }

            s.shr(c as u32);
        }

        // running-sum trick: summing the suffix sums weights bucket k by k+1
        let mut running_sum = G::Projective::zero();
        for exp in buckets.iter().rev() {
            running_sum.add_assign(exp);
            acc.add_assign(&running_sum);
        }

        windows.push(acc);

        cur += c;
    }

    // combine windows from most- to least-significant, doubling c times
    // between consecutive windows
    let mut acc = G::Projective::zero();

    for window in windows.into_iter().rev() {
        for _ in 0..c {
            acc.double();
        }

        acc.add_assign(&window);
    }

    acc
}
/// Divides polynomial `a` in `x` by `x - b` with no remainder
/// (synthetic division, processed from the leading coefficient down).
pub fn kate_divison<'a, F: Field, I: IntoIterator<Item = &'a F>>(a: I, mut b: F) -> Vec<F>
where
    I::IntoIter: DoubleEndedIterator + ExactSizeIterator,
{
    b.negate();
    let coeffs = a.into_iter();

    // quotient has one coefficient fewer than the dividend
    let mut quotient = vec![F::zero(); coeffs.len() - 1];

    // carry = q_{k+1} * b is folded into each lower-order quotient term
    let mut carry = F::zero();
    for (q_coeff, a_coeff) in quotient.iter_mut().rev().zip(coeffs.rev()) {
        let mut current = *a_coeff;
        current.sub_assign(&carry);
        *q_coeff = current;
        carry = current;
        carry.mul_assign(&b);
    }

    quotient
}
/// Divides polynomial `a` in `x` by `x - b` with
/// no remainder using fft.
pub fn parallel_kate_divison<'a, E: Engine, I: IntoIterator<Item = &'a E::Fr>>(a: I, b: E::Fr) -> Vec<E::Fr>
where
    I::IntoIter: DoubleEndedIterator + ExactSizeIterator,
{
    // this implementation is only for division by a polynomial of the `x - b` form,
    // so we can manually calculate the reciprocal poly of the form `x^2/(x-b)`
    // and the remainder

    // x^2 /(x - b) = x + b*x/(x - b) = (x + b) + b^2/(x - b)

    let reciproical = vec![b, E::Fr::one()]; // x + b

    // and remainder b^2
    let mut b_squared = b;
    b_squared.square();

    let mut b_neg = b;
    b_neg.negate();

    let divisor = vec![b_neg, E::Fr::one()];

    let poly: Vec<E::Fr> = a.into_iter().map(|el| el.clone()).collect();

    let (q, _) = kate_divison_inner::<E>(poly, divisor, reciproical, b_squared);

    // the remainder must be zero for an exact division, but it is not checked here
    // assert_eq!(r.len(), 0);

    q
}
// Recursive helper for `parallel_kate_divison`: divides `poly` by `divisor`
// (always of the form `x - b`) using the precomputed reciprocal
// `x^2/(x-b) = x + b` and its remainder `b^2`. Returns (quotient, remainder).
fn kate_divison_inner<E: Engine>(
    poly: Vec<E::Fr>,
    divisor: Vec<E::Fr>,
    reciproical: Vec<E::Fr>,
    remainder: E::Fr
) -> (Vec<E::Fr>, Vec<E::Fr>) {
    // a constant polynomial divided by a degree-1 divisor: quotient is empty
    if poly.len() == 1 {
        return (vec![], poly);
    }
    // TODO: Change generic multiplications by multiplications by degree 1 polynomial
    let poly_degree = poly.len() - 1;
    let mut q = multiply_polynomials::<E>(poly.clone(), reciproical.clone());
    // dividing the product by x^2 == dropping the two low-order coefficients
    q.drain(0..2);
    // recursion step
    if poly_degree > 2 {
        let mut rec_step = poly.clone();
        mul_polynomial_by_scalar(&mut rec_step[..], remainder);
        // truncate low order terms
        rec_step.drain(0..2);
        let (q2, _) = kate_divison_inner::<E>(rec_step, divisor.clone(), reciproical, remainder);
        // length of q2 is smaller
        add_polynomials(&mut q[..q2.len()], &q2[..]);
    }

    // although r must be zero, calculate it for now
    if q.len() == 0 {
        return (q, poly);
    }

    // r = u - v*q
    let mut poly = poly;
    let tmp = multiply_polynomials::<E>(divisor, q.clone());
    sub_polynomials(&mut poly[..], &tmp[..]);

    return (q, poly);
}
/// Convenience function to check a polynomial commitment opening:
/// verifies that `commitment` opens to `value` at `point` via `opening`,
/// for a commitment made with `polynomial_commitment` and degree bound `max`.
pub fn check_polynomial_commitment<E: Engine>(
    commitment: &E::G1Affine,
    point: &E::Fr,
    value: &E::Fr,
    opening: &E::G1Affine,
    max: usize,
    srs: &SRS<E>
) -> bool {
    // pairing equation being checked:
    // e(W , hα x )e(g^{v} * W{-z} , hα ) = e(F , h^{x^{−d +max}} )
    if srs.d < max {
        return false;
    }
    let alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
    let alpha_precomp = srs.h_positive_x_alpha[0].prepare();

    // negate the right-hand base so all three pairings can be folded into a
    // single product-of-pairings comparison against one
    let mut neg_x_n_minus_d_precomp = srs.h_negative_x[srs.d - max];
    neg_x_n_minus_d_precomp.negate();
    let neg_x_n_minus_d_precomp = neg_x_n_minus_d_precomp.prepare();

    let w = opening.prepare();
    // gv = g^{v} * W^{-z}
    let mut gv = srs.g_positive_x[0].mul(value.into_repr());
    let mut z_neg = *point;
    z_neg.negate();
    let w_minus_z = opening.mul(z_neg.into_repr());
    gv.add_assign(&w_minus_z);

    let gv = gv.into_affine().prepare();

    E::final_exponentiation(&E::miller_loop(&[
            (&w, &alpha_x_precomp),
            (&gv, &alpha_precomp),
            (&commitment.prepare(), &neg_x_n_minus_d_precomp),
        ])).unwrap() == E::Fqk::one()
}
#[test]
fn laurent_division() {
use crate::pairing::ff::PrimeField;
use crate::pairing::bls12_381::{Fr};
let mut poly = vec![
Fr::from_str("328947234").unwrap(),
Fr::from_str("3545623451111").unwrap(),
Fr::from_str("112").unwrap(),
Fr::from_str("55555").unwrap(),
Fr::from_str("1235685").unwrap(),
];
fn eval(poly: &[Fr], point: Fr) -> Fr {
let point_inv = point.inverse().unwrap();
let mut acc = Fr::zero();
let mut tmp = Fr::one();
for p in &poly[2..] {
let mut t = *p;
t.mul_assign(&tmp);
acc.add_assign(&t);
tmp.mul_assign(&point);
}
let mut tmp = point_inv;
for p in poly[0..2].iter().rev() {
let mut t = *p;
t.mul_assign(&tmp);
acc.add_assign(&t);
tmp.mul_assign(&point_inv);
}
acc
}
let x = Fr::from_str("23").unwrap();
let z = Fr::from_str("2000").unwrap();
let p_at_x = eval(&poly, x);
let p_at_z = eval(&poly, z);
// poly = poly(X) - poly(z)
poly[2].sub_assign(&p_at_z);
let quotient_poly = kate_divison(&poly, z);
let quotient = eval("ient_poly, x);
// check that
// quotient * (x - z) = p_at_x - p_at_z
let mut lhs = x;
lhs.sub_assign(&z);
lhs.mul_assign("ient);
let mut rhs = p_at_x;
rhs.sub_assign(&p_at_z);
assert_eq!(lhs, rhs);
}
/// Multiply two polynomials in coefficient form using the crate's parallel
/// FFT evaluation domain; returns exactly deg(a)+deg(b)+1 coefficients.
pub fn multiply_polynomials<E: Engine>(a: Vec<E::Fr>, b: Vec<E::Fr>) -> Vec<E::Fr> {
    use crate::worker::Worker;
    use crate::domain::{EvaluationDomain, Scalar};

    // number of coefficients in the product
    let result_len = a.len() + b.len() - 1;

    let worker = Worker::new();

    let wrap = |coeffs: Vec<E::Fr>| -> Vec<Scalar<E>> {
        coeffs.into_iter().map(Scalar::<E>).collect()
    };

    let mut domain_a = EvaluationDomain::from_coeffs_into_sized(wrap(a), result_len).unwrap();
    let mut domain_b = EvaluationDomain::from_coeffs_into_sized(wrap(b), result_len).unwrap();

    // pointwise product in the evaluation domain == coefficient convolution
    domain_a.fft(&worker);
    domain_b.fft(&worker);
    domain_a.mul_assign(&worker, &domain_b);
    drop(domain_b);
    domain_a.ifft(&worker);

    let mut product: Vec<E::Fr> = domain_a.into_coeffs().iter().map(|e| e.0).collect();

    product.truncate(result_len);

    product
}
// alternative implementation that does not require an `Evaluation domain` struct
pub fn multiply_polynomials_fft<E: Engine>(a: Vec<E::Fr>, b: Vec<E::Fr>) -> Vec<E::Fr> {
    use crate::worker::Worker;
    use crate::domain::{best_fft, Scalar};
    use crate::group::Group;

    // number of coefficients in the product
    let result_len = a.len() + b.len() - 1;

    // m is a size of domain where Z polynomial does NOT vanish
    // in normal domain Z is in a form of (X-1)(X-2)...(X-N)
    let mut m = 1;
    let mut exp = 0;
    let mut omega = E::Fr::root_of_unity();
    let max_degree = (1 << E::Fr::S) - 1;

    if result_len > max_degree {
        panic!("multiplication result degree is too large");
    }

    while m < result_len {
        m *= 2;
        exp += 1;

        // The pairing-friendly curve may not be able to support
        // large enough (radix2) evaluation domains.
        if exp > E::Fr::S {
            panic!("multiplication result degree is too large");
        }
    }

    // If full domain is not needed - limit it,
    // e.g. if (2^N)th power is not required, just double omega and get 2^(N-1)th
    // Compute omega, the 2^exp primitive root of unity
    for _ in exp..E::Fr::S {
        omega.square();
    }

    let omegainv = omega.inverse().unwrap();
    // 1/m normalization factor for the inverse transform
    let minv = E::Fr::from_str(&format!("{}", m)).unwrap().inverse().unwrap();

    let worker = Worker::new();

    let mut scalars_a: Vec<Scalar<E>> = a.into_iter().map(|e| Scalar::<E>(e)).collect();
    let mut scalars_b: Vec<Scalar<E>> = b.into_iter().map(|e| Scalar::<E>(e)).collect();

    // zero-pad both inputs up to the domain size
    scalars_a.resize(m, Scalar::<E>(E::Fr::zero()));
    scalars_b.resize(m, Scalar::<E>(E::Fr::zero()));

    best_fft(&mut scalars_a[..], &worker, &omega, exp);
    best_fft(&mut scalars_b[..], &worker, &omega, exp);

    // do the convolution: pointwise product in the evaluation domain
    worker.scope(scalars_a.len(), |scope, chunk| {
        for (a, b) in scalars_a.chunks_mut(chunk).zip(scalars_b.chunks(chunk)) {
            scope.spawn(move |_| {
                for (a, b) in a.iter_mut().zip(b.iter()) {
                    a.group_mul_assign(&b.0);
                }
            });
        }
    });

    // no longer need it
    drop(scalars_b);

    // inverse FFT back to coefficient form
    best_fft(&mut scalars_a[..], &worker, &omegainv, exp);

    // normalize by 1/m after the inverse transform
    worker.scope(scalars_a.len(), |scope, chunk| {
        for v in scalars_a.chunks_mut(chunk) {
            scope.spawn(move |_| {
                for v in v {
                    v.group_mul_assign(&minv);
                }
            });
        }
    });

    let mut mul_result: Vec<E::Fr> = scalars_a.into_iter().map(|e| e.0).collect();

    mul_result.truncate(result_len);

    mul_result
}
/// Single-threaded FFT polynomial multiplication; reference implementation
/// for the parallel variants above.
pub fn multiply_polynomials_serial<E: Engine>(mut a: Vec<E::Fr>, mut b: Vec<E::Fr>) -> Vec<E::Fr> {
    let result_len = a.len() + b.len() - 1;

    // Compute the size of our evaluation domain
    let mut m = 1;
    let mut exp = 0;
    while m < result_len {
        m *= 2;
        exp += 1;

        // The pairing-friendly curve may not be able to support
        // large enough (radix2) evaluation domains.
        if exp >= E::Fr::S {
            panic!("polynomial too large")
        }
    }

    // Compute omega, the 2^exp primitive root of unity
    let mut omega = E::Fr::root_of_unity();
    for _ in exp..E::Fr::S {
        omega.square();
    }

    // Extend with zeroes
    a.resize(m, E::Fr::zero());
    b.resize(m, E::Fr::zero());

    serial_fft::<E>(&mut a[..], &omega, exp);
    serial_fft::<E>(&mut b[..], &omega, exp);

    // pointwise product in the evaluation domain
    for (a, b) in a.iter_mut().zip(b.iter()) {
        a.mul_assign(b);
    }

    serial_fft::<E>(&mut a[..], &omega.inverse().unwrap(), exp);

    a.truncate(result_len);

    // normalize by 1/m after the inverse transform
    let minv = E::Fr::from_str(&format!("{}", m))
        .unwrap()
        .inverse()
        .unwrap();

    for a in a.iter_mut() {
        a.mul_assign(&minv);
    }

    a
}
/// Adds polynomial `b` to polynomial `a` element-wise (both in coefficient
/// form), in place, splitting the work across worker threads.
///
/// Panics if the slices differ in length.
pub fn add_polynomials<F: Field>(a: &mut [F], b: &[F]) {
    // Only `Worker` is needed here; the previously imported
    // `EvaluationDomain`/`Scalar` were unused and produced warnings.
    use crate::worker::Worker;

    let worker = Worker::new();

    assert_eq!(a.len(), b.len());

    worker.scope(a.len(), |scope, chunk| {
        for (a, b) in a.chunks_mut(chunk).zip(b.chunks(chunk)) {
            scope.spawn(move |_| {
                for (a, b) in a.iter_mut().zip(b.iter()) {
                    a.add_assign(b);
                }
            });
        }
    });
}
/// Subtracts polynomial `b` from polynomial `a` element-wise (both in
/// coefficient form), in place, splitting the work across worker threads.
///
/// Panics if the slices differ in length.
pub fn sub_polynomials<F: Field>(a: &mut [F], b: &[F]) {
    // Only `Worker` is needed here; the previously imported
    // `EvaluationDomain`/`Scalar` were unused and produced warnings.
    use crate::worker::Worker;

    let worker = Worker::new();

    assert_eq!(a.len(), b.len());

    worker.scope(a.len(), |scope, chunk| {
        for (a, b) in a.chunks_mut(chunk).zip(b.chunks(chunk)) {
            scope.spawn(move |_| {
                for (a, b) in a.iter_mut().zip(b.iter()) {
                    a.sub_assign(b);
                }
            });
        }
    });
}
/// Multiplies every coefficient of polynomial `a` by the scalar `b`, in
/// place, splitting the work across worker threads.
pub fn mul_polynomial_by_scalar<F: Field>(a: &mut [F], b: F) {
    // Only `Worker` is needed here; the previously imported
    // `EvaluationDomain`/`Scalar` were unused and produced warnings.
    use crate::worker::Worker;

    let worker = Worker::new();

    worker.scope(a.len(), |scope, chunk| {
        for a in a.chunks_mut(chunk) {
            scope.spawn(move |_| {
                for a in a.iter_mut() {
                    a.mul_assign(&b);
                }
            });
        }
    });
}
/// Computes `a[i] += b[i] * c` for every coefficient, in place, splitting
/// the work across worker threads (fused multiply-add of polynomials in
/// coefficient form).
///
/// Panics if the slices differ in length.
pub fn mul_add_polynomials<F: Field>(a: &mut [F], b: &[F], c: F) {
    // Only `Worker` is needed here; the previously imported
    // `EvaluationDomain`/`Scalar` were unused and produced warnings.
    use crate::worker::Worker;

    let worker = Worker::new();

    assert_eq!(a.len(), b.len());

    worker.scope(a.len(), |scope, chunk| {
        for (a, b) in a.chunks_mut(chunk).zip(b.chunks(chunk)) {
            scope.spawn(move |_| {
                for (a, b) in a.iter_mut().zip(b.iter()) {
                    // r = b * c; a += r. Copy `b` first so the source slice
                    // stays untouched.
                    let mut r = *b;
                    r.mul_assign(&c);
                    a.add_assign(&r);
                }
            });
        }
    });
}
/// In-place radix-2 Cooley-Tukey FFT (decimation in time) over `E::Fr`.
///
/// `a` must have length exactly `2^log_n` and `omega` must be a primitive
/// `2^log_n`-th root of unity. Single-threaded.
fn serial_fft<E: Engine>(a: &mut [E::Fr], omega: &E::Fr, log_n: u32) {
    // Reverses the lowest `l` bits of `n`; used for the DIT input permutation.
    fn bitreverse(mut n: u32, l: u32) -> u32 {
        let mut r = 0;
        for _ in 0..l {
            r = (r << 1) | (n & 1);
            n >>= 1;
        }
        r
    }

    let n = a.len() as u32;
    assert_eq!(n, 1 << log_n);

    // Permute the input into bit-reversed order so the butterflies below can
    // combine contiguous sub-transforms of doubling size. The `k < rk` guard
    // ensures each pair is swapped exactly once.
    for k in 0..n {
        let rk = bitreverse(k, log_n);
        if k < rk {
            a.swap(rk as usize, k as usize);
        }
    }

    // Iteratively merge sub-transforms of size m into transforms of size 2m.
    let mut m = 1;
    for _ in 0..log_n {
        // w_m is a primitive (2m)-th root of unity for this round.
        let w_m = omega.pow(&[(n / (2 * m)) as u64]);

        let mut k = 0;
        while k < n {
            let mut w = E::Fr::one();
            for j in 0..m {
                // Butterfly: (x, y) -> (x + w*y, x - w*y).
                let mut t = a[(k + j + m) as usize];
                t.mul_assign(&w);
                let mut tmp = a[(k + j) as usize];
                tmp.sub_assign(&t);
                a[(k + j + m) as usize] = tmp;
                a[(k + j) as usize].add_assign(&t);
                w.mul_assign(&w_m);
            }

            k += 2 * m;
        }

        m *= 2;
    }
}
/// Extension trait converting an `Option` into a synthesis result, mapping
/// `None` to `SynthesisError::AssignmentMissing`.
pub trait OptionExt<T> {
    /// Returns the contained value, or `SynthesisError::AssignmentMissing`
    /// if the option is `None`.
    fn get(self) -> Result<T, SynthesisError>;
}

impl<T> OptionExt<T> for Option<T> {
    fn get(self) -> Result<T, SynthesisError> {
        // Idiomatic equivalent of matching on Some/None.
        self.ok_or(SynthesisError::AssignmentMissing)
    }
}
#[test]
fn test_mul() {
    use rand::{self, Rand};
    use crate::pairing::bls12_381::Bls12;
    use crate::pairing::bls12_381::Fr;

    // Multiply two random degree-99 polynomials with both implementations;
    // serial and parallel results must agree exactly.
    const SAMPLES: usize = 100;

    let rng = &mut rand::thread_rng();
    let lhs: Vec<Fr> = (0..SAMPLES).map(|_| Fr::rand(rng)).collect();
    let rhs: Vec<Fr> = (0..SAMPLES).map(|_| Fr::rand(rng)).collect();

    let serial_res = multiply_polynomials_serial::<Bls12>(lhs.clone(), rhs.clone());
    let parallel_res = multiply_polynomials::<Bls12>(lhs, rhs);

    assert_eq!(serial_res.len(), parallel_res.len());
    assert_eq!(serial_res, parallel_res);
}
#[test]
fn test_eval_at_powers() {
    // The unused `Bls12` import was removed (only `Fr` is used here).
    use rand::{self, Rand, Rng};
    use crate::pairing::bls12_381::Fr;

    // Checks that the parallel evaluation of sum_i a[i] * x^(n + i)
    // matches a naive sequential accumulation.
    const SAMPLES: usize = 100000;

    let rng = &mut rand::thread_rng();
    let a = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
    let x: Fr = rng.gen();
    let n: u32 = rng.gen();

    // Naive reference evaluation.
    let mut acc = Fr::zero();
    {
        let mut tmp = x.pow(&[n as u64]);
        for coeff in a.iter() {
            let mut c = *coeff;
            c.mul_assign(&tmp);
            acc.add_assign(&c);
            tmp.mul_assign(&x);
        }
    }

    let first_power = x.pow(&[n as u64]);
    let acc_parallel = evaluate_at_consequitive_powers(&a[..], first_power, x);

    assert_eq!(acc_parallel, acc);
}
#[test]
fn test_mut_eval_at_powers() {
    // The unused `Bls12` import was removed (only `Fr` is used here).
    use rand::{self, Rand, Rng};
    use crate::pairing::bls12_381::Fr;

    // The mutating variant must return the same accumulated sum as the
    // naive loop AND leave the slice scaled by consecutive powers of x.
    const SAMPLES: usize = 100000;

    let rng = &mut rand::thread_rng();
    let mut a = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
    let mut b = a.clone();
    let x: Fr = rng.gen();
    let n: u32 = rng.gen();

    // Naive reference: scale in place while accumulating.
    let mut acc = Fr::zero();
    {
        let mut tmp = x.pow(&[n as u64]);
        for coeff in a.iter_mut() {
            coeff.mul_assign(&tmp);
            // `coeff` is already a reference; no extra borrow needed.
            acc.add_assign(coeff);
            tmp.mul_assign(&x);
        }
    }

    let first_power = x.pow(&[n as u64]);
    let acc_parallel = mut_evaluate_at_consequitive_powers(&mut b[..], first_power, x);

    assert_eq!(acc_parallel, acc);
    assert!(a == b);
}
#[test]
fn test_mut_distribute_powers() {
    // The unused `Bls12` import was removed (only `Fr` is used here).
    use rand::{self, Rand, Rng};
    use crate::pairing::bls12_381::Fr;

    // Distributing powers of x in place must match the naive
    // element-by-element scaling loop.
    const SAMPLES: usize = 100000;

    let rng = &mut rand::thread_rng();
    let mut a = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
    let mut b = a.clone();
    let x: Fr = rng.gen();
    let n: u32 = rng.gen();

    // Naive reference: a[i] *= x^(n + i).
    {
        let mut tmp = x.pow(&[n as u64]);
        for coeff in a.iter_mut() {
            coeff.mul_assign(&tmp);
            tmp.mul_assign(&x);
        }
    }

    let first_power = x.pow(&[n as u64]);
    mut_distribute_consequitive_powers(&mut b[..], first_power, x);

    assert!(a == b);
}
#[test]
fn test_trivial_parallel_kate_division() {
    use crate::pairing::ff::PrimeField;
    use crate::pairing::bls12_381::{Bls12, Fr};

    // Divide x^2 - 1 by (x - 1): serial and parallel Kate division must
    // produce identical quotients.
    let mut neg_one = Fr::one();
    neg_one.negate();

    let point = Fr::one();

    // Coefficients of x^2 - 1, lowest degree first.
    let coeffs = vec![
        neg_one,
        Fr::from_str("0").unwrap(),
        Fr::from_str("1").unwrap(),
    ];

    let serial_quotient = kate_divison(&coeffs, point);
    let parallel_quotient = parallel_kate_divison::<Bls12, _>(&coeffs, point);

    assert_eq!(serial_quotient, parallel_quotient);
}
#[test]
fn test_less_trivial_parallel_kate_division() {
    use crate::pairing::ff::PrimeField;
    use crate::pairing::bls12_381::{Bls12, Fr};

    // Naive evaluation: sum of coeffs[i] * point^i.
    fn eval(poly: &[Fr], point: Fr) -> Fr {
        let mut acc = Fr::zero();
        let mut power = Fr::one();
        for coeff in poly.iter() {
            let mut term = *coeff;
            term.mul_assign(&power);
            acc.add_assign(&term);
            power.mul_assign(&point);
        }

        acc
    }

    let z = Fr::one();

    let mut poly = vec![
        Fr::from_str("328947234").unwrap(),
        Fr::from_str("3545623451111").unwrap(),
        Fr::from_str("5").unwrap(),
        Fr::from_str("55555").unwrap(),
        Fr::from_str("1235685").unwrap(),
    ];

    // Shift the constant term so poly(z) == 0, making (X - z) an exact
    // divisor, then compare both division implementations.
    let p_at_z = eval(&poly, z);
    poly[0].sub_assign(&p_at_z);

    let serial_quotient = kate_divison(&poly, z);
    let parallel_quotient = parallel_kate_divison::<Bls12, _>(&poly, z);

    assert_eq!(serial_quotient, parallel_quotient);
}
#[test]
fn test_parallel_kate_division() {
    use crate::pairing::ff::PrimeField;
    use crate::pairing::bls12_381::{Bls12, Fr};

    let mut poly = vec![
        Fr::from_str("328947234").unwrap(),
        Fr::from_str("3545623451111").unwrap(),
        Fr::from_str("0").unwrap(),
        Fr::from_str("55555").unwrap(),
        Fr::from_str("1235685").unwrap(),
    ];

    // Treats `poly` as a Laurent polynomial: the first two entries are the
    // x^-2 and x^-1 coefficients and the remainder starts at x^0, i.e. it
    // computes sum_i poly[i] * point^(i - 2).
    fn eval(poly: &[Fr], point: Fr) -> Fr {
        let point_inv = point.inverse().unwrap();

        // Non-negative powers: poly[2..] against point^0, point^1, ...
        let mut acc = Fr::zero();
        let mut tmp = Fr::one();
        for p in &poly[2..] {
            let mut t = *p;
            t.mul_assign(&tmp);
            acc.add_assign(&t);
            tmp.mul_assign(&point);
        }

        // Negative powers: poly[1] * point^-1, then poly[0] * point^-2.
        let mut tmp = point_inv;
        for p in poly[0..2].iter().rev() {
            let mut t = *p;
            t.mul_assign(&tmp);
            acc.add_assign(&t);
            tmp.mul_assign(&point_inv);
        }

        acc
    }

    let z = Fr::from_str("2000").unwrap();

    let p_at_z = eval(&poly, z);

    // poly = poly(X) - poly(z)
    // (subtracting from index 2 — the x^0 coefficient under the Laurent
    // interpretation above — makes the polynomial vanish at z).
    poly[2].sub_assign(&p_at_z);

    let quotient_poly = kate_divison(&poly, z);
    let parallel_q_poly = parallel_kate_divison::<Bls12, _>(&poly, z);

    assert_eq!(quotient_poly, parallel_q_poly);
}
use poseidon_hash::{PoseidonEngine, PoseidonHashParams, StatefulSponge};
use crate::pairing::ff::{PrimeField, PrimeFieldRepr};
use crate::byteorder::{ByteOrder, BigEndian};
use super::*;
/// A Fiat-Shamir transcript backed by a stateful Poseidon sponge over the
/// engine's field; borrows the Poseidon parameters for `'a`.
#[derive(Clone)]
pub struct PoseidonTranscript<'a, E: PoseidonEngine> {
    // Sponge state that absorbs committed inputs and squeezes challenges.
    state: StatefulSponge<'a, E>,
}
impl<'a, E: PoseidonEngine> PoseidonTranscript<'a, E> {
    /// Creates a transcript whose sponge is freshly initialized from the
    /// given Poseidon parameters.
    pub fn from_params(params: &'a E::Params) -> Self {
        Self {
            state: StatefulSponge::new(params),
        }
    }
}
impl<'a, E: PoseidonEngine> Prng<E::Fr> for PoseidonTranscript<'a, E> {
type Input = E::Fr;
type InitializationParameters = &'a E::Params;
fn new() -> Self {
unimplemented!()
}
fn new_from_params(params: Self::InitializationParameters) -> Self {
let stateful = StatefulSponge::new(params);
Self {
state: stateful
}
}
fn commit_input(&mut self, input: &Self::Input) {
self.state.absorb_single_value(*input);
}
fn get_challenge(&mut self) -> E::Fr {
let value = self.state.squeeze_out_single();
value
}
}<file_sep>/src/sonic/helped/generator.rs
use rand::Rng;
use std::sync::Arc;
use crate::pairing::{
Engine,
Wnaf,
CurveProjective,
CurveAffine
};
use crate::pairing::ff::{
PrimeField,
Field
};
use super::{
Parameters,
VerifyingKey
};
use crate::{
SynthesisError,
Circuit,
ConstraintSystem,
LinearCombination,
Variable,
Index
};
use crate::domain::{
Scalar
};
use crate::worker::{
Worker
};
use std::marker::PhantomData;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::srs::SRS;
use crate::sonic::cs::LinearCombination as SonicLinearCombination;
use crate::sonic::cs::Circuit as SonicCircuit;
use crate::sonic::cs::ConstraintSystem as SonicConstraintSystem;
use crate::sonic::cs::Variable as SonicVariable;
use crate::sonic::cs::Coeff;
use crate::sonic::sonic::{AdaptorCircuit};
use super::parameters::NUM_BLINDINGS;
use crate::sonic::sonic::NonassigningSynthesizer;
use crate::sonic::sonic::PermutationSynthesizer;
use crate::sonic::sonic::{Basic, Preprocess};
use crate::verbose_flag;
/// Generates a random common reference string for
/// a circuit.
pub fn generate_random_parameters<E, C, R>(
    circuit: C,
    rng: &mut R
) -> Result<Parameters<E>, SynthesisError>
    where E: Engine, C: Circuit<E>, R: Rng
{
    // Sample the two trapdoors, then delegate to the deterministic
    // generator.
    let alpha: E::Fr = rng.gen();
    let x: E::Fr = rng.gen();

    generate_parameters::<E, C>(circuit, alpha, x)
}
/// Sizing information extracted from a synthesized circuit: counts of
/// inputs, auxiliary variables and constraints, plus the SONIC-specific
/// parameters `k_map`, `n` and `q`.
/// (The previous comment calling this an "assembly structure" was a
/// copy-paste error — this is the *output* of circuit analysis.)
#[derive(Clone, Debug)]
pub struct CircuitParameters<E: Engine> {
    pub num_inputs: usize,
    pub num_aux: usize,
    pub num_constraints: usize,
    // NOTE(review): presumably SONIC's k_map relating public inputs to
    // constraint numbers — confirm against the Preprocess backend.
    pub k_map: Vec<usize>,
    // NOTE(review): n/q presumably count multiplication gates and linear
    // constraints respectively — confirm.
    pub n: usize,
    pub q: usize,
    _marker: PhantomData<E>
}
/// This is our assembly structure that we'll use to synthesize the
/// circuit into: it forwards allocations to the wrapped sonic constraint
/// system while counting inputs, aux variables and constraints.
struct GeneratorAssembly<'a, E: Engine, CS: SonicConstraintSystem<E> + 'a> {
    // Underlying sonic constraint system all operations are forwarded to.
    cs: &'a mut CS,
    // Running counters filled in during synthesis.
    num_inputs: usize,
    num_aux: usize,
    num_constraints: usize,
    _marker: PhantomData<E>
}
// Bridges bellman's `ConstraintSystem` API onto a sonic constraint system,
// remapping variable index spaces and counting allocations/constraints.
impl<'a, E: Engine, CS: SonicConstraintSystem<E> + 'a> crate::ConstraintSystem<E>
    for GeneratorAssembly<'a, E, CS>
{
    type Root = Self;

    // this is an important change
    // NOTE(review): ONE is mapped to Input(1), presumably because the sonic
    // synthesizer allocates ONE as its first input wire — confirm against
    // the synthesizer implementations.
    fn one() -> crate::Variable {
        crate::Variable::new_unchecked(crate::Index::Input(1))
    }

    /// Allocates an auxiliary (witness) variable in the underlying sonic CS.
    fn alloc<F, A, AR>(&mut self, _: A, f: F) -> Result<crate::Variable, crate::SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        self.num_aux += 1;

        let var = self.cs.alloc(|| {
            f().map_err(|_| crate::SynthesisError::AssignmentMissing)
        }).map_err(|_| crate::SynthesisError::AssignmentMissing)?;

        // Translate sonic wire kinds back into bellman's index spaces:
        // A-wires become Inputs, B-wires become Aux.
        Ok(match var {
            SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)),
            SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)),
            _ => unreachable!(),
        })
    }

    /// Allocates a public input variable in the underlying sonic CS.
    fn alloc_input<F, A, AR>(
        &mut self,
        _: A,
        f: F,
    ) -> Result<crate::Variable, crate::SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        self.num_inputs += 1;

        let var = self.cs.alloc_input(|| {
            f().map_err(|_| crate::SynthesisError::AssignmentMissing)
        }).map_err(|_| crate::SynthesisError::AssignmentMissing)?;

        Ok(match var {
            SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)),
            SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)),
            _ => unreachable!(),
        })
    }

    /// Enforces an R1CS constraint a * b = c by introducing one sonic
    /// multiplication gate and three linear constraints binding the gate's
    /// wires to the given linear combinations.
    fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
        LA: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
        LB: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
        LC: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
    {
        // Converts a bellman linear combination to a sonic one by remapping
        // Input -> A-wire and Aux -> B-wire.
        fn convert<E: Engine>(lc: crate::LinearCombination<E>) -> SonicLinearCombination<E> {
            let mut ret = SonicLinearCombination::zero();

            for &(v, coeff) in lc.as_ref().iter() {
                let var = match v.get_unchecked() {
                    crate::Index::Input(i) => SonicVariable::A(i),
                    crate::Index::Aux(i) => SonicVariable::B(i),
                };

                ret = ret + (Coeff::Full(coeff), var);
            }

            ret
        }

        // Evaluates a sonic linear combination against the CS's current
        // assignment; `None` if any referenced value is unavailable.
        fn eval<E: Engine, CS: SonicConstraintSystem<E>>(
            lc: &SonicLinearCombination<E>,
            cs: &CS,
        ) -> Option<E::Fr> {
            let mut ret = E::Fr::zero();

            for &(v, coeff) in lc.as_ref().iter() {
                let mut tmp = match cs.get_value(v) {
                    Ok(tmp) => tmp,
                    Err(_) => return None,
                };

                coeff.multiply(&mut tmp);
                ret.add_assign(&tmp);
            }

            Some(ret)
        }

        self.num_constraints += 1;

        let a_lc = convert(a(crate::LinearCombination::zero()));
        let a_value = eval(&a_lc, &*self.cs);
        let b_lc = convert(b(crate::LinearCombination::zero()));
        let b_value = eval(&b_lc, &*self.cs);
        let c_lc = convert(c(crate::LinearCombination::zero()));
        let c_value = eval(&c_lc, &*self.cs);

        // NOTE(review): the unwraps only execute if the backend invokes the
        // closure; presumably a non-assigning backend never does, so missing
        // values cannot panic here — confirm.
        let (a, b, c) = self
            .cs
            .multiply(|| Ok((a_value.unwrap(), b_value.unwrap(), c_value.unwrap())))
            .unwrap();

        // Bind the multiplication gate's wires to the linear combinations.
        self.cs.enforce_zero(a_lc - a);
        self.cs.enforce_zero(b_lc - b);
        self.cs.enforce_zero(c_lc - c);
    }

    fn push_namespace<NR, N>(&mut self, _: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn pop_namespace(&mut self) {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }
}
/// Get circuit information such as number of input, variables,
/// constraints, and the corresponding SONIC parameters
/// k_map, n, q
pub fn get_circuit_parameters<E, C>(
    circuit: C,
) -> Result<CircuitParameters<E>, SynthesisError>
    where E: Engine, C: Circuit<E>
{
    let mut preprocess = Preprocess::new();

    // Synthesize once through a non-assigning synthesizer purely to count
    // wires and constraints; the inner scope releases the mutable borrow on
    // `preprocess` before its fields are moved out below.
    let (num_inputs, num_aux, num_constraints) = {
        let mut cs: NonassigningSynthesizer<E, &'_ mut Preprocess<E>> = NonassigningSynthesizer::new(&mut preprocess);

        // The very first allocated input must land on the wire the CS
        // designates as ONE, or the mapping in `ConstraintSystem::one()`
        // would be wrong.
        let one = cs.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues");

        match (one, <NonassigningSynthesizer<E, &'_ mut Preprocess<E>> as SonicConstraintSystem<E>>::ONE) {
            (SonicVariable::A(1), SonicVariable::A(1)) => {},
            _ => return Err(SynthesisError::UnconstrainedVariable)
        }

        let mut assembly = GeneratorAssembly::<'_, E, _> {
            cs: &mut cs,
            num_inputs: 0,
            num_aux: 0,
            num_constraints: 0,
            _marker: PhantomData
        };

        circuit.synthesize(&mut assembly)?;

        (assembly.num_inputs, assembly.num_aux, assembly.num_constraints)
    };

    Ok(CircuitParameters {
        num_inputs: num_inputs,
        num_aux: num_aux,
        num_constraints: num_constraints,
        k_map: preprocess.k_map,
        n: preprocess.n,
        q: preprocess.q,
        _marker: PhantomData
    })
}
/// Get circuit information such as number of input, variables,
/// constraints, and the corresponding SONIC parameters
/// k_map, n, q
/// (same as `get_circuit_parameters`, but counting through the
/// permutation-based synthesizer used by succinct SONIC).
pub fn get_circuit_parameters_for_succinct_sonic<E, C>(
    circuit: C,
) -> Result<CircuitParameters<E>, SynthesisError>
    where E: Engine, C: Circuit<E>
{
    let mut preprocess = Preprocess::new();

    // Inner scope releases the mutable borrow on `preprocess` before its
    // fields are moved out below.
    let (num_inputs, num_aux, num_constraints) = {
        let mut cs: PermutationSynthesizer<E, &'_ mut Preprocess<E>> = PermutationSynthesizer::new(&mut preprocess);

        // The first allocated input must coincide with the CS's ONE wire.
        let one = cs.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues");

        match (one, <PermutationSynthesizer<E, &'_ mut Preprocess<E>> as SonicConstraintSystem<E>>::ONE) {
            (SonicVariable::A(1), SonicVariable::A(1)) => {},
            _ => return Err(SynthesisError::UnconstrainedVariable)
        }

        let mut assembly = GeneratorAssembly::<'_, E, _> {
            cs: &mut cs,
            num_inputs: 0,
            num_aux: 0,
            num_constraints: 0,
            _marker: PhantomData
        };

        circuit.synthesize(&mut assembly)?;

        (assembly.num_inputs, assembly.num_aux, assembly.num_constraints)
    };

    Ok(CircuitParameters {
        num_inputs: num_inputs,
        num_aux: num_aux,
        num_constraints: num_constraints,
        k_map: preprocess.k_map,
        n: preprocess.n,
        q: preprocess.q,
        _marker: PhantomData
    })
}
/// Measures the circuit, builds an SRS that is just large enough for it
/// from the trapdoors `alpha` and `x`, and specializes the SRS into
/// proving/verifying parameters.
pub fn generate_parameters<E, C>(
    circuit: C,
    alpha: E::Fr,
    x: E::Fr
) -> Result<Parameters<E>, SynthesisError>
    where E: Engine, C: Circuit<E>
{
    let circuit_parameters = get_circuit_parameters::<E, C>(circuit)?;

    // Minimal SRS degree for this circuit size (including blinding factors).
    let min_d = circuit_parameters.n * 4 + 2*NUM_BLINDINGS;

    let srs = generate_srs(alpha, x, min_d)?;

    generate_parameters_on_srs_and_information::<E>(&srs, circuit_parameters)
}
/// Derives circuit sizing from `circuit`, then specializes the provided
/// (pre-generated) SRS into proving/verifying parameters.
pub fn generate_parameters_on_srs<E, C>(
    circuit: C,
    srs: &SRS<E>,
) -> Result<Parameters<E>, SynthesisError>
    where E: Engine, C: Circuit<E>
{
    let circuit_parameters = get_circuit_parameters::<E, C>(circuit)?;

    generate_parameters_on_srs_and_information(srs, circuit_parameters)
}
/// Trims a (possibly larger) SRS down to the minimal size required by the
/// circuit described in `information` and derives the verifying key.
///
/// Panics if the SRS is smaller than the circuit requires.
pub fn generate_parameters_on_srs_and_information<E: Engine>(
    srs: &SRS<E>,
    information: CircuitParameters<E>
) -> Result<Parameters<E>, SynthesisError>
{
    assert!(srs.d >= information.n * 4 + 2*NUM_BLINDINGS);
    let min_d = information.n * 4 + 2*NUM_BLINDINGS;

    // Keep only the powers the circuit actually needs. The plain series
    // include the zeroth power (hence `min_d + 1` entries), while the
    // g*alpha series deliberately omit it.
    let trimmed_srs: SRS<E> = SRS {
        d: min_d,
        g_negative_x: srs.g_negative_x[0..min_d+1].to_vec(),
        // fixed: a redundant `.clone()` after `.to_vec()` copied this
        // vector twice.
        g_positive_x: srs.g_positive_x[0..min_d+1].to_vec(),
        h_negative_x: srs.h_negative_x[0..min_d+1].to_vec(),
        h_positive_x: srs.h_positive_x[0..min_d+1].to_vec(),
        g_negative_x_alpha: srs.g_negative_x_alpha[0..min_d].to_vec(),
        g_positive_x_alpha: srs.g_positive_x_alpha[0..min_d].to_vec(),
        h_negative_x_alpha: srs.h_negative_x_alpha[0..min_d+1].to_vec(),
        h_positive_x_alpha: srs.h_positive_x_alpha[0..min_d+1].to_vec(),
    };

    let vk = VerifyingKey {
        alpha_x: trimmed_srs.h_positive_x_alpha[1],
        alpha: trimmed_srs.h_positive_x_alpha[0],
        // -h^(x^0)
        neg_h: {
            let mut tmp = trimmed_srs.h_negative_x[0];
            tmp.negate();
            tmp
        },
        // -h^(x^-(d - n))
        neg_x_n_minus_d: {
            let mut tmp = trimmed_srs.h_negative_x[trimmed_srs.d - information.n];
            tmp.negate();
            tmp
        },
        k_map: information.k_map,
        n: information.n,
        q: information.q
    };

    Ok(Parameters {
        vk: vk,
        srs: trimmed_srs
    })
}
/// Generates a SONIC structured reference string containing `d` positive
/// and `d` negative powers of the trapdoor `x` (and `alpha * x^i`) in both
/// G1 and G2.
///
/// Returns `SynthesisError::UnexpectedIdentity` when `x` has no inverse
/// (i.e. `x == 0`).
pub fn generate_srs<E: Engine>(
    alpha: E::Fr,
    x: E::Fr,
    d: usize
) -> Result<SRS<E>, SynthesisError> {
    let verbose = verbose_flag();

    let g1 = E::G1Affine::one().into_projective();
    let g2 = E::G2Affine::one().into_projective();

    // Compute G1 window table
    let mut g1_wnaf = Wnaf::new();
    let g1_wnaf = g1_wnaf.base(g1, 4*d);

    // Compute G2 window table
    let mut g2_wnaf = Wnaf::new();
    let g2_wnaf = g2_wnaf.base(g2, 4*d);

    let x_inverse = x.inverse().ok_or(SynthesisError::UnexpectedIdentity)?;

    let worker = Worker::new();

    // x_powers_positive[i] = x^(i+1) and x_powers_negative[i] = x^-(i+1);
    // the zeroth powers are prepended separately at the end.
    let mut x_powers_positive = vec![Scalar::<E>(E::Fr::zero()); d];
    let mut x_powers_negative = vec![Scalar::<E>(E::Fr::zero()); d];
    {
        // Compute powers of tau
        if verbose {eprintln!("computing powers of x...")};

        let start = std::time::Instant::now();

        {
            // Each worker chunk seeds itself with x^(offset + 1) and then
            // walks upward by multiplying with x.
            worker.scope(d, |scope, chunk| {
                for (i, x_powers) in x_powers_positive.chunks_mut(chunk).enumerate()
                {
                    scope.spawn(move |_| {
                        let mut current_power = x.pow(&[(i*chunk + 1) as u64]);

                        for p in x_powers {
                            p.0 = current_power;
                            current_power.mul_assign(&x);
                        }
                    });
                }
            });
        }
        {
            // Same scheme using x^-1 for the negative powers.
            worker.scope(d, |scope, chunk| {
                for (i, x_powers) in x_powers_negative.chunks_mut(chunk).enumerate()
                {
                    scope.spawn(move |_| {
                        let mut current_power = x_inverse.pow(&[(i*chunk + 1) as u64]);

                        for p in x_powers {
                            p.0 = current_power;
                            current_power.mul_assign(&x_inverse);
                        }
                    });
                }
            });
        }

        if verbose {eprintln!("powers of x done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};
    }

    // we will later add zero powers to g_x, h_x, h_x_alpha
    let mut g_negative_x = vec![E::G1::one(); d];
    let mut g_positive_x = vec![E::G1::one(); d];

    let mut h_negative_x = vec![E::G2::one(); d];
    let mut h_positive_x = vec![E::G2::one(); d];

    let mut g_negative_x_alpha = vec![E::G1::one(); d];
    let mut g_positive_x_alpha = vec![E::G1::one(); d];

    let mut h_negative_x_alpha = vec![E::G2::one(); d];
    let mut h_positive_x_alpha = vec![E::G2::one(); d];

    // Multiplies the group generators by every supplied power of x (and by
    // alpha * x^i), in parallel, writing projective results into the slices.
    fn eval<E: Engine>(
        // wNAF window tables
        g1_wnaf: &Wnaf<usize, &[E::G1], &mut Vec<i64>>,
        g2_wnaf: &Wnaf<usize, &[E::G2], &mut Vec<i64>>,
        powers_of_x: &[Scalar<E>],
        g_x: &mut [E::G1],
        g_x_alpha: &mut [E::G1],
        h_x: &mut [E::G2],
        h_x_alpha: &mut [E::G2],
        // Trapdoors
        alpha: &E::Fr,
        // Worker
        worker: &Worker
    )
    {
        // Sanity check
        assert_eq!(g_x.len(), powers_of_x.len());
        assert_eq!(g_x.len(), g_x_alpha.len());
        assert_eq!(g_x.len(), h_x.len());
        assert_eq!(g_x.len(), h_x_alpha.len());

        // Evaluate polynomials in multiple threads
        worker.scope(g_x.len(), |scope, chunk| {
            for ((((x, g_x), g_x_alpha), h_x), h_x_alpha) in powers_of_x.chunks(chunk)
                .zip(g_x.chunks_mut(chunk))
                .zip(g_x_alpha.chunks_mut(chunk))
                .zip(h_x.chunks_mut(chunk))
                .zip(h_x_alpha.chunks_mut(chunk))
            {
                // Each thread gets its own shared view of the window tables.
                let mut g1_wnaf = g1_wnaf.shared();
                let mut g2_wnaf = g2_wnaf.shared();

                scope.spawn(move |_| {
                    for ((((x, g_x), g_x_alpha), h_x), h_x_alpha) in x.iter()
                        .zip(g_x.iter_mut())
                        .zip(g_x_alpha.iter_mut())
                        .zip(h_x.iter_mut())
                        .zip(h_x_alpha.iter_mut())
                    {
                        let mut x_alpha = x.0;
                        x_alpha.mul_assign(&alpha);

                        *g_x = g1_wnaf.scalar(x.0.into_repr());
                        *h_x = g2_wnaf.scalar(x.0.into_repr());

                        *g_x_alpha = g1_wnaf.scalar(x_alpha.into_repr());
                        *h_x_alpha = g2_wnaf.scalar(x_alpha.into_repr());
                    }

                    // Batch normalize
                    E::G1::batch_normalization(g_x);
                    E::G1::batch_normalization(g_x_alpha);
                    E::G2::batch_normalization(h_x);
                    E::G2::batch_normalization(h_x_alpha);
                });
            };
        });
    }

    let start = std::time::Instant::now();

    // Evaluate for positive powers.
    eval(
        &g1_wnaf,
        &g2_wnaf,
        &x_powers_positive,
        &mut g_positive_x[..],
        &mut g_positive_x_alpha[..],
        &mut h_positive_x[..],
        &mut h_positive_x_alpha[..],
        &alpha,
        &worker
    );

    // Evaluate for negative powers
    eval(
        &g1_wnaf,
        &g2_wnaf,
        &x_powers_negative,
        &mut g_negative_x[..],
        &mut g_negative_x_alpha[..],
        &mut h_negative_x[..],
        &mut h_negative_x_alpha[..],
        &alpha,
        &worker
    );

    if verbose {eprintln!("evaluating points done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};

    let g1 = g1.into_affine();
    let g2 = g2.into_affine();
    let h_alpha = g2.mul(alpha.into_repr()).into_affine();

    // Prepend the zeroth power (x^0 = 1, i.e. the bare generator) to the
    // plain series, converting everything to affine form.
    let g_negative_x = {
        let mut tmp = vec![g1];
        tmp.extend(g_negative_x.into_iter().map(|e| e.into_affine()));
        tmp
    };
    let g_positive_x = {
        let mut tmp = vec![g1];
        tmp.extend(g_positive_x.into_iter().map(|e| e.into_affine()));
        tmp
    };

    let h_negative_x = {
        let mut tmp = vec![g2];
        tmp.extend(h_negative_x.into_iter().map(|e| e.into_affine()));
        tmp
    };
    let h_positive_x = {
        let mut tmp = vec![g2];
        tmp.extend(h_positive_x.into_iter().map(|e| e.into_affine()));
        tmp
    };

    // The g*alpha series deliberately omit the zeroth power.
    let g_negative_x_alpha = g_negative_x_alpha.into_iter().map(|e| e.into_affine()).collect();
    let g_positive_x_alpha = g_positive_x_alpha.into_iter().map(|e| e.into_affine()).collect();

    // The h*alpha series start with h^alpha as the zeroth element.
    let h_negative_x_alpha = {
        let mut tmp = vec![h_alpha];
        tmp.extend(h_negative_x_alpha.into_iter().map(|e| e.into_affine()));
        tmp
    };
    let h_positive_x_alpha = {
        let mut tmp = vec![h_alpha];
        tmp.extend(h_positive_x_alpha.into_iter().map(|e| e.into_affine()));
        tmp
    };

    Ok(SRS {
        d: d,
        g_negative_x: g_negative_x,
        g_positive_x: g_positive_x,
        h_negative_x: h_negative_x,
        h_positive_x: h_positive_x,
        g_negative_x_alpha: g_negative_x_alpha,
        g_positive_x_alpha: g_positive_x_alpha,
        h_negative_x_alpha: h_negative_x_alpha,
        h_positive_x_alpha: h_positive_x_alpha,
    }
    )
}
use crate::log::Stopwatch;
use rand::Rng;
use std::sync::Arc;
use futures::Future;
use crate::pairing::{
Engine,
CurveProjective,
CurveAffine
};
use crate::pairing::ff::{
PrimeField,
Field
};
use super::{
IndexedSetup
};
use crate::{
SynthesisError,
Circuit,
ConstraintSystem,
LinearCombination,
Variable,
Index
};
use crate::worker::{
Worker
};
use crate::plonk::polynomials::*;
use crate::plonk::domains::*;
use super::generator::*;
use super::Proof;
use crate::kate_commitment::*;
/// Evaluates a linear combination against the concrete input and auxiliary
/// assignments, returning the resulting field element.
fn eval<E: Engine>(
    lc: LinearCombination<E>,
    input_assignment: &[E::Fr],
    aux_assignment: &[E::Fr]
) -> E::Fr
{
    let mut acc = E::Fr::zero();

    for (index, coeff) in lc.0.into_iter() {
        // Resolve the variable to its assigned value.
        let mut value = match index {
            Variable(Index::Input(i)) => input_assignment[i],
            Variable(Index::Aux(i)) => aux_assignment[i]
        };

        // Skip the field multiplication when the coefficient is one.
        if coeff != E::Fr::one() {
            value.mul_assign(&coeff);
        }
        acc.add_assign(&value);
    }

    acc
}
// This is a proving assignment with densities precalculated
// (wraps a fully synthesized witness, ready for proof creation).
pub struct PreparedProver<E: Engine>{
    assignment: ProvingAssignment<E>,
}
// Accumulates the witness while a circuit is synthesized.
#[derive(Clone)]
struct ProvingAssignment<E: Engine> {
    // Evaluations of A, B, C polynomials
    // (one entry per constraint, evaluated at the current witness).
    a: Vec<E::Fr>,
    b: Vec<E::Fr>,
    c: Vec<E::Fr>,

    // Assignments of variables
    input_assignment: Vec<E::Fr>,
    aux_assignment: Vec<E::Fr>
}
/// Synthesizes `circuit` into a witness assignment (allocating the
/// mandatory `ONE` input first) and wraps it for later proof creation.
pub fn prepare_prover<E, C>(
    circuit: C,
) -> Result<PreparedProver<E>, SynthesisError>
    where E: Engine, C: Circuit<E>
{
    let mut prover = ProvingAssignment {
        a: vec![],
        b: vec![],
        c: vec![],
        input_assignment: vec![],
        aux_assignment: vec![]
    };

    // The first public input is always the constant ONE.
    prover.alloc_input(|| "CS::ONE", || Ok(E::Fr::one()))?;

    circuit.synthesize(&mut prover)?;

    // (Removed dead commented-out input-enforcement loop; idiomatic tail
    // expression instead of an explicit `return`.)
    Ok(PreparedProver {
        assignment: prover
    })
}
/// Precomputed evaluations of the matrix-index polynomials (row, col, val
/// for each of A, B, C) over the domains the prover repeatedly needs.
pub struct IndexPrecomputations<E: Engine> {
    // values on 2K size coset for the last check
    pub a_row_over_2k_coset: Polynomial<E::Fr, Values>,
    pub b_row_over_2k_coset: Polynomial<E::Fr, Values>,
    pub c_row_over_2k_coset: Polynomial<E::Fr, Values>,
    pub a_col_over_2k_coset: Polynomial<E::Fr, Values>,
    pub b_col_over_2k_coset: Polynomial<E::Fr, Values>,
    pub c_col_over_2k_coset: Polynomial<E::Fr, Values>,
    pub a_val_over_2k_coset: Polynomial<E::Fr, Values>,
    pub b_val_over_2k_coset: Polynomial<E::Fr, Values>,
    pub c_val_over_2k_coset: Polynomial<E::Fr, Values>,
    // values on K size for sumchecks
    pub a_row_over_k: Polynomial<E::Fr, Values>,
    pub b_row_over_k: Polynomial<E::Fr, Values>,
    pub c_row_over_k: Polynomial<E::Fr, Values>,
    pub a_col_over_k: Polynomial<E::Fr, Values>,
    pub b_col_over_k: Polynomial<E::Fr, Values>,
    pub c_col_over_k: Polynomial<E::Fr, Values>,
    pub a_val_over_k: Polynomial<E::Fr, Values>,
    pub b_val_over_k: Polynomial<E::Fr, Values>,
    pub c_val_over_k: Polynomial<E::Fr, Values>,
    // r(x, x) on H
    // (diagonal of the unnormalized bivariate Lagrange kernel over H)
    pub r_x_x_values_over_h: Polynomial<E::Fr, Values>,
}
impl<E: Engine> IndexPrecomputations<E> {
    /// Evaluates all nine index polynomials of `params` over the K-sized
    /// domain (for sumchecks) and over a coset of size 2K (for the final
    /// check), plus r(x, x) over H.
    ///
    /// Panics if the K/H domain size ratio is not a power of two.
    pub fn new(params: &IndexedSetup<E>, worker: &Worker) -> Result<Self, SynthesisError> {
        let lde_factor_from_h_to_k = params.domain_k_size / params.domain_h_size;
        assert!(lde_factor_from_h_to_k.is_power_of_two());

        let domain_h = Domain::<E::Fr>::new_for_size(params.domain_h_size as u64)?;

        // Diagonal r(x, x) over H. ("diaginal" is a typo in the helper's
        // name defined elsewhere; kept to match its definition.)
        let r_x_x_values_over_h = eval_unnormalized_bivariate_lagrange_poly_over_diaginal(domain_h.size, &domain_h, &worker);
        let r_x_x_values_over_h = Polynomial::from_values(r_x_x_values_over_h)?;

        // LDE factor 2: evaluations over a coset of twice the K size.
        let a_row_over_2k_coset: Polynomial<E::Fr, Values> = params.a_row_poly.clone().coset_lde(&worker, 2)?;
        let b_row_over_2k_coset: Polynomial<E::Fr, Values> = params.b_row_poly.clone().coset_lde(&worker, 2)?;
        let c_row_over_2k_coset: Polynomial<E::Fr, Values> = params.c_row_poly.clone().coset_lde(&worker, 2)?;
        let a_col_over_2k_coset: Polynomial<E::Fr, Values> = params.a_col_poly.clone().coset_lde(&worker, 2)?;
        let b_col_over_2k_coset: Polynomial<E::Fr, Values> = params.b_col_poly.clone().coset_lde(&worker, 2)?;
        let c_col_over_2k_coset: Polynomial<E::Fr, Values> = params.c_col_poly.clone().coset_lde(&worker, 2)?;
        let a_val_over_2k_coset: Polynomial<E::Fr, Values> = params.a_matrix_poly.clone().coset_lde(&worker, 2)?;
        let b_val_over_2k_coset: Polynomial<E::Fr, Values> = params.b_matrix_poly.clone().coset_lde(&worker, 2)?;
        let c_val_over_2k_coset: Polynomial<E::Fr, Values> = params.c_matrix_poly.clone().coset_lde(&worker, 2)?;

        // Plain FFTs of the same polynomials onto the K-sized domain.
        let a_row_over_k: Polynomial<E::Fr, Values> = params.a_row_poly.clone().fft(&worker);
        let b_row_over_k: Polynomial<E::Fr, Values> = params.b_row_poly.clone().fft(&worker);
        let c_row_over_k: Polynomial<E::Fr, Values> = params.c_row_poly.clone().fft(&worker);
        let a_col_over_k: Polynomial<E::Fr, Values> = params.a_col_poly.clone().fft(&worker);
        let b_col_over_k: Polynomial<E::Fr, Values> = params.b_col_poly.clone().fft(&worker);
        let c_col_over_k: Polynomial<E::Fr, Values> = params.c_col_poly.clone().fft(&worker);
        let a_val_over_k: Polynomial<E::Fr, Values> = params.a_matrix_poly.clone().fft(&worker);
        let b_val_over_k: Polynomial<E::Fr, Values> = params.b_matrix_poly.clone().fft(&worker);
        let c_val_over_k: Polynomial<E::Fr, Values> = params.c_matrix_poly.clone().fft(&worker);

        let new = IndexPrecomputations::<E> {
            a_row_over_2k_coset,
            b_row_over_2k_coset,
            c_row_over_2k_coset,
            a_col_over_2k_coset,
            b_col_over_2k_coset,
            c_col_over_2k_coset,
            a_val_over_2k_coset,
            b_val_over_2k_coset,
            c_val_over_2k_coset,
            a_row_over_k,
            b_row_over_k,
            c_row_over_k,
            a_col_over_k,
            b_col_over_k,
            c_col_over_k,
            a_val_over_k,
            b_val_over_k,
            c_val_over_k,
            r_x_x_values_over_h,
        };

        Ok(new)
    }
}
/// Kate commitment bases derived from a single monomial-form CRS.
pub struct PrecomputedBases<E: Engine> {
    // Lagrange-form CRS over the H-sized domain.
    pub crs_values_on_h: Crs::<E, CrsForLagrangeForm>,
    // Lagrange-form CRS over the K-sized domain.
    pub crs_values_on_k: Crs::<E, CrsForLagrangeForm>,
    // pub crs_values_on_h_coset: Crs::<E, CrsForLagrangeFormOnCoset>
}
impl<E: Engine> PrecomputedBases<E> {
    /// Builds commitment bases sized for the index's K domain.
    /// NOTE(review): `crs_42` presumably generates a CRS from a fixed
    /// ("42") trapdoor for testing only — confirm before production use.
    pub fn new_42_for_index(params: &IndexedSetup<E>, worker: &Worker) -> Self {
        println!("Making CRS");
        // Monomial-form powers large enough for the K-sized domain.
        let monomial = Crs::<E, CrsForMonomialForm>::crs_42(params.domain_k_size, &worker);
        println!("Done making power series");

        // TODO: use subslicing here
        let crs_values_on_h = Crs::<E, CrsForLagrangeForm>::from_powers(&monomial, params.domain_h_size, &worker);
        println!("Done making lagrange bases on H");

        let crs_values_on_k = Crs::<E, CrsForLagrangeForm>::from_powers(&monomial, params.domain_k_size, &worker);

        // let crs_values_on_h_coset = Crs::<E, CrsForLagrangeFormOnCoset>::from_powers(&monomial, params.domain_h_size, &worker);
        println!("Done making CRS");

        Self {
            crs_values_on_h,
            crs_values_on_k,
            // crs_values_on_h_coset,
        }
    }
}
impl<E:Engine> PreparedProver<E> {
pub fn create_proof(
self,
params: &IndexedSetup<E>,
crs: &PrecomputedBases<E>,
precomputations: &IndexPrecomputations<E>,
) -> Result<(), SynthesisError>
{
// this prover performs the following:
// - commit to the witness vector `w` and to the results of application of the A/B/C matrixes
// to it as `z_a`, `z_b`, `z_c` (we may ommit commitment to `z_c` and just claim it's value)
// - commit to the quotient (z_a * z_b - z_c)/vanishing_on_H
// - perform a first sumcheck using random challenge `alpha` and linear combination challenges
// `eta_a`, `eta_b`, `eta_c`. Those prove that over the domain H (matrix size):
// sum of the r(alpha, x) * z_m(x) - r_m(alpha, x) * w(x) is equal to zero where M \in {A, B, C}
// and r_m(alpha, x) = \sum_{k \in H} r(alpha, k) * M(k, x)
// at this step we claim commit to eta_a * r_a(alpha, x) + eta_b * r_b(alpha, x) + eta_c * r_c(alpha, x)
// without individual contributions r_a(alpha, x), r_b(alpha, x), r_c(alpha, x)
// - perform the second sumcheck to prove that \sum_{k \in H} r(alpha, k) * M(k, x) is evaluated correctly
// in a batched way: define q_2(x) = r(alpha, x) \sum_{m \in M} * M(x, beta_1)
// if we avaluate previous batched commitment to eta_a * r_a(alpha, x) + eta_b * r_b(alpha, x) + eta_c * r_c(alpha, x)
// at the point beta_1 then it must be equal to \sum_{H} q_2(x)
// - perform a third sumcheck to claim that q_2(x) was evaluated correctly:
// we later check that q_2(beta_2) = r(alpha, beta_2) * \sum_{m \in M} * M(beta_2, beta_1)
// for this purpose we need to prove correctness of M(beta_2, beta_1) relatively to the initial indexing
// for this we define individual polynomials over domain K f_m = M(x, beta_1, beta_2) = Func(indexing, beta_1, beta_2) such that
// after summing out over x we'll get the value M(beta_2, beta_1). To achieve this we perform a sumcheck
let worker = Worker::new();
let domain_h = Domain::<E::Fr>::new_for_size(params.domain_h_size as u64)?;
let domain_k = Domain::<E::Fr>::new_for_size(params.domain_k_size as u64)?;
let prover = self.assignment;
println!("Start prover work");
let stopwatch = Stopwatch::new();
let a_values_on_h = Polynomial::from_values(prover.a)?;
let b_values_on_h = Polynomial::from_values(prover.b)?;
let c_values_on_h = Polynomial::from_values(prover.c)?;
println!("Committing z_a, z_b, z_c");
let a_commitment = commit_using_values(&a_values_on_h, &crs.crs_values_on_h, &worker)?;
let b_commitment = commit_using_values(&b_values_on_h, &crs.crs_values_on_h, &worker)?;
let c_commitment = commit_using_values(&c_values_on_h, &crs.crs_values_on_h, &worker)?;
let h_poly_values_on_h = {
elog_verbose!("H size is {}", a_values_on_h.size());
let a_poly = a_values_on_h.clone().ifft(&worker);
let b_poly = b_values_on_h.clone().ifft(&worker);
let c_poly = c_values_on_h.clone().ifft(&worker);
let mut a = a_poly.coset_fft(&worker);
let b = b_poly.coset_fft(&worker);
let c = c_poly.coset_fft(&worker);
a.mul_assign(&worker, &b);
drop(b);
a.sub_assign(&worker, &c);
drop(c);
let z_in_coset = evaluate_vanishing_for_size(&E::Fr::multiplicative_generator(), domain_h.size);
let z_in_coset_inv = z_in_coset.inverse().ok_or(SynthesisError::DivisionByZero)?;
a.scale(&worker, z_in_coset_inv); // divide
let h = a.icoset_fft(&worker);
let h_values = h.fft(&worker);
h_values
};
println!("Committing h");
let h_commitment = commit_using_values(&h_poly_values_on_h, &crs.crs_values_on_h, &worker)?;
// TODO: later split this up: use witness poly for proving, but commit to the one contatining
// zeroes instead of inputs
let mut witness_values_on_h = Vec::with_capacity(a_values_on_h.size());
witness_values_on_h.extend(prover.input_assignment);
witness_values_on_h.extend(prover.aux_assignment);
witness_values_on_h.resize(a_values_on_h.size(), E::Fr::zero());
let witness_values_on_h = Polynomial::from_values(witness_values_on_h)?;
println!("Committing w");
let witness_commitment = commit_using_values(&witness_values_on_h, &crs.crs_values_on_h, &worker)?;
// now start the lincheck
// define q1(x) = r(alpha, x) * (\ sum_{M} eta_m * z_m(x)) - (\sum_{M} eta_m * r_m(alpha, x)) w(x)
// formally this polynomial is of degree 2H
// then we define a polynomial sum_q1(x) that is a grand sum of q1(x) (easy to calculate from values)
// then we need to show that (q1(x) + sum_q1(x) - sum_q1(x*omega)) / (vanishing_H) == 0
let alpha = E::Fr::from_str("5").unwrap();
let eta_a = E::Fr::one();
let eta_b = E::Fr::zero();
let eta_c = E::Fr::zero();
// let eta_a = E::Fr::from_str("7").unwrap();
// let eta_b = E::Fr::from_str("11").unwrap();
// let eta_c = E::Fr::from_str("42").unwrap();
// We have not committed to witness values and values of application of A/B/C matrixes on witness
// also no masking for now
let mut repr = <E::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = domain_h.size;
let size_h_as_fe = E::Fr::from_repr(repr).expect("must convert domain size into field element");
let mut repr = <E::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = domain_k.size;
let size_k_as_fe = E::Fr::from_repr(repr).expect("must convert domain size into field element");
// By this moment we should have oracles to all the witness polynomials, as well as Af, Bf, Cf
// first sumcheck is for the polynomial
// Q_1(X) = r(alpha, X) * F_1(X) + r_m(alpha, X) * F_2(X)
// where r_m(alpha, X) = \sum_{k \in K} r(X, k) M (k, Y)
// F_1(X) is result of applying one of the matrixes (A/B/C) on the vector of witnesses
// F_2(X) is a witness itself
// this is reduced for the following sumcheck (neglecting the ZK)
// \sum_{H} r(alpha, X) ( \sum_{M} eta_M * z_M(X) ) - witness(X) * \sum_{M} ( \eta_M r_M(alpha, X)) )
// where z_M(X) = (M * witness)(X)
let r_alpha_x_values = eval_unnormalized_bivariate_lagrange_poly_over_different_inputs(
alpha,
domain_h.size,
&domain_h,
&worker
);
let r_alpha_x_values_over_h = Polynomial::from_values(r_alpha_x_values)?;
// now do the same for A/B/C matrixes
// R(X, X)
// let r_x_x_over_h = eval_unnormalized_bivariate_lagrange_poly_over_diaginal(domain_h.size, &domain_h, &worker);
// let r_x_x_values_over_h = Polynomial::from_values(r_x_x_over_h)?;
// now compute r_M(alpha, X) = \sum_{H} r(alpha, X) M(X, Y) (sum is over X \in H)
let lde_factor_from_h_to_k = domain_k.size / domain_h.size;
assert!(lde_factor_from_h_to_k.is_power_of_two());
// let a_matrix_at_k = params.a_matrix_poly.clone().fft(&worker);
// let b_matrix_at_k = params.b_matrix_poly.clone().fft(&worker);
// let c_matrix_at_k = params.c_matrix_poly.clone().fft(&worker);
// let domain_h_elements = materialize_domain_elements(&domain_h, &worker);
/// Materializes r_M(alpha, X) over the H domain from the sparse representation of
/// an R1CS matrix M: entry `i` of `matrix_evals_at_k` is the matrix value that
/// lives at position (row_indexes[i], col_indexes[i]).
///
/// For every non-zero entry the contribution
/// `R(col, col) * val * R(row, row) * r(alpha, row)` is accumulated into the
/// `col_index` slot of the result vector (see the inner loop below).
///
/// `r_x_x_on_h` holds the diagonal values R(X, X) over H and `r_alpha_x_on_h`
/// holds r(alpha, X) over H; both are indexed directly by row/col indexes.
///
/// Returns the accumulated values as a `Polynomial` in values form, or the error
/// from `Polynomial::from_values` if the vector cannot be wrapped.
fn construct_r_m_from_matrix<F: PrimeField>(
    matrix_evals_at_k: &Polynomial<F, Values>,
    r_x_x_on_h: &Polynomial<F, Values>,
    r_alpha_x_on_h: &Polynomial<F, Values>,
    row_indexes: &Vec<usize>,
    col_indexes: &Vec<usize>,
    domain_h: &Domain<F>,
    worker: &Worker
) -> Result<Polynomial<F, Values>, SynthesisError> {
    let mut result = vec![F::zero(); domain_h.size as usize];
    // One private accumulator buffer per spawned thread, so pass 1 needs no
    // synchronization on writes.
    let to_spawn = worker.get_num_spawned_threads(col_indexes.len());
    let mut subresults = vec![result.clone(); to_spawn];
    // M(X, Y) for X = omega^row_index and Y = omega^col_index is equal to the
    // R1CS matrix M value at (row_index, col_index)
    worker.scope(col_indexes.len(), |scope, chunk_size| {
        // chunks_mut(1) hands each spawned closure exclusive ownership of its
        // own subresult buffer.
        for (chunk_id, ((subres, row_chunk), col_chunk)) in subresults.chunks_mut(1)
            .zip(row_indexes.chunks(chunk_size))
            .zip(col_indexes.chunks(chunk_size))
            .enumerate() {
            scope.spawn(move |_| {
                // global offset of this chunk into the K-domain value vector
                let start = chunk_id * chunk_size;
                let write_to_subres = &mut subres[0];
                // first we go over non-degenerate indexes
                for (i, (&row_index, &col_index)) in row_chunk.iter().zip(col_chunk.iter()).enumerate() {
                    let k_domain_index = start + i;
                    let r_x_x_at_h_row = &r_x_x_on_h.as_ref()[row_index];
                    let r_x_x_at_h_col = &r_x_x_on_h.as_ref()[col_index];
                    let r_alpha_x_at_h = &r_alpha_x_on_h.as_ref()[row_index];
                    let val = &matrix_evals_at_k.as_ref()[k_domain_index];
                    // contribution = R(col,col) * val * R(row,row) * r(alpha,row)
                    let mut result = *r_x_x_at_h_col;
                    result.mul_assign(val);
                    result.mul_assign(r_x_x_at_h_row);
                    // println!("Matrix element contribution into row {}, column {} = {}", row_index, col_index, result);
                    result.mul_assign(r_alpha_x_at_h);
                    write_to_subres[col_index].add_assign(&result);
                }
            });
        }
    });
    // Pass 2 (reduction): fold every thread-local buffer into the shared result,
    // partitioned over disjoint index ranges so writes never overlap.
    let subresults_ref = &subresults;
    worker.scope(result.len(), |scope, chunk_size| {
        for (chunk_id, chunk) in result.chunks_mut(chunk_size).enumerate() {
            scope.spawn(move |_| {
                let start = chunk_id * chunk_size;
                for (j, el) in chunk.iter_mut().enumerate() {
                    let idx = start + j;
                    for s in subresults_ref.iter() {
                        // skip zero contributions to save field additions
                        if !s[idx].is_zero() {
                            el.add_assign(&s[idx]);
                        }
                    }
                }
            });
        }
    });
    Polynomial::from_values(result)
}
let r_a_alpha_x = construct_r_m_from_matrix(
&precomputations.a_val_over_k,
&precomputations.r_x_x_values_over_h,
&r_alpha_x_values_over_h,
¶ms.a_row_indexes,
¶ms.a_col_indexes,
&domain_h,
&worker
)?;
let r_b_alpha_x = construct_r_m_from_matrix(
&precomputations.b_val_over_k,
&precomputations.r_x_x_values_over_h,
&r_alpha_x_values_over_h,
¶ms.b_row_indexes,
¶ms.b_col_indexes,
&domain_h,
&worker
)?;
let r_c_alpha_x = construct_r_m_from_matrix(
&precomputations.c_val_over_k,
&precomputations.r_x_x_values_over_h,
&r_alpha_x_values_over_h,
¶ms.c_row_indexes,
¶ms.c_col_indexes,
&domain_h,
&worker
)?;
// sum_{m} eta_m * z_m
let mut r_m_sum = a_values_on_h.clone();
r_m_sum.scale(&worker, eta_a);
r_m_sum.add_assign_scaled(&worker, &b_values_on_h, &eta_b);
r_m_sum.add_assign_scaled(&worker, &c_values_on_h, &eta_c);
// sum_{m} eta_m * M(alpha, x)
let mut r_m_alpha_x_sum = r_a_alpha_x.clone();
r_m_alpha_x_sum.scale(&worker, eta_a);
r_m_alpha_x_sum.add_assign_scaled(&worker, &r_b_alpha_x, &eta_b);
r_m_alpha_x_sum.add_assign_scaled(&worker, &r_c_alpha_x, &eta_c);
// r(alpha, X) * \sum (M*witness)(x) * eta_m
let beta_1 = E::Fr::from_str("137").unwrap();
let sum_a_b_c_at_beta_1 = r_m_sum.barycentric_evaluate_at(&worker, beta_1)?;
let sum_m_at_beta_1 = r_m_alpha_x_sum.barycentric_evaluate_at(&worker, beta_1)?;
let mut proper_t_0_values_on_2h = r_m_sum.clone().ifft(&worker).lde(&worker, 2).unwrap();
let tmp = r_alpha_x_values_over_h.clone().ifft(&worker).lde(&worker, 2).unwrap();
proper_t_0_values_on_2h.mul_assign(&worker, &tmp);
drop(tmp);
let mut t_0 = r_m_sum;
t_0.mul_assign(&worker, &r_alpha_x_values_over_h);
// \sum_{H} r_m(alpha, X) * eta_m * witness(x)
let mut proper_t_1_values_on_2h = r_m_alpha_x_sum.clone().ifft(&worker).lde(&worker, 2).unwrap();
let tmp = witness_values_on_h.clone().ifft(&worker).lde(&worker, 2).unwrap();
proper_t_1_values_on_2h.mul_assign(&worker, &tmp);
drop(tmp);
let mut t_1 = r_m_alpha_x_sum;
t_1.mul_assign(&worker, &witness_values_on_h);
// let r_m_sum_sum_over_h = t_0.calculate_sum(&worker)?;
// let r_m_alpha_x_sum_over_h = t_1.calculate_sum(&worker)?;
// assert!(r_m_sum_sum_over_h == r_m_alpha_x_sum_over_h);
// q1(x) = r(alpha, x) * (\ sum_{M} eta_m * z_m(x)) - (\sum_{M} eta_m * r_m(alpha, x)) w(x)
let mut q_1_poly_values_over_h = t_0;
q_1_poly_values_over_h.sub_assign(&worker, &t_1);
let mut proper_q_1_values_on_2h = proper_t_0_values_on_2h;
proper_q_1_values_on_2h.sub_assign(&worker, &proper_t_1_values_on_2h);
/// Computes running ("grand") sums of `values` treating its 2N entries as two
/// interleaved subdomains in natural ordering: even global indexes form the
/// "main" subdomain, odd indexes the "sub" subdomain. The two parities keep
/// independent running sums.
///
/// Returns `(total_sum_over_main_subdomain, grand_sum_values)` where the result
/// vector is the per-parity inclusive prefix sums, offset by two leading zero
/// slots (one zero seed per parity).
///
/// NOTE(review): `normalization_on_main` / `normalization_on_sub` are computed
/// but never applied below — presumably a planned normalization step that was
/// dropped (the "_with_normalization" name suggests so); confirm intent.
fn calculate_grand_sum_over_subdomain_assuming_natural_ordering_with_normalization<F: PrimeField>(
    values: &Polynomial<F, Values>,
    worker: &Worker
) -> Result<(F, Polynomial<F, Values>), SynthesisError> {
    // +2 leading slots: a zero "seed" for each parity, so position i holds the
    // sum of all same-parity values strictly before i.
    let mut result = vec![F::zero(); values.size() + 2];
    let num_threads = worker.get_num_spawned_threads(values.size());
    // Per-thread chunk totals, split by parity.
    let mut subsums_main = vec![F::zero(); num_threads as usize];
    let mut subsums_sub = vec![F::zero(); num_threads as usize];
    // Pass 1: each thread computes local (within-chunk) prefix sums by parity.
    worker.scope(values.as_ref().len(), |scope, chunk| {
        for (chunk_idx, (((grand_sum, elements), s_main), s_sub)) in result[2..].chunks_mut(chunk)
            .zip(values.as_ref().chunks(chunk))
            .zip(subsums_main.chunks_mut(1))
            .zip(subsums_sub.chunks_mut(1))
            .enumerate() {
            scope.spawn(move |_| {
                let start_idx = chunk_idx * chunk;
                for (i, (g, el)) in grand_sum.iter_mut()
                    .zip(elements.iter())
                    .enumerate() {
                    let this_idx = start_idx + i;
                    // global parity decides which running sum this element feeds
                    if this_idx & 1 == 0 {
                        s_main[0].add_assign(&el);
                        *g = s_main[0];
                    } else {
                        s_sub[0].add_assign(&el);
                        *g = s_sub[0];
                    }
                }
            });
        }
    });
    // subsums are [a+b+c, d+e+f, x+y+z]
    // Turn the per-chunk totals into inclusive prefix sums of chunk totals.
    let mut tmp_main = F::zero();
    for s in subsums_main.iter_mut() {
        tmp_main.add_assign(&s);
        *s = tmp_main;
    }
    let mut tmp_sub = F::zero();
    for s in subsums_sub.iter_mut() {
        tmp_sub.add_assign(&s);
        *s = tmp_sub;
    }
    // sum over the full domain is the last element
    let domain_sum_main = subsums_main.pop().expect("has at least one value");
    let domain_sum_sub = subsums_sub.pop().expect("has at least one value");
    // NOTE(review): computed but unused from here on — see the function doc.
    let subdomain_size_as_fe = F::from_str(&format!("{}", values.size()/2)).expect("must be a valid element");
    let one_over_size = subdomain_size_as_fe.inverse().ok_or(SynthesisError::DivisionByZero)?;
    let mut normalization_on_main = domain_sum_main;
    normalization_on_main.mul_assign(&one_over_size);
    let mut normalization_on_sub = domain_sum_sub;
    normalization_on_sub.mul_assign(&one_over_size);
    // Pass 2: add each preceding chunk's total into all later chunks (the first
    // chunk needs no carry, hence the `chunk_len + 2` offset). The assert pins
    // the exact-chunking assumption this relies on.
    let chunk_len = worker.get_chunk_size(values.as_ref().len());
    assert_eq!(result.len() - chunk_len - 2, chunk_len * subsums_main.len());
    worker.scope(0, |scope, _| {
        for (chunk_idx, ((g, s_main), s_sub)) in result[(chunk_len+2)..].chunks_mut(chunk_len)
            .zip(subsums_main.chunks(1))
            .zip(subsums_sub.chunks(1))
            .enumerate() {
            scope.spawn(move |_| {
                let start_idx = (chunk_idx + 1) * chunk_len;
                let c_main = s_main[0];
                let c_sub = s_sub[0];
                for (i, g) in g.iter_mut().enumerate() {
                    let this_idx = start_idx + i;
                    if this_idx & 1 == 0 {
                        g.add_assign(&c_main);
                    } else {
                        g.add_assign(&c_sub);
                    }
                }
            });
        }
    });
    // The final two entries now hold the full-domain sums per parity; pop them
    // off and cross-check against the totals obtained in pass 1.
    let alt_total_sum_sub = result.pop().expect("must pop the last element");
    let alt_total_sum_main = result.pop().expect("must pop the last element");
    assert_eq!(alt_total_sum_main, domain_sum_main, "sum on main domain must match");
    assert_eq!(alt_total_sum_sub, domain_sum_sub, "sum on subdomain must match");
    println!("Main sum = {}", domain_sum_main);
    println!("Sub sum = {}", domain_sum_sub);
    Ok((domain_sum_main, Polynomial::from_values_unpadded(result)?))
}
let (proper_q_1_sum_over_2h, proper_q_1_grand_sum_poly_values_over_2h) =
calculate_grand_sum_over_subdomain_assuming_natural_ordering_with_normalization(
&proper_q_1_values_on_2h,
&worker
)?;
// let rotated_proper_q_1_grand_sum_poly_values_over_2h = proper_q_1_grand_sum_poly_values_over_2h.clone().rotate(2)?;
let mut quotient = proper_q_1_values_on_2h.clone().ifft(&worker).coset_fft(&worker);
let proper_q_1_grand_sum_poly_values_over_2h_coeffs = proper_q_1_grand_sum_poly_values_over_2h.clone().ifft(&worker);
let mut proper_q_1_grand_sum_poly_values_over_2h_coeffs_shifted = proper_q_1_grand_sum_poly_values_over_2h_coeffs.clone();
proper_q_1_grand_sum_poly_values_over_2h_coeffs_shifted.distribute_powers(&worker, domain_h.generator);
quotient.add_assign(&worker, &proper_q_1_grand_sum_poly_values_over_2h_coeffs.coset_fft(&worker));
quotient.sub_assign(&worker, &proper_q_1_grand_sum_poly_values_over_2h_coeffs_shifted.coset_fft(&worker));
let domain_2h = Domain::new_for_size((params.domain_h_size*2) as u64)?;
let mut vanishing_of_degree_h_on_2h = evaluate_vanishing_polynomial_of_degree_on_domain(
domain_h.size,
&E::Fr::multiplicative_generator(),
&domain_2h,
&worker
)?;
vanishing_of_degree_h_on_2h.batch_inversion(&worker)?;
quotient.mul_assign(&worker, &vanishing_of_degree_h_on_2h);
drop(vanishing_of_degree_h_on_2h);
let q_1_quotient_on_h = quotient.icoset_fft(&worker).fft(&worker);
// let (proper_q_1_sum_over_2h, proper_q_1_grand_sum_poly_values_over_2h) = proper_q_1_values_on_2h.calculate_grand_sum(&worker)?;
println!("Proper sum = {}", proper_q_1_sum_over_2h);
assert!(proper_q_1_sum_over_2h.is_zero());
let proper_q_1_at_beta_1 = proper_q_1_values_on_2h.barycentric_evaluate_at(&worker, beta_1)?;
println!("Proper q_1 at beta 1 = {}", proper_q_1_at_beta_1);
// assert!(proper_q_1_sum_over_2h.is_zero());
let q_1_at_beta = q_1_poly_values_over_h.barycentric_evaluate_at(&worker, beta_1)?;
println!("Hacky q_1 at beta 1 = {}", q_1_at_beta);
// let (q1_even, q_1_odd) = proper_q_1_values_on_2h.split_into_even_and_odd_assuming_natural_ordering(
// &worker,
// &E::Fr::one()
// )?;
// let (q1_even_sum, q_1_odd_sum) = proper_q_1_grand_sum_poly_values_over_2h.split_into_even_and_odd_assuming_natural_ordering(
// &worker,
// &E::Fr::one()
// )?;
println!("Committing Q1 and it's sumcheck poly");
// this is formally correct polynomial as it coincides with a sum of q_1 on H everywhere
let (q_1_sum_over_h, q_1_grand_sum_poly_values_over_h) = q_1_poly_values_over_h.calculate_grand_sum(&worker)?;
assert!(q_1_sum_over_h.is_zero());
// let q_1_commitment = commit_using_values(&q_1_poly_values_over_h, &crs.crs_values_on_h, &worker)?;
// let q_1_sum_commitment = commit_using_values(&q_1_grand_sum_poly_values_over_h, &crs.crs_values_on_h, &worker)?;
// Now we've completed the first part of the lincheck by incorporating alpha into M(X, Y)
// {
// let z = E::Fr::from_str("10000").unwrap();
// let mut z_omega = z;
// z_omega.mul_assign(&domain_h.generator);
// let grand_sum_at_z = q_1_grand_sum_poly_coeffs.evaluate_at(&worker, z);
// let grand_sum_at_z_omega = q_1_grand_sum_poly_coeffs.evaluate_at(&worker, z_omega);
// let el_at_z = q_1_poly_coeffs.evaluate_at(&worker, z);
// let vanishing_at_z = evaluate_vanishing_for_size(&z, domain_h.size);
// let quotient_at_z = q_1_sumcheck_quotient_over_h_coeffs.evaluate_at(&worker, z);
// let mut lhs = grand_sum_at_z;
// lhs.sub_assign(&grand_sum_at_z_omega);
// lhs.add_assign(&el_at_z);
// let mut rhs = vanishing_at_z;
// rhs.mul_assign("ient_at_z);
// assert_eq!(lhs, rhs, "q_1 sumcheck must pass");
// }
// we would later need to evaluate q_1(z) and q_1_sum(z) and q_1_sum(z*omega)
let beta_1 = E::Fr::from_str("137").unwrap();
// claim values of z_a, z_b, z_c and h at beta_1
let a_at_beta_1 = a_values_on_h.barycentric_evaluate_at(&worker, beta_1)?;
let b_at_beta_1 = b_values_on_h.barycentric_evaluate_at(&worker, beta_1)?;
let c_at_beta_1 = c_values_on_h.barycentric_evaluate_at(&worker, beta_1)?;
let h_at_beta_1 = h_poly_values_on_h.barycentric_evaluate_at(&worker, beta_1)?;
let vanishing_at_beta_1 = evaluate_vanishing_for_size(&beta_1, domain_h.size);
{
let mut lhs = a_at_beta_1;
lhs.mul_assign(&b_at_beta_1);
lhs.sub_assign(&c_at_beta_1);
let mut rhs = h_at_beta_1;
rhs.mul_assign(&vanishing_at_beta_1);
assert!(lhs == rhs, "ab - c == h * z_H");
}
// now we need to make q_2 = r(alpha, X) M(X, beta)
let r_beta_1_x_values = eval_unnormalized_bivariate_lagrange_poly_over_different_inputs(
beta_1,
domain_h.size,
&domain_h,
&worker
);
let r_beta_1_x_values_over_h = Polynomial::from_values(r_beta_1_x_values)?;
/// Materializes values over the H domain for one sparse matrix evaluated against
/// a fixed second argument beta: for every non-zero matrix entry the
/// contribution `R(row, row) * val * r(beta, col)` is accumulated into the
/// `col_index` slot (see the inner loop).
///
/// Structurally identical to the alpha-weighted accumulation used earlier in
/// this function (two-pass scatter into thread-local buffers, then reduce),
/// except each entry is weighted by `r_beta_x_on_h[col]` instead of also
/// involving `r(alpha, row)` and `R(col, col)`.
fn materialize_m_x_beta<F: PrimeField>(
    matrix_evals_at_k: &Polynomial<F, Values>,
    r_x_x_on_h: &Polynomial<F, Values>,
    r_beta_x_on_h: &Polynomial<F, Values>,
    row_indexes: &Vec<usize>,
    col_indexes: &Vec<usize>,
    domain_h: &Domain<F>,
    worker: &Worker
) -> Result<Polynomial<F, Values>, SynthesisError> {
    let mut result = vec![F::zero(); domain_h.size as usize];
    // One private accumulator per spawned thread to keep pass 1 lock-free.
    let to_spawn = worker.get_num_spawned_threads(col_indexes.len());
    let mut subresults = vec![result.clone(); to_spawn];
    // M(X, Y) for X = omega^row_index and Y = omega^col_index is equal to the
    // R1CS matrix M value at (row_index, col_index)
    worker.scope(col_indexes.len(), |scope, chunk_size| {
        // chunks_mut(1) gives each closure exclusive ownership of its buffer.
        for (chunk_id, ((subres, row_chunk), col_chunk)) in subresults.chunks_mut(1)
            .zip(row_indexes.chunks(chunk_size))
            .zip(col_indexes.chunks(chunk_size))
            .enumerate() {
            scope.spawn(move |_| {
                // global offset of this chunk into the K-domain value vector
                let start = chunk_id * chunk_size;
                let write_to_subres = &mut subres[0];
                // first we go over non-degenerate indexes
                for (i, (&row_index, &col_index)) in row_chunk.iter().zip(col_chunk.iter()).enumerate() {
                    let k_domain_index = start + i;
                    let r_x_x_at_h_row = &r_x_x_on_h.as_ref()[row_index];
                    let r_beta_x_at_h_col = &r_beta_x_on_h.as_ref()[col_index];
                    let val = &matrix_evals_at_k.as_ref()[k_domain_index];
                    // contribution = R(row,row) * val * r(beta, col)
                    let mut result = *r_x_x_at_h_row;
                    result.mul_assign(val);
                    // println!("Matrix element contribution into row {}, column {} = {}", row_index, col_index, result);
                    result.mul_assign(r_beta_x_at_h_col);
                    write_to_subres[col_index].add_assign(&result);
                }
            });
        }
    });
    // Pass 2 (reduction): fold every thread-local buffer into the shared
    // result over disjoint index ranges.
    let subresults_ref = &subresults;
    worker.scope(result.len(), |scope, chunk_size| {
        for (chunk_id, chunk) in result.chunks_mut(chunk_size).enumerate() {
            scope.spawn(move |_| {
                let start = chunk_id * chunk_size;
                for (j, el) in chunk.iter_mut().enumerate() {
                    let idx = start + j;
                    for s in subresults_ref.iter() {
                        // skip zero contributions to save field additions
                        if !s[idx].is_zero() {
                            el.add_assign(&s[idx]);
                        }
                    }
                }
            });
        }
    });
    Polynomial::from_values(result)
}
let r_a_x_beta_on_h = materialize_m_x_beta(
&precomputations.a_val_over_k,
&precomputations.r_x_x_values_over_h,
&r_beta_1_x_values_over_h,
¶ms.a_row_indexes,
¶ms.a_col_indexes,
&domain_h,
&worker
)?;
let r_b_x_beta_on_h = materialize_m_x_beta(
&precomputations.b_val_over_k,
&precomputations.r_x_x_values_over_h,
&r_beta_1_x_values_over_h,
¶ms.a_row_indexes,
¶ms.a_col_indexes,
&domain_h,
&worker
)?;
let r_c_x_beta_on_h = materialize_m_x_beta(
&precomputations.c_val_over_k,
&precomputations.r_x_x_values_over_h,
&r_beta_1_x_values_over_h,
¶ms.a_row_indexes,
¶ms.a_col_indexes,
&domain_h,
&worker
)?;
let beta_2 = E::Fr::from_str("456").unwrap();
let mut r_m_beta_sum = r_a_x_beta_on_h;
r_m_beta_sum.scale(&worker, eta_a);
r_m_beta_sum.add_assign_scaled(&worker, &r_b_x_beta_on_h, &eta_b);
r_m_beta_sum.add_assign_scaled(&worker, &r_c_x_beta_on_h, &eta_c);
drop(r_b_x_beta_on_h);
drop(r_c_x_beta_on_h);
let r_m_x_beta_1_at_beta_2 = r_m_beta_sum.barycentric_evaluate_at(&worker, beta_2)?;
println!("M(beta_2, beta_1) = {}", r_m_x_beta_1_at_beta_2);
let mut q_2_poly_values_on_h = r_m_beta_sum;
q_2_poly_values_on_h.mul_assign(&worker, &r_alpha_x_values_over_h);
let r_alpha_beta_2 = r_alpha_x_values_over_h.barycentric_evaluate_at(&worker, beta_2)?;
println!("r(alpha, beta_2) = {}", r_alpha_beta_2);
let q_2_sum_value = q_2_poly_values_on_h.calculate_sum(&worker)?;
let sigma_2 = q_2_sum_value;
let one_over_h_size = size_h_as_fe.inverse().ok_or(SynthesisError::DivisionByZero)?;
let mut tmp = sigma_2;
tmp.mul_assign(&one_over_h_size);
q_2_poly_values_on_h.sub_constant(&worker, &tmp);
let (tmp, q_2_grand_sum_over_h) = q_2_poly_values_on_h.calculate_grand_sum(&worker)?;
assert!(tmp.is_zero());
println!("Committing Q2 and it's sumcheck poly");
// let q_2_commitment = commit_using_values(&q_2_poly_values_on_h, &crs.crs_values_on_h, &worker)?;
let q_2_sum_commitment = commit_using_values(&q_2_grand_sum_over_h, &crs.crs_values_on_h, &worker)?;
// TODO: check if it's better to reduce it to the single poly of degree 6K then to
// three independent ones of degree 2k
let beta_2 = E::Fr::from_str("456").unwrap();
// now calculate a polynomial f_3 over K using a definition
/// Evaluates, over the K domain, the unnormalized bivariate Lagrange kernel taken
/// against the row (or col) indexing polynomial: for each k it produces
/// `van(x) / (x - row_or_col(k))`, where `van` is the vanishing polynomial of
/// size `vanishing_poly_size`.
///
/// Computes the reciprocals `(x - row_or_col(k)) / van(x)` first and then runs a
/// single batch inversion, so only one field inversion is amortized over the
/// whole domain.
fn evaluate_bivariate_lagrange_over_row_or_col_poly<F: PrimeField>(
    x: F,
    vanishing_poly_size: u64,
    row_or_col_evaluations_on_domain: &Polynomial<F, Values>,
    evaluate_on_domain: &Domain<F>,
    worker: &Worker
) -> Result<Polynomial<F, Values>, SynthesisError> {
    assert!(row_or_col_evaluations_on_domain.size() == evaluate_on_domain.size as usize);
    // van(x), inverted once up-front and reused for every element.
    let vanishing_at_x = evaluate_vanishing_for_size(&x, vanishing_poly_size);
    let inv_vanishing_at_x = vanishing_at_x.inverse().ok_or(SynthesisError::DivisionByZero)?;
    let mut denominators = row_or_col_evaluations_on_domain.clone();
    denominators.map(&worker, |element| {
        // (x - row_or_col(k)) / van(x)
        let mut numerator = x;
        numerator.sub_assign(&*element);
        numerator.mul_assign(&inv_vanishing_at_x);
        *element = numerator;
    });
    // One batched inversion flips every entry into van(x) / (x - row_or_col(k)).
    denominators.batch_inversion(&worker).expect("must inverse as there are no zeroes");
    Ok(denominators)
}
let a_row_poly = params.a_row_poly.clone();
let b_row_poly = params.b_row_poly.clone();
let c_row_poly = params.c_row_poly.clone();
assert!(a_row_poly.size() == domain_k.size as usize);
let a_col_poly = params.a_col_poly.clone();
let b_col_poly = params.b_col_poly.clone();
let c_col_poly = params.c_col_poly.clone();
assert!(a_col_poly.size() == domain_k.size as usize);
let a_row_values_at_k = a_row_poly.fft(&worker);
let b_row_values_at_k = b_row_poly.fft(&worker);
let c_row_values_at_k = c_row_poly.fft(&worker);
let a_col_values_at_k = a_col_poly.fft(&worker);
let b_col_values_at_k = b_col_poly.fft(&worker);
let c_col_values_at_k = c_col_poly.fft(&worker);
let r_beta_1_col_a_values_over_k = evaluate_bivariate_lagrange_over_row_or_col_poly(
beta_1,
domain_h.size,
&a_col_values_at_k,
&domain_k,
&worker
)?;
let r_beta_1_col_b_values_over_k = evaluate_bivariate_lagrange_over_row_or_col_poly(
beta_1,
domain_h.size,
&b_col_values_at_k,
&domain_k,
&worker
)?;
let r_beta_1_col_c_values_over_k = evaluate_bivariate_lagrange_over_row_or_col_poly(
beta_1,
domain_h.size,
&c_col_values_at_k,
&domain_k,
&worker
)?;
let r_beta_2_row_a_values_over_k = evaluate_bivariate_lagrange_over_row_or_col_poly(
beta_2,
domain_h.size,
&a_row_values_at_k,
&domain_k,
&worker
)?;
let r_beta_2_row_b_values_over_k = evaluate_bivariate_lagrange_over_row_or_col_poly(
beta_2,
domain_h.size,
&b_row_values_at_k,
&domain_k,
&worker
)?;
let r_beta_2_row_c_values_over_k = evaluate_bivariate_lagrange_over_row_or_col_poly(
beta_2,
domain_h.size,
&c_row_values_at_k,
&domain_k,
&worker
)?;
// do multiplication over K
let mut f_3_a_values_over_k_by_eta_a = precomputations.a_val_over_k.clone();
f_3_a_values_over_k_by_eta_a.scale(&worker, eta_a);
f_3_a_values_over_k_by_eta_a.mul_assign(&worker, &r_beta_2_row_a_values_over_k);
f_3_a_values_over_k_by_eta_a.mul_assign(&worker, &r_beta_1_col_a_values_over_k);
let mut f_3_b_values_over_k_by_eta_b = precomputations.b_val_over_k.clone();
f_3_b_values_over_k_by_eta_b.scale(&worker, eta_b);
f_3_b_values_over_k_by_eta_b.mul_assign(&worker, &r_beta_2_row_b_values_over_k);
f_3_b_values_over_k_by_eta_b.mul_assign(&worker, &r_beta_1_col_b_values_over_k);
let mut f_3_c_values_over_k_by_eta_c = precomputations.c_val_over_k.clone();
f_3_c_values_over_k_by_eta_c.scale(&worker, eta_c);
f_3_c_values_over_k_by_eta_c.mul_assign(&worker, &r_beta_2_row_c_values_over_k);
f_3_c_values_over_k_by_eta_c.mul_assign(&worker, &r_beta_1_col_c_values_over_k);
// now we need to prove the following two statements
// - f_3 sums to sigma_3 over K
// - f_3 is calculated correctly
// first is simple, we did it many times
let q_3_a_by_eta_a_sum_value = f_3_a_values_over_k_by_eta_a.calculate_sum(&worker)?;
let q_3_b_by_eta_b_sum_value = f_3_b_values_over_k_by_eta_b.calculate_sum(&worker)?;
let q_3_c_by_eta_c_sum_value = f_3_c_values_over_k_by_eta_c.calculate_sum(&worker)?;
let q_3_a_by_eta_a_poly_coeffs = f_3_a_values_over_k_by_eta_a.clone().ifft(&worker);
let q_3_b_by_eta_b_poly_coeffs = f_3_b_values_over_k_by_eta_b.clone().ifft(&worker);
let q_3_c_by_eta_c_poly_coeffs = f_3_c_values_over_k_by_eta_c.clone().ifft(&worker);
// those are M(beta_2, beta_1)
let sigma_3_a = q_3_a_by_eta_a_sum_value;
let sigma_3_b = q_3_b_by_eta_b_sum_value;
let sigma_3_c = q_3_c_by_eta_c_sum_value;
let one_over_k = size_k_as_fe.inverse().ok_or(SynthesisError::DivisionByZero)?;
let mut tmp_a = one_over_k;
tmp_a.mul_assign(&sigma_3_a);
let mut tmp_b = one_over_k;
tmp_b.mul_assign(&sigma_3_b);
let mut tmp_c = one_over_k;
tmp_c.mul_assign(&sigma_3_c);
assert!(q_3_a_by_eta_a_poly_coeffs.as_ref()[0] == tmp_a);
assert!(q_3_b_by_eta_b_poly_coeffs.as_ref()[0] == tmp_b);
assert!(q_3_c_by_eta_c_poly_coeffs.as_ref()[0] == tmp_c);
f_3_a_values_over_k_by_eta_a.sub_constant(&worker, &tmp_a);
f_3_b_values_over_k_by_eta_b.sub_constant(&worker, &tmp_b);
f_3_c_values_over_k_by_eta_c.sub_constant(&worker, &tmp_c);
// these are sums of f_3_m(x) - sigma_3_m / |K|
let (t_a, q_3_a_by_eta_a_grand_sum_over_k) = f_3_a_values_over_k_by_eta_a.calculate_grand_sum(&worker)?;
let (t_b, q_3_b_by_eta_b_grand_sum_over_k) = f_3_b_values_over_k_by_eta_b.calculate_grand_sum(&worker)?;
let (t_c, q_3_c_by_eta_c_grand_sum_over_k) = f_3_c_values_over_k_by_eta_c.calculate_grand_sum(&worker)?;
assert!(t_a.is_zero());
assert!(t_b.is_zero());
assert!(t_c.is_zero());
println!("Committing Q3_A and it's sumcheck poly");
let q_3_a_by_eta_a_commitment = commit_using_values(&f_3_a_values_over_k_by_eta_a, &crs.crs_values_on_k, &worker)?;
let q_3_a_by_eta_a_sum_commitment = commit_using_values(&q_3_a_by_eta_a_grand_sum_over_k, &crs.crs_values_on_k, &worker)?;
println!("Committing Q3_B and it's sumcheck poly");
let q_3_b_by_eta_b_commitment = commit_using_values(&f_3_b_values_over_k_by_eta_b, &crs.crs_values_on_k, &worker)?;
let q_3_b_by_eta_b_sum_commitment = commit_using_values(&q_3_b_by_eta_b_grand_sum_over_k, &crs.crs_values_on_k, &worker)?;
println!("Committing Q3_C and it's sumcheck poly");
let q_3_c_by_eta_c_commitment = commit_using_values(&f_3_c_values_over_k_by_eta_c, &crs.crs_values_on_k, &worker)?;
let q_3_c_by_eta_c_sum_commitment = commit_using_values(&q_3_c_by_eta_c_grand_sum_over_k, &crs.crs_values_on_k, &worker)?;
// add the sum back to calculate for correspondance with vals
let lde_factor_for_q_3_check_over_k: usize = 2;
// this is f_3_a in the coset of size 2K
let q_3_a_by_eta_a_values_over_2k_coset = q_3_a_by_eta_a_poly_coeffs.clone().coset_lde(&worker, lde_factor_for_q_3_check_over_k)?;
let q_3_b_by_eta_b_values_over_2k_coset = q_3_b_by_eta_b_poly_coeffs.clone().coset_lde(&worker, lde_factor_for_q_3_check_over_k)?;
let q_3_c_by_eta_c_values_over_2k_coset = q_3_c_by_eta_c_poly_coeffs.clone().coset_lde(&worker, lde_factor_for_q_3_check_over_k)?;
let rational_check_linearization_challenge = E::Fr::from_str("1337").unwrap();
// now proof that f_3 is a correct derivative of vals
let vanishing_at_beta_2 = evaluate_vanishing_for_size(&beta_2, domain_h.size);
let mut vanishing_on_beta_1_by_vanishing_on_beta_2 = vanishing_at_beta_1;
vanishing_on_beta_1_by_vanishing_on_beta_2.mul_assign(&vanishing_at_beta_2);
let lde_factor_vals = (params.domain_k_size as usize) * lde_factor_for_q_3_check_over_k / params.a_matrix_poly.size();
assert!(lde_factor_vals.is_power_of_two());
// let a_matrixes_values_over_2k_coset = params.a_matrix_poly.clone().coset_lde(&worker, lde_factor_vals)?;
// let b_matrixes_values_over_2k_coset = params.b_matrix_poly.clone().coset_lde(&worker, lde_factor_vals)?;
// let c_matrixes_values_over_2k_coset = params.c_matrix_poly.clone().coset_lde(&worker, lde_factor_vals)?;
let lde_factor_row_col = (params.domain_k_size as usize) * lde_factor_for_q_3_check_over_k / params.a_row_poly.size();
assert!(lde_factor_row_col.is_power_of_two());
// let mut a_row_poly = params.a_row_poly.clone();
// a_row_poly.as_mut()[0].sub_assign(&beta_2);
// let mut b_row_poly = params.b_row_poly.clone();
// b_row_poly.as_mut()[0].sub_assign(&beta_2);
// let mut c_row_poly = params.c_row_poly.clone();
// c_row_poly.as_mut()[0].sub_assign(&beta_2);
// let a_row_minus_beta_2_over_2k_coset = a_row_poly.coset_lde(&worker, lde_factor_row_col)?;
// let b_row_minus_beta_2_over_2k_coset = b_row_poly.coset_lde(&worker, lde_factor_row_col)?;
// let c_row_minus_beta_2_over_2k_coset = c_row_poly.coset_lde(&worker, lde_factor_row_col)?;
// let mut a_col_poly = params.a_col_poly.clone();
// a_col_poly.as_mut()[0].sub_assign(&beta_1);
// let mut b_col_poly = params.b_col_poly.clone();
// b_col_poly.as_mut()[0].sub_assign(&beta_1);
// let mut c_col_poly = params.c_col_poly.clone();
// c_col_poly.as_mut()[0].sub_assign(&beta_1);
// let a_col_minus_beta_1_over_2k_coset = a_col_poly.coset_lde(&worker, lde_factor_row_col)?;
// let b_col_minus_beta_1_over_2k_coset = b_col_poly.coset_lde(&worker, lde_factor_row_col)?;
// let c_col_minus_beta_1_over_2k_coset = c_col_poly.coset_lde(&worker, lde_factor_row_col)?;
let mut a_row_minus_beta_2_over_2k_coset = precomputations.a_row_over_2k_coset.clone();
a_row_minus_beta_2_over_2k_coset.sub_constant(&worker, &beta_2);
let mut b_row_minus_beta_2_over_2k_coset = precomputations.b_row_over_2k_coset.clone();
b_row_minus_beta_2_over_2k_coset.sub_constant(&worker, &beta_2);
let mut c_row_minus_beta_2_over_2k_coset = precomputations.c_row_over_2k_coset.clone();
c_row_minus_beta_2_over_2k_coset.sub_constant(&worker, &beta_2);
let mut a_col_minus_beta_1_over_2k_coset = precomputations.a_col_over_2k_coset.clone();
a_col_minus_beta_1_over_2k_coset.sub_constant(&worker, &beta_1);
let mut b_col_minus_beta_1_over_2k_coset = precomputations.b_col_over_2k_coset.clone();
b_col_minus_beta_1_over_2k_coset.sub_constant(&worker, &beta_1);
let mut c_col_minus_beta_1_over_2k_coset = precomputations.c_col_over_2k_coset.clone();
c_col_minus_beta_1_over_2k_coset.sub_constant(&worker, &beta_1);
// for each of the metrixes A/B/C you need to make sure that
// van(beta_1) * van(beta_2) * val(x) - (row(x) - beta_2)(col(x) - beta(1))*f_3_m(x) == 0 at K
// we also aggregate it in a form
// van(beta_1) * van(beta_2) * (eta_a * val_a(x) + eta_b * val_b(x) + eta_c * val_c(x)) -
// - eta_a * (row_a(x) - beta_2)(col_a(x) - beta(1))*f_3_a(x) - eta_b * ...
// let f_3_values_over_2k_coset = q_3_poly_coeffs.clone().coset_lde(&worker, lde_factor_vals)?;
let mut linearization_challenge = E::Fr::one();
let mut val_a_total_coeffs = eta_a;
val_a_total_coeffs.mul_assign(&vanishing_on_beta_1_by_vanishing_on_beta_2);
// val_a_total_coeffs.mul_assign(&linearization_challenge);
linearization_challenge.mul_assign(&rational_check_linearization_challenge);
let mut val_b_total_coeffs = eta_b;
val_b_total_coeffs.mul_assign(&vanishing_on_beta_1_by_vanishing_on_beta_2);
val_b_total_coeffs.mul_assign(&linearization_challenge);
linearization_challenge.mul_assign(&rational_check_linearization_challenge);
let mut val_c_total_coeffs = eta_c;
val_c_total_coeffs.mul_assign(&vanishing_on_beta_1_by_vanishing_on_beta_2);
val_c_total_coeffs.mul_assign(&linearization_challenge);
// eta_a * vanishing(beta_1) * vanishing(beta_2) * linearization_challenge prefactor over val_a(k)
let mut f_3_well_formedness_poly_values_over_2k_coset = precomputations.a_val_over_2k_coset.clone();
f_3_well_formedness_poly_values_over_2k_coset.scale(&worker, val_a_total_coeffs);
f_3_well_formedness_poly_values_over_2k_coset.add_assign_scaled(&worker, &precomputations.b_val_over_2k_coset, &val_b_total_coeffs);
f_3_well_formedness_poly_values_over_2k_coset.add_assign_scaled(&worker, &precomputations.c_val_over_2k_coset, &val_c_total_coeffs);
let mut linearization_challenge = E::Fr::one();
// now compute a RHS
// this contains eta_M
let mut tmp = q_3_a_by_eta_a_values_over_2k_coset; // into 2*K size
// tmp.scale(&worker, linearization_challenge);
tmp.mul_assign(&worker, &a_row_minus_beta_2_over_2k_coset);
tmp.mul_assign(&worker, &a_col_minus_beta_1_over_2k_coset);
f_3_well_formedness_poly_values_over_2k_coset.sub_assign(&worker, &tmp);
drop(tmp);
linearization_challenge.mul_assign(&rational_check_linearization_challenge);
let mut tmp = q_3_b_by_eta_b_values_over_2k_coset; // into 2*K size
tmp.scale(&worker, linearization_challenge);
tmp.mul_assign(&worker, &b_row_minus_beta_2_over_2k_coset);
tmp.mul_assign(&worker, &b_col_minus_beta_1_over_2k_coset);
f_3_well_formedness_poly_values_over_2k_coset.sub_assign(&worker, &tmp);
drop(tmp);
linearization_challenge.mul_assign(&rational_check_linearization_challenge);
let mut tmp = q_3_c_by_eta_c_values_over_2k_coset; // into 2*K size
tmp.scale(&worker, linearization_challenge);
tmp.mul_assign(&worker, &c_row_minus_beta_2_over_2k_coset);
tmp.mul_assign(&worker, &c_col_minus_beta_1_over_2k_coset);
f_3_well_formedness_poly_values_over_2k_coset.sub_assign(&worker, &tmp);
drop(tmp);
// let domain_2k = Domain::new_for_size(domain_k.size * (lde_factor_for_q_3_check_over_k as u64))?;
// Evaluates the vanishing polynomial Z(x) = x^vanishing_degree - 1 at every point
// of the coset `coset_factor * <domain.generator>^i` for i in 0..domain.size,
// returning the evaluations in natural domain order.
//
// At the i-th point the value is (coset_factor^d) * (g^d)^i - 1, so every entry
// starts at -1 and the power term is accumulated with a single multiplication
// per element, with the work chunked across the worker's threads.
fn evaluate_vanishing_polynomial_of_degree_on_domain<F: PrimeField>(
vanishing_degree: u64,
coset_factor: &F,
domain: &Domain<F>,
worker: &Worker
) -> Result<Polynomial<F, Values>, SynthesisError> {
let domain_generator = domain.generator;
// coset_factor^d — shadows the parameter; the constant factor of the power term
let coset_factor = coset_factor.pow(&[vanishing_degree]);
// g^d — the per-element multiplicative step of the power term
let domain_generator_in_vanishing_power = domain_generator.pow(&[vanishing_degree]);
let mut minus_one = F::one();
minus_one.negate();
// initialize every evaluation to -1 (the constant term of x^d - 1)
let mut result = vec![minus_one; domain.size as usize];
worker.scope(result.len(), |scope, chunk_size| {
for (chunk_id, chunk) in result.chunks_mut(chunk_size).enumerate() {
scope.spawn(move |_| {
// absolute index of this chunk's first element
let start = chunk_id * chunk_size;
// power term at the chunk's first point: coset_factor^d * (g^d)^start
let mut pow = domain_generator_in_vanishing_power.pow(&[start as u64]);
pow.mul_assign(&coset_factor);
for el in chunk.iter_mut() {
el.add_assign(&pow);
// step to the next point's power term
pow.mul_assign(&domain_generator_in_vanishing_power);
}
});
}
});
Polynomial::from_values(result)
}
// let mut vanishing_of_degree_k_on_2k = evaluate_vanishing_polynomial_of_degree_on_domain(
// domain_k.size,
// &E::Fr::multiplicative_generator(),
// &domain_2k,
// &worker
// )?;
// vanishing_of_degree_k_on_2k.batch_inversion(&worker)?;
// f_3_well_formedness_poly_values_over_2k_coset.mul_assign(&worker, &vanishing_of_degree_k_on_2k);
// drop(vanishing_of_degree_k_on_2k);
// We can compute faster like this if domain is of size 2k
// we divide by the polynomial that is vanishing on k, but not on 2k
// on half of the element it's equal to the following (inversed):
let vanishing_in_coset_over_k = evaluate_vanishing_for_size(&E::Fr::multiplicative_generator(), domain_k.size);
let vanishing_in_coset_over_k = vanishing_in_coset_over_k.inverse().ok_or(SynthesisError::DivisionByZero)?;
// for other elements x^n - 1 = (generator*omega)^n - 1 = - generator^n - 1 cause omega^2n == 1 on a large domain
let mut vanishing_in_coset_over_k_shifted = E::Fr::multiplicative_generator().pow(&[domain_k.size]);
vanishing_in_coset_over_k_shifted.negate();
vanishing_in_coset_over_k_shifted.sub_assign(&E::Fr::one());
let vanishing_in_coset_over_k_shifted = vanishing_in_coset_over_k_shifted.inverse().ok_or(SynthesisError::DivisionByZero)?;
worker.scope(f_3_well_formedness_poly_values_over_2k_coset.size(), |scope, chunk_size| {
for (chunk_id, chunk) in f_3_well_formedness_poly_values_over_2k_coset.as_mut().chunks_mut(chunk_size).enumerate() {
scope.spawn(move |_| {
let start = chunk_id * chunk_size;
for (j, el) in chunk.iter_mut().enumerate() {
let idx = start + j;
if idx & 1 == 0 {
el.mul_assign(&vanishing_in_coset_over_k);
} else {
el.mul_assign(&vanishing_in_coset_over_k_shifted);
}
}
});
}
});
let beta_3 = E::Fr::from_str("12345678890").unwrap();
let f_3_well_formedness_baryc_at_beta_3 = f_3_well_formedness_poly_values_over_2k_coset.barycentric_over_coset_evaluate_at(
&worker,
beta_3,
&E::Fr::multiplicative_generator()
)?;
let (f_3_even_values_on_k, f_3_odd_values_on_k) = f_3_well_formedness_poly_values_over_2k_coset.split_into_even_and_odd_assuming_natural_ordering(
&worker,
&E::Fr::multiplicative_generator()
)?;
// TODO: commit to the linear combination using some other linearization challenge
let f_3_even_commitment = commit_using_values(&f_3_even_values_on_k, &crs.crs_values_on_k, &worker)?;
let f_3_odd_commitment = commit_using_values(&f_3_odd_values_on_k, &crs.crs_values_on_k, &worker)?;
elog_verbose!("{} seconds for all the commitments", stopwatch.elapsed());
let mut beta_3_squared = beta_3;
beta_3_squared.square();
let coset_offset_inv = E::Fr::multiplicative_generator().inverse().ok_or(SynthesisError::DivisionByZero)?;
let mut beta_3_squared_by_coset_factor_squared = beta_3_squared;
beta_3_squared_by_coset_factor_squared.mul_assign(&coset_offset_inv);
beta_3_squared_by_coset_factor_squared.mul_assign(&coset_offset_inv);
let f_3_even_eval = f_3_even_values_on_k.barycentric_evaluate_at(&worker, beta_3_squared_by_coset_factor_squared)?;
let f_3_odd_eval = f_3_odd_values_on_k.barycentric_evaluate_at(&worker, beta_3_squared_by_coset_factor_squared)?;
let mut lhs = f_3_odd_eval;
lhs.mul_assign(&beta_3);
lhs.add_assign(&f_3_even_eval);
assert!(lhs == f_3_well_formedness_baryc_at_beta_3);
// now perform the opening
// open polynomials on domain K at beta3^2 / g^2 where g is a coset generator
// q_3_a_by_eta_a_poly_coeffs.as_mut()[0] = E::Fr::zero();
// q_3_b_by_eta_b_poly_coeffs.as_mut()[0] = E::Fr::zero();
// q_3_c_by_eta_c_poly_coeffs.as_mut()[0] = E::Fr::zero();
let mut beta_3_by_omega = beta_3;
beta_3_by_omega.mul_assign(&domain_k.generator);
// TODO: Evaluate those that are in a coefficient form faster
// let q_3_a_by_eta_a_eval = f_3_a_values_over_k_by_eta_a.barycentric_evaluate_at(&worker, beta_3)?;
// let q_3_b_by_eta_b_eval = f_3_b_values_over_k_by_eta_b.barycentric_evaluate_at(&worker, beta_3)?;
// let q_3_c_by_eta_c_eval = f_3_c_values_over_k_by_eta_c.barycentric_evaluate_at(&worker, beta_3)?;
let q_3_a_by_eta_a_eval = q_3_a_by_eta_a_poly_coeffs.evaluate_at(&worker, beta_3);
let q_3_b_by_eta_b_eval = q_3_b_by_eta_b_poly_coeffs.evaluate_at(&worker, beta_3);
let q_3_c_by_eta_c_eval = q_3_c_by_eta_c_poly_coeffs.evaluate_at(&worker, beta_3);
drop(q_3_a_by_eta_a_poly_coeffs);
drop(q_3_b_by_eta_b_poly_coeffs);
drop(q_3_c_by_eta_c_poly_coeffs);
let q_3_a_by_eta_a_sum_eval = q_3_a_by_eta_a_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_3)?;
let q_3_b_by_eta_b_sum_eval = q_3_b_by_eta_b_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_3)?;
let q_3_c_by_eta_c_sum_eval = q_3_c_by_eta_c_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_3)?;
let q_3_a_by_eta_a_sum_eval_shifted = q_3_a_by_eta_a_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_3_by_omega)?;
let q_3_b_by_eta_b_sum_eval_shifted = q_3_b_by_eta_b_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_3_by_omega)?;
let q_3_c_by_eta_c_sum_eval_shifted = q_3_c_by_eta_c_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_3_by_omega)?;
let val_a_eval = params.a_matrix_poly.evaluate_at(&worker, beta_3);
let val_b_eval = params.b_matrix_poly.evaluate_at(&worker, beta_3);
let val_c_eval = params.c_matrix_poly.evaluate_at(&worker, beta_3);
let row_a_eval = params.a_row_poly.evaluate_at(&worker, beta_3);
let row_b_eval = params.b_row_poly.evaluate_at(&worker, beta_3);
let row_c_eval = params.c_row_poly.evaluate_at(&worker, beta_3);
let col_a_eval = params.a_col_poly.evaluate_at(&worker, beta_3);
let col_b_eval = params.b_col_poly.evaluate_at(&worker, beta_3);
let col_c_eval = params.c_col_poly.evaluate_at(&worker, beta_3);
let polys_for_opening_for_domain_k_at_beta_3_by_gen = vec![
f_3_even_values_on_k,
f_3_odd_values_on_k,
];
let values_for_opening_for_domain_k_at_beta_3_by_gen = vec![
f_3_even_eval,
f_3_odd_eval,
];
let challenge_1 = E::Fr::from_str("99999").unwrap();
let (mut aggregation_on_k, next_challenge) = perform_batched_divisor_for_opening::<E>(
polys_for_opening_for_domain_k_at_beta_3_by_gen,
beta_3_squared_by_coset_factor_squared,
&values_for_opening_for_domain_k_at_beta_3_by_gen,
challenge_1,
E::Fr::one(),
&worker
)?;
let polys_for_opening_for_domain_k_at_beta_3 = vec![
precomputations.a_val_over_k.clone(),
precomputations.b_val_over_k.clone(),
precomputations.c_val_over_k.clone(),
precomputations.a_row_over_k.clone(),
precomputations.b_row_over_k.clone(),
precomputations.c_row_over_k.clone(),
precomputations.a_col_over_k.clone(),
precomputations.b_col_over_k.clone(),
precomputations.c_col_over_k.clone(),
f_3_a_values_over_k_by_eta_a,
f_3_b_values_over_k_by_eta_b,
f_3_c_values_over_k_by_eta_c,
q_3_a_by_eta_a_grand_sum_over_k.clone(),
q_3_b_by_eta_b_grand_sum_over_k.clone(),
q_3_c_by_eta_c_grand_sum_over_k.clone(),
];
let values_for_opening_for_domain_k_at_beta_3 = vec![
val_a_eval,
val_b_eval,
val_c_eval,
row_a_eval,
row_b_eval,
row_c_eval,
col_a_eval,
col_b_eval,
col_c_eval,
q_3_a_by_eta_a_eval,
q_3_b_by_eta_b_eval,
q_3_c_by_eta_c_eval,
q_3_a_by_eta_a_sum_eval,
q_3_b_by_eta_b_sum_eval,
q_3_c_by_eta_c_sum_eval,
];
let (aggregation_at_beta_3, next_challenge) = perform_batched_divisor_for_opening::<E>(
polys_for_opening_for_domain_k_at_beta_3,
beta_3,
&values_for_opening_for_domain_k_at_beta_3,
challenge_1,
next_challenge,
&worker
)?;
aggregation_on_k.add_assign(&worker, &aggregation_at_beta_3);
drop(aggregation_at_beta_3);
let polys_for_opening_for_domain_k_at_beta_3_by_omega = vec![
q_3_a_by_eta_a_grand_sum_over_k,
q_3_b_by_eta_b_grand_sum_over_k,
q_3_c_by_eta_c_grand_sum_over_k,
];
let values_for_opening_for_domain_k_at_beta_3_by_omega = vec![
q_3_a_by_eta_a_sum_eval_shifted,
q_3_b_by_eta_b_sum_eval_shifted,
q_3_c_by_eta_c_sum_eval_shifted,
];
let (aggregation_at_beta_3_by_omega, _) = perform_batched_divisor_for_opening::<E>(
polys_for_opening_for_domain_k_at_beta_3_by_omega,
beta_3_by_omega,
&values_for_opening_for_domain_k_at_beta_3_by_omega,
challenge_1,
next_challenge,
&worker
)?;
aggregation_on_k.add_assign(&worker, &aggregation_at_beta_3_by_omega);
drop(aggregation_at_beta_3_by_omega);
let proof_on_k = commit_using_values(
&aggregation_on_k,
&crs.crs_values_on_k,
&worker
)?;
// TODO: add aggregate here to compute for openings of individual
// f_3_a_values_over_k_by_eta_a,
// f_3_b_values_over_k_by_eta_b,
// f_3_c_values_over_k_by_eta_c,
// q_3_a_by_eta_a_eval,
// q_3_b_by_eta_b_eval,
// q_3_c_by_eta_c_eval,
// Open everything on beta_2 (on domain H)
// Since we are opening not the polynomial, but it's content with zero coefficient - set constant terms
// q_3_a_by_eta_a_poly_coeffs.as_mut()[0] = E::Fr::zero();
// q_3_b_by_eta_b_poly_coeffs.as_mut()[0] = E::Fr::zero();
// q_3_c_by_eta_c_poly_coeffs.as_mut()[0] = E::Fr::zero();
// TODO: Evaluate those that are in a coefficient form faster
// let q_3_a_by_eta_a_eval = f_3_a_values_over_k_by_eta_a.barycentric_evaluate_at(&worker, beta_3)?;
// let q_3_b_by_eta_a_eval = f_3_b_values_over_k_by_eta_b.barycentric_evaluate_at(&worker, beta_3)?;
// let q_3_c_by_eta_a_eval = f_3_c_values_over_k_by_eta_c.barycentric_evaluate_at(&worker, beta_3)?;
// let q_3_a_by_eta_a_eval = q_3_a_by_eta_a_poly_coeffs.evaluate_at(&worker, beta_2);
// let q_3_b_by_eta_a_eval = q_3_b_by_eta_b_poly_coeffs.evaluate_at(&worker, beta_2);
// let q_3_c_by_eta_a_eval = q_3_c_by_eta_c_poly_coeffs.evaluate_at(&worker, beta_2);
// let q_3_a_by_eta_a_sum_eval = q_3_a_by_eta_a_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_2)?;
// let q_3_b_by_eta_b_sum_eval = q_3_b_by_eta_b_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_2)?;
// let q_3_c_by_eta_c_sum_eval = q_3_c_by_eta_c_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_2)?;
// let q_3_a_by_eta_a_sum_eval_shifted = q_3_a_by_eta_a_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_2_by_omega)?;
// let q_3_b_by_eta_b_sum_eval_shifted = q_3_b_by_eta_b_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_2_by_omega)?;
// let q_3_c_by_eta_c_sum_eval_shifted = q_3_c_by_eta_c_grand_sum_over_k.barycentric_evaluate_at(&worker, beta_2_by_omega)?;
// let challenge_2 = E::Fr::from_str("999991234").unwrap();
// let polys_for_opening_for_domain_k_at_beta_3 = vec![
// f_3_a_values_over_k_by_eta_a,
// f_3_b_values_over_k_by_eta_b,
// f_3_c_values_over_k_by_eta_c,
// // q_3_a_by_eta_a_grand_sum_over_k.clone(),
// // q_3_b_by_eta_b_grand_sum_over_k.clone(),
// // q_3_c_by_eta_c_grand_sum_over_k.clone(),
// ];
// let values_for_opening_for_domain_k_at_beta_3 = vec![
// q_3_a_by_eta_a_eval,
// q_3_b_by_eta_a_eval,
// q_3_c_by_eta_a_eval,
// // q_3_a_by_eta_a_sum_eval,
// // q_3_b_by_eta_b_sum_eval,
// // q_3_c_by_eta_c_sum_eval,
// ];
// let proof_for_f_3_grand_sums_at_beta_3 = perform_batch_opening_from_values(
// polys_for_opening_for_domain_k_at_beta_3,
// &crs.crs_values_on_k,
// beta_3,
// &polys_for_opening_for_domain_k_at_beta_3,
// challenge_2,
// &worker
// )?;
// let challenge_3 = E::Fr::from_str("99999").unwrap();
// let polys_for_opening_for_domain_k_at_beta_2_by_omega = vec![
// q_3_a_by_eta_a_grand_sum_over_k,
// q_3_b_by_eta_b_grand_sum_over_k,
// q_3_c_by_eta_c_grand_sum_over_k,
// ];
// let values_for_opening_for_domain_k_at_beta_2_by_omega = vec![
// q_3_a_by_eta_a_sum_eval_shifted,
// q_3_b_by_eta_b_sum_eval_shifted,
// q_3_c_by_eta_c_sum_eval_shifted,
// ];
// let proof_for_f_3_grand_sums_at_beta_2_by_omega = perform_batch_opening_from_values(
// polys_for_opening_for_domain_k_at_beta_2_by_omega,
// &crs.crs_values_on_k,
// beta_2_by_omega,
// &values_for_opening_for_domain_k_at_beta_2_by_omega,
// challenge_3,
// &worker
// )?;
// open everything else on beta_2 (domain H)
let mut beta_2_by_omega = beta_2;
beta_2_by_omega.mul_assign(&domain_h.generator);
let challenge_2 = E::Fr::from_str("9999999").unwrap();
// polynomial q_2 DOES HAVE it's constant term set to zero, so we need to add a constant term here
// at the evaluation step!
let mut q_2_eval_at_beta_2 = q_2_poly_values_on_h.barycentric_evaluate_at(&worker, beta_2)?;
let mut sigma_2_over_size_of_h = one_over_h_size;
sigma_2_over_size_of_h.mul_assign(&sigma_2);
q_2_eval_at_beta_2.add_assign(&sigma_2_over_size_of_h);
let q_2_sum_eval_at_beta_2 = q_2_grand_sum_over_h.barycentric_evaluate_at(&worker, beta_2)?;
let q_2_sum_eval_at_beta_2_shifted = q_2_grand_sum_over_h.barycentric_evaluate_at(&worker, beta_2_by_omega)?;
let polys_for_opening_for_domain_h_at_beta_2 = vec![
q_2_poly_values_on_h,
q_2_grand_sum_over_h.clone(),
];
let values_for_opening_for_domain_h_at_beta_2 = vec![
q_2_eval_at_beta_2,
q_2_sum_eval_at_beta_2,
];
let (mut aggregation_on_h, next_challenge) = perform_batched_divisor_for_opening::<E>(
polys_for_opening_for_domain_h_at_beta_2,
beta_2,
&values_for_opening_for_domain_h_at_beta_2,
challenge_2,
E::Fr::one(),
&worker
)?;
let polys_for_opening_for_domain_h_at_beta_2_by_omega = vec![
q_2_grand_sum_over_h,
];
let values_for_opening_for_domain_h_at_beta_2_by_omega = vec![
q_2_sum_eval_at_beta_2_shifted,
];
let (aggregation_at_beta_2_by_omega, next_challenge) = perform_batched_divisor_for_opening::<E>(
polys_for_opening_for_domain_h_at_beta_2_by_omega,
beta_2_by_omega,
&values_for_opening_for_domain_h_at_beta_2_by_omega,
challenge_2,
next_challenge,
&worker
)?;
aggregation_on_h.add_assign(&worker, &aggregation_at_beta_2_by_omega);
drop(aggregation_at_beta_2_by_omega);
// add everything else on beta_1 (domain H)
let mut beta_1_by_omega = beta_1;
beta_1_by_omega.mul_assign(&domain_h.generator);
// let q_1_eval_at_beta_1 = q_1_poly_values_over_h.barycentric_evaluate_at(&worker, beta_1)?;
// let q_1_sum_eval_at_beta_1 = q_1_grand_sum_poly_values_over_h.barycentric_evaluate_at(&worker, beta_1)?;
// let q_1_sum_eval_at_beta_1_shifted = q_1_grand_sum_poly_values_over_h.barycentric_evaluate_at(&worker, beta_1_by_omega)?;
let q_1_eval_at_beta_1 = proper_q_1_values_on_2h.barycentric_evaluate_at(&worker, beta_1)?;
let q_1_sum_eval_at_beta_1 = proper_q_1_grand_sum_poly_values_over_2h.barycentric_evaluate_at(&worker, beta_1)?;
let q_1_sum_eval_at_beta_1_shifted = proper_q_1_grand_sum_poly_values_over_2h.barycentric_evaluate_at(&worker, beta_1_by_omega)?;
let w_at_beta_1 = witness_values_on_h.barycentric_evaluate_at(&worker, beta_1)?;
let q_1_quotient_at_beta_1 = q_1_quotient_on_h.barycentric_evaluate_at(&worker, beta_1)?;
let polys_for_opening_for_domain_h_at_beta_1 = vec![
a_values_on_h,
b_values_on_h,
c_values_on_h,
h_poly_values_on_h,
witness_values_on_h,
// q_1_poly_values_over_h,
// q_1_grand_sum_poly_values_over_h.clone(),
];
let values_for_opening_for_domain_h_at_beta_1 = vec![
a_at_beta_1,
b_at_beta_1,
c_at_beta_1,
h_at_beta_1,
w_at_beta_1,
// q_1_eval_at_beta_1,
// q_1_sum_eval_at_beta_1
];
let (aggregation_at_beta_1, next_challenge) = perform_batched_divisor_for_opening::<E>(
polys_for_opening_for_domain_h_at_beta_1,
beta_1,
&values_for_opening_for_domain_h_at_beta_1,
challenge_2,
next_challenge,
&worker
)?;
aggregation_on_h.add_assign(&worker, &aggregation_at_beta_1);
drop(aggregation_at_beta_1);
// let polys_for_opening_for_domain_h_at_beta_1_by_omega = vec![
// q_1_grand_sum_poly_values_over_h,
// ];
// let values_for_opening_for_domain_h_at_beta_1_by_omega = vec![
// q_1_sum_eval_at_beta_1_shifted
// ];
// let (aggregation_at_beta_1_by_omega, _) = perform_batched_divisor_for_opening::<E>(
// polys_for_opening_for_domain_h_at_beta_1_by_omega,
// beta_1_by_omega,
// &values_for_opening_for_domain_h_at_beta_1_by_omega,
// challenge_2,
// next_challenge,
// &worker
// )?;
// aggregation_on_h.add_assign(&worker, &aggregation_at_beta_1_by_omega);
// drop(aggregation_at_beta_1_by_omega);
// this is an opening for everything on H
let proof_on_h = commit_using_values(
&aggregation_on_h,
&crs.crs_values_on_h,
&worker
)?;
// fun time - do the checks
/// Recombines a polynomial evaluation from its even/odd split:
/// returns `even + at * odd`. Field multiplication is commutative,
/// so the product may be accumulated starting from either operand.
fn compute_from_even_and_odd<F: PrimeField>(
    even: F,
    odd: F,
    at: F,
) -> F {
    let mut combined = at;
    combined.mul_assign(&odd);
    combined.add_assign(&even);
    combined
}
// first check f3 wellformedness
{
// vanishing(beta_1) * vanishing(beta_2) * val_m(z) - (beta_2 - row_m(z))(beta_1 - col_m(z)) q_3_m(z) = wellformedness(z)*vanishing(z)
let vanishing_at_beta_3 = evaluate_vanishing_for_size(&beta_3, domain_k.size);
let f_3_at_beta_3 = compute_from_even_and_odd(f_3_even_eval, f_3_odd_eval, beta_3);
assert_eq!(f_3_well_formedness_baryc_at_beta_3, f_3_at_beta_3, "f_3 is reconstructed properly");
let val_a_at_beta_3 = val_a_eval;
let val_b_at_beta_3 = val_b_eval;
let val_c_at_beta_3 = val_c_eval;
let row_a_at_beta_3 = row_a_eval;
let row_b_at_beta_3 = row_b_eval;
let row_c_at_beta_3 = row_c_eval;
let col_a_at_beta_3 = col_a_eval;
let col_b_at_beta_3 = col_b_eval;
let col_c_at_beta_3 = col_c_eval;
let mut lhs = E::Fr::zero();
let mut linearization_challenge = E::Fr::one();
let mut tmp = vanishing_on_beta_1_by_vanishing_on_beta_2;
tmp.mul_assign(&val_a_at_beta_3);
tmp.mul_assign(&eta_a);
let mut t_row = beta_2;
t_row.sub_assign(&row_a_at_beta_3);
let mut t_col = beta_1;
t_col.sub_assign(&col_a_at_beta_3);
t_row.mul_assign(&t_col);
t_row.mul_assign(&q_3_a_by_eta_a_eval);
tmp.sub_assign(&t_row);
tmp.mul_assign(&linearization_challenge);
lhs.add_assign(&tmp);
linearization_challenge.mul_assign(&rational_check_linearization_challenge);
let mut tmp = vanishing_on_beta_1_by_vanishing_on_beta_2;
tmp.mul_assign(&val_b_at_beta_3);
tmp.mul_assign(&eta_b);
let mut t_row = beta_2;
t_row.sub_assign(&row_b_at_beta_3);
let mut t_col = beta_1;
t_col.sub_assign(&col_b_at_beta_3);
t_row.mul_assign(&t_col);
t_row.mul_assign(&q_3_b_by_eta_b_eval);
tmp.sub_assign(&t_row);
tmp.mul_assign(&linearization_challenge);
lhs.add_assign(&tmp);
linearization_challenge.mul_assign(&rational_check_linearization_challenge);
let mut tmp = vanishing_on_beta_1_by_vanishing_on_beta_2;
tmp.mul_assign(&val_c_at_beta_3);
tmp.mul_assign(&eta_c);
let mut t_row = beta_2;
t_row.sub_assign(&row_c_at_beta_3);
let mut t_col = beta_1;
t_col.sub_assign(&col_c_at_beta_3);
t_row.mul_assign(&t_col);
t_row.mul_assign(&q_3_c_by_eta_c_eval);
tmp.sub_assign(&t_row);
tmp.mul_assign(&linearization_challenge);
lhs.add_assign(&tmp);
let mut rhs = vanishing_at_beta_3;
rhs.mul_assign(&f_3_at_beta_3);
assert_eq!(lhs, rhs, "f_3 wellformedness check");
// sumchecks for q_3_m polys
let mut sigma_3_a_over_size_of_k = one_over_k;
sigma_3_a_over_size_of_k.mul_assign(&sigma_3_a);
let mut lhs = q_3_a_by_eta_a_eval;
lhs.sub_assign(&q_3_a_by_eta_a_sum_eval_shifted);
lhs.add_assign(&q_3_a_by_eta_a_sum_eval);
lhs.sub_assign(&sigma_3_a_over_size_of_k);
let rhs = E::Fr::zero();
assert_eq!(lhs, rhs, "q_3_a sumcheck");
let mut sigma_3_b_over_size_of_k = one_over_k;
sigma_3_b_over_size_of_k.mul_assign(&sigma_3_b);
let mut lhs = q_3_b_by_eta_b_eval;
lhs.sub_assign(&q_3_b_by_eta_b_sum_eval_shifted);
lhs.add_assign(&q_3_b_by_eta_b_sum_eval);
lhs.sub_assign(&sigma_3_b_over_size_of_k);
let rhs = E::Fr::zero();
assert_eq!(lhs, rhs, "q_3_b sumcheck");
let mut sigma_3_c_over_size_of_k = one_over_k;
sigma_3_c_over_size_of_k.mul_assign(&sigma_3_c);
let mut lhs = q_3_c_by_eta_c_eval;
lhs.sub_assign(&q_3_c_by_eta_c_sum_eval_shifted);
lhs.add_assign(&q_3_c_by_eta_c_sum_eval);
lhs.sub_assign(&sigma_3_c_over_size_of_k);
let rhs = E::Fr::zero();
assert_eq!(lhs, rhs, "q_3_c sumcheck");
}
// sumcheck for q_2
{
// r(alpha, beta_2) * sigma_3 = sigma_2
let r_alpha_beta_2 = evaluate_bivariate_lagrange_at_point(
alpha,
beta_2,
domain_h.size
)?;
println!("r(alpha, beta_2) = {}", r_alpha_beta_2);
// sigma_3_m = eta_m * M(beta_2, beta_1);
// q_2(beta_2) = r(alpha, beta_2) * \sum_{m} M(beta_2, beta_1)
// so we do a sumcheck of q_2(beta_2) - q_2_sum(beta_2 * omega) - q_2_sum(beta_2) - sigma_2/|H| = 0
println!("Sigma_3_a = {}", sigma_3_a);
println!("Sigma_3_b = {}", sigma_3_b);
println!("Sigma_3_c = {}", sigma_3_c);
// reconstruct sigma_2 from the q_3 chunks
let mut sigma_3_reconstructed = E::Fr::zero();
// these contain eta_m already
sigma_3_reconstructed.add_assign(&sigma_3_a);
sigma_3_reconstructed.add_assign(&sigma_3_b);
sigma_3_reconstructed.add_assign(&sigma_3_c);
let mut q_2_at_beta_reconstructed = r_alpha_beta_2;
q_2_at_beta_reconstructed.mul_assign(&sigma_3_reconstructed);
let mut tmp_1 = q_2_at_beta_reconstructed.inverse().unwrap();
tmp_1.mul_assign(&q_2_eval_at_beta_2);
println!("tmp 1 = {}", tmp_1);
let mut tmp_2 = q_2_eval_at_beta_2.inverse().unwrap();
tmp_2.mul_assign(&q_2_at_beta_reconstructed);
println!("tmp 2 = {}", tmp_2);
let mut tmp_3 = r_m_x_beta_1_at_beta_2;
tmp_3.sub_assign(&sigma_3_a);
println!("tmp 3 = {}", tmp_3);
println!("Sigma_2 = {}", sigma_2);
// assert_eq!(q_2_eval_at_beta_2, q_2_at_beta_reconstructed, "q_2(beta_2) reconstruction");
let mut sigma_2_over_size_of_h = one_over_h_size;
sigma_2_over_size_of_h.mul_assign(&sigma_2);
let mut lhs = q_2_eval_at_beta_2;
lhs.sub_assign(&q_2_sum_eval_at_beta_2_shifted);
lhs.add_assign(&q_2_sum_eval_at_beta_2);
lhs.sub_assign(&sigma_2_over_size_of_h);
let rhs = E::Fr::zero();
assert_eq!(lhs, rhs, "q_2 sumcheck");
}
// sumcheck for q_1
{
// reconstruct value of r(alpha, beta_1) * \sum_{m} z_m(beta_1) - (sum{m} M(beta_1, alpha)) * w(beta_1) = q_1(beta_1)
let r_alpha_beta_1 = evaluate_bivariate_lagrange_at_point(
alpha,
beta_1,
domain_h.size
)?;
let mut lhs = sum_a_b_c_at_beta_1;
lhs.mul_assign(&r_alpha_beta_1);
let mut rhs = sum_m_at_beta_1;
rhs.mul_assign(&w_at_beta_1);
let mut reconstructed_q_1_at_beta_1 = lhs;
reconstructed_q_1_at_beta_1.sub_assign(&rhs);
assert_eq!(reconstructed_q_1_at_beta_1, q_1_eval_at_beta_1, "lincheck");
let mut reconstructed_q_1_at_beta_1 = E::Fr::zero();
let mut tmp = a_at_beta_1;
tmp.mul_assign(&eta_a);
reconstructed_q_1_at_beta_1.add_assign(&tmp);
let mut tmp = b_at_beta_1;
tmp.mul_assign(&eta_b);
reconstructed_q_1_at_beta_1.add_assign(&tmp);
let mut tmp = c_at_beta_1;
tmp.mul_assign(&eta_c);
reconstructed_q_1_at_beta_1.add_assign(&tmp);
reconstructed_q_1_at_beta_1.mul_assign(&r_alpha_beta_1);
let mut tmp = sigma_2;
tmp.mul_assign(&w_at_beta_1);
reconstructed_q_1_at_beta_1.sub_assign(&tmp);
// assert_eq!(reconstructed_q_1_at_beta_1, q_1_eval_at_beta_1, "q_1 at beta_1 reconstruciton");
// let mut lhs = q_1_eval_at_beta_1;
// lhs.sub_assign(&q_1_sum_eval_at_beta_1_shifted);
// lhs.add_assign(&q_1_sum_eval_at_beta_1);
// let rhs = E::Fr::zero();
// assert_eq!(lhs, rhs, "q_1 sumcheck");
let mut lhs = q_1_eval_at_beta_1;
lhs.sub_assign(&q_1_sum_eval_at_beta_1_shifted);
lhs.add_assign(&q_1_sum_eval_at_beta_1);
let mut rhs = q_1_quotient_at_beta_1;
rhs.mul_assign(&vanishing_at_beta_1);
assert_eq!(lhs, rhs, "q_1 sumcheck");
}
// last check: a*b - c = v_H * h
{
let mut lhs = a_at_beta_1;
lhs.mul_assign(&b_at_beta_1);
lhs.sub_assign(&c_at_beta_1);
let mut rhs = h_at_beta_1;
rhs.mul_assign(&vanishing_at_beta_1);
assert!(lhs == rhs, "ab - c == h * z_H");
}
// assert!(valid, "f_3 wellformedness");
// now we need to perform all the openings
// For domain K:
// - val_a_at_z, val_b_at_z, val_c_at_z
// - row_a_at_z, row_b_at_z, row_c_at_z
// - col_a_at_z, col_b_at_z, col_c_at_z
// - q_3_a_by_eta_a_commitment at z,
// - q_3_a_by_eta_a_sum_commitment at z and at z*omega
// - q_3_b_by_eta_a_commitment at z,
// - q_3_b_by_eta_a_sum_commitment at z and at z*omega
// - q_3_c_by_eta_a_commitment at z,
// - q_3_c_by_eta_a_sum_commitment at z and at z*omega
// For domain 2K (and we can move it into two openings on K):
// - f_3_well_formedness_poly_at_z
// for domain H:
// - z_a_at_z, z_b_at_z, z_c_at_z, h_at_z
// - q_1_at_z, q_1_sum_at_z, q_1_sum_at_z_omega
// - q_2_at_z, q_2_sum_at_z, q_2_sum_at_z_omega
// // -------------------------------------------
// // sanity checks
// let q_3_a_by_eta_a_at_z = q_3_a_by_eta_a_poly_coeffs.evaluate_at(&worker, z);
// let q_3_b_by_eta_b_at_z = q_3_b_by_eta_b_poly_coeffs.evaluate_at(&worker, z);
// let q_3_c_by_eta_c_at_z = q_3_c_by_eta_c_poly_coeffs.evaluate_at(&worker, z);
// let val_a_at_z = params.a_matrix_poly.evaluate_at(&worker, z);
// let val_b_at_z = params.b_matrix_poly.evaluate_at(&worker, z);
// let val_c_at_z = params.c_matrix_poly.evaluate_at(&worker, z);
// let row_a_at_z = params.a_row_poly.evaluate_at(&worker, z);
// let row_b_at_z = params.b_row_poly.evaluate_at(&worker, z);
// let row_c_at_z = params.c_row_poly.evaluate_at(&worker, z);
// let col_a_at_z = params.a_col_poly.evaluate_at(&worker, z);
// let col_b_at_z = params.b_col_poly.evaluate_at(&worker, z);
// let col_c_at_z = params.c_col_poly.evaluate_at(&worker, z);
// let vanishing_at_z = evaluate_vanishing_for_size(&z, domain_k.size);
// // vanishing(beta_1) * vanishing(beta_2) * val_m(z) - (beta_2 - row_m(z))(beta_1 - col_m(z)) q_3_m(z) = wellformedness(z)*vanishing(z)
// let mut lhs = E::Fr::zero();
// let mut linearization_challenge = E::Fr::one();
// let mut tmp = vanishing_on_beta_1_by_vanishing_on_beta_2;
// tmp.mul_assign(&val_a_at_z);
// tmp.mul_assign(&eta_a);
// let mut t_row = beta_2;
// t_row.sub_assign(&row_a_at_z);
// let mut t_col = beta_1;
// t_col.sub_assign(&col_a_at_z);
// t_row.mul_assign(&t_col);
// t_row.mul_assign(&q_3_a_by_eta_a_at_z);
// tmp.sub_assign(&t_row);
// tmp.mul_assign(&linearization_challenge);
// lhs.add_assign(&tmp);
// linearization_challenge.mul_assign(&rational_check_linearization_challenge);
// let mut tmp = vanishing_on_beta_1_by_vanishing_on_beta_2;
// tmp.mul_assign(&val_b_at_z);
// tmp.mul_assign(&eta_b);
// let mut t_row = beta_2;
// t_row.sub_assign(&row_b_at_z);
// let mut t_col = beta_1;
// t_col.sub_assign(&col_b_at_z);
// t_row.mul_assign(&t_col);
// t_row.mul_assign(&q_3_b_by_eta_b_at_z);
// tmp.sub_assign(&t_row);
// tmp.mul_assign(&linearization_challenge);
// lhs.add_assign(&tmp);
// linearization_challenge.mul_assign(&rational_check_linearization_challenge);
// let mut tmp = vanishing_on_beta_1_by_vanishing_on_beta_2;
// tmp.mul_assign(&val_c_at_z);
// tmp.mul_assign(&eta_c);
// let mut t_row = beta_2;
// t_row.sub_assign(&row_c_at_z);
// let mut t_col = beta_1;
// t_col.sub_assign(&col_c_at_z);
// t_row.mul_assign(&t_col);
// t_row.mul_assign(&q_3_c_by_eta_c_at_z);
// tmp.sub_assign(&t_row);
// tmp.mul_assign(&linearization_challenge);
// lhs.add_assign(&tmp);
// let mut rhs = vanishing_at_z;
// rhs.mul_assign(&f_3_well_formedness_poly_at_z);
// assert_eq!(lhs, rhs);
// let mut z_by_omega_k = z;
// z_by_omega_k.mul_assign(&domain_k.generator);
// let q_3_a_by_eta_a_grand_sum_poly_at_z = q_3_a_grand_sum_poly_coeffs.evaluate_at(&worker, z);
// let q_3_a_by_eta_a_grand_sum_poly_at_z_omega = q_3_a_grand_sum_poly_coeffs.evaluate_at(&worker, z_by_omega_k);
// let q_3_a_by_eta_a_values_poly_at_z = q_3_a_by_eta_a_poly_coeffs.evaluate_at(&worker, z);
// let q_3_a_sumcheck_poly_at_z = E::Fr::zero();
// // sum(z*omega) = sum(z) + el(z) everywhere on k
// // el(z) is actually el(z) - sigma_3/Domain_size
// // sum(z*omega) - sum(z) - (el(z) - sum_over_k(el)) = vanishing(z) * quotient(z)
// let mut sigma_3_a_over_size_of_k = one_over_k;
// sigma_3_a_over_size_of_k.mul_assign(&sigma_3_a);
// let mut lhs = q_3_a_by_eta_a_grand_sum_poly_at_z;
// lhs.sub_assign(&q_3_a_by_eta_a_grand_sum_poly_at_z_omega);
// lhs.add_assign(&q_3_a_by_eta_a_values_poly_at_z);
// lhs.sub_assign(&sigma_3_a_over_size_of_k);
// let mut rhs = vanishing_at_z;
// rhs.mul_assign(&q_3_a_sumcheck_poly_at_z);
// assert_eq!(lhs, rhs);
Ok(())
}
}
impl<E: Engine> ConstraintSystem<E> for ProvingAssignment<E> {
    type Root = Self;

    /// Allocates an auxiliary (witness) variable: stores the value produced
    /// by `f` and returns a `Variable` addressing its slot.
    fn alloc<F, A, AR>(
        &mut self,
        _: A,
        f: F
    ) -> Result<Variable, SynthesisError>
        where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
    {
        let value = f()?;
        self.aux_assignment.push(value);
        let slot = self.aux_assignment.len() - 1;
        Ok(Variable(Index::Aux(slot)))
    }

    /// Allocates a public input variable: stores the value produced by `f`
    /// and returns a `Variable` addressing its slot.
    fn alloc_input<F, A, AR>(
        &mut self,
        _: A,
        f: F
    ) -> Result<Variable, SynthesisError>
        where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
    {
        let value = f()?;
        self.input_assignment.push(value);
        let slot = self.input_assignment.len() - 1;
        Ok(Variable(Index::Input(slot)))
    }

    /// Records one R1CS constraint a * b = c by evaluating each linear
    /// combination against the current assignments and pushing the results.
    fn enforce<A, AR, LA, LB, LC>(
        &mut self,
        _: A,
        a: LA,
        b: LB,
        c: LC
    )
        where A: FnOnce() -> AR, AR: Into<String>,
              LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
              LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
              LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
    {
        let lc_a = a(LinearCombination::zero());
        let lc_b = b(LinearCombination::zero());
        let lc_c = c(LinearCombination::zero());

        self.a.push(eval(lc_a, &self.input_assignment, &self.aux_assignment));
        self.b.push(eval(lc_b, &self.input_assignment, &self.aux_assignment));
        self.c.push(eval(lc_c, &self.input_assignment, &self.aux_assignment));
    }

    /// Namespaces are irrelevant for proving; intentionally a no-op.
    fn push_namespace<NR, N>(&mut self, _: N)
        where NR: Into<String>, N: FnOnce() -> NR
    {
    }

    /// Namespaces are irrelevant for proving; intentionally a no-op.
    fn pop_namespace(&mut self)
    {
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }
}
// pub fn create_random_proof<E, C, R, P: ParameterSource<E>>(
// circuit: C,
// params: P,
// rng: &mut R
// ) -> Result<Proof<E>, SynthesisError>
// where E: Engine, C: Circuit<E>, R: Rng
// {
// let r = rng.gen();
// let s = rng.gen();
// create_proof::<E, C, P>(circuit, params, r, s)
// }
/// Synthesizes `circuit` into a proving assignment, builds the index
/// precomputations for `params`, and runs the prover with the given
/// precomputed commitment bases.
///
/// NOTE(review): currently returns `Ok(())` rather than a proof object —
/// presumably this is still a test/driver path; confirm before relying on it.
pub fn create_proof<E, C>(
    circuit: C,
    params: &IndexedSetup<E>,
    bases: &PrecomputedBases<E>,
) -> Result<(), SynthesisError>
    where E: Engine, C: Circuit<E>
{
    let thread_pool = Worker::new();
    println!("Start making precomputations");
    let index_precomputations = IndexPrecomputations::new(&params, &thread_pool).expect("must precompute");
    println!("Done making precomputations");
    let assignment = prepare_prover(circuit)?;
    assignment.create_proof(params, &bases, &index_precomputations)
}
/// Test helper: indexes `circuit`, generates fresh (tau = 42) commitment
/// bases, prints the domain sizes, and runs the prover end to end.
pub fn test_over_engine_and_circuit<E: Engine, C: Circuit<E> + Clone>(
    circuit: C,
) {
    let setup = generate_parameters(circuit.clone()).unwrap();
    let worker_pool = Worker::new();
    let precomputed = PrecomputedBases::<E>::new_42_for_index(&setup, &worker_pool);
    println!("Params domain H size = {}", setup.domain_h_size);
    println!("Params domain K size = {}", setup.domain_k_size);
    create_proof(circuit, &setup, &precomputed).unwrap();
}
/// Test helper: like `test_over_engine_and_circuit`, but loads the
/// precomputed bases from key files under `base_path` instead of
/// generating them.
///
/// Fix: `&params` had been garbled to `¶ms` (mis-decoded `&para` entity).
pub fn test_over_engine_and_circuit_with_proving_key<E: Engine, C: Circuit<E> + Clone>(
    circuit: C,
    base_path: String
) {
    let params = generate_parameters(circuit.clone()).unwrap();

    let bases = read_test_keys::<E>(base_path);

    println!("Params domain H size = {}", params.domain_h_size);
    println!("Params domain K size = {}", params.domain_k_size);

    let _ = create_proof(circuit, &params, &bases).unwrap();
}
/// Writes the precomputed CRS bases to disk: the values over domain H to
/// `h_file_path` and the values over domain K to `k_file_path`.
///
/// Uses `File::create`, so existing files are truncated. (Removed stale
/// commented-out `File::open` calls, which would have failed for writing.)
pub fn serialize_bases<E: Engine, P: AsRef<std::path::Path>>(
    bases: &PrecomputedBases<E>,
    h_file_path: P,
    k_file_path: P
) -> std::io::Result<()> {
    let mut h_writer = std::fs::File::create(h_file_path)?;
    let mut k_writer = std::fs::File::create(k_file_path)?;

    bases.crs_values_on_h.write(&mut h_writer)?;
    bases.crs_values_on_k.write(&mut k_writer)?;

    Ok(())
}
/// Reads precomputed CRS bases back from disk: domain-H values from
/// `h_file_path` and domain-K values from `k_file_path`.
pub fn deserialize_bases<E: Engine, P: AsRef<std::path::Path>>(
    h_file_path: P,
    k_file_path: P
) -> Result<PrecomputedBases<E>, SynthesisError> {
    let mut reader_h = std::fs::File::open(h_file_path)?;
    let mut reader_k = std::fs::File::open(k_file_path)?;

    // Both files hold Lagrange-form CRS values.
    let crs_values_on_h = Crs::<E, CrsForLagrangeForm>::read(&mut reader_h)?;
    let crs_values_on_k = Crs::<E, CrsForLagrangeForm>::read(&mut reader_k)?;

    Ok(PrecomputedBases::<E> {
        crs_values_on_h,
        crs_values_on_k,
    })
}
/// Test helper: generates parameters and precomputed bases for `circuit`
/// and serializes the bases to `{base}_h.key` / `{base}_k.key`.
///
/// Fix: `&params` had been garbled to `¶ms` (mis-decoded `&para` entity).
pub fn create_test_keys<E: Engine, C: Circuit<E> + Clone>(
    circuit: C,
    base_file_name: String
) {
    let params = generate_parameters(circuit.clone()).unwrap();

    let path_h = format!("{}_h.key", base_file_name);
    let path_k = format!("{}_k.key", base_file_name);

    let worker = Worker::new();

    let bases = PrecomputedBases::new_42_for_index(&params, &worker);

    serialize_bases(&bases, &path_h, &path_k).expect("must serialize the bases");
}
/// Test helper: loads bases previously written by `create_test_keys` from
/// `{base}_h.key` / `{base}_k.key`.
pub fn read_test_keys<E: Engine>(
    base_file_name: String
) -> PrecomputedBases<E> {
    let h_path = format!("{}_h.key", base_file_name);
    let k_path = format!("{}_k.key", base_file_name);

    deserialize_bases::<E, _>(&h_path, &k_path).expect("must read the bases")
}
#[cfg(test)]
mod test {
use crate::tests::XORDemo;
use crate::plonk::domains::*;
use crate::worker::Worker;
use super::*;
use std::marker::PhantomData;
use super::super::generator::*;
#[test]
fn test_proving_1() {
    use crate::pairing::bn256::{Bn256};

    // XOR demo with both witness inputs set to `true`.
    let circuit = XORDemo::<Bn256> {
        a: Some(true),
        b: Some(true),
        _marker: PhantomData
    };

    test_over_engine_and_circuit(circuit);
}
#[test]
fn test_proving_2() {
    // `Fr` was imported but never used (unused-import warning); only
    // `Bn256` is needed here.
    use crate::pairing::bn256::{Bn256};

    // Indexer tester circuit with no witness assigned.
    let c = IndexerTester::<Bn256> {
        a: None,
        b: None,
    };

    test_over_engine_and_circuit(c);
}
}<file_sep>/src/plonk/better_better_cs/utils.rs
use crate::pairing::ff::PrimeField;
use crate::worker::Worker;
use crate::plonk::domains::*;
use crate::SynthesisError;
use crate::plonk::polynomials::*;
/// A binary operation over a prime field, applied element-wise as
/// `apply(dest, source)`. Implementors must be `Copy + Send + Sync` so the
/// operation can be shipped to worker threads by `binop_over_slices`.
pub trait FieldBinop<F: PrimeField>: 'static + Copy + Clone + Send + Sync + std::fmt::Debug {
    // Combines `source` into `dest` in place.
    fn apply(&self, dest: &mut F, source: &F);
}
/// Applies `binop` element-wise over `dest`/`source` in parallel, splitting
/// both slices into equal-length chunks across the worker's threads.
/// Panics if the slices differ in length.
pub(crate) fn binop_over_slices<F: PrimeField, B: FieldBinop<F>>(worker: &Worker, binop: &B, dest: &mut [F], source: &[F]) {
    assert_eq!(dest.len(), source.len());

    worker.scope(dest.len(), |scope, chunk_len| {
        let chunk_pairs = dest.chunks_mut(chunk_len).zip(source.chunks(chunk_len));
        for (dst_chunk, src_chunk) in chunk_pairs {
            scope.spawn(move |_| {
                for (d, s) in dst_chunk.iter_mut().zip(src_chunk.iter()) {
                    binop.apply(d, s);
                }
            });
        }
    });
}
/// Binop implementing plain accumulation: `dest += source`.
#[derive(Clone, Copy, Debug)]
pub struct BinopAddAssign;
impl<F: PrimeField> FieldBinop<F> for BinopAddAssign {
    #[inline(always)]
    fn apply(&self, dest: &mut F, source: &F) {
        dest.add_assign(source);
    }
}
/// Binop implementing scaled accumulation: `dest += scale * source`.
#[derive(Clone, Copy, Debug)]
pub struct BinopAddAssignScaled<F: PrimeField>{
    // Factor applied to `source` before accumulation.
    pub scale: F
}
impl<F: PrimeField> BinopAddAssignScaled<F> {
    /// Creates the binop with the given scaling factor.
    pub fn new(scale: F) -> Self {
        Self {
            scale
        }
    }
}
/// `dest += self.scale * source`, computed through a temporary product.
impl<F: PrimeField> FieldBinop<F> for BinopAddAssignScaled<F> {
    #[inline(always)]
    fn apply(&self, dest: &mut F, source: &F) {
        let mut scaled = self.scale;
        scaled.mul_assign(source);

        dest.add_assign(&scaled);
    }
}
/// Returns the degree of `poly`: the index of its highest non-zero
/// coefficient.
///
/// Fix: the original unconditionally decremented `degree` for every
/// trailing zero, so an all-zero polynomial underflowed `usize` (panic in
/// debug builds, wraparound in release). An all-zero polynomial now
/// reports degree 0.
pub(crate) fn get_degree<F: PrimeField>(poly: &Polynomial<F, Coefficients>) -> usize {
    let coeffs = poly.as_ref();
    let mut degree = coeffs.len() - 1;
    // Walk from the highest coefficient down, skipping trailing zeroes.
    for c in coeffs.iter().rev() {
        if !c.is_zero() {
            break;
        }
        if degree == 0 {
            // All coefficients are zero.
            break;
        }
        degree -= 1;
    }

    degree
}
/// Evaluates, over a coset of size `poly_size` shifted by `coset_factor`,
/// the inverse of the vanishing polynomial of the `vahisning_size` domain
/// with its last point removed (see the formula note below). Both sizes
/// must be powers of two.
pub (crate) fn calculate_inverse_vanishing_polynomial_with_last_point_cut<F: PrimeField>(
    worker: &Worker,
    poly_size:usize,
    vahisning_size: usize,
    coset_factor: F,
) -> Result<Polynomial<F, Values>, SynthesisError> {
    assert!(poly_size.is_power_of_two());
    assert!(vahisning_size.is_power_of_two());

    // update from the paper - it should not hold for the last generator, omega^(n) in original notations

    // Z(X) = (X^(n+1) - 1) / (X - omega^(n)) => Z^{-1}(X) = (X - omega^(n)) / (X^(n+1) - 1)

    let domain = Domain::<F>::new_for_size(vahisning_size as u64)?;
    let n_domain_omega = domain.generator;
    // root = -omega^(n-1): the (negated) last point of the vanishing domain.
    let mut root = n_domain_omega.pow([(vahisning_size - 1) as u64]);
    root.negate();

    let mut negative_one = F::one();
    negative_one.negate();

    let mut numerator = Polynomial::<F, Values>::from_values(vec![coset_factor; poly_size])?;
    // evaluate X in linear time
    numerator.distribute_powers(&worker, numerator.omega);
    numerator.add_constant(&worker, &root);

    // numerator.add_constant(&worker, &negative_one);

    // now it's a series of values in a coset

    // now we should evaluate X^(n+1) - 1 in a linear time

    let shift = coset_factor.pow([vahisning_size as u64]);

    let mut denominator = Polynomial::<F, Values>::from_values(vec![shift; poly_size])?;

    // elements are h^size - 1, (hg)^size - 1, (hg^2)^size - 1, ...

    denominator.distribute_powers(&worker, denominator.omega.pow([vahisning_size as u64]));
    denominator.add_constant(&worker, &negative_one);

    // Invert all denominator values at once, then multiply pointwise.
    denominator.batch_inversion(&worker)?;

    numerator.mul_assign(&worker, &denominator);

    Ok(numerator)
}
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
pub mod batch;
pub mod poly;
pub mod prover;
pub mod verifier;
pub mod helper;
pub mod parameters;
pub mod generator;
mod adapted_prover;
mod adapted_verifier;
mod adapted_helper;
pub use self::batch::{Batch};
pub use self::verifier::{MultiVerifier};
pub use self::generator::{
CircuitParameters,
generate_parameters,
generate_parameters_on_srs,
generate_parameters_on_srs_and_information,
generate_random_parameters,
generate_srs,
get_circuit_parameters,
get_circuit_parameters_for_succinct_sonic
};
pub use self::parameters::{
Proof,
SxyAdvice,
Parameters,
VerifyingKey,
PreparedVerifyingKey
};
pub use self::adapted_prover::{
create_advice,
create_advice_on_srs,
create_advice_on_information_and_srs,
create_proof,
create_proof_on_srs,
};
pub use self::adapted_verifier::{
verify_proofs,
verify_aggregate
};
pub use self::adapted_helper::{
create_aggregate
};<file_sep>/src/sonic/srs/mod.rs
mod srs;
pub use self::srs::SRS;<file_sep>/src/sonic/helped/verifier.rs
use crate::pairing::ff::{Field};
use crate::pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use rand::{Rand, Rng};
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::helper::Aggregate;
use super::parameters::{Parameters};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::cs::{Circuit, Variable, Coeff};
use crate::sonic::srs::SRS;
use crate::sonic::sonic::Preprocess;
/// Batch verifier for multiple Sonic proofs over the same circuit.
/// Openings and commitments from every added proof are accumulated into
/// `batch` and checked together by `check_all`.
pub struct MultiVerifier<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng> {
    circuit: C,
    pub(crate) batch: Batch<E>,
    // k_map / n / q are populated from circuit preprocessing (`Preprocess`).
    k_map: Vec<usize>,
    n: usize,
    q: usize,
    // Source of blinding randomness for combining batched openings.
    randomness_source: R,
    _marker: PhantomData<(E, S)>
}
impl<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng> MultiVerifier<E, C, S, R> {
    // This constructor consumes randomness source cause it's later used internally
    /// Preprocesses `circuit` to learn its shape (`n`, `q`, `k_map`) and
    /// prepares an empty batch over `srs`.
    pub fn new(circuit: C, srs: &SRS<E>, rng: R) -> Result<Self, SynthesisError> {
        let mut preprocess = Preprocess::new();
        S::synthesize(&mut preprocess, &circuit)?;

        Ok(MultiVerifier {
            circuit,
            batch: Batch::new(srs, preprocess.n),
            k_map: preprocess.k_map,
            n: preprocess.n,
            q: preprocess.q,
            randomness_source: rng,
            _marker: PhantomData
        })
    }

    /// Adds an aggregated `S(X, Y)` opening argument covering all `proofs`
    /// to the batch. Re-derives the Fiat-Shamir challenges (y per proof,
    /// then z and w) from the same transcript schedule the helper used.
    pub fn add_aggregate(
        &mut self,
        proofs: &[(Proof<E>, SxyAdvice<E>)],
        aggregate: &Aggregate<E>,
    )
    {
        let mut transcript = Transcript::new(&[]);

        let mut y_values: Vec<E::Fr> = Vec::with_capacity(proofs.len());
        for &(ref proof, ref sxyadvice) in proofs {
            {
                // Each proof's y challenge comes from its own fresh transcript.
                let mut transcript = Transcript::new(&[]);
                transcript.commit_point(&proof.r);
                y_values.push(transcript.get_challenge_scalar());
            }

            transcript.commit_point(&sxyadvice.s);
        }

        let z: E::Fr = transcript.get_challenge_scalar();

        transcript.commit_point(&aggregate.c);

        let w: E::Fr = transcript.get_challenge_scalar();

        // Locally evaluate s(z, w) by synthesizing the circuit.
        let szw = {
            let mut tmp = SxEval::new(w, self.n);
            S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO

            tmp.finalize(z)
        };

        {
            // Batch the opening of the aggregate commitment C at w.
            let random: E::Fr = self.randomness_source.gen();

            self.batch.add_opening(aggregate.opening, random, w);
            self.batch.add_commitment(aggregate.c, random);
            self.batch.add_opening_value(szw, random);
        }

        // Batch the openings of C at each per-proof y challenge.
        for ((opening, value), &y) in aggregate.c_openings.iter().zip(y_values.iter()) {
            let random: E::Fr = self.randomness_source.gen();

            self.batch.add_opening(*opening, random, y);
            self.batch.add_commitment(aggregate.c, random);
            self.batch.add_opening_value(*value, random);
        }

        let random: E::Fr = self.randomness_source.gen();

        // Combine the advice commitments with per-proof challenges r and
        // batch a single opening of their combination at z.
        let mut expected_value = E::Fr::zero();
        for ((_, advice), c_opening) in proofs.iter().zip(aggregate.c_openings.iter()) {
            let mut r: E::Fr = transcript.get_challenge_scalar();

            // expected value of the later opening
            {
                let mut tmp = c_opening.1;
                tmp.mul_assign(&r);

                expected_value.add_assign(&tmp);
            }

            r.mul_assign(&random);

            self.batch.add_commitment(advice.s, r);
        }

        self.batch.add_opening_value(expected_value, random);
        self.batch.add_opening(aggregate.s_opening, random, z);
    }

    /// Caller must ensure to add aggregate after adding a proof
    pub fn add_proof_with_advice(
        &mut self,
        proof: &Proof<E>,
        inputs: &[E::Fr],
        advice: &SxyAdvice<E>,
    )
    {
        let mut z = None;

        // Reuse add_proof, capturing the z challenge it derives and
        // substituting the precomputed s(z, y) from the advice.
        self.add_proof(proof, inputs, |_z, _y| {
            z = Some(_z);
            Some(advice.szy)
        });

        let z = z.unwrap();

        // We need to open up SxyAdvice.s at z using SxyAdvice.opening
        let mut transcript = Transcript::new(&[]);
        transcript.commit_point(&advice.opening);
        transcript.commit_point(&advice.s);
        transcript.commit_scalar(&advice.szy);
        let random: E::Fr = self.randomness_source.gen();

        self.batch.add_opening(advice.opening, random, z);
        self.batch.add_commitment(advice.s, random);
        self.batch.add_opening_value(advice.szy, random);
    }

    /// Adds a single proof to the batch. `sxy` may supply a precomputed
    /// value of s(z, y); if it returns `None`, s(z, y) is evaluated here by
    /// synthesizing the circuit.
    pub fn add_proof<F>(
        &mut self,
        proof: &Proof<E>,
        inputs: &[E::Fr],
        sxy: F
    )
        where F: FnOnce(E::Fr, E::Fr) -> Option<E::Fr>
    {
        // Replay the prover's transcript to derive the y, z and r1 challenges.
        let mut transcript = Transcript::new(&[]);

        transcript.commit_point(&proof.r);

        let y: E::Fr = transcript.get_challenge_scalar();

        transcript.commit_point(&proof.t);

        let z: E::Fr = transcript.get_challenge_scalar();

        transcript.commit_scalar(&proof.rz);
        transcript.commit_scalar(&proof.rzy);

        let r1: E::Fr = transcript.get_challenge_scalar();

        transcript.commit_point(&proof.z_opening);
        transcript.commit_point(&proof.zy_opening);

        // First, the easy one. Let's open up proof.r at zy, using proof.zy_opening
        // as the evidence and proof.rzy as the opening.
        {
            let random: E::Fr = self.randomness_source.gen();
            let mut zy = z;
            zy.mul_assign(&y);
            self.batch.add_opening(proof.zy_opening, random, zy);
            self.batch.add_commitment_max_n(proof.r, random);
            self.batch.add_opening_value(proof.rzy, random);
        }

        // Now we need to compute t(z, y) with what we have. Let's compute k(y).
        let mut ky = E::Fr::zero();
        for (exp, input) in self.k_map.iter().zip(Some(E::Fr::one()).iter().chain(inputs.iter())) {
            let mut term = y.pow(&[(*exp + self.n) as u64]);
            term.mul_assign(input);
            ky.add_assign(&term);
        }

        // Compute s(z, y)
        let szy = sxy(z, y).unwrap_or_else(|| {
            let mut tmp = SxEval::new(y, self.n);
            S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO

            tmp.finalize(z)

            // let mut tmp = SyEval::new(z, self.n, self.q);
            // S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO

            // tmp.finalize(y)
        });

        // Finally, compute t(z, y)
        // t(z, y) = (r(z, y) + s(z,y))*r(z, 1) - k(y)
        let mut tzy = proof.rzy;
        tzy.add_assign(&szy);
        tzy.mul_assign(&proof.rz);
        tzy.sub_assign(&ky);

        // We open these both at the same time by keeping their commitments
        // linearly independent (using r1).
        {
            let mut random: E::Fr = self.randomness_source.gen();

            self.batch.add_opening(proof.z_opening, random, z);
            self.batch.add_opening_value(tzy, random);
            self.batch.add_commitment(proof.t, random);

            random.mul_assign(&r1);

            self.batch.add_opening_value(proof.rz, random);
            self.batch.add_commitment_max_n(proof.r, random);
        }
    }

    /// Returns a copy of the input-to-constraint index map.
    pub fn get_k_map(&self) -> Vec<usize> {
        return self.k_map.clone();
    }

    /// Returns `n` from preprocessing.
    pub fn get_n(&self) -> usize {
        return self.n;
    }

    /// Returns `q` from preprocessing.
    pub fn get_q(&self) -> usize {
        return self.q;
    }

    /// Consumes the verifier and runs the accumulated batch check.
    pub fn check_all(self) -> bool {
        self.batch.check_all()
    }
}
/// Check multiple proofs without aggregation. Verifier's work is
/// not succinct due to `S(X, Y)` evaluation.
///
/// Fix: `&params.srs` had been garbled to `¶ms.srs` by a bad
/// HTML-entity decode (`&para` -> `¶`); restored.
pub fn verify_proofs<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng>(
    proofs: &[Proof<E>],
    inputs: &[Vec<E::Fr>],
    circuit: C,
    rng: R,
    params: &Parameters<E>,
) -> Result<bool, SynthesisError> {
    verify_proofs_on_srs::<E, C, S, R>(proofs, inputs, circuit, rng, &params.srs)
}
/// Check multiple proofs without aggregation. Verifier's work is
/// not succinct due to `S(X, Y)` evaluation.
pub fn verify_proofs_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng>(
    proofs: &[Proof<E>],
    inputs: &[Vec<E::Fr>],
    circuit: C,
    rng: R,
    srs: &SRS<E>,
) -> Result<bool, SynthesisError> {
    let mut verifier = MultiVerifier::<E, C, S, R>::new(circuit, srs, rng)?;
    let expected_inputs_size = verifier.get_k_map().len() - 1;

    for (proof, proof_inputs) in proofs.iter().zip(inputs.iter()) {
        // Reject any proof whose public-input vector has the wrong arity.
        if proof_inputs.len() != expected_inputs_size {
            return Err(SynthesisError::Unsatisfiable);
        }
        verifier.add_proof(proof, proof_inputs, |_, _| None);
    }

    Ok(verifier.check_all())
}
/// Check multiple proofs with aggregation. Verifier's work is
/// not succinct due to `S(X, Y)` evaluation.
///
/// Fix: `&params.srs` had been garbled to `¶ms.srs` by a bad
/// HTML-entity decode (`&para` -> `¶`); restored.
pub fn verify_aggregate<E: Engine, C: Circuit<E>, S: SynthesisDriver,R: Rng>(
    proofs: &[(Proof<E>, SxyAdvice<E>)],
    aggregate: &Aggregate<E>,
    inputs: &[Vec<E::Fr>],
    circuit: C,
    rng: R,
    params: &Parameters<E>,
) -> Result<bool, SynthesisError> {
    verify_aggregate_on_srs::<E, C, S, R>(proofs, aggregate, inputs, circuit, rng, &params.srs)
}
/// Check multiple proofs with aggregation. Verifier's work is
/// not succinct due to `S(X, Y)` evaluation.
pub fn verify_aggregate_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng>(
    proofs: &[(Proof<E>, SxyAdvice<E>)],
    aggregate: &Aggregate<E>,
    inputs: &[Vec<E::Fr>],
    circuit: C,
    rng: R,
    srs: &SRS<E>,
) -> Result<bool, SynthesisError> {
    let mut verifier = MultiVerifier::<E, C, S, R>::new(circuit, srs, rng)?;
    let expected_inputs_size = verifier.get_k_map().len() - 1;

    for ((proof, advice), proof_inputs) in proofs.iter().zip(inputs.iter()) {
        // Reject any proof whose public-input vector has the wrong arity.
        if proof_inputs.len() != expected_inputs_size {
            return Err(SynthesisError::Unsatisfiable);
        }
        verifier.add_proof_with_advice(proof, proof_inputs, advice);
    }

    // The aggregate must be added after all individual proofs.
    verifier.add_aggregate(proofs, aggregate);

    Ok(verifier.check_all())
}
<file_sep>/src/plonk/adaptor/alternative.rs
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::SynthesisError;
use crate::plonk::cs::gates::Gate;
use crate::plonk::cs::gates::Coeff;
use crate::plonk::cs::gates::Variable as PlonkVariable;
use crate::plonk::cs::gates::Index as PlonkIndex;
use crate::plonk::cs::Circuit as PlonkCircuit;
use crate::plonk::cs::ConstraintSystem as PlonkConstraintSystem;
use std::marker::PhantomData;
use std::collections::{HashSet, HashMap};
/// Describes how the linear combinations of an R1CS constraint
/// `A * B - C = 0` were merged when it was rewritten into gates.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MergeLcVariant {
    AIsTheOnlyMeaningful,
    BIsTheOnlyMeaningful,
    MergeABWithConstantC,
    MergeACThroughConstantB,
    MergeBCThroughConstantA,
    CIsTheOnlyMeaningful,
}
// These are transpilation options over A * B - C = 0 constraint
/// Hint recorded per R1CS constraint describing which gate shape it was
/// transpiled into; coefficient tuples carry the gate's field constants.
#[derive(Clone, PartialEq, Eq)]
pub enum TranspilationVariant<E: Engine> {
    LeaveAsSingleVariable(E::Fr),
    IntoQuandaticGate((E::Fr, E::Fr, E::Fr)),
    IntoLinearGate((E::Fr, E::Fr)),
    IntoSingleAdditionGate((E::Fr, E::Fr, E::Fr, E::Fr)),
    IntoMultipleAdditionGates((E::Fr, E::Fr, E::Fr, E::Fr), Vec<E::Fr>),
    MergeLinearCombinations((MergeLcVariant, E::Fr, Box<TranspilationVariant<E>>)),
    IsConstant(E::Fr),
    // Boxed triple of hints for the rewritten A, B and C LCs.
    TransformLc(Box<(TranspilationVariant<E>, TranspilationVariant<E>, TranspilationVariant<E>)>)
}
/// Human-readable rendering of a transpilation hint; one labelled section
/// per variant, with its coefficients or nested hints.
impl<E: Engine> std::fmt::Debug for TranspilationVariant<E> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            TranspilationVariant::LeaveAsSingleVariable(c) => {
                writeln!(f, "Variant: leave LC as a single variable")?;
                writeln!(f, "With coefficient {}", c)?;
            },
            TranspilationVariant::IntoQuandaticGate(c) => {
                writeln!(f, "Variant: into quadratic gate")?;
                writeln!(f, "{} + {} * x + {} * x^2", c.0, c.1, c.2)?;
            },
            TranspilationVariant::IntoLinearGate(c) => {
                writeln!(f, "Variant: into linear gate")?;
                writeln!(f, "{} + {} * x", c.0, c.1)?;
            },
            TranspilationVariant::IntoSingleAdditionGate(c) => {
                writeln!(f, "Variant: into single addition gate")?;
                writeln!(f, "{}*a + {}*b + {}*c + {} = 0", c.0, c.1, c.2, c.3)?;
            },
            TranspilationVariant::IntoMultipleAdditionGates(c, next) => {
                writeln!(f, "Variant: into multiple addition gates")?;
                writeln!(f, "{}*a + {}*b + {}*c + {} = 0", c.0, c.1, c.2, c.3)?;
                writeln!(f, "{:?}", next)?;
            },
            TranspilationVariant::MergeLinearCombinations(c) => {
                writeln!(f, "Variant: merge linear combinations")?;
                writeln!(f, "Merge with hint: {:?}", c.0)?;
            },
            TranspilationVariant::IsConstant(c) => {
                writeln!(f, "Variant: into constant factor {}", c)?;
            },
            TranspilationVariant::TransformLc(b) => {
                writeln!(f, "Variant: into combinatoric transform LC")?;
                writeln!(f, "A: {:?}", b.as_ref().0)?;
                writeln!(f, "B: {:?}", b.as_ref().1)?;
                writeln!(f, "C: {:?}", b.as_ref().2)?;
            },
        }

        Ok(())
    }
}
/// R1CS-to-PLONK transpiler: implements `ConstraintSystem` so that
/// synthesizing a circuit through it records a `TranspilationVariant`
/// hint per constraint instead of an assignment.
pub struct Transpiler<E: Engine> {
    // Index of the R1CS constraint currently being processed.
    current_constraint_index: usize,
    // Counters used to hand out fresh PLONK input/aux variable indices.
    current_plonk_input_idx: usize,
    current_plonk_aux_idx: usize,
    // Reusable scratch set for counting unique variables in an LC.
    scratch: HashSet<crate::cs::Variable>,
    // deduplication_scratch: HashMap<crate::cs::Variable, E::Fr>,
    deduplication_scratch: HashMap<crate::cs::Variable, usize>,
    // (constraint index, hint) pairs collected during synthesis.
    hints: Vec<(usize, TranspilationVariant<E>)>,
}
impl<E: Engine> Transpiler<E> {
    /// Creates an empty transpiler. Input index starts at 1 so that
    /// `Input(0)` stays reserved for the constant one.
    pub fn new() -> Self {
        Self {
            current_constraint_index: 0,
            current_plonk_input_idx: 1,
            current_plonk_aux_idx: 0,
            scratch: HashSet::with_capacity((E::Fr::NUM_BITS * 2) as usize),
            deduplication_scratch: HashMap::with_capacity((E::Fr::NUM_BITS * 2) as usize),
            hints: vec![],
        }
    }

    /// Consumes the transpiler, returning the collected hints.
    pub fn into_hints(self) -> Vec<(usize, TranspilationVariant<E>)> {
        self.hints
    }

    // Returns the current constraint number and advances the counter.
    fn increment_lc_number(&mut self) -> usize {
        let current_lc_number = self.current_constraint_index;
        self.current_constraint_index += 1;

        current_lc_number
    }

    /// Rewrites the constraint `multiplier * lc = free_term_constant` into
    /// one or more enforcement (addition) gates and returns the hint
    /// describing that rewrite. The LC's own constant term is split out
    /// first and folded into the gate's free coefficient.
    fn enforce_lc_as_gates(
        &mut self,
        lc: LinearCombination<E>,
        multiplier: E::Fr,
        free_term_constant: E::Fr
    ) -> TranspilationVariant<E> {
        // let zero_fr = E::Fr::zero();
        let one_fr = E::Fr::one();

        let (lc, mut constant_coeff) = split_constant_term::<E, Self>(lc);
        let (contains_constant, num_linear_terms) = num_unique_values::<E, Self>(&lc, &mut self.scratch);
        assert!(!contains_constant, "must have split constant term before");
        assert!(num_linear_terms > 0);
        // if num_linear_terms == 1 && free_term_constant == zero_fr && constant_coeff == zero_fr {
        //     let (_existing_var, mut coeff) = lc.as_ref()[0];
        //     coeff.mul_assign(&multiplier);

        //     let hint = TranspilationVariant::<E>::LeaveAsSingleVariable(coeff);

        //     return hint
        // }
        // else if num_linear_terms == 1 && (contains_constant || free_term_constant != zero_fr) {
        //     let (_, mut constant_coeff) = get_constant_term::<E, Self>(&lc);
        //     let (_, single_var, mut linear_coeff) = get_first_variable_with_coeff::<E, Self>(&lc);

        //     linear_coeff.mul_assign(&multiplier);

        //     constant_coeff.mul_assign(&multiplier);
        //     constant_coeff.sub_assign(&free_term_constant);

        //     let hint = TranspilationVariant::<E>::IntoSingleAdditionGate((linear_coeff, zero_fr, zero_fr, constant_coeff));

        //     return hint;
        // }
        // else
        if num_linear_terms <= 3 {
            // Up to three variables fit in one enforcement gate.
            let (mut a_coef, mut b_coef, mut c_coef) = rewrite_lc_into_single_enforcement_gate(&lc, self, &mut (self.scratch.clone()));
            // we've made a single addition gate, but we may need to scale it (by multiplier)
            // and also account for a RHS in a form of = constant_coeff
            if multiplier == E::Fr::zero() {
                assert!(free_term_constant == E::Fr::zero());
                unreachable!();
                // it's a constraint 0 * LC = 0
            } else {
                //scale
                if multiplier != one_fr {
                    a_coef.mul_assign(&multiplier);
                    b_coef.mul_assign(&multiplier);
                    c_coef.mul_assign(&multiplier);
                    constant_coeff.mul_assign(&multiplier);
                }

                constant_coeff.sub_assign(&free_term_constant);
            }

            let hint = TranspilationVariant::<E>::IntoSingleAdditionGate((a_coef, b_coef, c_coef, constant_coeff));

            return hint;
        } else {
            // Longer LCs chain through multiple addition gates.
            let (first_gate, mut other_coefs) = rewrite_lc_into_multiple_enforcement_gates(&lc, self, &mut (self.scratch.clone()));
            let (mut a_coef, mut b_coef, mut c_coef) = first_gate;
            if multiplier == E::Fr::zero() {
                assert!(free_term_constant == E::Fr::zero());
                // it's a constraint 0 * LC = 0
            } else {
                //scale
                if multiplier != one_fr {
                    a_coef.mul_assign(&multiplier);
                    b_coef.mul_assign(&multiplier);
                    c_coef.mul_assign(&multiplier);
                    constant_coeff.mul_assign(&multiplier);

                    for c in other_coefs.iter_mut() {
                        c.mul_assign(&multiplier);
                    }
                }

                constant_coeff.sub_assign(&free_term_constant);
            }

            let hint = TranspilationVariant::<E>::IntoMultipleAdditionGates((a_coef, b_coef, c_coef, constant_coeff), other_coefs);

            return hint;
        }
    }

    /// Rewrites `lc` into a fresh variable plus the gate(s) that define it,
    /// returning the new variable together with the hint describing the
    /// rewrite. `multiplier`/`free_term_constant` play the same role as in
    /// `enforce_lc_as_gates`.
    fn rewrite_lc(&mut self, lc: &LinearCombination<E>, multiplier: E::Fr, free_term_constant: E::Fr) -> (Variable, TranspilationVariant<E>) {
        let zero_fr = E::Fr::zero();
        let one_fr = E::Fr::one();

        let (contains_constant, num_linear_terms) = num_unique_values::<E, Self>(&lc, &mut self.scratch);
        assert!(num_linear_terms > 0);
        if num_linear_terms == 1 && !contains_constant && free_term_constant == zero_fr {
            // A single bare variable needs no new gate; just scale its coefficient.
            let (existing_var, mut coeff) = lc.as_ref()[0];
            coeff.mul_assign(&multiplier);

            let hint = TranspilationVariant::<E>::LeaveAsSingleVariable(coeff);

            return (existing_var, hint);
        } else if num_linear_terms <= 2 {
            let (new_var, (mut a_coef, mut b_coef, mut c_coef, mut constant_coeff)) = rewrite_lc_into_single_addition_gate(&lc, self, &mut (self.scratch.clone()));
            // we've made a single addition gate, but we may need to scale it (by multiplier)
            // and also account for a RHS in a form of = constant_coeff
            if multiplier == E::Fr::zero() {
                assert!(free_term_constant == E::Fr::zero());
                unreachable!();
                // it's a constraint 0 * LC = 0
            } else {
                //scale
                if multiplier != one_fr {
                    a_coef.mul_assign(&multiplier);
                    b_coef.mul_assign(&multiplier);
                    c_coef.mul_assign(&multiplier);
                    constant_coeff.mul_assign(&multiplier);
                }

                constant_coeff.sub_assign(&free_term_constant);
            }

            let hint = TranspilationVariant::<E>::IntoSingleAdditionGate((a_coef, b_coef, c_coef, constant_coeff));

            return (new_var, hint);
        } else {
            let (new_var, first_gate, mut other_coefs) = rewrite_lc_into_multiple_addition_gates(&lc, self, &mut (self.scratch.clone()));
            let (mut a_coef, mut b_coef, mut c_coef, mut constant_coeff) = first_gate;
            if multiplier == E::Fr::zero() {
                assert!(free_term_constant == E::Fr::zero());
                // it's a constraint 0 * LC = 0
            } else {
                //scale
                if multiplier != one_fr {
                    a_coef.mul_assign(&multiplier);
                    b_coef.mul_assign(&multiplier);
                    c_coef.mul_assign(&multiplier);
                    constant_coeff.mul_assign(&multiplier);

                    for c in other_coefs.iter_mut() {
                        c.mul_assign(&multiplier);
                    }
                }

                constant_coeff.sub_assign(&free_term_constant);
            }

            let hint = TranspilationVariant::<E>::IntoMultipleAdditionGates((a_coef, b_coef, c_coef, constant_coeff), other_coefs);

            return (new_var, hint);
        }
    }
}
/// `ConstraintSystem` backend that, instead of synthesizing, classifies
/// every enforced constraint and records a transpilation hint for it.
impl<E: Engine> crate::ConstraintSystem<E> for Transpiler<E>
{
    type Root = Self;

    fn one() -> crate::Variable {
        crate::Variable::new_unchecked(crate::Index::Input(0))
    }

    /// Hands out the next auxiliary variable index; the value closure is
    /// never invoked (no witness is computed during transpilation).
    fn alloc<F, A, AR>(&mut self, _: A, _f: F) -> Result<crate::Variable, crate::SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        self.current_plonk_aux_idx += 1;

        Ok(crate::Variable::new_unchecked(crate::Index::Aux(self.current_plonk_aux_idx)))
    }

    /// Hands out the next input variable index; the value closure is never
    /// invoked.
    fn alloc_input<F, A, AR>(
        &mut self,
        _: A,
        _f: F,
    ) -> Result<crate::Variable, crate::SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        // we do +1 to not encounter Input(0)
        self.current_plonk_input_idx += 1;

        Ok(crate::Variable::new_unchecked(crate::Index::Input(self.current_plonk_input_idx)))
    }

    /// Classifies the constraint `a * b = c` by which of its LCs are
    /// constants and records the corresponding transpilation hint.
    fn enforce<A, AR, LA, LB, LC>(&mut self, _ann: A, a: LA, b: LB, c: LC)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
        LA: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
        LB: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
        LC: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
    {
        // let ann: String = _ann().into();
        // println!("Enforce {}", ann);

        let zero_fr = E::Fr::zero();
        let one_fr = E::Fr::one();

        let mut negative_one_fr = E::Fr::one();
        negative_one_fr.negate();

        // we need to determine the type of transformation constraint

        // let's handle trivial cases first

        // A or B or C are just constant terms

        // Deduplicate repeated variables inside each LC before classifying.
        let a = deduplicate_stable::<E, Self>(a(crate::LinearCombination::zero()), &mut self.deduplication_scratch);
        let b = deduplicate_stable::<E, Self>(b(crate::LinearCombination::zero()), &mut self.deduplication_scratch);
        let c = deduplicate_stable::<E, Self>(c(crate::LinearCombination::zero()), &mut self.deduplication_scratch);

        let (a_is_constant, a_constant_coeff) = is_constant::<E, Self>(&a);
        let (b_is_constant, b_constant_coeff) = is_constant::<E, Self>(&b);
        let (c_is_constant, c_constant_coeff) = is_constant::<E, Self>(&c);

        match (a_is_constant, b_is_constant, c_is_constant) {
            (true, true, true) => {
                unreachable!("R1CS has a gate 1 * 1 = 1");
            },
            (true, false, true) | (false, true, true) => {
                // we have something like c0 * LC = c1
                let lc = if !a_is_constant {
                    a
                } else if !b_is_constant {
                    b
                } else {
                    unreachable!("Either A or B LCs are constant");
                };

                let multiplier = if a_is_constant {
                    a_constant_coeff
                } else if b_is_constant {
                    b_constant_coeff
                } else {
                    unreachable!("Must take multiplier from A or B");
                };

                let hint_lc = self.enforce_lc_as_gates(lc, multiplier, c_constant_coeff);

                let current_lc_number = self.increment_lc_number();

                let hint = TranspilationVariant::<E>::MergeLinearCombinations((MergeLcVariant::MergeACThroughConstantB, one_fr, Box::new(hint_lc)));

                // println!("Hint = {:?}", hint);

                self.hints.push((current_lc_number, hint));
            },
            (false, false, true) => {
                // potential quadratic gate
                let (is_quadratic_gate, coeffs) = is_quadratic_gate::<E, Self>(&a, &b, &c, &mut self.scratch);
                if is_quadratic_gate {
                    let current_lc_number = self.increment_lc_number();

                    let hint = TranspilationVariant::<E>::IntoQuandaticGate(coeffs);

                    // println!("Hint = {:?}", hint);

                    self.hints.push((current_lc_number, hint));

                    return;
                }

                // Not quadratic: rewrite A and B separately; C stays a constant.
                let (_new_a_var, hint_a) = self.rewrite_lc(&a, one_fr, zero_fr);
                let (_new_b_var, hint_b) = self.rewrite_lc(&b, one_fr, zero_fr);

                let current_lc_number = self.increment_lc_number();

                let hint_c = TranspilationVariant::<E>::IsConstant(c_constant_coeff);

                let hint = TranspilationVariant::<E>::TransformLc(Box::new((hint_a, hint_b, hint_c)));

                // println!("Hint = {:?}", hint);

                self.hints.push((current_lc_number, hint));
            },
            (true, false, false) | (false, true, false) => {
                // LC * 1 = LC
                let multiplier = if a_is_constant {
                    a_constant_coeff
                } else if b_is_constant {
                    b_constant_coeff
                } else {
                    unreachable!()
                };

                let lc_variant = if a_is_constant {
                    MergeLcVariant::MergeBCThroughConstantA
                } else {
                    MergeLcVariant::MergeACThroughConstantB
                };

                if multiplier == zero_fr {
                    // LC_AB * 0 = LC_C => LC_C == 0
                    let hint_lc = self.enforce_lc_as_gates(c, one_fr, zero_fr);

                    let current_lc_number = self.increment_lc_number();

                    let hint = TranspilationVariant::<E>::MergeLinearCombinations((MergeLcVariant::CIsTheOnlyMeaningful, one_fr, Box::new(hint_lc)));

                    // println!("Hint = {:?}", hint);

                    self.hints.push((current_lc_number, hint));

                    return;
                }

                let mut final_lc = if !a_is_constant {
                    a
                } else if !b_is_constant {
                    b
                } else {
                    unreachable!()
                };

                if multiplier != one_fr {
                    for (_, c) in final_lc.0.iter_mut() {
                        c.mul_assign(&multiplier);
                    }
                }

                // let final_lc = final_lc - &c;
                let final_lc = subtract_lcs_with_dedup_stable::<E, Self>(final_lc, c, &mut self.deduplication_scratch);

                // we rewrite final LC taking into account the constant inside and no external constant
                let hint_lc = self.enforce_lc_as_gates(final_lc, one_fr, zero_fr);

                let current_lc_number = self.increment_lc_number();

                let hint = TranspilationVariant::<E>::MergeLinearCombinations((lc_variant, one_fr, Box::new(hint_lc)));

                // println!("Hint = {:?}", hint);

                self.hints.push((current_lc_number, hint));

                return;
            },
            (true, true, false) => {
                // A and B are some constants
                let mut final_constant = a_constant_coeff;
                final_constant.mul_assign(&b_constant_coeff);

                let hint_lc = self.enforce_lc_as_gates(c, one_fr, final_constant);

                let current_lc_number = self.increment_lc_number();

                let hint = TranspilationVariant::<E>::MergeLinearCombinations((MergeLcVariant::CIsTheOnlyMeaningful, one_fr, Box::new(hint_lc)));

                // println!("Hint = {:?}", hint);

                self.hints.push((current_lc_number, hint));
            },
            (false, false, false) => {
                // potentially it can still be quadratic
                let (is_quadratic_gate, coeffs) = is_quadratic_gate::<E, Self>(&a, &b, &c, &mut self.scratch);
                if is_quadratic_gate {
                    let current_lc_number = self.increment_lc_number();

                    let hint = TranspilationVariant::<E>::IntoQuandaticGate(coeffs);

                    // println!("Hint = {:?}", hint);

                    self.hints.push((current_lc_number, hint));

                    return;
                }

                // rewrite into addition gates and multiplication gates
                let (_new_a_var, hint_a) = self.rewrite_lc(&a, one_fr, zero_fr);
                let (_new_b_var, hint_b) = self.rewrite_lc(&b, one_fr, zero_fr);
                let (_new_c_var, hint_c) = self.rewrite_lc(&c, one_fr, zero_fr);

                let current_lc_number = self.increment_lc_number();

                let hint = TranspilationVariant::<E>::TransformLc(Box::new((hint_a, hint_b, hint_c)));

                // println!("Hint = {:?}", hint);

                self.hints.push((current_lc_number, hint));
            }
        }
    }

    fn push_namespace<NR, N>(&mut self, _: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn pop_namespace(&mut self) {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }
}
// List of heuristics
use crate::{LinearCombination, ConstraintSystem, Variable};
/// Checks whether `a * b = c` can be expressed as a single quadratic gate
/// `q0 + q1*v + q2*v^2 = 0` over one shared variable `v`. Returns the flag
/// and, when true, the `(constant, linear, quadratic)` coefficients.
fn is_quadratic_gate<E: Engine, CS: ConstraintSystem<E>>(
    a: &LinearCombination<E>,
    b: &LinearCombination<E>,
    c: &LinearCombination<E>,
    scratch: &mut HashSet::<crate::cs::Variable>
) -> (bool, (E::Fr, E::Fr, E::Fr)) {
    let zero = E::Fr::zero();

    let (_a_containts_constant, a_constant_coeff) = get_constant_term::<E, CS>(&a);
    let (_b_containts_constant, b_constant_coeff) = get_constant_term::<E, CS>(&b);
    let (_c_containts_constant, c_constant_coeff) = get_constant_term::<E, CS>(&c);

    let (a_is_linear, a_linear_var, a_linear_var_coeff) = is_linear_term::<E, CS>(&a, scratch);
    let (b_is_linear, b_linear_var, b_linear_var_coeff) = is_linear_term::<E, CS>(&b, scratch);
    let (c_is_linear, c_linear_var, c_linear_var_coeff) = is_linear_term::<E, CS>(&c, scratch);

    let (c_is_constant, _) = is_constant::<E, CS>(&c);

    // Quadratic only if A, B (and C, unless constant) are linear in the
    // SAME single variable.
    let is_quadratic;
    if c_is_constant {
        is_quadratic = a_is_linear && b_is_linear && a_linear_var == b_linear_var;
    } else {
        if a_is_linear && b_is_linear && c_is_linear && a_linear_var == b_linear_var && b_linear_var == c_linear_var {
            is_quadratic = true;
        } else {
            return (false, (zero, zero, zero));
        }
    }

    if is_quadratic {
        // something like (v - 1) * (v - 1) = (v - 1)
        // and we can make a quadratic gate
        let mut quadratic_term = a_linear_var_coeff;
        quadratic_term.mul_assign(&b_linear_var_coeff);

        // Cross terms a0*b1 + b0*a1 form the linear coefficient.
        let mut linear_term_0 = a_constant_coeff;
        linear_term_0.mul_assign(&b_linear_var_coeff);

        let mut linear_term_1 = b_constant_coeff;
        linear_term_1.mul_assign(&a_linear_var_coeff);

        let mut linear_term = linear_term_0;
        linear_term.add_assign(&linear_term_1);
        if c_is_linear {
            linear_term.sub_assign(&c_linear_var_coeff);
        }

        let mut constant_term = a_constant_coeff;
        constant_term.mul_assign(&b_constant_coeff);

        if c_constant_coeff != zero {
            constant_term.sub_assign(&c_constant_coeff);
        }

        return (true, (constant_term, linear_term, quadratic_term));
    }

    (false, (zero, zero, zero))
}
/// Checks whether `lc` is a pure constant: either empty (formally the
/// constant zero) or a single term on the `CS::one()` variable.
/// Returns the constant's value alongside the flag; `(false, 0)` otherwise.
fn is_constant<E: Engine, CS: ConstraintSystem<E>>(lc: &LinearCombination<E>) -> (bool, E::Fr) {
    // formally it's an empty LC, so it's a constant 0
    match lc.as_ref().len() {
        0 => (true, E::Fr::zero()),
        1 => {
            let (found, value) = get_constant_term::<E, CS>(lc);
            if found {
                (true, value)
            } else {
                (false, E::Fr::zero())
            }
        }
        _ => (false, E::Fr::zero()),
    }
}
/// Looks up the coefficient attached to the constant (`CS::one()`) variable
/// in `lc`. Returns `(false, 0)` when no constant term is present.
fn get_constant_term<E: Engine, CS: ConstraintSystem<E>>(lc: &LinearCombination<E>) -> (bool, E::Fr) {
    let constant_var = CS::one();
    match lc.as_ref().iter().find(|(var, _)| var == &constant_var) {
        Some((_, coeff)) => (true, *coeff),
        None => (false, E::Fr::zero()),
    }
}
/// Returns the first non-constant variable of `lc`, or `(false, CS::one())`
/// when the combination contains only the constant term (or is empty).
fn get_first_variable<E: Engine, CS: ConstraintSystem<E>>(lc: &LinearCombination<E>) -> (bool, Variable) {
    let constant_var = CS::one();
    match lc.as_ref().iter().find(|(var, _)| var != &constant_var) {
        Some((var, _)) => (true, *var),
        None => (false, constant_var),
    }
}
/// Returns the first non-constant variable of `lc` together with its
/// coefficient, or `(false, CS::one(), 0)` when none exists.
fn get_first_variable_with_coeff<E: Engine, CS: ConstraintSystem<E>>(lc: &LinearCombination<E>) -> (bool, Variable, E::Fr) {
    let constant_var = CS::one();
    match lc.as_ref().iter().find(|(var, _)| var != &constant_var) {
        Some((var, coeff)) => (true, *var, *coeff),
        None => (false, constant_var, E::Fr::zero()),
    }
}
/// Counts the distinct non-constant variables in `lc` and reports whether a
/// constant (`CS::one()`) term is present. `scratch` must be empty on entry
/// and is left empty on return.
fn num_unique_values<E: Engine, CS: ConstraintSystem<E>>(lc: &LinearCombination<E>, scratch: &mut HashSet<crate::cs::Variable>) -> (bool, usize) {
    debug_assert!(scratch.is_empty());
    let constant_var = CS::one();
    let mut has_constant = false;
    for (var, _) in lc.as_ref().iter() {
        if var == &constant_var {
            has_constant = true;
        } else {
            scratch.insert(*var);
        }
    }
    let unique_vars = scratch.len();
    scratch.clear();
    (has_constant, unique_vars)
}
/// Checks whether `lc` contains exactly one distinct non-constant variable,
/// i.e. has the form `coeff * v (+ constant)`.
///
/// Returns `(true, v, coeff)` on success, `(false, CS::one(), 0)` otherwise.
/// `scratch` must be empty on entry and is left empty on return.
///
/// NOTE(review): if the same variable occurs more than once, `linear_coeff`
/// keeps only the LAST occurrence's coefficient rather than the sum —
/// presumably callers always pass deduplicated combinations; verify.
fn is_linear_term<E: Engine, CS: ConstraintSystem<E>>(lc: &LinearCombination<E>, scratch: &mut HashSet::<crate::cs::Variable>) -> (bool, Variable, E::Fr) {
    let cs_one = CS::one();
    debug_assert!(scratch.is_empty());
    let mut linear_coeff = E::Fr::zero();
    for (var, coeff) in lc.as_ref().iter() {
        if var != &cs_one {
            scratch.insert(*var);
            linear_coeff = *coeff;
        }
    }
    let num_unique_without_constant = scratch.len();
    if num_unique_without_constant == 1 {
        // drain() also empties the scratch set for the next caller
        let terms: Vec<_> = scratch.drain().collect();
        let term = terms[0];
        return (true, term, linear_coeff)
    } else {
        scratch.clear();
        return (false, cs_one, E::Fr::zero())
    }
}
/// Extracts the (up to three) coefficients of a short, constant-free linear
/// combination so it can be enforced with a single gate. Coefficients are
/// taken in encounter order; any term past the second lands in the third
/// slot, mirroring the transpiler's extraction convention.
/// Panics if a constant term is encountered.
fn rewrite_lc_into_single_enforcement_gate<E: Engine, CS: ConstraintSystem<E>>(
    lc: &LinearCombination<E>,
    _cs: &mut CS,
    scratch: &mut HashSet<crate::cs::Variable>
) -> (E::Fr, E::Fr, E::Fr) {
    let (has_constant, unique_terms) = num_unique_values::<E, CS>(&lc, scratch);
    assert!(unique_terms > 0 && unique_terms <= 3);
    assert!(!has_constant);
    // Fill the three gate slots in encounter order.
    let constant_marker = CS::one();
    let mut slots = (E::Fr::zero(), E::Fr::zero(), E::Fr::zero());
    let mut filled = 0usize;
    for (var, coeff) in lc.as_ref().iter() {
        assert!(var != &constant_marker, "must not encounter constant terms here!");
        match filled {
            0 => slots.0 = *coeff,
            1 => slots.1 = *coeff,
            _ => slots.2 = *coeff,
        }
        filled += 1;
    }
    slots
}
/// Rewrites a linear combination with more than three unique variables (and
/// no constant term) into a chain of enforcement gates.
///
/// The first gate is a full addition `a0 * v0 + a1 * v1 - acc = 0`; each
/// subsequent gate folds one more variable into a fresh accumulator, and the
/// final gate closes the chain without allocating a new variable. The
/// intermediate accumulators are allocated on `cs` (their witness values are
/// filled in later during replay, so the value closures are never invoked).
///
/// Returns the first gate's coefficients `(a0, a1, -1)` and the remaining
/// chain coefficients; the last two entries belong to the closing gate.
/// Panics if `lc` contains a constant term or has three or fewer terms.
fn rewrite_lc_into_multiple_enforcement_gates<E: Engine, CS: ConstraintSystem<E>>(
    lc: &LinearCombination<E>,
    cs: &mut CS,
    scratch: &mut HashSet<crate::cs::Variable>
) -> ((E::Fr, E::Fr, E::Fr), Vec<E::Fr>) // first rewrite is full, than it's Z + a * X - Y = 0
{
    assert!(lc.as_ref().len() > 3);
    let (_contains_constant, num_linear_terms) = num_unique_values::<E, CS>(&lc, scratch);
    assert!(num_linear_terms > 3);
    // Take the first two coefficients for the initial full addition gate.
    let cs_one = CS::one();
    let mut found_a = false;
    let mut a_coeff = E::Fr::zero();
    let mut b_coeff = E::Fr::zero();
    let mut it = lc.as_ref().iter();
    for (var, coeff) in &mut it {
        if var != &cs_one {
            if !found_a {
                found_a = true;
                a_coeff = *coeff;
            } else {
                b_coeff = *coeff;
                break;
            }
        } else {
            panic!("Must not encounter constant here");
        }
    }
    // we've consumed 2 values
    let mut c_coeff = E::Fr::one();
    c_coeff.negate();
    // First accumulator variable; the witness is assigned during replay.
    let _new_var = cs.alloc(|| "allocate addition gate",
        || {
            unreachable!()
        }
    ).expect("must allocate an extra variable");
    let first_addition_gate = (a_coeff, b_coeff, c_coeff);
    // next gates are 1*old_new_var + b*original_var - new_new_var = 0
    let mut extra_coefficients = Vec::with_capacity(lc.as_ref().len() - 2);
    let cycle_len = it.len();
    assert!(cycle_len > 1); // otherwise we could have made one gate
    let mut gates_created = 0;
    loop {
        let (var, coeff) = it.next().expect("there should be a chain variable");
        if var != &cs_one {
            if gates_created != cycle_len - 2 {
                // Middle-of-chain gate: allocate a fresh accumulator.
                extra_coefficients.push(*coeff);
                let _new_var = cs.alloc(|| "allocate addition gate",
                    || {
                        unreachable!()
                    }
                ).expect("must allocate an extra variable");
                gates_created += 1;
            } else {
                // Closing gate consumes the final two variables at once,
                // so no additional accumulator is needed.
                let (_last_var, last_coeff) = it.next().expect("there should be a last chain variable");
                extra_coefficients.push(*coeff);
                extra_coefficients.push(*last_coeff);
                break;
            }
        } else {
            panic!("Cycle mismatch: constant term must have been split before");
        }
    }
    (first_addition_gate, extra_coefficients)
}
/// Allocates a fresh variable representing a short linear combination (at
/// most three unique variables plus an optional constant) and returns it
/// together with the addition-gate coefficients `(a, b, -1, constant)`.
/// The new variable's witness is assigned later during replay, so the
/// allocation closure is never evaluated here.
fn rewrite_lc_into_single_addition_gate<E: Engine, CS: ConstraintSystem<E>>(
    lc: &LinearCombination<E>,
    cs: &mut CS,
    scratch: &mut HashSet<crate::cs::Variable>
) -> (Variable, (E::Fr, E::Fr, E::Fr, E::Fr)) {
    let (_has_constant, unique_terms) = num_unique_values::<E, CS>(&lc, scratch);
    assert!(unique_terms > 0 && unique_terms <= 3);
    // Collect the constant term and the first two variable coefficients.
    let constant_marker = CS::one();
    let mut free_term = E::Fr::zero();
    let mut a_coeff = E::Fr::zero();
    let mut b_coeff = E::Fr::zero();
    let mut non_constant_seen = false;
    for (var, coeff) in lc.as_ref().iter() {
        if var == &constant_marker {
            free_term = *coeff;
        } else if !non_constant_seen {
            non_constant_seen = true;
            a_coeff = *coeff;
        } else {
            b_coeff = *coeff;
        }
    }
    let mut output_coeff = E::Fr::one();
    output_coeff.negate();
    let fresh_var = cs.alloc(
        || "allocate addition gate",
        || unreachable!(),
    ).expect("must allocate an extra variable");
    (fresh_var, (a_coeff, b_coeff, output_coeff, free_term))
}
/// Splits a long linear combination (more than two unique variables, plus an
/// optional constant) into a first full addition gate followed by a chain of
/// accumulator gates of the shape `acc + coeff * var - new_acc = 0`.
///
/// Allocates every intermediate accumulator on `cs` (witnesses are filled in
/// during replay) and returns the final accumulator variable, the first
/// gate's coefficients `(a, b, -1, constant)`, and the per-step chain
/// coefficients.
fn rewrite_lc_into_multiple_addition_gates<E: Engine, CS: ConstraintSystem<E>>(
    lc: &LinearCombination<E>,
    cs: &mut CS,
    scratch: &mut HashSet<crate::cs::Variable>
) -> (Variable, (E::Fr, E::Fr, E::Fr, E::Fr), Vec<E::Fr>) // first rewrite is full, than it's Z + a * X - Y = 0
{
    assert!(lc.as_ref().len() > 2);
    let (_has_constant, unique_vars) = num_unique_values::<E, CS>(&lc, scratch);
    assert!(unique_vars > 2);
    let constant_marker = CS::one();
    let (_, free_term) = get_constant_term::<E, CS>(&lc);
    // Consume the first two non-constant terms for the initial full gate;
    // a constant term may appear anywhere and is simply skipped.
    let mut a_coeff = E::Fr::zero();
    let mut b_coeff = E::Fr::zero();
    let mut first_taken = false;
    let mut terms = lc.as_ref().iter();
    for (var, coeff) in &mut terms {
        if var != &constant_marker {
            if !first_taken {
                first_taken = true;
                a_coeff = *coeff;
            } else {
                b_coeff = *coeff;
                break;
            }
        }
    }
    let mut negated_one = E::Fr::one();
    negated_one.negate();
    // First accumulator variable.
    let mut accumulator = cs.alloc(
        || "allocate addition gate",
        || unreachable!(),
    ).expect("must allocate an extra variable");
    let first_addition_gate = (a_coeff, b_coeff, negated_one, free_term);
    // Each remaining non-constant variable extends the chain by one gate.
    let mut chain_coeffs = Vec::with_capacity(lc.as_ref().len() - 2);
    for (var, coeff) in terms {
        if var != &constant_marker {
            chain_coeffs.push(*coeff);
            accumulator = cs.alloc(
                || "allocate addition gate",
                || unreachable!(),
            ).expect("must allocate an extra variable");
        }
    }
    (accumulator, first_addition_gate, chain_coeffs)
}
/// Merges duplicate variables in `lc` by summing their coefficients.
/// NOTE: the result order follows `HashMap::drain`, so it is NOT stable;
/// use `deduplicate_stable` when ordering matters.
/// `scratch` must be empty on entry and is left empty on return.
fn deduplicate<E: Engine, CS: ConstraintSystem<E>>(
    lc: LinearCombination<E>,
    scratch: &mut HashMap<crate::cs::Variable, E::Fr>
) -> LinearCombination<E> {
    assert!(scratch.is_empty());
    for (var, coeff) in lc.0.into_iter() {
        match scratch.get_mut(&var) {
            Some(accumulated) => accumulated.add_assign(&coeff),
            None => {
                scratch.insert(var, coeff);
            }
        }
    }
    let merged: Vec<(Variable, E::Fr)> = scratch.drain().collect();
    LinearCombination(merged)
}
/// Merges duplicate variables in `lc`, preserving the order in which each
/// variable first appears, and drops any term whose merged coefficient is
/// zero. `scratch` (variable -> position in the output vector) must be empty
/// on entry and is cleared before returning.
fn deduplicate_stable<E: Engine, CS: ConstraintSystem<E>>(
    lc: LinearCombination<E>,
    scratch: &mut HashMap<crate::cs::Variable, usize>
) -> LinearCombination<E> {
    assert!(scratch.is_empty());
    if lc.as_ref().len() == 0 {
        return lc;
    }
    let mut merged: Vec<(crate::cs::Variable, E::Fr)> = Vec::with_capacity(lc.as_ref().len());
    for (var, coeff) in lc.0.into_iter() {
        if let Some(&position) = scratch.get(&var) {
            // Repeated variable: fold into the slot created at first sight.
            merged[position].1.add_assign(&coeff);
        } else {
            scratch.insert(var, merged.len());
            merged.push((var, coeff));
        }
    }
    // Terms that cancelled to zero are not emitted.
    merged.retain(|(_var, coeff)| !coeff.is_zero());
    scratch.clear();
    LinearCombination(merged)
}
/// Computes `lc_0 - lc_1` as a single deduplicated linear combination,
/// preserving first-occurrence ordering of variables and dropping terms that
/// cancel to zero. `scratch` (variable -> position in the accumulator) must
/// be empty on entry and is cleared before returning.
fn subtract_lcs_with_dedup_stable<E: Engine, CS: ConstraintSystem<E>>(
    lc_0: LinearCombination<E>,
    lc_1: LinearCombination<E>,
    scratch: &mut HashMap<crate::cs::Variable, usize>
) -> LinearCombination<E> {
    assert!(scratch.is_empty());
    if lc_0.as_ref().len() == 0 && lc_1.as_ref().len() == 0 {
        return lc_0;
    }
    let mut accumulated: Vec<(crate::cs::Variable, E::Fr)> = Vec::with_capacity(lc_0.as_ref().len() + lc_1.as_ref().len());
    // Terms of lc_0 enter with their sign kept.
    for (var, coeff) in lc_0.0.into_iter() {
        if let Some(&position) = scratch.get(&var) {
            accumulated[position].1.add_assign(&coeff);
        } else {
            scratch.insert(var, accumulated.len());
            accumulated.push((var, coeff));
        }
    }
    // Terms of lc_1 are subtracted: folded into an existing slot, or
    // inserted with a negated coefficient when seen for the first time.
    for (var, coeff) in lc_1.0.into_iter() {
        if let Some(&position) = scratch.get(&var) {
            accumulated[position].1.sub_assign(&coeff);
        } else {
            let mut negated = coeff;
            negated.negate();
            scratch.insert(var, accumulated.len());
            accumulated.push((var, negated));
        }
    }
    // Fully cancelled terms are dropped from the result.
    accumulated.retain(|(_var, coeff)| !coeff.is_zero());
    scratch.clear();
    LinearCombination(accumulated)
}
/// Removes the constant (`CS::one()`) term from `lc`, returning the remaining
/// combination and the removed coefficient (zero when no constant term is
/// present). Uses `swap_remove`, so the order of the remaining terms may
/// change.
fn split_constant_term<E: Engine, CS: ConstraintSystem<E>>(
    mut lc: LinearCombination<E>,
) -> (LinearCombination<E>, E::Fr) {
    let constant_var = CS::one();
    let found_at = lc.0.iter().position(|(var, _)| var == &constant_var);
    match found_at {
        Some(index) => {
            let (_, coeff) = lc.0.swap_remove(index);
            (lc, coeff)
        }
        None => (lc, E::Fr::zero()),
    }
}
/// Adaptor that replays a previously transpiled R1CS circuit into a PLONK
/// constraint system `CS`, consuming the hints recorded by the transpiler
/// in the same constraint order.
pub struct Adaptor<'a, E: Engine, CS: PlonkConstraintSystem<E> + 'a> {
    // Target PLONK constraint system that receives the generated gates.
    cs: &'a mut CS,
    // Recorded (constraint index, rewriting strategy) pairs, consumed in
    // order by `get_next_hint`.
    hints: &'a Vec<(usize, TranspilationVariant<E>)>,
    // Index of the R1CS constraint currently being replayed.
    current_constraint_index: usize,
    // Position of the next hint to consume from `hints`.
    current_hint_index: usize,
    // Scratch set reused by the linearity/uniqueness heuristics.
    scratch: HashSet<crate::cs::Variable>,
    // deduplication_scratch: HashMap<crate::cs::Variable, E::Fr>,
    // Scratch map reused by `deduplicate_stable` (variable -> output index).
    deduplication_scratch: HashMap<crate::cs::Variable, usize>,
}
impl<'a, E: Engine, CS: PlonkConstraintSystem<E> + 'a> Adaptor<'a, E, CS> {
    // fn get_next_hint(&mut self) -> &(usize, TranspilationVariant<E>) {
    //     let current_hint_index = self.current_hint_index;
    //     let expected_constraint_index = self.current_constraint_index;
    //     let next_hint = &self.hints[current_hint_index];
    //     assert!(next_hint.0 == expected_constraint_index);
    //     self.current_hint_index += 1;
    //     self.current_constraint_index += 1;
    //     next_hint
    // }

    /// Returns (a clone of) the next transpilation hint and advances both the
    /// hint cursor and the constraint counter. Panics if the recorded hint
    /// does not belong to the constraint currently being processed.
    fn get_next_hint(&mut self) -> (usize, TranspilationVariant<E>) {
        let current_hint_index = self.current_hint_index;
        let expected_constraint_index = self.current_constraint_index;
        let next_hint = self.hints[current_hint_index].clone();
        assert!(next_hint.0 == expected_constraint_index);
        self.current_hint_index += 1;
        self.current_constraint_index += 1;
        next_hint
    }

    // make a new variable based on existing ones
    /// Allocates a fresh PLONK variable equal to `a_coeff*a + b_coeff*b +
    /// constant` (with `b` unused when `b_coeff == 0`) and emits the single
    /// addition gate enforcing it. Requires `c_coeff == -1`, so the gate
    /// reads `a_coeff*a + b_coeff*b - new_var + constant = 0`.
    fn make_single_addition_gate(&mut self, lc: &LinearCombination<E>, gate_coeffs: (E::Fr, E::Fr, E::Fr, E::Fr)) -> Result<PlonkVariable, SynthesisError> {
        let zero_fr = E::Fr::zero();
        let mut minus_one_fr = E::Fr::one();
        minus_one_fr.negate();
        let (a_coeff, b_coeff, c_coeff, constant_coeff) = gate_coeffs;
        assert!(c_coeff == minus_one_fr);
        // we can just make an addition gate
        let cs_one = Self::one();
        let it = lc.as_ref().iter();
        if b_coeff.is_zero() {
            // Single-input case: pick the first non-constant variable.
            let mut a_var = PlonkVariable::new_unchecked(PlonkIndex::Aux(0));
            for (var, _) in it {
                if var == &cs_one {
                    continue
                } else {
                    a_var = convert_variable(*var);
                    break;
                }
            }
            let a_value = self.cs.get_value(a_var);
            let new_var = self.cs.alloc(
                || {
                    let mut c_value = a_value?;
                    c_value.mul_assign(&a_coeff);
                    c_value.add_assign(&constant_coeff);
                    // c_value.negate();
                    Ok(c_value)
                    // c = constant + a*a_coeff
                })?;
            self.cs.new_gate((a_var, self.cs.get_dummy_variable(), new_var), (a_coeff, b_coeff, c_coeff, zero_fr, constant_coeff))?;
            Ok(new_var)
        } else {
            // Two-input case: pick the first two non-constant variables.
            let mut a_var = self.cs.get_dummy_variable();
            let mut b_var = self.cs.get_dummy_variable();
            let mut found_a = false;
            for (var, _) in it {
                if var == &cs_one {
                    continue
                } else {
                    if !found_a {
                        found_a = true;
                        a_var = convert_variable(*var);
                    } else {
                        b_var = convert_variable(*var);
                        break;
                    }
                }
            }
            let a_value = self.cs.get_value(a_var);
            let b_value = self.cs.get_value(b_var);
            let new_var = self.cs.alloc(
                || {
                    let a_value = a_value?;
                    let mut b_value = b_value?;
                    b_value.mul_assign(&b_coeff);
                    let mut c_value = a_value;
                    c_value.mul_assign(&a_coeff);
                    c_value.add_assign(&b_value);
                    c_value.add_assign(&constant_coeff);
                    // c_value.negate();
                    Ok(c_value)
                    // c = - constant - a*a_coeff - b*b_coeff
                })?;
            self.cs.new_gate((a_var, b_var, new_var), (a_coeff, b_coeff, c_coeff, zero_fr, constant_coeff))?;
            Ok(new_var)
        }
    }

    // make a new variable based on existing ones
    /// Enforces a short linear combination (up to three variables) with one
    /// gate, allocating no new variable. The LC's constant term is split off
    /// first; the gate's free term comes from `gate_coeffs` instead.
    /// Panics if a constant term survives the split.
    fn enforce_lc_with_single_addition_gate(
        &mut self,
        lc: LinearCombination<E>,
        gate_coeffs: (E::Fr, E::Fr, E::Fr, E::Fr)
    ) -> Result<(), SynthesisError> {
        let zero_fr = E::Fr::zero();
        let mut minus_one_fr = E::Fr::one();
        minus_one_fr.negate();
        let (lc, _const) = split_constant_term::<E, Self>(lc);
        let (a_coeff, b_coeff, c_coeff, constant_coeff) = gate_coeffs;
        // assert!(_const == constant_coeff);
        // we can just make an addition gate
        let cs_one = Self::one();
        let it = lc.as_ref().iter();
        let mut found_a = false;
        let mut found_b = false;
        // Slots b and c are only filled when their coefficients are non-zero;
        // unused slots keep the dummy variable.
        let need_b = !b_coeff.is_zero();
        let need_c = !c_coeff.is_zero();
        let mut a_var = self.cs.get_dummy_variable();
        let mut b_var = self.cs.get_dummy_variable();
        let mut c_var = self.cs.get_dummy_variable();
        for (var, _) in it {
            if var != &cs_one {
                if !found_a {
                    found_a = true;
                    a_var = convert_variable(*var);
                } else if need_b && !found_b{
                    found_b = true;
                    b_var = convert_variable(*var);
                } else if need_c {
                    c_var = convert_variable(*var);
                } else {
                    break;
                }
            } else {
                panic!("must not encounter constant term when enforcing a linear combination");
            }
        }
        self.cs.new_gate((a_var, b_var, c_var), (a_coeff, b_coeff, c_coeff, zero_fr, constant_coeff))?;
        Ok(())
    }

    // make a new variable based on existing ones
    /// Reduces a long linear combination to a single output variable: emits
    /// one full addition gate for the first two variables, then one chained
    /// gate `1*acc + coeff*var - new_acc = 0` per remaining variable.
    /// Returns the final accumulator. Fails with `Unsatisfiable` when the
    /// first gate's `b` coefficient is zero (a chain needs two inputs).
    fn make_chain_of_addition_gates(
        &mut self,
        lc: &LinearCombination<E>,
        first_gate_coeffs: (E::Fr, E::Fr, E::Fr, E::Fr),
        chain_coeffs: Vec<E::Fr>
    ) -> Result<PlonkVariable, SynthesisError> {
        let zero_fr = E::Fr::zero();
        let one_fr = E::Fr::one();
        let mut minus_one_fr = E::Fr::one();
        minus_one_fr.negate();
        let (a_coeff, b_coeff, c_coeff, constant_coeff) = first_gate_coeffs;
        assert!(c_coeff == minus_one_fr);
        if b_coeff.is_zero() {
            return Err(SynthesisError::Unsatisfiable);
        }
        // assert!(!b_coeff.is_zero());
        // we can just make an addition gate
        let cs_one = Self::one();
        let mut it = lc.as_ref().iter();
        let mut new_var = if b_coeff.is_zero() {
            // unreachable: the zero case returned above
            unreachable!()
        } else {
            let mut a_var = self.cs.get_dummy_variable();
            let mut b_var = self.cs.get_dummy_variable();
            let mut found_a = false;
            for (var, _) in &mut it {
                if var == &cs_one {
                    continue
                } else {
                    if !found_a {
                        found_a = true;
                        a_var = convert_variable(*var);
                    } else {
                        b_var = convert_variable(*var);
                        break;
                    }
                }
            }
            let a_value = self.cs.get_value(a_var);
            let b_value = self.cs.get_value(b_var);
            let new_var = self.cs.alloc(
                || {
                    let a_value = a_value?;
                    let mut b_value = b_value?;
                    b_value.mul_assign(&b_coeff);
                    let mut c_value = a_value;
                    c_value.mul_assign(&a_coeff);
                    c_value.add_assign(&b_value);
                    c_value.add_assign(&constant_coeff);
                    // c_value.negate();
                    Ok(c_value)
                    // c = - constant - a*a_coeff - b*b_coeff
                })?;
            self.cs.new_gate((a_var, b_var, new_var), (a_coeff, b_coeff, c_coeff, zero_fr, constant_coeff))?;
            new_var
        };
        // next gates are 1*old_new_var + b*original_var - new_new_var = 0
        let mut chain_iter = chain_coeffs.into_iter();
        for (var, _) in &mut it {
            if var != &cs_one {
                let hint_coeff = chain_iter.next().expect("chain coefficient must exist");
                let original_var = convert_variable(*var);
                let original_var_value = self.cs.get_value(original_var);
                let new_var_value = self.cs.get_value(new_var);
                let old_new_var = new_var;
                new_var = self.cs.alloc(
                    || {
                        let mut new = original_var_value?;
                        new.mul_assign(&hint_coeff);
                        new.add_assign(&new_var_value?);
                        Ok(new)
                    })?;
                self.cs.new_gate((old_new_var, original_var, new_var), (one_fr, hint_coeff, minus_one_fr, zero_fr, zero_fr))?;
            }
        }
        // Every recorded chain coefficient must have been consumed.
        assert!(chain_iter.next().is_none());
        Ok(new_var)
    }

    // make a new variable based on existing ones
    /// Enforces a long linear combination to equal zero using a chain of
    /// addition gates: one full first gate, middle chain gates that each
    /// allocate a fresh accumulator, and a closing gate that consumes the
    /// final two variables without a new allocation. The LC's constant term
    /// is split off first; the free term comes from `first_gate_coeffs`.
    fn enforce_lc_using_chain_of_addition_gates(
        &mut self,
        lc: LinearCombination<E>,
        first_gate_coeffs: (E::Fr, E::Fr, E::Fr, E::Fr),
        chain_coeffs: Vec<E::Fr>
    ) -> Result<(), SynthesisError> {
        let zero_fr = E::Fr::zero();
        let one_fr = E::Fr::one();
        let mut minus_one_fr = E::Fr::one();
        minus_one_fr.negate();
        let (lc, _const) = split_constant_term::<E, Self>(lc);
        let (a_coeff, b_coeff, c_coeff, constant_coeff) = first_gate_coeffs;
        // assert!(_const == constant_coeff);
        assert!(c_coeff == minus_one_fr);
        if b_coeff.is_zero() {
            return Err(SynthesisError::Unsatisfiable);
        }
        // we can just make an addition gate
        let cs_one = Self::one();
        let mut it = lc.as_ref().iter();
        let mut new_var = if b_coeff.is_zero() {
            // unreachable: the zero case returned above
            unreachable!()
        } else {
            let mut a_var = self.cs.get_dummy_variable();
            let mut b_var = self.cs.get_dummy_variable();
            let mut found_a = false;
            for (var, _) in &mut it {
                if var == &cs_one {
                    continue
                } else {
                    if !found_a {
                        found_a = true;
                        a_var = convert_variable(*var);
                    } else {
                        b_var = convert_variable(*var);
                        break;
                    }
                }
            }
            let a_value = self.cs.get_value(a_var);
            let b_value = self.cs.get_value(b_var);
            let new_var = self.cs.alloc(
                || {
                    let a_value = a_value?;
                    let mut b_value = b_value?;
                    b_value.mul_assign(&b_coeff);
                    let mut c_value = a_value;
                    c_value.mul_assign(&a_coeff);
                    c_value.add_assign(&b_value);
                    c_value.add_assign(&constant_coeff);
                    // c_value.negate();
                    Ok(c_value)
                    // c = - constant - a*a_coeff - b*b_coeff
                })?;
            self.cs.new_gate((a_var, b_var, new_var), (a_coeff, b_coeff, c_coeff, zero_fr, constant_coeff))?;
            new_var
        };
        // next gates are 1*old_new_var + b*original_var - new_new_var = 0
        let cycle_len = chain_coeffs.len();
        let mut chain_iter = chain_coeffs.into_iter();
        assert!(cycle_len > 1);
        let mut gates_created = 0;
        loop {
            let (var, _) = it.next().expect("there should be a chain variable");
            if var != &cs_one {
                if gates_created != cycle_len - 2 {
                    // Middle-of-chain gate: fold one variable, allocate a
                    // fresh accumulator.
                    let hint_coeff = chain_iter.next().expect("there should be a chain coeff");
                    let original_var = convert_variable(*var);
                    let original_var_value = self.cs.get_value(original_var);
                    let new_var_value = self.cs.get_value(new_var);
                    let old_new_var = new_var;
                    new_var = self.cs.alloc(
                        || {
                            let mut new = original_var_value?;
                            new.mul_assign(&hint_coeff);
                            new.add_assign(&new_var_value?);
                            Ok(new)
                        })?;
                    self.cs.new_gate((old_new_var, original_var, new_var), (one_fr, hint_coeff, minus_one_fr, zero_fr, zero_fr))?;
                    gates_created += 1;
                } else {
                    // Closing gate: consume the final two variables at once,
                    // enforcing the whole chain to equal zero.
                    let (last_var, _) = it.next().expect("there should be a last chain variable");
                    let hint_coeff = chain_iter.next().expect("there should be a chain coeff");
                    let hint_coeff_last = chain_iter.next().expect("there should be a last chain coeff");
                    let original_var = convert_variable(*var);
                    let original_last_var = convert_variable(*last_var);
                    self.cs.new_gate((new_var, original_var, original_last_var), (one_fr, hint_coeff, hint_coeff_last, zero_fr, zero_fr))?;
                    break;
                }
            } else {
                panic!("Cycle mismatch, enforcing LC using sequence of gates requires LC without the constant term");
            }
        }
        assert!(chain_iter.next().is_none());
        Ok(())
    }

    // fn rewrite_lc(&mut self, lc: &LinearCombination<E>, multiplier: E::Fr, free_term_constant: E::Fr) -> (Variable, TranspilationVariant<E>) {
    //     let one_fr = E::Fr::one();
    //     let (contains_constant, num_linear_terms) = num_unique_values::<E, Self>(&lc, &mut self.scratch);
    //     assert!(num_linear_terms > 0);
    //     if num_linear_terms == 1 && !contains_constant {
    //         let (existing_var, coeff) = lc.as_ref()[0];
    //         let hint = TranspilationVariant::<E>::LeaveAsSingleVariable(coeff);
    //         return (existing_var, hint);
    //     } else if num_linear_terms <= 2 {
    //         let (new_var, (mut a_coef, mut b_coef, mut c_coef, mut constant_coeff)) = rewrite_lc_into_single_addition_gate(&lc, self, &mut (self.scratch.clone()));
    //         if multiplier == E::Fr::zero() {
    //             assert!(free_term_constant == E::Fr::zero());
    //             // it's a constraint 0 * LC = 0
    //         } else {
    //             //scale
    //             if multiplier != one_fr {
    //                 a_coef.mul_assign(&multiplier);
    //                 b_coef.mul_assign(&multiplier);
    //                 c_coef.mul_assign(&multiplier);
    //                 constant_coeff.mul_assign(&multiplier);
    //             }
    //             constant_coeff.sub_assign(&free_term_constant);
    //         }
    //         let hint = TranspilationVariant::<E>::IntoSingleAdditionGate((a_coef, b_coef, c_coef, constant_coeff));
    //         return (new_var, hint);
    //     } else {
    //         let (new_var, first_gate, mut other_coefs) = rewrite_lc_into_multiple_addition_gates(&lc, self, &mut (self.scratch.clone()));
    //         let (mut a_coef, mut b_coef, mut c_coef, mut constant_coeff) = first_gate;
    //         if multiplier == E::Fr::zero() {
    //             assert!(free_term_constant == E::Fr::zero());
    //             // it's a constraint 0 * LC = 0
    //         } else {
    //             //scale
    //             if multiplier != one_fr {
    //                 a_coef.mul_assign(&multiplier);
    //                 b_coef.mul_assign(&multiplier);
    //                 c_coef.mul_assign(&multiplier);
    //                 constant_coeff.mul_assign(&multiplier);
    //                 for c in other_coefs.iter_mut() {
    //                     c.mul_assign(&multiplier);
    //                 }
    //             }
    //             constant_coeff.sub_assign(&free_term_constant);
    //         }
    //         let hint = TranspilationVariant::<E>::IntoMultipleAdditionGates((a_coef, b_coef, c_coef, constant_coeff), other_coefs);
    //         return (new_var, hint);
    //     }
    // }
}
impl<'a, E: Engine, CS: PlonkConstraintSystem<E> + 'a> crate::ConstraintSystem<E>
for Adaptor<'a, E, CS>
{
type Root = Self;
/// The R1CS "one" (constant) variable: input index 0 by convention.
fn one() -> crate::Variable {
    crate::Variable::new_unchecked(crate::Index::Input(0))
}
/// Allocates an auxiliary (witness) variable in the underlying PLONK system
/// and translates the returned index back into the R1CS variable space.
/// Errors from the value closure are normalized to `AssignmentMissing`.
fn alloc<F, A, AR>(&mut self, _: A, f: F) -> Result<crate::Variable, crate::SynthesisError>
where
    F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
    A: FnOnce() -> AR,
    AR: Into<String>,
{
    let allocated = self.cs.alloc(|| f().map_err(|_| crate::SynthesisError::AssignmentMissing))?;
    // A PLONK aux allocation must map back to an R1CS aux index.
    match allocated {
        PlonkVariable(PlonkIndex::Aux(index)) => Ok(crate::Variable::new_unchecked(crate::Index::Aux(index))),
        _ => unreachable!("Map aux into aux"),
    }
}
/// Allocates a public input variable in the underlying PLONK system and
/// translates the returned index back into the R1CS variable space.
/// Errors from the value closure are normalized to `AssignmentMissing`.
fn alloc_input<F, A, AR>(
    &mut self,
    _: A,
    f: F,
) -> Result<crate::Variable, crate::SynthesisError>
where
    F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
    A: FnOnce() -> AR,
    AR: Into<String>,
{
    let allocated = self.cs.alloc_input(|| f().map_err(|_| crate::SynthesisError::AssignmentMissing))?;
    // A PLONK input allocation must map back to an R1CS input index.
    match allocated {
        PlonkVariable(PlonkIndex::Input(index)) => Ok(crate::Variable::new_unchecked(crate::Index::Input(index))),
        _ => unreachable!("Map input into input"),
    }
}
/// Replays one R1CS constraint `A * B = C` into the PLONK system, using the
/// transpilation hint recorded for this constraint to select the gate layout.
/// Panics if the hint stream is out of sync with the constraint stream, or
/// if a hint variant is inconsistent with the shape of the combinations.
fn enforce<A, AR, LA, LB, LC>(&mut self, _ann: A, a: LA, b: LB, c: LC)
where
    A: FnOnce() -> AR,
    AR: Into<String>,
    LA: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
    LB: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
    LC: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
{
    let zero_fr = E::Fr::zero();
    let one_fr = E::Fr::one();
    let mut minus_one_fr = E::Fr::one();
    minus_one_fr.negate();
    // The recorded hint must belong to exactly this constraint index.
    let (_, hint) = {
        self.get_next_hint()
    };
    // Deduplicate so the per-LC heuristics below see each variable once.
    let a = { deduplicate_stable::<E, Self>(a(crate::LinearCombination::zero()), &mut self.deduplication_scratch) };
    let b = { deduplicate_stable::<E, Self>(b(crate::LinearCombination::zero()), &mut self.deduplication_scratch) };
    let c = { deduplicate_stable::<E, Self>(c(crate::LinearCombination::zero()), &mut self.deduplication_scratch) };
    // let ann : String = _ann().into();
    // println!("Enforcing {}", ann);
    // println!("LC_A");
    // for (var, coeff) in a.as_ref().iter() {
    //     println!("{} * {:?}", coeff, var);
    // }
    // println!("LC_B");
    // for (var, coeff) in b.as_ref().iter() {
    //     println!("{} * {:?}", coeff, var);
    // }
    // println!("LC_C");
    // for (var, coeff) in c.as_ref().iter() {
    //     println!("{} * {:?}", coeff, var);
    // }
    let (a_is_constant, a_constant_coeff) = is_constant::<E, Self>(&a);
    let (b_is_constant, b_constant_coeff) = is_constant::<E, Self>(&b);
    let (c_is_constant, c_constant_coeff) = is_constant::<E, Self>(&c);
    let (a_has_variable, a_first_variable) = get_first_variable::<E, Self>(&a);
    let (b_has_variable, b_first_variable) = get_first_variable::<E, Self>(&b);
    let (c_has_variable, c_first_variable) = get_first_variable::<E, Self>(&c);
    // An LC cannot simultaneously be a pure constant and contain a variable.
    debug_assert!(a_is_constant & a_has_variable == false);
    debug_assert!(b_is_constant & b_has_variable == false);
    debug_assert!(c_is_constant & c_has_variable == false);
    let dummy_var = self.cs.get_dummy_variable();
    // variables are left, right, output
    // coefficients are left, right, output, multiplication, constant
    match hint {
        // c0 + c1 * v = 0 for the single variable present in the constraint.
        TranspilationVariant::IntoLinearGate((c0, c1)) => {
            let var = if c_has_variable {
                convert_variable(c_first_variable)
            } else if b_has_variable {
                convert_variable(b_first_variable)
            } else if a_has_variable {
                convert_variable(a_first_variable)
            } else {
                unreachable!();
            };
            self.cs.new_gate((var, dummy_var, dummy_var), (c1, zero_fr, zero_fr, zero_fr, c0)).expect("must make a gate");
        },
        // c0 + c1 * v + c2 * v^2 = 0: the same variable feeds both the left
        // slot and the multiplication term.
        TranspilationVariant::IntoQuandaticGate((c0, c1, c2)) => {
            let var = if c_has_variable {
                convert_variable(c_first_variable)
            } else if b_has_variable {
                convert_variable(b_first_variable)
            } else if a_has_variable {
                convert_variable(a_first_variable)
            } else {
                unreachable!();
            };
            self.cs.new_gate((var, var, dummy_var), (c1, zero_fr, zero_fr, c2, c0)).expect("must make a gate");
        },
        // Full multiplication: reduce each of A, B, C to a single variable
        // (per its sub-hint), then emit one multiplication gate.
        TranspilationVariant::TransformLc(boxed_hints) => {
            let (t_a, t_b, t_c) = *boxed_hints;
            // Accumulates scalar factors from LCs left as single variables.
            let mut multiplication_constant = one_fr;
            let a_var = match t_a {
                TranspilationVariant::IntoSingleAdditionGate(coeffs) => {
                    self.make_single_addition_gate(&a, coeffs).expect("must make a gate")
                },
                TranspilationVariant::IntoMultipleAdditionGates(coeffs, chain) => {
                    self.make_chain_of_addition_gates(&a, coeffs, chain).expect("must make a gate")
                },
                TranspilationVariant::LeaveAsSingleVariable(coeff) => {
                    assert!(a_has_variable);
                    multiplication_constant.mul_assign(&coeff);
                    convert_variable(a_first_variable)
                },
                _ => {unreachable!("{:?}", t_a)}
            };
            let b_var = match t_b {
                TranspilationVariant::IntoSingleAdditionGate(coeffs) => {
                    self.make_single_addition_gate(&b, coeffs).expect("must make a gate")
                },
                TranspilationVariant::IntoMultipleAdditionGates(coeffs, chain) => {
                    self.make_chain_of_addition_gates(&b, coeffs, chain).expect("must make a gate")
                },
                TranspilationVariant::LeaveAsSingleVariable(coeff) => {
                    assert!(b_has_variable);
                    multiplication_constant.mul_assign(&coeff);
                    convert_variable(b_first_variable)
                },
                _ => {unreachable!("{:?}", t_b)}
            };
            let (c_is_just_a_constant, c_var, mut c_coeff) = match t_c {
                TranspilationVariant::IntoSingleAdditionGate(coeffs) => {
                    (false, Some(self.make_single_addition_gate(&c, coeffs).expect("must make a gate")), one_fr)
                },
                TranspilationVariant::IntoMultipleAdditionGates(coeffs, chain) => {
                    (false, Some(self.make_chain_of_addition_gates(&c, coeffs, chain).expect("must make a gate")), one_fr)
                },
                TranspilationVariant::LeaveAsSingleVariable(coeff) => {
                    assert!(c_has_variable);
                    (false, Some(convert_variable(c_first_variable)), coeff)
                },
                TranspilationVariant::IsConstant(value) => {
                    assert!(c_is_constant);
                    assert!(c_constant_coeff == value);
                    (true, None, one_fr)
                }
                // TranspilationVariant::IntoLinearGate((c0, c1)) => {
                //     assert!(c_has_variable);
                //     multiplication_constant.mul_assign(&c1);
                //     (false, Some(convert_variable(c_first_variable)) )
                // },
                _ => {unreachable!("{:?}", t_c)}
            };
            if c_is_just_a_constant {
                let mut constant_term = c_constant_coeff;
                constant_term.negate();
                // A*B == constant
                self.cs.new_gate((a_var, b_var, dummy_var), (zero_fr, zero_fr, zero_fr, multiplication_constant, constant_term)).expect("must make a gate");
            } else {
                // A*B - C == 0
                c_coeff.negate();
                let c_var = c_var.expect("must be a variable");
                self.cs.new_gate((a_var, b_var, c_var), (zero_fr, zero_fr, c_coeff, multiplication_constant, zero_fr)).expect("must make a gate");
            }
        },
        // The remaining variants are only valid as sub-hints inside
        // TransformLc / MergeLinearCombinations, never top-level.
        TranspilationVariant::IntoMultipleAdditionGates(_, _) => {
            // self.make_chain_of_addition_gates(&c, coeffs, chain).expect("must make a gate");
            // let ann: String = _ann().into();
            // println!("Enforcing {}", ann);
            // println!("LC_A");
            // for (var, coeff) in a.as_ref().iter() {
            //     println!("{} * {:?}", coeff, var);
            // }
            // println!("LC_B");
            // for (var, coeff) in b.as_ref().iter() {
            //     println!("{} * {:?}", coeff, var);
            // }
            // println!("LC_C");
            // for (var, coeff) in c.as_ref().iter() {
            //     println!("{} * {:?}", coeff, var);
            // }
            // println!("Hint is {:?}", hint);
            unreachable!()
        },
        TranspilationVariant::IntoSingleAdditionGate(_) => {
            // let ann: String = _ann().into();
            // println!("Enforcing {}", ann);
            // println!("Hint is {:?}", hint);
            unreachable!()
        },
        TranspilationVariant::IsConstant(_) => {
            // let ann: String = _ann().into();
            // println!("Enforcing {}", ann);
            // println!("Hint is {:?}", hint);
            unreachable!()
        },
        TranspilationVariant::LeaveAsSingleVariable(_) => {
            // let ann: String = _ann().into();
            // println!("Enforcing {}", ann);
            // println!("Hint is {:?}", hint);
            unreachable!()
        },
        // One side of the constraint is a constant: fold the multiplication
        // into a single linear combination and enforce it to be zero.
        TranspilationVariant::MergeLinearCombinations((merge_variant, coeff, merge_hint)) => {
            let multiplier = if a_is_constant {
                a_constant_coeff
            } else if b_is_constant {
                b_constant_coeff
            } else {
                unreachable!()
            };
            assert!(coeff == one_fr);
            let lc_into_rewriting = match merge_variant {
                MergeLcVariant::MergeACThroughConstantB => {
                    // A scaled by constant B, minus C, must equal zero.
                    assert!(b_is_constant);
                    let mut final_lc = a;
                    if multiplier != one_fr {
                        for (_, c) in final_lc.0.iter_mut() {
                            c.mul_assign(&multiplier);
                        }
                    }
                    subtract_lcs_with_dedup_stable::<E, Self>(final_lc, c, &mut self.deduplication_scratch)
                    // final_lc - &c
                },
                MergeLcVariant::MergeBCThroughConstantA => {
                    // B scaled by constant A, minus C, must equal zero.
                    assert!(a_is_constant);
                    let mut final_lc = b;
                    if multiplier != one_fr {
                        for (_, c) in final_lc.0.iter_mut() {
                            c.mul_assign(&multiplier);
                        }
                    }
                    subtract_lcs_with_dedup_stable::<E, Self>(final_lc, c, &mut self.deduplication_scratch)
                    // final_lc - &c
                },
                MergeLcVariant::CIsTheOnlyMeaningful => {
                    // A*B contributes nothing: either a zero product or two
                    // constants; only C needs to be enforced.
                    let mut tmp = one_fr;
                    tmp.mul_assign(&a_constant_coeff);
                    tmp.mul_assign(&b_constant_coeff);
                    assert!(tmp.is_zero() || (a_is_constant && b_is_constant));
                    c
                },
                _ => {
                    unreachable!()
                }
            };
            let h = *merge_hint;
            match h {
                TranspilationVariant::IntoSingleAdditionGate(coeffs) => {
                    self.enforce_lc_with_single_addition_gate(lc_into_rewriting, coeffs).expect("must make a gate");
                },
                TranspilationVariant::IntoMultipleAdditionGates(coeffs, chain) => {
                    self.enforce_lc_using_chain_of_addition_gates(lc_into_rewriting, coeffs, chain).expect("must make a gate");
                },
                // TranspilationVariant::LeaveAsSingleVariable(coeff) => {
                //     let (contains_a_variable, variable) = get_first_variable::<E, Self>(&lc_into_rewriting);
                //     assert!(contains_a_variable);
                //     // enforce that final variable after rewriting is exactly zero
                //     self.cs.new_gate((convert_variable(variable), dummy_var, dummy_var), (coeff, zero_fr, zero_fr, zero_fr, zero_fr)).expect("must make a gate");
                // },
                // TranspilationVariant::IntoLinearGate((c0, c1)) => {
                //     let (contains_a_variable, variable) = get_first_variable::<E, Self>(&lc_into_rewriting);
                //     assert!(contains_a_variable);
                //     self.cs.new_gate((convert_variable(variable), dummy_var, dummy_var), (c1, zero_fr, zero_fr, zero_fr, c0)).expect("must make a gate");
                // }
                _ => {
                    unreachable!("{:?}", h);
                }
            };
        }
    }
}
/// No-op: the adaptor flattens the constraint system and ignores namespaces.
fn push_namespace<NR, N>(&mut self, _: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
{
    // Do nothing; we don't care about namespaces in this context.
}
/// No-op counterpart of `push_namespace`.
fn pop_namespace(&mut self) {
    // Do nothing; we don't care about namespaces in this context.
}
/// The adaptor is its own root constraint system (no namespacing wrapper).
fn get_root(&mut self) -> &mut Self::Root {
    self
}
}
/// Maps an R1CS variable index onto the corresponding Plonk variable.
///
/// Index 0 is reserved on both sides (`CS::one` for inputs, the Plonk CS
/// dummy for aux), so converting either is a hard error.
fn convert_variable(r1cs_variable: crate::Variable) -> PlonkVariable {
    match r1cs_variable.get_unchecked() {
        crate::Index::Input(0) => unreachable!("can not convert input variable number 0 (CS::one)"),
        crate::Index::Aux(0) => unreachable!("can not convert aux variable labeled as 0 (taken by Plonk CS)"),
        crate::Index::Input(index) => PlonkVariable(PlonkIndex::Input(index)),
        crate::Index::Aux(index) => PlonkVariable(PlonkIndex::Aux(index)),
    }
}
use std::cell::Cell;
/// Wraps an R1CS circuit together with precomputed transpilation hints so it
/// can be synthesized into a Plonk constraint system.
pub struct AdaptorCircuit<'a, E:Engine, C: crate::Circuit<E>>{
    // Cell<Option<..>> lets `synthesize(&self)` move the circuit out once
    // despite taking a shared reference.
    circuit: Cell<Option<C>>,
    // Hints produced by a prior `Transpiler` pass, keyed by constraint index.
    hints: &'a Vec<(usize, TranspilationVariant<E>)>,
}
impl<'a, E: Engine, C: crate::Circuit<E>> AdaptorCircuit<'a, E, C> {
    /// Pairs a circuit with the transpilation hints computed for it.
    /// The hints must come from a `Transpiler` pass over the same circuit.
    pub fn new<'b>(circuit: C, hints: &'b Vec<(usize, TranspilationVariant<E>)>) -> Self
        where 'b: 'a
    {
        Self {
            circuit: Cell::new(Some(circuit)),
            hints,
        }
    }
}
impl<'a, E: Engine, C: crate::Circuit<E>> PlonkCircuit<E> for AdaptorCircuit<'a, E, C> {
    /// Synthesizes the wrapped R1CS circuit through an `Adaptor` that rewrites
    /// each constraint into Plonk gates using the stored hints.
    fn synthesize<CS: PlonkConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
        // Scratch sizes are heuristic: twice the field bit width.
        let scratch_capacity = (E::Fr::NUM_BITS * 2) as usize;
        let mut adaptor = Adaptor {
            cs,
            hints: self.hints,
            current_constraint_index: 0,
            current_hint_index: 0,
            scratch: HashSet::with_capacity(scratch_capacity),
            deduplication_scratch: HashMap::with_capacity(scratch_capacity),
        };
        // Move the circuit out of the cell; synthesis consumes it.
        let circuit = self.circuit.take().expect("Must replace a circuit out from cell");
        if circuit.synthesize(&mut adaptor).is_err() {
            return Err(SynthesisError::AssignmentMissing);
        }
        Ok(())
    }
}
#[test]
fn transpile_xor_using_adaptor() {
    use crate::tests::XORDemo;
    use crate::cs::Circuit;
    use crate::pairing::bn256::Bn256;
    use crate::plonk::plonk::generator::*;
    use crate::plonk::plonk::prover::*;

    // First pass: run the transpiler over an unassigned circuit to collect hints.
    let c = XORDemo::<Bn256> {
        a: None,
        b: None,
        _marker: PhantomData
    };
    let mut transpiler = Transpiler::new();
    // Fixed typos in the diagnostic messages ("sythesize"/"traspilation").
    c.synthesize(&mut transpiler).expect("synthesize into transpilation must succeed");
    let hints = transpiler.hints;

    // Second pass: synthesize the adapted circuit (still unassigned) into the
    // setup/generator assembly.
    let c = XORDemo::<Bn256> {
        a: None,
        b: None,
        _marker: PhantomData
    };
    let adapted_circuit = AdaptorCircuit::new(c, &hints);
    let mut assembly = GeneratorAssembly::<Bn256>::new();
    adapted_circuit.synthesize(&mut assembly).expect("synthesize of transpiled into CS must succeed");
    assembly.finalize();
    // for (i, g) in assembly.aux_gates.iter().enumerate() {
    //     println!("Gate {} = {:?}", i, g);
    // }

    // Third pass: with a full witness, the proving assembly must be satisfied.
    let c = XORDemo::<Bn256> {
        a: Some(true),
        b: Some(true),
        _marker: PhantomData
    };
    println!("Trying to prove");
    let adapted_circuit = AdaptorCircuit::new(c, &hints);
    let mut prover = ProvingAssembly::<Bn256>::new();
    adapted_circuit.synthesize(&mut prover).unwrap();
    prover.finalize();
    assert!(prover.is_satisfied());
}
use crate::pairing::ff::PrimeField;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::polynomials::*;
use crate::worker::*;
use crate::SynthesisError;
use crate::plonk::commitments::transparent::iop::*;
use crate::plonk::commitments::transparent::utils::log2_floor;
use crate::plonk::commitments::transcript::Prng;
use crate::plonk::commitments::transparent::precomputations::*;
use super::super::*;
/// Straightforward FRI low-degree-test implementation over the IOP `I`
/// (folds the domain by a factor of 2 per round; see `FriIop::DEGREE`).
pub struct NaiveFriIop<F: PrimeField, I: IOP<F>> {
    _marker_f: std::marker::PhantomData<F>,
    _marker_i: std::marker::PhantomData<I>,
}
impl<F: PrimeField, I: IOP<F> > FriIop<F> for NaiveFriIop<F, I> {
    /// Folding factor per FRI round: the evaluation domain is halved each step.
    const DEGREE: usize = 2;

    type IopType = I;
    type ProofPrototype = FRIProofPrototype<F, I>;
    type Proof = FRIProof<F, I>;
    type Params = ();

    /// Builds a proof prototype from LDE evaluations. `Params` is `()` for the
    /// naive IOP, so the parameter is unused; renamed `params` -> `_params`
    /// for consistency with the other trait methods and to silence the
    /// unused-variable warning.
    fn proof_from_lde<P: Prng<F, Input = < < I::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F> >::HashOutput>,
        C: FriPrecomputations<F>
    >(
        lde_values: &Polynomial<F, Values>,
        lde_factor: usize,
        output_coeffs_at_degree_plus_one: usize,
        precomputations: &C,
        worker: &Worker,
        prng: &mut P,
        _params: &Self::Params
    ) -> Result<Self::ProofPrototype, SynthesisError> {
        NaiveFriIop::proof_from_lde_by_values(
            lde_values,
            lde_factor,
            output_coeffs_at_degree_plus_one,
            precomputations,
            worker,
            prng
        )
    }

    /// Finalizes a prototype into a queryable proof for the given
    /// natural-order first-element indexes.
    fn prototype_into_proof(
        prototype: Self::ProofPrototype,
        iop_values: &Polynomial<F, Values>,
        natural_first_element_indexes: Vec<usize>,
        _params: &Self::Params
    ) -> Result<Self::Proof, SynthesisError> {
        prototype.produce_proof(iop_values, natural_first_element_indexes)
    }

    /// Re-derives the per-round folding challenges from the committed roots
    /// via the Fiat-Shamir transcript (one challenge per root, in order).
    fn get_fri_challenges<P: Prng<F, Input = < < I::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F> >::HashOutput> >(
        proof: &Self::Proof,
        prng: &mut P,
        _params: &Self::Params
    ) -> Vec<F> {
        let mut fri_challenges = vec![];
        for root in proof.roots.iter() {
            let iop_challenge = {
                prng.commit_input(&root);
                prng.get_challenge()
            };
            fri_challenges.push(iop_challenge);
        }
        fri_challenges
    }

    /// Verifies the proof queries against pre-derived challenges.
    fn verify_proof_with_challenges(
        proof: &Self::Proof,
        natural_element_indexes: Vec<usize>,
        expected_value: &[F],
        fri_challenges: &[F],
        _params: &Self::Params
    ) -> Result<bool, SynthesisError> {
        Self::verify_proof_queries(proof, natural_element_indexes, Self::DEGREE, expected_value, fri_challenges)
    }
}
use std::time::Instant;
/// Prover-side intermediate result of the FRI commit phase: all oracles,
/// folded values and challenges, kept until the query phase turns them into a
/// compact `FRIProof`.
#[derive(PartialEq, Eq, Clone)]
pub struct FRIProofPrototype<F: PrimeField, I: IOP<F>> {
    // Oracle over the initial (level 0) LDE values.
    pub l0_commitment: I,
    // Oracles over the folded values of every subsequent round.
    pub intermediate_commitments: Vec<I>,
    // Folded evaluation vectors, one per round.
    pub intermediate_values: Vec< Polynomial<F, Values> >,
    // Fiat-Shamir folding challenges, one per round.
    pub challenges: Vec<F>,
    pub final_root: < <I::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput,
    // Coefficients of the fully-folded, low-degree final polynomial.
    pub final_coefficients: Vec<F>,
    pub initial_degree_plus_one: usize,
    pub output_coeffs_at_degree_plus_one: usize,
    pub lde_factor: usize,
}
impl<F: PrimeField, I: IOP<F>> FriProofPrototype<F, I> for FRIProofPrototype<F, I> {
    /// Collects all oracle roots: the level-0 commitment first, then each
    /// intermediate round's commitment in order.
    fn get_roots(&self) -> Vec< < <I::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput> {
        let mut all_roots = Vec::with_capacity(self.intermediate_commitments.len() + 1);
        all_roots.push(self.l0_commitment.get_root().clone());
        for commitment in self.intermediate_commitments.iter() {
            all_roots.push(commitment.get_root().clone());
        }
        all_roots
    }

    /// Root of the last committed oracle.
    fn get_final_root(&self) -> < <I::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput {
        self.final_root.clone()
    }

    /// Coefficients of the final low-degree polynomial.
    fn get_final_coefficients(&self) -> Vec<F> {
        self.final_coefficients.clone()
    }
}
/// Compact FRI proof: opened queries for every round, the oracle roots, and
/// the final polynomial's coefficients.
#[derive(PartialEq, Eq, Clone)]
pub struct FRIProof<F: PrimeField, I: IOP<F>> {
    // Outer Vec: one entry per query; inner Vec: one opening per FRI round.
    pub queries: Vec< Vec< <I as IOP<F> >::Query > >,
    pub roots: Vec< < <I::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F>>::HashOutput>,
    pub final_coefficients: Vec<F>,
    pub initial_degree_plus_one: usize,
    pub output_coeffs_at_degree_plus_one: usize,
    pub lde_factor: usize,
}
impl<F: PrimeField, I: IOP<F>> FriProof<F, I> for FRIProof<F, I> {
    /// Borrows the coefficients of the final low-degree polynomial.
    fn get_final_coefficients(&self) -> &[F] {
        self.final_coefficients.as_slice()
    }
}
impl<F: PrimeField, I: IOP<F>> NaiveFriIop<F, I> {
    /// FRI commit phase operating in coefficient form: interpolates the LDE
    /// values back to coefficients, then repeatedly folds adjacent coefficient
    /// pairs as `a_{2i} + beta * a_{2i+1}` and commits to the re-extended
    /// values of each folded polynomial.
    pub fn proof_from_lde_through_coefficients<P: Prng<F, Input = < < I::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F> >::HashOutput> >(
        lde_values: Polynomial<F, Values>,
        lde_factor: usize,
        output_coeffs_at_degree_plus_one: usize,
        worker: &Worker,
        prng: &mut P
    ) -> Result<FRIProofPrototype<F, I>, SynthesisError> {
        // Commit to the raw level-0 LDE values.
        let l0_commitment: I = I::create(lde_values.as_ref());
        let initial_domain_size = lde_values.size();

        assert!(output_coeffs_at_degree_plus_one.is_power_of_two());
        assert!(lde_factor.is_power_of_two());

        let initial_degree_plus_one = initial_domain_size / lde_factor;
        // Number of 2x folding rounds to reach the target output degree.
        let num_steps = log2_floor(initial_degree_plus_one / output_coeffs_at_degree_plus_one) as usize;

        // Interpolate and drop everything above the claimed degree bound.
        let initial_polynomial = lde_values.ifft(&worker);
        let mut initial_polynomial_coeffs = initial_polynomial.into_coeffs();
        initial_polynomial_coeffs.truncate(initial_degree_plus_one);

        let mut intermediate_commitments = vec![];
        let mut intermediate_values = vec![];
        let mut challenges = vec![];
        // First folding challenge is bound to the level-0 root (Fiat-Shamir).
        let mut next_domain_challenge = {
            prng.commit_input(&l0_commitment.get_root());
            prng.get_challenge()
        };
        challenges.push(next_domain_challenge);
        let mut next_domain_size = initial_polynomial_coeffs.len() / 2;
        let mut coeffs = initial_polynomial_coeffs;
        let mut roots = vec![];

        for step in 0..num_steps {
            let mut next_coefficients = vec![F::zero(); next_domain_size];
            let coeffs_slice: &[F] = coeffs.as_ref();
            assert!(next_coefficients.len()*2 == coeffs_slice.len());

            // Fold coefficient pairs in parallel: new_i = a_{2i} + beta * a_{2i+1}.
            worker.scope(next_coefficients.len(), |scope, chunk| {
                for (v, old) in next_coefficients.chunks_mut(chunk)
                                .zip(coeffs_slice.chunks(chunk*2)) {
                    scope.spawn(move |_| {
                        for (v, old) in v.iter_mut().zip(old.chunks(2)) {
                            // a_0 + beta * a_1
                            let x = old[0];
                            let mut tmp = old[1];
                            tmp.mul_assign(&next_domain_challenge);
                            tmp.add_assign(&x);

                            *v = tmp;
                        }
                    });
                }
            });

            // Re-extend the folded polynomial and commit to its values.
            let next_coeffs_as_poly = Polynomial::from_coeffs(next_coefficients.clone())?;
            let next_values_as_poly = next_coeffs_as_poly.lde(&worker, lde_factor)?;
            let intermediate_iop = I::create(next_values_as_poly.as_ref());
            if step < num_steps - 1 {
                let root = intermediate_iop.get_root();
                roots.push(root);
                next_domain_challenge = {
                    prng.commit_input(&intermediate_iop.get_root());
                    prng.get_challenge()
                };

                challenges.push(next_domain_challenge);

                next_domain_size /= 2;

                intermediate_commitments.push(intermediate_iop);
                intermediate_values.push(next_values_as_poly);

                // NOTE(review): `coeffs` is only advanced when step < num_steps - 1,
                // so on the last round the freshly folded `next_coefficients` are
                // discarded — confirm this matches the intended final polynomial.
                coeffs = next_coefficients;
            }
        }

        // challenges.pop().expect("will work");
        // let final_root = roots.pop().expect("will work");
        let final_root = roots.last().expect("will work").clone();

        // NOTE(review): intermediate commitments/values are pushed only for
        // step < num_steps - 1 (i.e. num_steps - 1 times), yet these asserts
        // expect num_steps entries — verify against proof_from_lde_by_values,
        // which asserts num_steps - 1 commitments.
        assert!(challenges.len() == num_steps);
        assert!(intermediate_commitments.len() == num_steps);
        assert!(intermediate_values.len() == num_steps);

        let final_poly_coeffs = coeffs;

        assert!(final_poly_coeffs.len() == output_coeffs_at_degree_plus_one);

        Ok(FRIProofPrototype {
            l0_commitment,
            intermediate_commitments,
            intermediate_values,
            challenges,
            final_root,
            final_coefficients: final_poly_coeffs,
            initial_degree_plus_one,
            output_coeffs_at_degree_plus_one,
            lde_factor,
        })
    }

    /// FRI commit phase operating directly on evaluation values (no IFFT per
    /// round): each round combines f(omega) and f(-omega) into evaluations of
    /// the even/odd-coefficient halves on a domain of half the size, using
    /// precomputed inverse omegas.
    pub fn proof_from_lde_by_values<P: Prng<F, Input = < < I::Tree as IopTree<F> >::TreeHasher as IopTreeHasher<F> >::HashOutput>,
        C: FriPrecomputations<F>
    >(
        lde_values: &Polynomial<F, Values>,
        lde_factor: usize,
        output_coeffs_at_degree_plus_one: usize,
        precomputations: &C,
        worker: &Worker,
        prng: &mut P
    ) -> Result<FRIProofPrototype<F, I>, SynthesisError> {
        println!("Starting FRI");
        let start = Instant::now();
        let mut roots = vec![];
        // Commit to the raw level-0 LDE values.
        let l0_commitment: I = I::create(lde_values.as_ref());
        let root = l0_commitment.get_root();
        roots.push(root);
        let initial_domain_size = lde_values.size();

        assert_eq!(precomputations.domain_size(), initial_domain_size);

        // 1/2, used to halve the sum/difference of the paired evaluations.
        let mut two = F::one();
        two.double();

        let two_inv = two.inverse().expect("should exist");

        assert!(output_coeffs_at_degree_plus_one.is_power_of_two());
        assert!(lde_factor.is_power_of_two());

        let initial_degree_plus_one = initial_domain_size / lde_factor;
        let num_steps = log2_floor(initial_degree_plus_one / output_coeffs_at_degree_plus_one) as usize;

        let mut intermediate_commitments = vec![];
        let mut intermediate_values = vec![];
        let mut challenges = vec![];
        // First folding challenge is bound to the level-0 root (Fiat-Shamir).
        let mut next_domain_challenge = {
            prng.commit_input(&l0_commitment.get_root());
            prng.get_challenge()
        };
        challenges.push(next_domain_challenge);

        let mut next_domain_size = initial_domain_size / 2;

        let mut values_slice = lde_values.as_ref();

        let omegas_inv_ref: &[F] = precomputations.omegas_inv_ref();

        // step 0: fold totally by 2
        // step 1: fold totally by 4
        // etc...

        for i in 0..num_steps {
            // we step over 1, omega, omega^2,
            // then over 1, omega^2,
            // etc.
            // Stride selects every 2^i-th inverse omega from the full table.
            let stride = 1 << i;
            let mut next_values = vec![F::zero(); next_domain_size];

            assert!(values_slice.len() == next_values.len() * 2);

            worker.scope(next_values.len(), |scope, chunk| {
                for (i, v) in next_values.chunks_mut(chunk).enumerate() {
                    scope.spawn(move |_| {
                        let initial_k = i*chunk;
                        for (j, v) in v.iter_mut().enumerate() {
                            let idx = initial_k + j;
                            debug_assert!(idx < next_domain_size);
                            let omega_idx = idx * stride;
                            // omega and -omega evaluations sit half a domain apart.
                            let f_at_omega = values_slice[idx];
                            let f_at_minus_omega = values_slice[idx + next_domain_size];

                            // Sum/difference recover (doubled) even/odd parts.
                            let mut v_even_coeffs = f_at_omega;
                            v_even_coeffs.add_assign(&f_at_minus_omega);

                            let mut v_odd_coeffs = f_at_omega;
                            v_odd_coeffs.sub_assign(&f_at_minus_omega);
                            v_odd_coeffs.mul_assign(&omegas_inv_ref[omega_idx]);

                            // those can be treated as (doubled) evaluations of polynomials that
                            // are themselves made only from even or odd coefficients of original poly
                            // (with reduction of degree by 2) on a domain of the size twice smaller
                            // with an extra factor of "omega" in odd coefficients

                            // to do assemble FRI step we just need to add them with a random challenge
                            let mut tmp = v_odd_coeffs;
                            tmp.mul_assign(&next_domain_challenge);
                            tmp.add_assign(&v_even_coeffs);
                            tmp.mul_assign(&two_inv);

                            *v = tmp;
                        }
                    });
                }
            });

            // All rounds except the last commit to the folded values and
            // derive the next challenge from that commitment.
            if i < num_steps - 1 {
                let intermediate_iop = I::create(next_values.as_ref());
                let root = intermediate_iop.get_root();
                roots.push(root);
                next_domain_challenge = {
                    prng.commit_input(&intermediate_iop.get_root());
                    prng.get_challenge()
                };

                challenges.push(next_domain_challenge);

                next_domain_size >>= 1;

                intermediate_commitments.push(intermediate_iop);
            }

            let next_values_as_poly = Polynomial::from_values(next_values)?;
            intermediate_values.push(next_values_as_poly);

            values_slice = intermediate_values.last().expect("is something").as_ref();
        }

        let final_root = roots.last().expect("will work").clone();

        assert_eq!(challenges.len(), num_steps);
        assert_eq!(roots.len(), num_steps);
        assert_eq!(intermediate_commitments.len(), num_steps-1);
        assert_eq!(intermediate_values.len(), num_steps);

        // Interpolate the fully-folded values and strip trailing zeros to
        // check the claimed degree bound.
        let final_poly_values = Polynomial::from_values(values_slice.to_vec())?;
        let final_poly_coeffs = final_poly_values.ifft(&worker);

        let mut final_poly_coeffs = final_poly_coeffs.into_coeffs();

        let mut degree = final_poly_coeffs.len() - 1;
        for c in final_poly_coeffs.iter().rev() {
            if c.is_zero() {
                degree -= 1;
            } else {
                break
            }
        }

        assert!(degree < output_coeffs_at_degree_plus_one, "polynomial degree is too large, coeffs = {:?}", final_poly_coeffs);

        final_poly_coeffs.truncate(output_coeffs_at_degree_plus_one);

        println!("Done FRI for degree {} in {:?}", lde_values.size()/lde_factor, start.elapsed());

        Ok(FRIProofPrototype {
            l0_commitment,
            intermediate_commitments,
            intermediate_values,
            challenges,
            final_root,
            final_coefficients: final_poly_coeffs,
            initial_degree_plus_one,
            output_coeffs_at_degree_plus_one,
            lde_factor,
        })
    }
}
use crate::pairing::{
Engine,
};
use crate::pairing::ff::{
Field,
PrimeField,
PrimeFieldRepr,
BitIterator
};
use crate::{
SynthesisError,
};
use crate::plonk::better_better_cs::cs::{
Variable,
ConstraintSystem,
ArithmeticTerm,
MainGateTerm
};
use super::assignment::{
Assignment
};
use std::ops::{Add, Sub};
/// A field element allocated in a Plonk constraint system: the witness value
/// (present only on the prover side) paired with its CS variable.
pub struct AllocatedNum<E: Engine> {
    value: Option<E::Fr>,
    variable: Variable
}
impl<E: Engine> Clone for AllocatedNum<E> {
    /// Both fields are `Copy`-like (an `Option<Fr>` and a `Variable` handle),
    /// so cloning is a plain field copy.
    fn clone(&self) -> Self {
        Self {
            value: self.value,
            variable: self.variable,
        }
    }
}
impl<E: Engine> AllocatedNum<E> {
    /// Returns the underlying constraint-system variable.
    pub fn get_variable(&self) -> Variable {
        self.variable
    }

    /// Returns the witness value, if one has been assigned.
    pub fn get_value(&self) -> Option<E::Fr> {
        self.value
    }

    /// Allocates a new variable whose witness is produced by `value`.
    /// No constraints are added; the caller is responsible for constraining it.
    pub fn alloc<CS, F>(
        cs: &mut CS,
        value: F,
    ) -> Result<Self, SynthesisError>
        where CS: ConstraintSystem<E>,
              F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let mut assigned = None;
        let variable = cs.alloc(|| {
            let fr = value()?;
            assigned = Some(fr);
            Ok(fr)
        })?;

        Ok(AllocatedNum {
            value: assigned,
            variable,
        })
    }

    /// Allocates `self + other` and enforces the sum with a single main gate:
    /// self + other - result = 0.
    pub fn add<CS>(
        &self,
        cs: &mut CS,
        other: &Self
    ) -> Result<Self, SynthesisError>
        where CS: ConstraintSystem<E>
    {
        let mut sum_value = None;
        let sum_var = cs.alloc(|| {
            let mut sum = *self.value.get()?;
            sum.add_assign(other.value.get()?);
            sum_value = Some(sum);
            Ok(sum)
        })?;

        let mut gate = MainGateTerm::new();
        gate.add_assign(ArithmeticTerm::from_variable(self.variable));
        gate.add_assign(ArithmeticTerm::from_variable(other.variable));
        gate.sub_assign(ArithmeticTerm::from_variable(sum_var));
        cs.allocate_main_gate(gate)?;

        Ok(AllocatedNum {
            value: sum_value,
            variable: sum_var,
        })
    }

    /// Allocates `self + constant` and enforces it with a single main gate:
    /// self + constant - result = 0.
    pub fn add_constant<CS>(
        &self,
        cs: &mut CS,
        constant: E::Fr
    ) -> Result<Self, SynthesisError>
        where CS: ConstraintSystem<E>
    {
        let mut sum_value = None;
        let sum_var = cs.alloc(|| {
            let mut sum = *self.value.get()?;
            sum.add_assign(&constant);
            sum_value = Some(sum);
            Ok(sum)
        })?;

        let mut gate = MainGateTerm::new();
        gate.add_assign(ArithmeticTerm::from_variable(self.variable));
        gate.add_assign(ArithmeticTerm::constant(constant));
        gate.sub_assign(ArithmeticTerm::from_variable(sum_var));
        cs.allocate_main_gate(gate)?;

        Ok(AllocatedNum {
            value: sum_value,
            variable: sum_var,
        })
    }

    /// Allocates `self * other` and enforces the product with a single main
    /// gate: self * other - result = 0.
    pub fn mul<CS>(
        &self,
        cs: &mut CS,
        other: &Self
    ) -> Result<Self, SynthesisError>
        where CS: ConstraintSystem<E>
    {
        let mut product_value = None;
        let product_var = cs.alloc(|| {
            let mut product = *self.value.get()?;
            product.mul_assign(other.value.get()?);
            product_value = Some(product);
            Ok(product)
        })?;

        let mut gate = MainGateTerm::new();
        gate.add_assign(ArithmeticTerm::from_variable(self.variable).mul_by_variable(other.variable));
        gate.sub_assign(ArithmeticTerm::from_variable(product_var));
        cs.allocate_main_gate(gate)?;

        Ok(AllocatedNum {
            value: product_value,
            variable: product_var,
        })
    }

    /// Allocates and constrains `self * self`.
    pub fn square<CS>(
        &self,
        cs: &mut CS,
    ) -> Result<Self, SynthesisError>
        where CS: ConstraintSystem<E>
    {
        self.mul(cs, self)
    }
}
<file_sep>/src/plonk/better_better_cs/trees/binary_tree.rs
use crate::pairing::{Engine};
use crate::pairing::ff::{PrimeField, PrimeFieldRepr};
use crate::worker::Worker;
use crate::plonk::commitments::transparent::utils::log2_floor;
use super::*;
use super::tree_hash::*;
/// Binary Merkle tree over field elements, built with hasher `H`.
/// Leafs are hashes of `values_per_leaf` consecutive elements; interior nodes
/// are stored in a flat heap-like array (root at index 1, index 0 unused).
#[derive(Debug)]
pub struct BinaryTree<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    // Total number of field elements committed to.
    pub (crate) size: usize,
    pub (crate) num_leafs: usize,
    // How many polynomials were combined per leaf (1 for a plain tree).
    pub (crate) num_combined: usize,
    pub (crate) nodes: Vec<H::Output>,
    pub (crate) params: BinaryTreeParams,
    pub (crate) tree_hasher: H,
    _marker: std::marker::PhantomData<E>
}
/// Tree-shape parameter: how many field elements are hashed into each leaf.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BinaryTreeParams {
    pub values_per_leaf: usize
}
use std::time::Instant;
impl<E: Engine, H: BinaryTreeHasher<E::Fr>> BinaryTree<E, H> {
    // Thin wrapper over the hasher's leaf hash; presumably kept for callers
    // elsewhere (the constructors below call `leaf_hash` directly).
    fn hash_into_leaf(tree_hasher: &H, values: &[E::Fr]) -> H::Output {
        tree_hasher.leaf_hash(values)
    }

    /// Builds the Merkle authentication path for `leaf_index`, starting from
    /// the already-computed hash of its sibling leaf and walking up the
    /// heap-like `nodes` array (each iteration halves the slice to the next
    /// level and picks the sibling via `idx ^ 1`).
    fn make_full_path(&self, leaf_index: usize, leaf_pair_hash: H::Output) -> Vec<H::Output> {
        let mut nodes = &self.nodes[..];

        let mut path = vec![];
        path.push(leaf_pair_hash);

        let mut idx = leaf_index;
        idx >>= 1;

        for _ in 0..log2_floor(nodes.len() / 2) {
            let half_len = nodes.len() / 2;
            let (next_level, this_level) = nodes.split_at(half_len);
            let pair_idx = idx ^ 1usize;
            let value = this_level[pair_idx];
            path.push(value);
            idx >>= 1;
            nodes = next_level;
        }

        path
    }

    // Total number of committed field elements.
    pub(crate) fn size(&self) -> usize {
        self.size
    }

    pub(crate) fn num_leafs(&self) -> usize {
        self.num_leafs
    }

    /// Builds a tree where each leaf hashes the concatenation of
    /// `num_combined` slices (one per combined polynomial).
    /// `leafs[i]` holds the `num_combined` value slices of leaf `i`.
    pub fn create_from_combined_leafs(
        leafs: &[Vec<&[E::Fr]>],
        num_combined: usize,
        tree_hasher: H,
        params: &BinaryTreeParams
    ) -> Self {
        let values_per_leaf = params.values_per_leaf;
        let num_leafs = leafs.len();
        let values_per_leaf_supplied = leafs[0].len() * leafs[0][0].len();
        assert_eq!(num_combined, leafs[0].len(), "invalid number of total combined leafs");
        assert_eq!(values_per_leaf, values_per_leaf_supplied, "values per leaf from params and from data is not consistent");

        assert!(num_leafs.is_power_of_two(), "tree must be binary");

        let num_nodes = num_leafs;

        let size = num_leafs * values_per_leaf;

        let mut nodes = vec![H::placeholder_output(); num_nodes];

        let worker = Worker::new();

        let mut leaf_hashes = vec![H::placeholder_output(); num_leafs];

        let hasher_ref = &tree_hasher;

        // Hash all leafs in parallel; each worker reuses one scratch buffer
        // to concatenate a leaf's combined slices before hashing.
        {
            worker.scope(leaf_hashes.len(), |scope, chunk| {
                for (i, lh) in leaf_hashes.chunks_mut(chunk)
                                .enumerate() {
                    scope.spawn(move |_| {
                        let mut scratch_space = Vec::with_capacity(values_per_leaf);
                        let base_idx = i*chunk;
                        for (j, lh) in lh.iter_mut().enumerate() {
                            // idx is index of the leaf
                            let idx = base_idx + j;
                            let leaf_values_ref = &leafs[idx];
                            for &lv in leaf_values_ref.iter() {
                                scratch_space.extend_from_slice(lv);
                            }
                            debug_assert_eq!(scratch_space.len(), values_per_leaf);
                            *lh = hasher_ref.leaf_hash(&scratch_space[..]);
                            scratch_space.truncate(0);
                        }
                    });
                }
            });
        }

        println!("Leaf hashes completed");

        // leafs are now encoded and hashed, so let's make a tree

        let num_levels = log2_floor(num_leafs) as usize;
        let mut nodes_for_hashing = &mut nodes[..];

        // separately hash last level, which hashes leaf hashes into first nodes
        // (writes the pair hashes into the upper half of `nodes`).
        {
            let _level = num_levels-1;
            let inputs = &mut leaf_hashes[..];
            let (_, outputs) = nodes_for_hashing.split_at_mut(nodes_for_hashing.len()/2);
            assert!(outputs.len() * 2 == inputs.len());
            assert!(outputs.len().is_power_of_two());

            worker.scope(outputs.len(), |scope, chunk| {
                for (o, i) in outputs.chunks_mut(chunk)
                                .zip(inputs.chunks(chunk*2)) {
                    scope.spawn(move |_| {
                        let mut hash_input = [H::placeholder_output(); 2];
                        for (o, i) in o.iter_mut().zip(i.chunks(2)) {
                            hash_input[0] = i[0];
                            hash_input[1] = i[1];
                            *o = hasher_ref.node_hash(&hash_input, _level);
                        }
                    });
                }
            });
        }

        // Hash each remaining level: `inputs` is the just-written level,
        // `outputs` is the upper half of the next (smaller) region.
        for _level in (0..(num_levels-1)).rev() {
            // do the trick - split
            let (next_levels, inputs) = nodes_for_hashing.split_at_mut(nodes_for_hashing.len()/2);
            let (_, outputs) = next_levels.split_at_mut(next_levels.len() / 2);
            assert!(outputs.len() * 2 == inputs.len());
            assert!(outputs.len().is_power_of_two());

            worker.scope(outputs.len(), |scope, chunk| {
                for (o, i) in outputs.chunks_mut(chunk)
                                .zip(inputs.chunks(chunk*2)) {
                    scope.spawn(move |_| {
                        let mut hash_input = [H::placeholder_output(); 2];
                        for (o, i) in o.iter_mut().zip(i.chunks(2)) {
                            hash_input[0] = i[0];
                            hash_input[1] = i[1];
                            *o = hasher_ref.node_hash(&hash_input, _level);
                        }
                    });
                }
            });

            nodes_for_hashing = next_levels;
        }

        Self {
            size: size,
            num_leafs: num_leafs,
            nodes: nodes,
            num_combined,
            tree_hasher: tree_hasher,
            params: params.clone(),
            _marker: std::marker::PhantomData
        }
    }

    /// Builds a plain (non-combined) tree over a flat slice of values,
    /// `values_per_leaf` consecutive elements per leaf.
    pub(crate) fn create(values: &[E::Fr], tree_hasher: H, params: &BinaryTreeParams) -> Self {
        assert!(params.values_per_leaf.is_power_of_two());

        let values_per_leaf = params.values_per_leaf;
        let num_leafs = values.len() / values_per_leaf;
        assert!(num_leafs.is_power_of_two());

        let num_nodes = num_leafs;

        // size is a total number of elements
        let size = values.len();

        let mut nodes = vec![H::placeholder_output(); num_nodes];

        let worker = Worker::new();

        let mut leaf_hashes = vec![H::placeholder_output(); num_leafs];

        let hasher_ref = &tree_hasher;

        // Hash all leafs in parallel directly from the value slice.
        {
            worker.scope(leaf_hashes.len(), |scope, chunk| {
                for (i, lh) in leaf_hashes.chunks_mut(chunk)
                                .enumerate() {
                    scope.spawn(move |_| {
                        let base_idx = i*chunk;
                        for (j, lh) in lh.iter_mut().enumerate() {
                            let idx = base_idx + j;
                            let values_start = idx * values_per_leaf;
                            let values_end = values_start + values_per_leaf;
                            *lh = hasher_ref.leaf_hash(&values[values_start..values_end]);
                        }
                    });
                }
            });
        }

        // leafs are now encoded and hashed, so let's make a tree

        let num_levels = log2_floor(num_leafs) as usize;
        let mut nodes_for_hashing = &mut nodes[..];

        // separately hash last level, which hashes leaf hashes into first nodes
        {
            let _level = num_levels-1;
            let inputs = &mut leaf_hashes[..];
            let (_, outputs) = nodes_for_hashing.split_at_mut(nodes_for_hashing.len()/2);
            assert!(outputs.len() * 2 == inputs.len());
            assert!(outputs.len().is_power_of_two());

            worker.scope(outputs.len(), |scope, chunk| {
                for (o, i) in outputs.chunks_mut(chunk)
                                .zip(inputs.chunks(chunk*2)) {
                    scope.spawn(move |_| {
                        let mut hash_input = [H::placeholder_output(); 2];
                        for (o, i) in o.iter_mut().zip(i.chunks(2)) {
                            hash_input[0] = i[0];
                            hash_input[1] = i[1];
                            *o = hasher_ref.node_hash(&hash_input, _level);
                        }
                    });
                }
            });
        }

        // Hash remaining levels towards the root (same split trick as above).
        for _level in (0..(num_levels-1)).rev() {
            // do the trick - split
            let (next_levels, inputs) = nodes_for_hashing.split_at_mut(nodes_for_hashing.len()/2);
            let (_, outputs) = next_levels.split_at_mut(next_levels.len() / 2);
            assert!(outputs.len() * 2 == inputs.len());
            assert!(outputs.len().is_power_of_two());

            worker.scope(outputs.len(), |scope, chunk| {
                for (o, i) in outputs.chunks_mut(chunk)
                                .zip(inputs.chunks(chunk*2)) {
                    scope.spawn(move |_| {
                        let mut hash_input = [H::placeholder_output(); 2];
                        for (o, i) in o.iter_mut().zip(i.chunks(2)) {
                            hash_input[0] = i[0];
                            hash_input[1] = i[1];
                            *o = hasher_ref.node_hash(&hash_input, _level);
                        }
                    });
                }
            });

            nodes_for_hashing = next_levels;
        }

        Self {
            size: size,
            nodes: nodes,
            num_leafs: num_leafs,
            num_combined: 1,
            tree_hasher: tree_hasher,
            params: params.clone(),
            _marker: std::marker::PhantomData
        }
    }

    /// Root of the tree. In this heap-like layout the root lands at index 1
    /// and `nodes[0]` stays unused.
    pub fn get_commitment(&self) -> H::Output {
        self.nodes[1].clone()
    }

    /// Opens one full leaf: `indexes` must be the contiguous, leaf-aligned
    /// range of value indexes covering exactly one leaf.
    pub fn produce_query(&self, indexes: Vec<usize>, values: &[E::Fr]) -> Query<E, H> {
        // we never expect that query is mis-aligned, so check it
        debug_assert!(indexes[0] % self.params.values_per_leaf == 0);
        debug_assert!(indexes.len() == self.params.values_per_leaf);
        debug_assert!(indexes == (indexes[0]..(indexes[0]+self.params.values_per_leaf)).collect::<Vec<_>>());
        debug_assert!(*indexes.last().expect("is some") < self.size());
        debug_assert!(*indexes.last().expect("is some") < values.len());

        let query_values = Vec::from(&values[indexes[0]..(indexes[0]+self.params.values_per_leaf)]);

        let leaf_index = indexes[0] / self.params.values_per_leaf;

        // Sibling leaf hash seeds the authentication path.
        let pair_index = leaf_index ^ 1;

        let leaf_pair_hash = self.tree_hasher.leaf_hash(&values[(pair_index*self.params.values_per_leaf)..((pair_index+1)*self.params.values_per_leaf)]);

        let path = self.make_full_path(leaf_index, leaf_pair_hash);

        Query::<E, H> {
            indexes: indexes,
            values: query_values,
            path: path,
        }
    }

    /// Opens one combined leaf (built via `create_from_combined_leafs`),
    /// returning the `num_combined` value slices separately.
    pub fn produce_multiquery(
        &self,
        indexes: Vec<usize>,
        num_combined: usize,
        leafs: &[Vec<&[E::Fr]>]
    ) -> MultiQuery<E, H> {
        // debug_assert!(indexes[0] % self.params.values_per_leaf == 0);
        // debug_assert!(indexes.len() == self.params.values_per_leaf);
        debug_assert!(indexes == (indexes[0]..(indexes[0]+(self.params.values_per_leaf/self.num_combined))).collect::<Vec<_>>());
        debug_assert!(*indexes.last().expect("is some") < self.size());
        debug_assert!(leafs[0].len() == num_combined);

        let leaf_index = indexes[0] / (self.params.values_per_leaf / num_combined);

        let mut query_values = Vec::with_capacity(indexes.len());

        let this_leaf = &leafs[leaf_index];

        for part in this_leaf.iter() {
            query_values.push(part.to_vec());
        }

        let pair_index = leaf_index ^ 1;

        // Rebuild the sibling leaf's concatenated values to hash it.
        let mut scratch_space = Vec::with_capacity(self.params.values_per_leaf);

        let pair_leaf_combination = &leafs[pair_index];
        for r in pair_leaf_combination.iter() {
            // walk over the polynomials
            scratch_space.extend_from_slice(r);
        }

        let leaf_pair_hash = self.tree_hasher.leaf_hash(&scratch_space);

        let path = self.make_full_path(leaf_index, leaf_pair_hash);

        MultiQuery::<E, H> {
            indexes: indexes,
            values: query_values,
            num_combined,
            path: path,
        }
    }

    /// Recomputes the root from a query's values and path and compares it to
    /// the commitment.
    pub fn verify_query(
        commitment: &H::Output,
        query: &Query<E, H>,
        params: &BinaryTreeParams,
        tree_hasher: &H
    ) -> bool {
        if query.values().len() != params.values_per_leaf {
            return false;
        }

        let mut hash = tree_hasher.leaf_hash(query.values());
        let mut idx = query.indexes()[0] / params.values_per_leaf;
        let mut hash_input = [H::placeholder_output(); 2];

        for el in query.path.iter() {
            {
                // Sibling position depends on the current index parity.
                if idx & 1usize == 0 {
                    hash_input[0] = hash;
                    hash_input[1] = *el;
                } else {
                    hash_input[0] = *el;
                    hash_input[1] = hash;
                }
            }

            // NOTE(review): construction hashes nodes with their actual level,
            // but verification always passes level 0 — this only round-trips
            // if the hasher ignores the level argument; confirm against
            // H::node_hash.
            hash = tree_hasher.node_hash(&hash_input, 0);
            idx >>= 1;
        }

        &hash == commitment
    }

    /// Same as `verify_query` for a combined-leaf opening: flattens the
    /// per-polynomial slices before recomputing the leaf hash.
    pub fn verify_multiquery(
        commitment: &H::Output,
        query: &MultiQuery<E, H>,
        params: &BinaryTreeParams,
        tree_hasher: &H
    ) -> bool {
        let values = query.values_flattened();
        if values.len() != params.values_per_leaf {
            return false;
        }

        let num_combined = query.num_combined();

        let mut hash = tree_hasher.leaf_hash(&values);
        let mut idx = query.indexes()[0] / (params.values_per_leaf / num_combined);
        let mut hash_input = [H::placeholder_output(); 2];

        for el in query.path.iter() {
            {
                if idx & 1usize == 0 {
                    hash_input[0] = hash;
                    hash_input[1] = *el;
                } else {
                    hash_input[0] = *el;
                    hash_input[1] = hash;
                }
            }

            // NOTE(review): level is hard-coded to 0 here as well — see
            // verify_query.
            hash = tree_hasher.node_hash(&hash_input, 0);
            idx >>= 1;
        }

        &hash == commitment
    }
}
impl<E: Engine, H: BinaryTreeHasher<E::Fr>> PartialEq for BinaryTree<E, H> {
    /// Two trees are considered equal when their root commitments match.
    fn eq(&self, other: &Self) -> bool {
        let this_root = self.get_commitment();
        let other_root = other.get_commitment();
        this_root == other_root
    }
}

impl<E: Engine, H: BinaryTreeHasher<E::Fr>> Eq for BinaryTree<E, H> {}
/// Opening of a single plain leaf: the value indexes, the leaf's values, and
/// the Merkle authentication path.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Query<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    indexes: Vec<usize>,
    values: Vec<E::Fr>,
    path: Vec<H::Output>,
}
/// Opening of a combined leaf: one value vector per combined polynomial, plus
/// the shared Merkle authentication path.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct MultiQuery<E: Engine, H: BinaryTreeHasher<E::Fr>> {
    indexes: Vec<usize>,
    values: Vec<Vec<E::Fr>>,
    // Number of polynomials combined per leaf; must equal values.len().
    num_combined: usize,
    path: Vec<H::Output>,
}
impl<E: Engine, H: BinaryTreeHasher<E::Fr>> Query<E, H> {
    /// Returns a copy of the queried value indexes.
    pub fn indexes(&self) -> Vec<usize> {
        self.indexes.to_vec()
    }

    /// Borrows the opened leaf values.
    pub fn values(&self) -> &[E::Fr] {
        self.values.as_slice()
    }

    /// Borrows the Merkle authentication path.
    pub fn path(&self) -> &[H::Output] {
        self.path.as_slice()
    }
}
impl<E: Engine, H: BinaryTreeHasher<E::Fr>> MultiQuery<E, H> {
fn indexes(&self) -> Vec<usize> {
self.indexes.clone()
}
fn values_flattened(&self) -> Vec<E::Fr> {
let mut concat = Vec::with_capacity(self.values.len() + self.values[0].len());
for v in self.values.iter() {
concat.extend_from_slice(&v[..]);
}
concat
}
fn num_combined(&self) -> usize {
debug_assert_eq!(self.num_combined, self.values.len());
self.num_combined
}
}<file_sep>/src/plonk/commitments/mod.rs
use crate::pairing::ff::PrimeField;
use crate::plonk::polynomials::*;
use crate::plonk::commitments::transcript::*;
pub mod transparent;
pub mod transcript;
/// Abstraction over polynomial commitment schemes used by the PLONK prover:
/// commit to one or many coefficient-form polynomials, open them at points,
/// and verify the openings, driven by a Fiat-Shamir transcript (`Prng`).
pub trait CommitmentScheme<F: PrimeField> {
    type Commitment: std::fmt::Debug;
    type OpeningProof;
    // Prover-side data cached between commit and open (e.g. oracles).
    type IntermediateData;
    // Scheme configuration (e.g. LDE factor, number of queries).
    type Meta: Clone;
    type Prng: Prng<F>;

    // Whether `precompute` must be called before opening.
    const REQUIRES_PRECOMPUTATION: bool;
    // Whether commitments can be combined linearly.
    const IS_HOMOMORPHIC: bool;

    fn new_for_size(max_degree_plus_one: usize, meta: Self::Meta) -> Self;
    fn precompute(&self, poly: &Polynomial<F, Coefficients>) -> Option<Self::IntermediateData>;
    fn commit_single(&self, poly: &Polynomial<F, Coefficients>) -> (Self::Commitment, Option<Self::IntermediateData>);
    // Commits to a random linear combination of `polynomials` weighted by
    // powers of `aggregation_coefficient`.
    fn commit_multiple(&self, polynomials: Vec<&Polynomial<F, Coefficients>>, degrees: Vec<usize>, aggregation_coefficient: F) -> (Self::Commitment, Option<Vec<Self::IntermediateData>>);
    fn open_single(&self, poly: &Polynomial<F, Coefficients>, at_point: F, opening_value: F, data: &Option<&Self::IntermediateData>, prng: &mut Self::Prng) -> Self::OpeningProof;
    fn open_multiple(&self, polynomials: Vec<&Polynomial<F, Coefficients>>, degrees: Vec<usize>, aggregation_coefficient: F, at_points: Vec<F>, opening_values: Vec<F>, data: &Option<Vec<&Self::IntermediateData>>, prng: &mut Self::Prng) -> Self::OpeningProof;
    fn verify_single(&self, commitment: &Self::Commitment, at_point: F, claimed_value: F, proof: &Self::OpeningProof, prng: &mut Self::Prng) -> bool;
    fn verify_multiple_openings(&self, commitments: Vec<&Self::Commitment>, at_points: Vec<F>, claimed_values: &Vec<F>, aggregation_coefficient: F, proof: &Self::OpeningProof, prng: &mut Self::Prng) -> bool;
}
use crate::pairing::{
CurveAffine,
CurveProjective,
Engine
};
use crate::pairing::ff::{
PrimeField,
Field,
PrimeFieldRepr,
ScalarEngine};
use std::sync::Arc;
use super::source::*;
use std::future::{Future};
use std::task::{Context, Poll};
use std::pin::{Pin};
extern crate futures;
use self::futures::future::{join_all, JoinAll};
use self::futures::executor::block_on;
use super::worker::{Worker, WorkerFuture};
use super::SynthesisError;
use cfg_if;
/// This genius piece of code works in the following way:
/// - choose `c` - the bit length of the region that one thread works on
/// - make `2^c - 1` buckets and initialize them with `G = infinity` (that's equivalent of zero)
/// - there is no bucket for "zero" cause it's not necessary
/// - go over the pairs `(base, scalar)`
/// - for each scalar calculate `scalar % 2^c` and add the base (without any multiplications!) to the
/// corresponding bucket
/// - at the end each bucket will have an accumulated value that should be multiplied by the corresponding factor
/// between `1` and `2^c - 1` to get the right value
/// - here comes the first trick - you don't need to do multiplications at all, just add all the buckets together
/// starting from the first one `(a + b + c + ...)` and then add to the first sum another sum of the form
/// `(b + c + d + ...)`, and then the third one `(c + d + ...)`, which will result in the proper prefactor in front of every
/// accumulator, without any multiplication operations at all
/// - that's of course not enough, so spawn the next thread
/// - this thread works with the same bit width `c`, but SKIPS lower bits completely, so it actually takes values
/// in the form `(scalar >> c) % 2^c`, so works on the next region
/// - spawn more threads until you exhaust all the bit length
/// - you will get roughly `[bitlength / c] + 1` accumulators
/// - double the highest accumulator enough times, add to the next one, double the result, add the next accumulator, continue
///
/// Demo why it works:
/// ```text
/// a * G + b * H = (a_2 * (2^c)^2 + a_1 * (2^c)^1 + a_0) * G + (b_2 * (2^c)^2 + b_1 * (2^c)^1 + b_0) * H
/// ```
/// - make buckets over `0` labeled coefficients
/// - make buckets over `1` labeled coefficients
/// - make buckets over `2` labeled coefficients
/// - accumulators over each set of buckets will have an implicit factor of `(2^c)^i`, so before summing them up
/// "higher" accumulators must be doubled `c` times
///
fn multiexp_inner<Q, D, G, S>(
    pool: &Worker,
    bases: S,
    density_map: D,
    exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>,
    skip: u32,
    c: u32,
    handle_trivial: bool
) -> WorkerFuture< <G as CurveAffine>::Projective, SynthesisError>
    where for<'a> &'a Q: QueryDensity,
          D: Send + Sync + 'static + Clone + AsRef<Q>,
          G: CurveAffine,
          S: SourceBuilder<G>
{
    // Perform this region of the multiexp
    let this = {
        // let bases = bases.clone();
        // let exponents = exponents.clone();
        // let density_map = density_map.clone();

        // This is a Pippenger’s algorithm
        pool.compute(move || {
            // Accumulate the result
            let mut acc = G::Projective::zero();

            // Build a source for the bases
            let mut bases = bases.new();

            // Create buckets to place remainders s mod 2^c,
            // it will be 2^c - 1 buckets (no bucket for zeroes)

            // Create space for the buckets
            let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];

            let zero = <G::Engine as ScalarEngine>::Fr::zero().into_repr();
            let one = <G::Engine as ScalarEngine>::Fr::one().into_repr();

            // Sort the bases into buckets
            for (&exp, density) in exponents.iter().zip(density_map.as_ref().iter()) {
                // Go over density and exponents
                if density {
                    if exp == zero {
                        // a zero exponent contributes nothing; skip the base
                        // to keep the base and exponent streams aligned
                        bases.skip(1)?;
                    } else if exp == one {
                        // exponent == 1: add the base itself, but only once
                        // across all windows (only the skip == 0 window has
                        // handle_trivial == true)
                        if handle_trivial {
                            bases.add_assign_mixed(&mut acc)?;
                        } else {
                            bases.skip(1)?;
                        }
                    } else {
                        // Place multiplication into the bucket: Separate s * P as
                        // (s/2^c) * P + (s mod 2^c) P
                        // First multiplication is c bits less, so one can do it,
                        // sum results from different buckets and double it c times,
                        // then add with (s mod 2^c) P parts
                        let mut exp = exp;
                        exp.shr(skip);
                        // digit for this window: (exp >> skip) mod 2^c
                        let exp = exp.as_ref()[0] % (1 << c);

                        if exp != 0 {
                            bases.add_assign_mixed(&mut buckets[(exp - 1) as usize])?;
                        } else {
                            bases.skip(1)?;
                        }
                    }
                }
            }

            // Summation by parts
            // e.g. 3a + 2b + 1c = a +
            //      (a) + b +
            //      ((a) + b) + c
            let mut running_sum = G::Projective::zero();
            for exp in buckets.into_iter().rev() {
                running_sum.add_assign(&exp);
                acc.add_assign(&running_sum);
            }

            Ok(acc)
        })
    };

    this
}
#[inline(always)]
fn multiexp_inner_impl<Q, D, G, S>(
    pool: &Worker,
    bases: S,
    density_map: D,
    exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>,
    skip: u32,
    c: u32,
    handle_trivial: bool
) -> WorkerFuture< <G as CurveAffine>::Projective, SynthesisError>
    where for<'a> &'a Q: QueryDensity,
          D: Send + Sync + 'static + Clone + AsRef<Q>,
          G: CurveAffine,
          S: SourceBuilder<G>
{
    // Single switch point between the plain and the prefetching
    // per-window implementations; currently the plain one is used.
    multiexp_inner(pool, bases, density_map, exponents, skip, c, handle_trivial)
    // multiexp_inner_with_prefetch_stable(pool, bases, density_map, exponents, skip, c, handle_trivial)
}
/// Same single-window Pippenger pass as `multiexp_inner`, but prefetches the
/// *next* exponent's bucket into L1 cache while the current pair is handled.
///
/// Fix: the bucket digit is `(exp >> skip) & mask` with `mask = 2^c - 1`.
/// The previous code reduced the digit with `% mask` (i.e. mod `2^c - 1`),
/// which disagreed with the `& mask` used by the prefetch path above and
/// produced wrong bucket indexes.
fn multiexp_inner_with_prefetch_stable<Q, D, G, S>(
    pool: &Worker,
    bases: S,
    density_map: D,
    exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>,
    skip: u32,
    c: u32,
    handle_trivial: bool
) -> WorkerFuture< <G as CurveAffine>::Projective, SynthesisError>
    where for<'a> &'a Q: QueryDensity,
          D: Send + Sync + 'static + Clone + AsRef<Q>,
          G: CurveAffine,
          S: SourceBuilder<G>
{
    // `bases`, `exponents` and `density_map` are moved into the closure
    // directly; the previous clone-then-shadow dance was redundant.
    // This is a Pippenger’s algorithm
    pool.compute(move || {
        // Accumulate the result
        let mut acc = G::Projective::zero();

        // Build a source for the bases
        let mut bases = bases.new();

        // 2^c - 1 buckets: bucket `i` collects bases whose digit is `i + 1`
        // (no bucket is needed for a zero digit)
        let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];

        let zero = <G::Engine as ScalarEngine>::Fr::zero().into_repr();
        let one = <G::Engine as ScalarEngine>::Fr::one().into_repr();

        // one-element tail so the "next exponent" lookahead is always valid
        let padding = Arc::new(vec![zero]);

        let mask = (1u64 << c) - 1;

        // Sort the bases into buckets
        for ((&exp, &next_exp), density) in exponents.iter()
                            .zip(exponents.iter().skip(1).chain(padding.iter()))
                            .zip(density_map.as_ref().iter()) {
            // no matter what happens - prefetch next bucket
            if next_exp != zero && next_exp != one {
                let mut next_exp = next_exp;
                next_exp.shr(skip);
                let next_exp = next_exp.as_ref()[0] & mask;
                if next_exp != 0 {
                    let p: *const <G as CurveAffine>::Projective = &buckets[(next_exp - 1) as usize];
                    crate::prefetch::prefetch_l1_pointer(p);
                }
            }
            // Go over density and exponents
            if density {
                if exp == zero {
                    bases.skip(1)?;
                } else if exp == one {
                    if handle_trivial {
                        bases.add_assign_mixed(&mut acc)?;
                    } else {
                        bases.skip(1)?;
                    }
                } else {
                    // Place multiplication into the bucket: Separate s * P as
                    // (s/2^c) * P + (s mod 2^c) P
                    // First multiplication is c bits less, so one can do it,
                    // sum results from different buckets and double it c times,
                    // then add with (s mod 2^c) P parts
                    let mut exp = exp;
                    exp.shr(skip);
                    // was `% mask` (mod 2^c - 1): did not match the
                    // prefetched bucket and mapped digits incorrectly
                    let exp = exp.as_ref()[0] & mask;

                    if exp != 0 {
                        bases.add_assign_mixed(&mut buckets[(exp - 1) as usize])?;
                    } else {
                        bases.skip(1)?;
                    }
                }
            }
        }

        // Summation by parts
        // e.g. 3a + 2b + 1c = a +
        //      (a) + b +
        //      ((a) + b) + c
        let mut running_sum = G::Projective::zero();
        for exp in buckets.into_iter().rev() {
            running_sum.add_assign(&exp);
            acc.add_assign(&running_sum);
        }

        Ok(acc)
    })
}
/// Perform multi-exponentiation. The caller is responsible for ensuring the
/// query size is the same as the number of exponents.
pub fn future_based_multiexp<G: CurveAffine>(
    pool: &Worker,
    bases: Arc<Vec<G>>,
    exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>
) -> ChunksJoiner< <G as CurveAffine>::Projective >
{
    assert!(exponents.len() <= bases.len());
    // Window-width heuristic: start from ln(n) (the natural log of the
    // number of exponents), then widen the window if that would create
    // fewer window chunks than there are worker CPUs.
    let c = if exponents.len() < 32 {
        3u32
    } else {
        let mut width = (f64::from(exponents.len() as u32)).ln().ceil() as u32;
        let mut num_chunks = <G::Scalar as PrimeField>::NUM_BITS / width;
        if <G::Scalar as PrimeField>::NUM_BITS % width != 0 {
            num_chunks += 1;
        }

        if num_chunks < pool.cpus as u32 {
            width = <G::Scalar as PrimeField>::NUM_BITS / (pool.cpus as u32);
            if <G::Scalar as PrimeField>::NUM_BITS % (pool.cpus as u32) != 0 {
                width += 1;
            }
        }

        width
    };

    // One future per c-bit window; only the lowest window (skip == 0) is
    // asked to account for exponents equal to one (handle_trivial == true).
    let mut skip = 0;
    let mut futures = Vec::with_capacity((<G::Engine as ScalarEngine>::Fr::NUM_BITS / c + 1) as usize);

    while skip < <G::Engine as ScalarEngine>::Fr::NUM_BITS {
        let chunk_future = if skip == 0 {
            future_based_dense_multiexp_impl(pool, bases.clone(), exponents.clone(), 0, c, true)
        } else {
            future_based_dense_multiexp_impl(pool, bases.clone(), exponents.clone(), skip, c, false)
        };

        futures.push(chunk_future);
        skip += c;
    }

    // ChunksJoiner recombines the per-window results (doubling by c bits
    // between windows) once all futures complete.
    let join = join_all(futures);

    ChunksJoiner {
        join,
        c
    }
}
/// Perform multi-exponentiation. The caller is responsible for ensuring the
/// query size is the same as the number of exponents.
/// Spawns one `future_based_dense_multiexp_impl` future per `c`-bit window of
/// the scalar field and joins them into a `ChunksJoiner`, using the
/// caller-provided window width `c` instead of a heuristic.
pub fn future_based_dense_multiexp_over_fixed_width_windows<G: CurveAffine>(
    pool: &Worker,
    bases: Arc<Vec<G>>,
    exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>,
    c: u32
) -> ChunksJoiner< <G as CurveAffine>::Projective >
{
    assert!(exponents.len() <= bases.len());

    let num_bits = <G::Engine as ScalarEngine>::Fr::NUM_BITS;
    let mut futures = Vec::with_capacity((num_bits / c + 1) as usize);

    // One future per window; only the window at bit offset 0 accounts for
    // exponents equal to one.
    for skip in (0..num_bits).step_by(c as usize) {
        futures.push(future_based_dense_multiexp_impl(
            pool,
            bases.clone(),
            exponents.clone(),
            skip,
            c,
            skip == 0,
        ));
    }

    ChunksJoiner { join: join_all(futures), c }
}
/// One `c`-bit window of a dense Pippenger multiexp (no density map), run as
/// a worker future. The digit `(exp >> skip) mod 2^c` selects a bucket; the
/// next scalar's bucket is prefetched into L1 while the current pair is
/// processed. `handle_trivial` makes this window account for exponents equal
/// to one (only the `skip == 0` window should pass `true`).
fn future_based_dense_multiexp_impl<G: CurveAffine>(
    pool: &Worker,
    bases: Arc<Vec<G>>,
    exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>,
    skip: u32,
    c: u32,
    handle_trivial: bool
) -> WorkerFuture< <G as CurveAffine>::Projective, SynthesisError>
{
    // `bases` and `exponents` are Arcs and are moved into the closure
    // directly (the previous version cloned `bases` twice, redundantly).
    // This is a Pippenger’s algorithm
    pool.compute(move || {
        // Accumulate the result
        let mut acc = G::Projective::zero();

        // 2^c - 1 buckets: bucket `i` collects bases whose digit is `i + 1`
        // (no bucket is needed for a zero digit)
        let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];

        let zero = <G::Engine as ScalarEngine>::Fr::zero().into_repr();
        let one = <G::Engine as ScalarEngine>::Fr::one().into_repr();

        // one-element tail so the "next exponent" lookahead is always valid
        let padding = Arc::new(vec![zero]);

        // NB: here `mask` is 2^c itself and the digit is taken with `%`
        let mask = 1 << c;

        // Sort the bases into buckets
        for ((&exp, base), &next_exp) in exponents.iter()
                            .zip(bases.iter())
                            .zip(exponents.iter().skip(1).chain(padding.iter())) {
            // no matter what happens - prefetch next bucket
            if next_exp != zero && next_exp != one {
                let mut next_exp = next_exp;
                next_exp.shr(skip);
                let next_exp = next_exp.as_ref()[0] % mask;
                if next_exp != 0 {
                    let p: *const <G as CurveAffine>::Projective = &buckets[(next_exp - 1) as usize];
                    crate::prefetch::prefetch_l1_pointer(p);
                }
            }
            // Go over density and exponents
            if exp == zero {
                continue
            } else if exp == one {
                if handle_trivial {
                    acc.add_assign_mixed(base);
                } else {
                    continue
                }
            } else {
                // Place multiplication into the bucket: Separate s * P as
                // (s/2^c) * P + (s mod 2^c) P
                // First multiplication is c bits less, so one can do it,
                // sum results from different buckets and double it c times,
                // then add with (s mod 2^c) P parts
                let mut exp = exp;
                exp.shr(skip);
                let exp = exp.as_ref()[0] % mask;

                if exp != 0 {
                    (&mut buckets[(exp - 1) as usize]).add_assign_mixed(base);
                } else {
                    continue;
                }
            }
        }

        // Summation by parts
        // e.g. 3a + 2b + 1c = a +
        //      (a) + b +
        //      ((a) + b) + c
        let mut running_sum = G::Projective::zero();
        for exp in buckets.into_iter().rev() {
            running_sum.add_assign(&exp);
            acc.add_assign(&running_sum);
        }

        Ok(acc)
    })
}
/// Buffered variant of `future_based_dense_multiexp_impl`: instead of adding
/// each base into its bucket immediately, bases are staged in per-bucket
/// buffers and flushed in batches of `BUFFER_SIZE`, trading extra memory for
/// better locality. No prefetching is done here.
fn future_based_buffered_dense_multiexp_impl<G: CurveAffine>(
    pool: &Worker,
    bases: Arc<Vec<G>>,
    exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>,
    skip: u32,
    c: u32,
    handle_trivial: bool
) -> WorkerFuture< <G as CurveAffine>::Projective, SynthesisError>
{
    // `bases` and `exponents` are Arcs and are moved into the closure
    // directly (the previous version cloned `bases` twice, redundantly).
    // This is a Pippenger’s algorithm
    pool.compute(move || {
        // Accumulate the result
        let mut acc = G::Projective::zero();

        // 2^c - 1 buckets: bucket `i` collects bases whose digit is `i + 1`
        let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];

        let zero = <G::Engine as ScalarEngine>::Fr::zero().into_repr();
        let one = <G::Engine as ScalarEngine>::Fr::one().into_repr();

        // NB: here `mask` is 2^c itself and the digit is taken with `%`
        let mask = 1 << c;

        const BUFFER_SIZE: usize = 64;
        // staging buffers, one per bucket
        let mut buffers: Vec<Vec<G>> = vec![Vec::with_capacity(BUFFER_SIZE); (1 << c) - 1];

        // Sort the bases into buckets
        for (&exp, &base) in exponents.iter()
                            .zip(bases.iter()) {
            // Go over density and exponents
            if exp == zero {
                continue
            } else if exp == one {
                if handle_trivial {
                    acc.add_assign_mixed(&base);
                } else {
                    continue
                }
            } else {
                // digit = (exp >> skip) mod 2^c selects the bucket
                let mut exp = exp;
                exp.shr(skip);
                let exp = exp.as_ref()[0] % mask;

                if exp != 0 {
                    let idx = (exp - 1) as usize;
                    // flush a full staging buffer into its bucket first
                    if buffers[idx].len() == BUFFER_SIZE {
                        let mut el = buckets[idx];
                        for b in buffers[idx].iter(){
                            el.add_assign_mixed(&b);
                        }
                        buffers[idx].truncate(0);
                        buckets[idx] = el;
                    }

                    buffers[idx].push(base);
                } else {
                    continue;
                }
            }
        }

        // we have some unprocessed left, so add them to the buckets
        for (idx, buffer) in buffers.into_iter().enumerate() {
            let mut el = buckets[idx];
            for b in buffer.into_iter() {
                el.add_assign_mixed(&b);
            }
            buckets[idx] = el;
        }

        // Summation by parts
        // e.g. 3a + 2b + 1c = a +
        //      (a) + b +
        //      ((a) + b) + c
        let mut running_sum = G::Projective::zero();
        for exp in buckets.into_iter().rev() {
            running_sum.add_assign(&exp);
            acc.add_assign(&running_sum);
        }

        Ok(acc)
    })
}
/// Perform multi-exponentiation. The caller is responsible for ensuring the
/// query size is the same as the number of exponents.
pub fn multiexp<Q, D, G, S>(
    pool: &Worker,
    bases: S,
    density_map: D,
    exponents: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>
) -> ChunksJoiner< <G as CurveAffine>::Projective >
    where for<'a> &'a Q: QueryDensity,
          D: Send + Sync + 'static + Clone + AsRef<Q>,
          G: CurveAffine,
          S: SourceBuilder<G>
{
    // Window width heuristic: ln (natural log) of the number of exponents,
    // with a fixed small window for tiny inputs.
    let c = if exponents.len() < 32 {
        3u32
    } else {
        (f64::from(exponents.len() as u32)).ln().ceil() as u32
    };

    if let Some(query_size) = density_map.as_ref().get_query_size() {
        // If the density map has a known query size, it should not be
        // inconsistent with the number of exponents.
        assert!(query_size == exponents.len());
    }

    // One future per c-bit window; only the lowest window accounts for
    // exponents equal to one (handle_trivial == true).
    let mut skip = 0;
    let mut futures = Vec::with_capacity((<G::Engine as ScalarEngine>::Fr::NUM_BITS / c + 1) as usize);

    while skip < <G::Engine as ScalarEngine>::Fr::NUM_BITS {
        let chunk_future = if skip == 0 {
            multiexp_inner_impl(pool, bases.clone(), density_map.clone(), exponents.clone(), 0, c, true)
        } else {
            multiexp_inner_impl(pool, bases.clone(), density_map.clone(), exponents.clone(), skip, c, false)
        };

        futures.push(chunk_future);
        skip += c;
    }

    let join = join_all(futures);

    ChunksJoiner {
        join,
        c
    }
}
/// Same as `multiexp`, but with a caller-chosen window width `c` instead of
/// the size-based heuristic.
pub(crate) fn multiexp_with_fixed_width<Q, D, G, S>(
    pool: &Worker,
    bases: S,
    density_map: D,
    exponents: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>,
    c: u32
) -> ChunksJoiner< <G as CurveAffine>::Projective >
    where for<'a> &'a Q: QueryDensity,
          D: Send + Sync + 'static + Clone + AsRef<Q>,
          G: CurveAffine,
          S: SourceBuilder<G>
{
    // A density map with a known query size must agree with the number of
    // exponents supplied.
    if let Some(query_size) = density_map.as_ref().get_query_size() {
        assert!(query_size == exponents.len());
    }

    let num_bits = <G::Engine as ScalarEngine>::Fr::NUM_BITS;
    let mut futures = Vec::with_capacity((num_bits / c + 1) as usize);

    // One future per c-bit window; only the window at bit offset 0 accounts
    // for exponents equal to one.
    for skip in (0..num_bits).step_by(c as usize) {
        futures.push(multiexp_inner_impl(
            pool,
            bases.clone(),
            density_map.clone(),
            exponents.clone(),
            skip,
            c,
            skip == 0,
        ));
    }

    ChunksJoiner { join: join_all(futures), c }
}
/// Future that joins the per-window multiexp futures and remembers the
/// window width `c` needed to recombine their results.
pub struct ChunksJoiner<G: CurveProjective> {
    // All per-window worker futures, joined together.
    join: JoinAll< WorkerFuture<G, SynthesisError> >,
    // Window width in bits; each higher window is doubled `c` times
    // during recombination.
    c: u32
}
impl<G: CurveProjective> Future for ChunksJoiner<G> {
    type Output = Result<G, SynthesisError>;

    /// Polls the joined per-window futures; once all are ready, folds
    /// them into the final result with `join_chunks`.
    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output>
    {
        let c = self.as_ref().c;
        // Pin-projection to the inner `join` future; this appears sound
        // because `join` is never moved out of `self` — confirm no other
        // code relies on `ChunksJoiner` being `Unpin`.
        let join = unsafe { self.map_unchecked_mut(|s| &mut s.join) };
        match join.poll(cx) {
            Poll::Ready(v) => {
                // all windows finished: recombine with doubling by `c` bits
                let v = join_chunks(v, c);
                return Poll::Ready(v);
            },
            Poll::Pending => {
                return Poll::Pending;
            }
        }
    }
}
impl<G: CurveProjective> ChunksJoiner<G> {
    /// Blocks the current thread until all window futures complete and
    /// returns the combined multiexp result.
    pub fn wait(self) -> <Self as Future>::Output {
        block_on(self)
    }
}
/// Folds per-window multiexp results into the final value: starting from the
/// most significant window, repeatedly double `c` times (shifting up by one
/// window width) and add the next-lower window — Horner's rule over radix-2^c
/// digits.
///
/// Returns `G::zero()` for an empty chunk list; propagates the first error
/// encountered.
fn join_chunks<G: CurveProjective>
    (chunks: Vec<Result<G, SynthesisError>>, c: u32) -> Result<G, SynthesisError> {
    if chunks.is_empty() {
        return Ok(G::zero());
    }

    let mut iter = chunks.into_iter().rev();
    let higher = iter.next().expect("is some chunk result");
    let mut higher = higher?;

    for chunk in iter {
        let this = chunk?;
        // shift the already-accumulated higher windows up by one window
        for _ in 0..c {
            higher.double();
        }

        higher.add_assign(&this);
    }

    Ok(higher)
}
/// Perform multi-exponentiation. The caller is responsible for ensuring that
/// the number of bases is the same as the number of exponents.
#[allow(dead_code)]
pub fn dense_multiexp<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr]
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    // mismatched lengths are reported as a synthesis error, not a panic
    if exponents.len() != bases.len() {
        return Err(SynthesisError::AssignmentMissing);
    }
    // do some heuristics here
    // we proceed chunks of all points, and all workers do the same work over
    // some scalar width, so to have expected number of additions into buckets to 1
    // we have to take log2 from the expected chunk(!) length
    let c = if exponents.len() < 32 {
        3u32
    } else {
        let chunk_len = pool.get_chunk_size(exponents.len());
        // NB: this is the natural log, not log2, of the chunk length
        (f64::from(chunk_len as u32)).ln().ceil() as u32
        // (f64::from(exponents.len() as u32)).ln().ceil() as u32
    };

    // dense_multiexp_inner_unrolled_with_prefetch(pool, bases, exponents, 0, c, true)
    dense_multiexp_inner(pool, bases, exponents, 0, c, true)
}
// Recursive dense window pass: processes the `c`-bit window at bit offset
// `skip` across all (base, exponent) pairs in parallel chunks, then recurses
// into the next-higher window, doubling that result `c` times before adding
// this window's contribution.
fn dense_multiexp_inner<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
    mut skip: u32,
    c: u32,
    handle_trivial: bool
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    use std::sync::{Mutex};
    // Perform this region of the multiexp. We use a different strategy - go over region in parallel,
    // then over another region, etc. No Arc required
    let this = {
        // let mask = (1u64 << c) - 1u64;
        let this_region = Mutex::new(<G as CurveAffine>::Projective::zero());
        let arc = Arc::new(this_region);
        pool.scope(bases.len(), |scope, chunk| {
            for (base, exp) in bases.chunks(chunk).zip(exponents.chunks(chunk)) {
                let this_region_rwlock = arc.clone();
                // let handle =
                scope.spawn(move |_| {
                    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];
                    // Accumulate the result
                    let mut acc = G::Projective::zero();
                    let zero = <G::Engine as ScalarEngine>::Fr::zero().into_repr();
                    let one = <G::Engine as ScalarEngine>::Fr::one().into_repr();

                    for (base, &exp) in base.iter().zip(exp.iter()) {
                        // let index = (exp.as_ref()[0] & mask) as usize;

                        // if index != 0 {
                        //     buckets[index - 1].add_assign_mixed(base);
                        // }

                        // exp.shr(c as u32);

                        if exp != zero {
                            if exp == one {
                                // exponent == 1 is only added in the lowest
                                // window (handle_trivial == true)
                                if handle_trivial {
                                    acc.add_assign_mixed(base);
                                }
                            } else {
                                // digit = (exp >> skip) mod 2^c selects a bucket
                                let mut exp = exp;
                                exp.shr(skip);
                                let exp = exp.as_ref()[0] % (1 << c);

                                if exp != 0 {
                                    buckets[(exp - 1) as usize].add_assign_mixed(base);
                                }
                            }
                        }
                    }

                    // buckets are filled with the corresponding accumulated value, now sum
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.into_iter().rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }

                    // fold this chunk's partial sum into the shared region total
                    let mut guard = match this_region_rwlock.lock() {
                        Ok(guard) => guard,
                        Err(_) => {
                            panic!("poisoned!");
                            // poisoned.into_inner()
                        }
                    };

                    (*guard).add_assign(&acc);
                });
            }
        });

        // all spawned threads are joined by the scope; the Arc is unique here
        let this_region = Arc::try_unwrap(arc).unwrap();
        let this_region = this_region.into_inner().unwrap();

        this_region
    };

    skip += c;

    if skip >= <G::Engine as ScalarEngine>::Fr::NUM_BITS {
        // There isn't another region, and this will be the highest region
        return Ok(this);
    } else {
        // next region is actually higher than this one, so double it enough times
        let mut next_region = dense_multiexp_inner(
            pool, bases, exponents, skip, c, false).unwrap();
        for _ in 0..c {
            next_region.double();
        }

        next_region.add_assign(&this);

        return Ok(next_region);
    }
}
/// Chooses a Pippenger window width (in bits) for a multiexp of `length`
/// pairs split into per-worker chunks of `chunk_length`: ln(chunk_length)
/// rounded to the nearest integer (ties round down), with a fixed 3-bit
/// window for tiny inputs.
fn get_window_size_for_length(length: usize, chunk_length: usize) -> u32 {
    if length < 32 {
        return 3u32;
    }

    let exact = (f64::from(chunk_length as u32)).ln();
    let rounded_down = exact.floor();

    if exact > rounded_down + 0.5f64 {
        exact.ceil() as u32
    } else {
        rounded_down as u32
    }

    // (f64::from(chunk_length as u32)).ln().ceil() as u32
    // (f64::from(length as u32)).ln().ceil() as u32
}
/// Perform multi-exponentiation. The caller is responsible for ensuring that
/// the number of bases is the same as the number of exponents.
#[allow(dead_code)]
/// Dense multiexp where all worker threads sweep the same `c`-bit window of
/// every scalar simultaneously, synchronizing via barriers between
/// `SYNCHRONIZATION_STEP`-sized stretches of the input. Bases and exponents
/// are copied into small stack-resident arrays (`READ_BY` at a time) to keep
/// reads linear, and the target bucket is prefetched one element ahead.
///
/// Fix: the final bucket reduction previously seeded the accumulator with
/// `buckets[1]` and iterated `skip(2)`, producing `b1 + sum((i-1) * b_i)`
/// instead of the required `sum(i * b_i)`; the running-sum loop now starts
/// from zero over `skip(1)`.
pub fn dense_multiexp_uniform<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr]
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    if exponents.len() != bases.len() {
        return Err(SynthesisError::AssignmentMissing);
    }
    // do some heuristics here
    // we proceed chunks of all points, and all workers do the same work over
    // some scalar width, so to have expected number of additions into buckets to 1
    // we have to take log2 from the expected chunk(!) length
    let c = if exponents.len() < 32 {
        3u32
    } else {
        let chunk_len = pool.get_chunk_size(exponents.len());
        (f64::from(chunk_len as u32)).ln().ceil() as u32
        // (f64::from(exponents.len() as u32)).ln().ceil() as u32
    };

    let num_threads = pool.cpus;

    let mut subresults = vec![<G as CurveAffine>::Projective::zero(); num_threads];

    use std::sync::{Arc, Barrier};

    const SYNCHRONIZATION_STEP: usize = 1 << 17;
    const READ_BY: usize = 1 << 7;
    const MIN_STACK_SIZE: usize = 1 << 21; // 2MB

    assert!(SYNCHRONIZATION_STEP % READ_BY == 0);

    // NOTE(review): `num_rounds` floors the division, but `chunks()` below
    // yields a partial trailing chunk when the input length is not a multiple
    // of SYNCHRONIZATION_STEP, which would index past the barrier table —
    // confirm callers pad inputs (see the commented assert further down)
    let num_rounds = bases.len() / SYNCHRONIZATION_STEP;

    // split the scalar bit length into windows of `c` bits, with a narrower
    // final window if NUM_BITS is not divisible by `c`
    let limit = ((<G::Engine as ScalarEngine>::Fr::NUM_BITS / c) + 1) as usize;
    let mut buckets_schedule = Vec::with_capacity(limit);
    let mut tmp = <G::Engine as ScalarEngine>::Fr::NUM_BITS;
    for _ in 0..limit {
        if tmp != 0 {
            let bits = if tmp >= c {
                tmp -= c;

                c
            } else {
                let tt = tmp;
                tmp = 0;

                tt
            };
            buckets_schedule.push(bits);
        } else {
            break;
        }
    }

    // one barrier per (window, synchronization round) pair
    let mut barriers = Vec::with_capacity(buckets_schedule.len());
    for _ in 0..buckets_schedule.len() {
        let mut tt = Vec::with_capacity(num_rounds);
        for _ in 0..num_rounds {
            let t = Barrier::new(num_threads);
            tt.push(t);
        }
        barriers.push(tt);
    }

    let barrs = &barriers;
    let b_schedule = &buckets_schedule;

    let g1_proj_size = std::mem::size_of::<<G as CurveAffine>::Projective>();
    let scalar_size = std::mem::size_of::<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>();

    // potentially we can keep all the buckets on a stack,
    // but it would be around 96 MB for BN254 for c = 20;
    let _space_to_keep_bases_on_stack = (1 << c) * g1_proj_size;

    let mut stack_size = (g1_proj_size + scalar_size) * READ_BY;
    if stack_size < MIN_STACK_SIZE {
        stack_size = MIN_STACK_SIZE;
    }

    let thread_step = num_threads * READ_BY;

    use crossbeam::thread;

    thread::scope(|s| {
        for (thread_idx, subresult) in subresults.iter_mut().enumerate() {
            let builder = s.builder().stack_size(stack_size);
            builder.spawn(move |_| {
                let limit = bases.len();
                let bases = &bases[..limit];
                let exponents = &exponents[..limit];
                let mut buckets = vec![<G as CurveAffine>::Projective::zero(); 1 << c];
                let mut skip_bits = 0;
                for (chunk_schedule_idx, window_size) in b_schedule.iter().enumerate() {
                    let mask = (1 << window_size) - 1;
                    if chunk_schedule_idx != 0 {
                        // reset and resize the bucket set for this window
                        buckets.truncate(0);
                        buckets.resize(1 << window_size, <G as CurveAffine>::Projective::zero());
                    }
                    for (i, (bases, exponents)) in bases.chunks(SYNCHRONIZATION_STEP)
                                        .zip(exponents.chunks(SYNCHRONIZATION_STEP))
                                        .enumerate() {
                        let num_subchunks = bases.len() / thread_step;
                        let remainder_start = num_subchunks * thread_step;
                        let remainder_end = bases.len();
                        // assert_eq!(remainder_start, remainder_end, "only support power of two multiexp size for now");
                        let mut bases_holder = [G::zero(); READ_BY];
                        let mut exponents_holder = [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr::default(); READ_BY];

                        // each thread takes strided READ_BY-sized slices,
                        // copying them to the stack before bucketing
                        for subchunk_start_idx in ((thread_idx*READ_BY)..remainder_start).step_by(thread_step) {
                            bases_holder.copy_from_slice(&bases[subchunk_start_idx..(subchunk_start_idx+READ_BY)]);
                            exponents_holder.copy_from_slice(&exponents[subchunk_start_idx..(subchunk_start_idx+READ_BY)]);

                            let mut exps_iter = exponents_holder.iter();
                            let mut bases_iter = bases_holder.iter();

                            // semi-unroll first
                            let mut tmp = *exps_iter.next().unwrap();
                            tmp.shr(skip_bits);
                            let mut this_exp = (tmp.as_ref()[0] & mask) as usize;

                            let mut base_prefetch_counter = 0;
                            crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                            crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);

                            for (&next_exp, this_base) in exps_iter.zip(&mut bases_iter) {
                                if this_exp != 0 {
                                    buckets[this_exp].add_assign_mixed(this_base);
                                }

                                let mut next_exp = next_exp;
                                next_exp.shr(skip_bits);
                                this_exp = (next_exp.as_ref()[0] & mask) as usize;

                                base_prefetch_counter += 1;
                                crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                                crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);
                            }

                            // finish
                            if this_exp != 0 {
                                let last_base = bases_iter.next().unwrap();
                                buckets[this_exp].add_assign_mixed(last_base);
                            }
                        }

                        // process the remainder
                        let remainder_start = remainder_start + (thread_idx*READ_BY);
                        if remainder_start < remainder_end {
                            let remainder_end = if remainder_start + READ_BY > remainder_end {
                                remainder_end
                            } else {
                                remainder_start + READ_BY
                            };

                            let exponents_holder = &exponents[remainder_start..remainder_end];
                            let bases_holder = &bases[remainder_start..remainder_end];

                            let mut exps_iter = exponents_holder.iter();
                            let mut bases_iter = bases_holder.iter();

                            // semi-unroll first
                            let mut tmp = *exps_iter.next().unwrap();
                            tmp.shr(skip_bits);
                            let mut this_exp = (tmp.as_ref()[0] & mask) as usize;

                            let mut base_prefetch_counter = 0;
                            crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                            crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);

                            for (&next_exp, this_base) in exps_iter.zip(&mut bases_iter) {
                                if this_exp != 0 {
                                    buckets[this_exp].add_assign_mixed(this_base);
                                }

                                let mut next_exp = next_exp;
                                next_exp.shr(skip_bits);
                                this_exp = (next_exp.as_ref()[0] & mask) as usize;

                                base_prefetch_counter += 1;
                                crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                                crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);
                            }

                            // finish
                            if this_exp != 0 {
                                let last_base = bases_iter.next().unwrap();
                                buckets[this_exp].add_assign_mixed(last_base);
                            }
                        }

                        // all threads must finish this stretch before any
                        // moves on (they share the input regions)
                        barrs[chunk_schedule_idx][i].wait();
                    }

                    // Running-sum reduction: summing suffix sums weights
                    // bucket `i` by exactly `i`; bucket 0 (zero digit) is
                    // skipped. (The previous version seeded `acc` with
                    // `buckets[1]` and skipped it in the loop, which
                    // under-counted every bucket above index 1.)
                    let mut acc = G::Projective::zero();
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.iter().skip(1).rev() {
                        running_sum.add_assign(exp);
                        acc.add_assign(&running_sum);
                    }

                    // bring this window's contribution to its bit offset
                    for _ in 0..skip_bits {
                        acc.double();
                    }

                    subresult.add_assign(&acc);

                    skip_bits += window_size;
                }
            }).unwrap();
        }
    }).unwrap();

    // sum the per-thread partial results
    let mut results = subresults.into_iter();
    let mut result = results.next().expect("at least one worker thread");
    for t in results {
        result.add_assign(&t);
    }

    Ok(result)
}
/// Perform multi-exponentiation. The caller is responsible for ensuring that
/// the number of bases is the same as the number of exponents.
pub fn stack_allocated_dense_multiexp<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    if exponents.len() != bases.len() {
        return Err(SynthesisError::AssignmentMissing);
    }

    // pick a window width from the per-worker chunk length, then dispatch to
    // the monomorphized implementation for that width
    let chunk_len = pool.get_chunk_size(exponents.len());
    let c = get_window_size_for_length(exponents.len(), chunk_len);

    // NOTE(review): widths outside 12..=18 panic via `unimplemented!`; small
    // inputs select c == 3 (see get_window_size_for_length) and would hit
    // that branch — confirm callers only use this for large multiexp sizes
    match c {
        12 => stack_allocated_dense_multiexp_12(pool, bases, exponents),
        13 => stack_allocated_dense_multiexp_13(pool, bases, exponents),
        14 => stack_allocated_dense_multiexp_14(pool, bases, exponents),
        15 => stack_allocated_dense_multiexp_15(pool, bases, exponents),
        16 => stack_allocated_dense_multiexp_16(pool, bases, exponents),
        17 => stack_allocated_dense_multiexp_17(pool, bases, exponents),
        18 => stack_allocated_dense_multiexp_18(pool, bases, exponents),
        _ => unimplemented!("not implemented for windows = {}", c)
    }
}
// /// Perform multi-exponentiation. The caller is responsible for ensuring that
// /// the number of bases is the same as the number of exponents.
// pub fn producer_consumer_dense_multiexp<G: CurveAffine>(
// pool: &Worker,
// bases: & [G],
// exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
// ) -> Result<<G as CurveAffine>::Projective, SynthesisError>
// {
// if exponents.len() != bases.len() {
// return Err(SynthesisError::AssignmentMissing);
// }
// let num_workers = pool.cpus - 1;
// let mut window_size = (<G::Engine as ScalarEngine>::Fr::NUM_BITS as usize) / num_workers;
// if (<G::Engine as ScalarEngine>::Fr::NUM_BITS as usize) % num_workers != 0 {
// window_size += 1;
// }
// if window_size > 20 {
// println!("Degrading to normal one");
// return dense_multiexp(pool, bases, exponents);
// }
// println!("Windows size = {} for {} multiexp size on {} CPUs", window_size, exponents.len(), pool.cpus);
// use crossbeam::thread;
// use crossbeam_queue::{ArrayQueue};
// const CAPACITY: usize = 1 << 16;
// let mask = (1u64 << window_size) - 1u64;
// use std::sync::atomic::{AtomicBool, Ordering};
// let end = AtomicBool::from(false);
// let finished = &end;
// thread::scope(|s| {
// let mut txes = Vec::with_capacity(num_workers);
// let mut rxes = Vec::with_capacity(num_workers);
// for _ in 0..num_workers {
// let queue = ArrayQueue::<(G, usize)>::new(CAPACITY);
// let queue = Arc::from(queue);
// txes.push(queue.clone());
// rxes.push(queue);
// }
// // first we spawn thread that produces indexes
// s.spawn(move |_| {
// for (exp, base) in (exponents.iter().copied()).zip(bases.iter().copied()) {
// let mut exp = exp;
// let mut skip = 0u32;
// for c in txes.iter() {
// exp.shr(skip);
// let index = (exp.as_ref()[0] & mask) as usize;
// skip += window_size as u32;
// 'inner: loop {
// if !c.is_full() {
// c.push((base, index)).unwrap();
// break 'inner;
// }
// }
// }
// }
// finished.store(true, Ordering::Relaxed);
// });
// for rx in rxes.into_iter() {
// s.spawn(move |_| {
// let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << window_size) - 1];
// loop {
// if !rx.is_empty() {
// let (base, index) = rx.pop().unwrap();
// if index != 0 {
// buckets[index - 1].add_assign_mixed(&base);
// }
// } else {
// let ended = finished.load(Ordering::Relaxed);
// if ended {
// break;
// }
// }
// }
// let mut running_sum = G::Projective::zero();
// let mut acc = G::Projective::zero();
// for exp in buckets.into_iter().rev() {
// running_sum.add_assign(&exp);
// acc.add_assign(&running_sum);
// }
// });
// }
// }).unwrap();
// Ok(G::Projective::zero())
// }
/// Perform multi-exponentiation. The caller is responsible for ensuring that
/// the number of bases is the same as the number of exponents.
///
/// Picks a window width from the input length and dispatches to the matching
/// monomorphized stack-allocated implementation.
pub fn stack_allocated_uncompensated_dense_multiexp<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    if bases.len() != exponents.len() {
        return Err(SynthesisError::AssignmentMissing);
    }
    // Window-width heuristic. NOTE(review): the comment convention elsewhere in
    // this file speaks of "log2 of the expected chunk length", but the formula
    // used here has always been ln() of the *total* length — preserved as-is.
    let window_bits = if exponents.len() < 32 {
        3u32
    } else {
        (f64::from(exponents.len() as u32)).ln().ceil() as u32
    };
    // Dispatch to a monomorphized window-specific implementation. Any width
    // outside 12..=18 (including the small-input value 3 above) panics below.
    match window_bits {
        12 => stack_allocated_dense_multiexp_12(pool, bases, exponents),
        13 => stack_allocated_dense_multiexp_13(pool, bases, exponents),
        14 => stack_allocated_dense_multiexp_14(pool, bases, exponents),
        15 => stack_allocated_dense_multiexp_15(pool, bases, exponents),
        16 => stack_allocated_dense_multiexp_16(pool, bases, exponents),
        17 => stack_allocated_dense_multiexp_17(pool, bases, exponents),
        18 => stack_allocated_dense_multiexp_18(pool, bases, exponents),
        _ => unimplemented!("not implemented for windows = {}", window_bits)
    }
}
/// Core of the stack-allocated dense multiexp for a fixed 13-bit window
/// (`c` must equal 13 — see the `assert_eq!` below).
///
/// Scheduling: the input is partitioned across `pool.cpus` threads; every
/// thread processes *all* scalar windows sequentially over its share of
/// points, copying (base, exponent) pairs to stack-local holders in bursts of
/// `READ_BY` and rendezvousing with the other threads on a `Barrier` after
/// each `SYNCHRONIZATION_STEP`-sized chunk. Per-thread partial results are
/// summed at the end.
fn stack_allocated_dense_multiexp_inner<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
    c: u32
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    const WINDOW_SIZE: usize = 13;
    // Chunk length between cross-thread barrier waits.
    const SYNCHRONIZATION_STEP: usize = 1 << 17;
    // Number of (base, exponent) pairs staged on the stack per read burst.
    const READ_BY: usize = 1 << 7;
    const MIN_STACK_SIZE: usize = 1 << 21; // 2MB
    const NUM_BUCKETS: usize = 1 << WINDOW_SIZE;
    const MASK: u64 = (1 << WINDOW_SIZE) - 1;
    assert!(SYNCHRONIZATION_STEP % READ_BY == 0);
    assert_eq!(c as usize, WINDOW_SIZE);
    let num_threads = pool.cpus;
    // One partial accumulator per thread; summed after the scope joins.
    let mut subresults = vec![<G as CurveAffine>::Projective::zero(); num_threads];
    use std::sync::{Arc, Barrier};
    // FIXME(review): this floors, but `chunks()` below yields
    // ceil(len / SYNCHRONIZATION_STEP) chunks, so for input lengths that are
    // not a multiple of SYNCHRONIZATION_STEP the lookup
    // `barrs[chunk_schedule_idx][i]` indexes past the end — confirm callers
    // only pass multiples of the step.
    let num_rounds = bases.len() / SYNCHRONIZATION_STEP;
    let mut num_windows = (<G::Engine as ScalarEngine>::Fr::NUM_BITS / c) as usize;
    if <G::Engine as ScalarEngine>::Fr::NUM_BITS % c != 0 {
        num_windows += 1;
    }
    // One barrier per (window, round) pair; every thread waits on each.
    let mut barriers = Vec::with_capacity(num_windows);
    for _ in 0..num_windows {
        let mut tt = Vec::with_capacity(num_rounds);
        for _ in 0..num_rounds {
            let t = Barrier::new(num_threads);
            tt.push(t);
        }
        barriers.push(tt);
    }
    let barrs = &barriers;
    // Estimate the per-thread stack budget for the holders plus headroom.
    let g1_projective_size = std::mem::size_of::<<G as CurveAffine>::Projective>();
    let g1_affine_size = std::mem::size_of::<G>();
    let scalar_size = std::mem::size_of::<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>();
    let usize_size = std::mem::size_of::<usize>();
    // potentially we can keep all the buckets on a stack,
    // but it would be around 2.6 MB for BN254 for c = 18;
    let space_to_keep_buckets_on_stack = (1 << WINDOW_SIZE) * g1_projective_size;
    let mut stack_size = (g1_affine_size + scalar_size + usize_size) * READ_BY + space_to_keep_buckets_on_stack + (1<<16);
    if stack_size < MIN_STACK_SIZE {
        stack_size = MIN_STACK_SIZE;
    }
    // Stride between consecutive bursts owned by the same thread.
    let thread_step = num_threads * READ_BY;
    use crossbeam::thread;
    thread::scope(|s| {
        for (thread_idx, subresult) in subresults.iter_mut().enumerate() {
            let builder = s.builder().stack_size(stack_size);
            builder.spawn(move |_| {
                let limit = bases.len();
                let bases = &bases[..limit];
                let exponents = &exponents[..limit];
                // Buckets live on this thread's (enlarged) stack.
                let mut buckets = [<G as CurveAffine>::Projective::zero(); NUM_BUCKETS];
                let mut skip_bits = 0;
                for chunk_schedule_idx in 0..num_windows {
                    // Reset buckets between windows (first window starts clean).
                    if chunk_schedule_idx != 0 {
                        buckets = [<G as CurveAffine>::Projective::zero(); NUM_BUCKETS];
                    }
                    for (i, (bases, exponents)) in bases.chunks(SYNCHRONIZATION_STEP)
                        .zip(exponents.chunks(SYNCHRONIZATION_STEP))
                        .enumerate() {
                        let num_subchunks = bases.len() / thread_step;
                        let remainder_start = num_subchunks * thread_step;
                        let remainder_end = bases.len();
                        // assert_eq!(remainder_start, remainder_end, "only support power of two multiexp size for now");
                        // Stack-local staging areas for one READ_BY burst.
                        let mut bases_holder = [G::zero(); READ_BY];
                        let mut exponents_holder = [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr::default(); READ_BY];
                        let mut indexes_holder = [0usize; READ_BY];
                        for subchunk_start_idx in ((thread_idx*READ_BY)..remainder_start).step_by(thread_step) {
                            exponents_holder.copy_from_slice(&exponents[subchunk_start_idx..(subchunk_start_idx+READ_BY)]);
                            // Pre-extract this window's bucket index for every staged scalar.
                            for (index, &exp) in indexes_holder.iter_mut().zip(exponents_holder.iter()) {
                                let mut exp = exp;
                                exp.shr(skip_bits);
                                *index = (exp.as_ref()[0] & MASK) as usize;
                            }
                            bases_holder.copy_from_slice(&bases[subchunk_start_idx..(subchunk_start_idx+READ_BY)]);
                            let mut bases_iter = bases_holder.iter();
                            let mut indexes_iter = indexes_holder.iter();
                            // semi-unroll first: stay one element ahead so the
                            // next bucket and base can be prefetched into L1
                            // while the current addition is in flight.
                            let mut this_exp = *(&mut indexes_iter).next().unwrap();
                            let mut base_prefetch_counter = 0;
                            crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                            crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);
                            for (&index, this_base) in indexes_iter.zip(&mut bases_iter) {
                                // Index 0 contributes nothing and is skipped.
                                if this_exp != 0 {
                                    buckets[this_exp].add_assign_mixed(&this_base);
                                }
                                this_exp = index;
                                base_prefetch_counter += 1;
                                crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                                crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);
                            }
                            // finish the element left over by the unroll
                            if this_exp != 0 {
                                let last_base = bases_iter.next().unwrap();
                                buckets[this_exp].add_assign_mixed(&last_base);
                            }
                        }
                        // process the remainder (partial burst at the chunk tail)
                        let remainder_start = remainder_start + (thread_idx*READ_BY);
                        if remainder_start < remainder_end {
                            let remainder_end = if remainder_start + READ_BY > remainder_end {
                                remainder_end
                            } else {
                                remainder_start + READ_BY
                            };
                            let limit = remainder_end - remainder_start;
                            exponents_holder[..limit].copy_from_slice(&exponents[remainder_start..remainder_end]);
                            for (index, &exp) in indexes_holder.iter_mut().zip(exponents_holder.iter()) {
                                let mut exp = exp;
                                exp.shr(skip_bits);
                                *index = (exp.as_ref()[0] & MASK) as usize;
                            }
                            bases_holder[..limit].copy_from_slice(&bases[remainder_start..remainder_end]);
                            let mut bases_iter = bases_holder[..limit].iter();
                            let mut indexes_iter = indexes_holder[..limit].iter();
                            // semi-unroll first (same prefetch pipeline as above)
                            let mut this_exp = *indexes_iter.next().unwrap();
                            let mut base_prefetch_counter = 0;
                            crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                            crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);
                            for (&index, this_base) in indexes_iter.zip(&mut bases_iter) {
                                if this_exp != 0 {
                                    buckets[this_exp].add_assign_mixed(this_base);
                                }
                                this_exp = index;
                                base_prefetch_counter += 1;
                                crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                                crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);
                            }
                            // finish
                            if this_exp != 0 {
                                let last_base = bases_iter.next().unwrap();
                                buckets[this_exp].add_assign_mixed(last_base);
                            }
                        }
                        // All threads sync before moving to the next chunk.
                        (&barrs[chunk_schedule_idx][i]).wait();
                    }
                    // buckets are filled with the corresponding accumulated value, now sum.
                    // NOTE(review): the running-sum reduction here starts at
                    // acc = buckets[1] and skips buckets[0..2], which yields
                    // buckets[1] + Σ_{k>=2} (k-1)·buckets[k] rather than the
                    // standard Σ_{k>=1} k·buckets[k] — presumably the
                    // "uncompensated" in the public entry point's name; confirm
                    // before reusing this reduction elsewhere.
                    let mut acc = buckets[1];
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.iter().skip(2).rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }
                    // Shift this window's contribution into position.
                    for _ in 0..skip_bits {
                        acc.double();
                    }
                    subresult.add_assign(&acc);
                    skip_bits += WINDOW_SIZE as u32;
                }
            }).unwrap();
        }
    }).unwrap();
    // Fold per-thread partials into the final result.
    let mut result = subresults.drain(0..1).collect::<Vec<_>>()[0];
    for t in subresults.into_iter() {
        result.add_assign(&t);
    }
    Ok(result)
}
/// Multi-exponentiation via map/reduce: the input is split into per-thread
/// chunks, each chunk is reduced with `serial_multiexp_inner`, and the chunk
/// results are summed.
///
/// Returns `SynthesisError::AssignmentMissing` when `bases` and `exponents`
/// differ in length.
///
/// BUG FIX: previously the spawned workers' results were discarded and the
/// function unconditionally returned the zero point; partial results are now
/// collected and summed, mirroring `map_reduce_multiexp_over_fixed_window`.
pub fn map_reduce_multiexp<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    if exponents.len() != bases.len() {
        return Err(SynthesisError::AssignmentMissing);
    }
    // Empty input: the sum over nothing is the identity.
    if bases.is_empty() {
        return Ok(<G as CurveAffine>::Projective::zero());
    }
    // do some heuristics here
    // we proceed chunks of all points, and all workers do the same work over
    // some scalar width, so to have expected number of additions into buckets to 1
    // we have to take log2 from the expected chunk(!) length
    let c = if exponents.len() < 32 {
        3u32
    } else {
        let chunk_len = pool.get_chunk_size(exponents.len());
        (f64::from(chunk_len as u32)).ln().ceil() as u32
    };
    let chunk_len = pool.get_chunk_size(exponents.len());
    // One result slot per chunk (not per thread) so no chunk is ever dropped.
    let num_chunks = (exponents.len() + chunk_len - 1) / chunk_len;
    let mut subresults = vec![<G as CurveAffine>::Projective::zero(); num_chunks];
    pool.scope(0, |scope, _| {
        for ((b, e), s) in bases.chunks(chunk_len).zip(exponents.chunks(chunk_len)).zip(subresults.iter_mut()) {
            scope.spawn(move |_| {
                *s = serial_multiexp_inner(b, e, c).expect("must compute a partial multiexp");
            });
        }
    });
    // Reduce step: sum the per-chunk partials.
    let mut result = <G as CurveAffine>::Projective::zero();
    for s in subresults.into_iter() {
        result.add_assign(&s);
    }
    Ok(result)
}
/// Map/reduce multiexp over a caller-chosen fixed window width `c`.
///
/// For 254-bit scalar fields the specialized (monomorphized, prefetching)
/// `map_reduce_multiexp_over_fixed_window_254` is used instead.
///
/// FIX: `subresults` was sized by `pool.cpus`; if the pool ever produced more
/// chunks than threads, `zip` would silently drop the trailing chunks from
/// the sum. It is now sized by the actual chunk count. Also guards empty input.
pub fn map_reduce_multiexp_over_fixed_window<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
    c: u32
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    if <G::Engine as ScalarEngine>::Fr::NUM_BITS == 254 {
        return map_reduce_multiexp_over_fixed_window_254(pool, bases, exponents, c);
    }
    if exponents.len() != bases.len() {
        return Err(SynthesisError::AssignmentMissing);
    }
    if bases.is_empty() {
        return Ok(<G as CurveAffine>::Projective::zero());
    }
    let chunk_len = pool.get_chunk_size(exponents.len());
    // One subresult slot per chunk, not per CPU.
    let num_chunks = (exponents.len() + chunk_len - 1) / chunk_len;
    let mut subresults = vec![<G as CurveAffine>::Projective::zero(); num_chunks];
    pool.scope(0, |scope, _| {
        for ((b, e), s) in bases.chunks(chunk_len).zip(exponents.chunks(chunk_len)).zip(subresults.iter_mut()) {
            scope.spawn(move |_| {
                // *s = test_memory_serial(b, e, c).unwrap();
                *s = serial_multiexp_inner(b, e, c).unwrap();
            });
        }
    });
    // Reduce: sum the per-chunk partial results.
    let mut result = <G as CurveAffine>::Projective::zero();
    for s in subresults.into_iter() {
        result.add_assign(&s);
    }
    Ok(result)
}
/// Multi-exponentiation parallelized over scalar windows: one worker per
/// `c`-bit window, each scanning the whole input via `buffered_multiexp_inner`
/// with its own bit offset, partial results summed at the end.
pub fn buffered_multiexp_over_fixed_window_and_buffer_size<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
    c: u32,
    buffer_size: usize
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    if exponents.len() != bases.len() {
        return Err(SynthesisError::AssignmentMissing);
    }
    // Number of c-bit windows needed to cover the full scalar width.
    let num_bits = <G::Engine as ScalarEngine>::Fr::NUM_BITS;
    let num_windows = ((num_bits + c - 1) / c) as usize;
    let mut partials = vec![<G as CurveAffine>::Projective::zero(); num_windows];
    // Each spawned worker owns one window; `skip` is its bit offset.
    pool.scope(0, |scope, _| {
        for (window_idx, partial) in partials.iter_mut().enumerate() {
            let skip = (window_idx as u32) * c;
            scope.spawn(move |_| {
                *partial = buffered_multiexp_inner(bases, exponents, c, skip, buffer_size).unwrap();
            });
        }
    });
    // Window contributions are already shifted into place; just sum them.
    let mut total = <G as CurveAffine>::Projective::zero();
    for p in partials.into_iter() {
        total.add_assign(&p);
    }
    Ok(total)
}
/// 254-bit specialization of the fixed-window map/reduce multiexp: dispatches
/// each chunk to a monomorphized inner function for the chosen window width.
///
/// FIXES: (1) `subresults` was sized by `pool.cpus`, so surplus chunks would
/// be silently dropped by `zip` — it is now sized by the chunk count;
/// (2) typo in the `expect` message ("calcualate"); (3) empty input guard.
fn map_reduce_multiexp_over_fixed_window_254<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
    c: u32
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    if exponents.len() != bases.len() {
        return Err(SynthesisError::AssignmentMissing);
    }
    if bases.is_empty() {
        return Ok(<G as CurveAffine>::Projective::zero());
    }
    let chunk_len = pool.get_chunk_size(exponents.len());
    // One subresult per chunk so no chunk can be lost when chunks > threads.
    let num_chunks = (exponents.len() + chunk_len - 1) / chunk_len;
    let mut subresults = vec![<G as CurveAffine>::Projective::zero(); num_chunks];
    pool.scope(0, |scope, _| {
        for ((b, e), s) in bases.chunks(chunk_len).zip(exponents.chunks(chunk_len)).zip(subresults.iter_mut()) {
            scope.spawn(move |_| {
                // Dispatch on the (runtime) window width to a monomorphized inner.
                let subres = match c {
                    7 => map_reduce_multiexp_254_inner_7(b, e),
                    8 => map_reduce_multiexp_254_inner_8(b, e),
                    9 => map_reduce_multiexp_254_inner_9(b, e),
                    10 => map_reduce_multiexp_254_inner_10(b, e),
                    11 => map_reduce_multiexp_254_inner_11(b, e),
                    12 => map_reduce_multiexp_254_inner_12(b, e),
                    13 => map_reduce_multiexp_254_inner_13(b, e),
                    14 => map_reduce_multiexp_254_inner_14(b, e),
                    15 => map_reduce_multiexp_254_inner_15(b, e),
                    16 => map_reduce_multiexp_254_inner_16(b, e),
                    17 => map_reduce_multiexp_254_inner_17(b, e),
                    18 => map_reduce_multiexp_254_inner_18(b, e),
                    _ => unimplemented!("window size is not supported"),
                };
                *s = subres.expect("must calculate contribution from serial multiexp");
            });
        }
    });
    // Reduce: sum per-chunk partials.
    let mut result = <G as CurveAffine>::Projective::zero();
    for s in subresults.into_iter() {
        result.add_assign(&s);
    }
    Ok(result)
}
/// Single-threaded bucketed (Pippenger-style) multiexp over all `c`-bit
/// windows of the scalars: fills one bucket set per window, then reduces each
/// with the running-sum trick and shifts it into place with doublings.
///
/// BUG FIX: the exponent copy was re-taken from the original and shifted by
/// the constant `c` on *every* window iteration, so every window extracted the
/// same bit range `[c, 2c)` instead of `[w*c, (w+1)*c)`. We now keep one
/// mutable copy per point, extract the index first, then shift by `c` —
/// matching the (correct) macro-generated `map_reduce_multiexp_254_inner_*`.
fn serial_multiexp_inner<G: CurveAffine>(
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
    c: u32
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    // Bucket k-1 accumulates points whose window value is k (value 0 is skipped).
    let num_buckets = (1 << c) - 1;
    let mask: u64 = (1u64 << c) - 1u64;
    let mut result = <G as CurveAffine>::Projective::zero();
    // Number of c-bit windows covering the scalar width (round up).
    let mut num_runs = (<G::Engine as ScalarEngine>::Fr::NUM_BITS / c) as usize;
    if <G::Engine as ScalarEngine>::Fr::NUM_BITS % c != 0 {
        num_runs += 1;
    }
    let mut buckets = vec![vec![<G as CurveAffine>::Projective::zero(); num_buckets]; num_runs];
    for (&base, &exp) in bases.iter().zip(exponents.iter()) {
        // One working copy per point, shifted window by window.
        let mut exp = exp;
        for window_index in 0..num_runs {
            let index = (exp.as_ref()[0] & mask) as usize;
            exp.shr(c);
            if index != 0 {
                buckets[window_index][index - 1].add_assign_mixed(&base);
            }
        }
    }
    let mut skip_bits = 0u32;
    for b in buckets.into_iter() {
        // Running-sum reduction: acc = sum_k k * bucket[k-1].
        let mut acc = G::Projective::zero();
        let mut running_sum = G::Projective::zero();
        for exp in b.into_iter().rev() {
            running_sum.add_assign(&exp);
            acc.add_assign(&running_sum);
        }
        // Shift this window's contribution into its bit position.
        for _ in 0..skip_bits {
            acc.double();
        }
        result.add_assign(&acc);
        skip_bits += c;
    }
    Ok(result)
}
// dummy function with minimal branching
//
// Benchmark stub for measuring memory traffic — NOT a correct multiexp:
// one bucket per window (rather than per index), a fixed shift of `c` for
// every window, and a constant 100 doublings keep branching minimal.
fn test_memory_serial<G: CurveAffine>(
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
    c: u32
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    let mask: u64 = (1u64 << c) - 1u64;
    // Number of c-bit windows covering the scalar width (round up).
    let mut num_runs = (<G::Engine as ScalarEngine>::Fr::NUM_BITS / c) as usize;
    if <G::Engine as ScalarEngine>::Fr::NUM_BITS % c != 0 {
        num_runs += 1;
    }
    let mut result = <G as CurveAffine>::Projective::zero();
    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); num_runs];
    for (&point, &scalar) in bases.iter().zip(exponents.iter()) {
        // Deliberately uses the same shifted bits for every window.
        for bucket in buckets.iter_mut() {
            let mut shifted = scalar;
            shifted.shr(c);
            let slot = (shifted.as_ref()[0] & mask) as usize;
            if slot != 0 {
                bucket.add_assign_mixed(&point);
            }
        }
    }
    // Re-reduce the same bucket vector once per window, with fixed doublings.
    for _ in 0..num_runs {
        let mut acc = G::Projective::zero();
        let mut running_sum = G::Projective::zero();
        for b in buckets.iter().rev() {
            running_sum.add_assign(&b);
            acc.add_assign(&running_sum);
        }
        for _ in 0..100 {
            acc.double();
        }
        result.add_assign(&acc);
    }
    Ok(result)
}
/// Single-window multiexp over bits `[skip, skip + c)` of every exponent.
///
/// Instead of touching a (likely cold) bucket on every point, affine points
/// are staged in per-bucket buffers and folded into the projective bucket
/// `buffer_size` at a time, which batches the random bucket accesses.
fn buffered_multiexp_inner<G: CurveAffine>(
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
    c: u32,
    skip: u32,
    buffer_size: usize
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    // Bucket k-1 holds points whose window value is k; value 0 contributes nothing.
    let num_buckets = (1 << c) - 1;
    let mask: u64 = (1u64 << c) - 1u64;
    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); num_buckets];
    let mut staged: Vec<Vec<G>> = vec![Vec::with_capacity(buffer_size); num_buckets];
    for (&point, &scalar) in bases.iter().zip(exponents.iter()) {
        let mut window = scalar;
        window.shr(skip);
        let raw_index = (window.as_ref()[0] & mask) as usize;
        if raw_index == 0 {
            continue;
        }
        let slot = raw_index - 1;
        if staged[slot].len() == buffer_size {
            // Staging area is full: fold it into the bucket and reuse it.
            let mut accumulated = buckets[slot];
            for staged_point in staged[slot].iter() {
                accumulated.add_assign_mixed(&staged_point);
            }
            buckets[slot] = accumulated;
            staged[slot].clear();
        }
        // The point itself is always recorded.
        staged[slot].push(point);
    }
    // Fold whatever is still sitting in the staging buffers.
    for (slot, leftover) in staged.into_iter().enumerate() {
        let mut accumulated = buckets[slot];
        for staged_point in leftover.into_iter() {
            accumulated.add_assign_mixed(&staged_point);
        }
        buckets[slot] = accumulated;
    }
    // Running-sum reduction, then shift the result back up by `skip` bits.
    let mut acc = G::Projective::zero();
    let mut running_sum = G::Projective::zero();
    for bucket in buckets.into_iter().rev() {
        running_sum.add_assign(&bucket);
        acc.add_assign(&running_sum);
    }
    for _ in 0..skip {
        acc.double();
    }
    Ok(acc)
}
/// Generates a monomorphized stack-allocated dense multiexp for a fixed
/// window width `$n_words` (see `stack_allocated_dense_multiexp_inner` for
/// the runtime-parameterized twin of this body): the input is partitioned
/// across `pool.cpus` threads, each thread processes all scalar windows over
/// its share of points in `READ_BY`-sized stack-staged bursts, and threads
/// rendezvous on a `Barrier` after every `SYNCHRONIZATION_STEP`-sized chunk.
macro_rules! construct_stack_multiexp {
    ( $visibility:vis fn $name:ident ( $n_words:tt ); ) => {
        $visibility fn $name<G: CurveAffine>(
            pool: &Worker,
            bases: & [G],
            exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr]
        ) -> Result<<G as CurveAffine>::Projective, SynthesisError>
        {
            if exponents.len() != bases.len() {
                return Err(SynthesisError::AssignmentMissing);
            }
            const WINDOW_SIZE: usize = $n_words;
            // Chunk length between cross-thread barrier waits.
            const SYNCHRONIZATION_STEP: usize = 1 << 17;
            // Number of (base, exponent) pairs staged on the stack per burst.
            const READ_BY: usize = 1 << 7;
            const MIN_STACK_SIZE: usize = 1 << 21; // 2MB
            const NUM_BUCKETS: usize = 1 << WINDOW_SIZE;
            const MASK: u64 = (1 << WINDOW_SIZE) - 1;
            assert!(SYNCHRONIZATION_STEP % READ_BY == 0);
            let num_threads = pool.cpus;
            // One partial accumulator per thread; summed after the scope joins.
            let mut subresults = vec![<G as CurveAffine>::Projective::zero(); num_threads];
            use std::sync::{Arc, Barrier};
            // FIXME(review): this floors, while `chunks()` below yields
            // ceil(len / SYNCHRONIZATION_STEP) chunks; for lengths that are
            // not a multiple of the step, `barrs[chunk_schedule_idx][i]`
            // indexes out of bounds — confirm callers pass multiples only.
            let num_rounds = bases.len() / SYNCHRONIZATION_STEP;
            let mut num_windows = (<G::Engine as ScalarEngine>::Fr::NUM_BITS as usize) / WINDOW_SIZE;
            if (<G::Engine as ScalarEngine>::Fr::NUM_BITS as usize) % WINDOW_SIZE != 0 {
                num_windows += 1;
            }
            // One barrier per (window, round) pair; every thread waits on each.
            let mut barriers = Vec::with_capacity(num_windows);
            for _ in 0..num_windows {
                let mut tt = Vec::with_capacity(num_rounds);
                for _ in 0..num_rounds {
                    let t = Barrier::new(num_threads);
                    tt.push(t);
                }
                barriers.push(tt);
            }
            let barrs = &barriers;
            // Estimate the per-thread stack budget for the staged holders.
            let g1_projective_size = std::mem::size_of::<<G as CurveAffine>::Projective>();
            let g1_affine_size = std::mem::size_of::<G>();
            let scalar_size = std::mem::size_of::<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>();
            let usize_size = std::mem::size_of::<usize>();
            // potentially we can keep all the buckets on a stack,
            // but it would be around 2.6 MB for BN254 for c = 18;
            let space_to_keep_buckets_on_stack = (1 << WINDOW_SIZE) * g1_projective_size;
            let mut stack_size = (g1_affine_size + scalar_size + usize_size) * READ_BY + space_to_keep_buckets_on_stack + (1<<16);
            if stack_size < MIN_STACK_SIZE {
                stack_size = MIN_STACK_SIZE;
            }
            // Stride between consecutive bursts owned by the same thread.
            let thread_step = num_threads * READ_BY;
            use crossbeam::thread;
            thread::scope(|s| {
                for (thread_idx, subresult) in subresults.iter_mut().enumerate() {
                    let builder = s.builder().stack_size(stack_size);
                    builder.spawn(move |_| {
                        let limit = bases.len();
                        let bases = &bases[..limit];
                        let exponents = &exponents[..limit];
                        // Buckets live on this thread's (enlarged) stack.
                        let mut buckets = [<G as CurveAffine>::Projective::zero(); NUM_BUCKETS];
                        let mut skip_bits = 0;
                        for chunk_schedule_idx in 0..num_windows {
                            // Reset buckets between windows.
                            if chunk_schedule_idx != 0 {
                                buckets = [<G as CurveAffine>::Projective::zero(); NUM_BUCKETS];
                            }
                            for (i, (bases, exponents)) in bases.chunks(SYNCHRONIZATION_STEP)
                                .zip(exponents.chunks(SYNCHRONIZATION_STEP))
                                .enumerate() {
                                let num_subchunks = bases.len() / thread_step;
                                let remainder_start = num_subchunks * thread_step;
                                let remainder_end = bases.len();
                                // assert_eq!(remainder_start, remainder_end, "only support power of two multiexp size for now");
                                // Stack-local staging areas for one READ_BY burst.
                                let mut bases_holder = [G::zero(); READ_BY];
                                let mut exponents_holder = [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr::default(); READ_BY];
                                let mut indexes_holder = [0usize; READ_BY];
                                for subchunk_start_idx in ((thread_idx*READ_BY)..remainder_start).step_by(thread_step) {
                                    exponents_holder.copy_from_slice(&exponents[subchunk_start_idx..(subchunk_start_idx+READ_BY)]);
                                    // Pre-extract this window's bucket index per staged scalar.
                                    for (index, &exp) in indexes_holder.iter_mut().zip(exponents_holder.iter()) {
                                        let mut exp = exp;
                                        exp.shr(skip_bits);
                                        *index = (exp.as_ref()[0] & MASK) as usize;
                                    }
                                    bases_holder.copy_from_slice(&bases[subchunk_start_idx..(subchunk_start_idx+READ_BY)]);
                                    let mut bases_iter = bases_holder.iter();
                                    let mut indexes_iter = indexes_holder.iter();
                                    // semi-unroll first: stay one element ahead
                                    // so the next bucket/base can be prefetched
                                    // while the current addition runs.
                                    let mut this_exp = *(&mut indexes_iter).next().unwrap();
                                    let mut base_prefetch_counter = 0;
                                    crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                                    crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);
                                    for (&index, this_base) in indexes_iter.zip(&mut bases_iter) {
                                        // Window value 0 contributes nothing.
                                        if this_exp != 0 {
                                            buckets[this_exp].add_assign_mixed(&this_base);
                                        }
                                        this_exp = index;
                                        base_prefetch_counter += 1;
                                        crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                                        crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);
                                    }
                                    // finish the element left over by the unroll
                                    if this_exp != 0 {
                                        let last_base = bases_iter.next().unwrap();
                                        buckets[this_exp].add_assign_mixed(&last_base);
                                    }
                                }
                                // process the remainder (partial burst at the chunk tail)
                                let remainder_start = remainder_start + (thread_idx*READ_BY);
                                if remainder_start < remainder_end {
                                    let remainder_end = if remainder_start + READ_BY > remainder_end {
                                        remainder_end
                                    } else {
                                        remainder_start + READ_BY
                                    };
                                    let limit = remainder_end - remainder_start;
                                    exponents_holder[..limit].copy_from_slice(&exponents[remainder_start..remainder_end]);
                                    for (index, &exp) in indexes_holder.iter_mut().zip(exponents_holder.iter()) {
                                        let mut exp = exp;
                                        exp.shr(skip_bits);
                                        *index = (exp.as_ref()[0] & MASK) as usize;
                                    }
                                    bases_holder[..limit].copy_from_slice(&bases[remainder_start..remainder_end]);
                                    let mut bases_iter = bases_holder[..limit].iter();
                                    let mut indexes_iter = indexes_holder[..limit].iter();
                                    // semi-unroll first (same prefetch pipeline as above)
                                    let mut this_exp = *indexes_iter.next().unwrap();
                                    let mut base_prefetch_counter = 0;
                                    crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                                    crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);
                                    for (&index, this_base) in indexes_iter.zip(&mut bases_iter) {
                                        if this_exp != 0 {
                                            buckets[this_exp].add_assign_mixed(this_base);
                                        }
                                        this_exp = index;
                                        base_prefetch_counter += 1;
                                        crate::prefetch::prefetch_l1_pointer(&buckets[this_exp]);
                                        crate::prefetch::prefetch_l1_pointer(&bases_holder[base_prefetch_counter]);
                                    }
                                    // finish
                                    if this_exp != 0 {
                                        let last_base = bases_iter.next().unwrap();
                                        buckets[this_exp].add_assign_mixed(last_base);
                                    }
                                }
                                // All threads sync before the next chunk.
                                (&barrs[chunk_schedule_idx][i]).wait();
                            }
                            // buckets are filled with the corresponding accumulated value, now sum.
                            // NOTE(review): this reduction starts at acc = buckets[1]
                            // and skips buckets[0..2], yielding
                            // buckets[1] + sum_{k>=2} (k-1)*buckets[k] rather than the
                            // standard sum_{k>=1} k*buckets[k] — presumably the
                            // "uncompensated" in the dispatcher's name; confirm
                            // before reusing this reduction elsewhere.
                            let mut acc = buckets[1];
                            let mut running_sum = G::Projective::zero();
                            for exp in buckets.iter().skip(2).rev() {
                                running_sum.add_assign(&exp);
                                acc.add_assign(&running_sum);
                            }
                            // Shift this window's contribution into position.
                            for _ in 0..skip_bits {
                                acc.double();
                            }
                            subresult.add_assign(&acc);
                            skip_bits += WINDOW_SIZE as u32;
                        }
                    }).unwrap();
                }
            }).unwrap();
            // Fold per-thread partials into the final result.
            let mut result = subresults.drain(0..1).collect::<Vec<_>>()[0];
            for t in subresults.into_iter() {
                result.add_assign(&t);
            }
            Ok(result)
        }
    }
}
// Instantiate the stack-allocated dense multiexp for every supported window
// width (these are the dispatch targets of
// `stack_allocated_uncompensated_dense_multiexp`).
construct_stack_multiexp!(pub fn stack_allocated_dense_multiexp_12(12););
construct_stack_multiexp!(pub fn stack_allocated_dense_multiexp_13(13););
construct_stack_multiexp!(pub fn stack_allocated_dense_multiexp_14(14););
construct_stack_multiexp!(pub fn stack_allocated_dense_multiexp_15(15););
construct_stack_multiexp!(pub fn stack_allocated_dense_multiexp_16(16););
construct_stack_multiexp!(pub fn stack_allocated_dense_multiexp_17(17););
construct_stack_multiexp!(pub fn stack_allocated_dense_multiexp_18(18););
/// Generates a single-threaded bucketed multiexp monomorphized for a window
/// width `$n_window` and a scalar bit-width `$n_bits` (validated against the
/// field's `NUM_BITS` at runtime). For each point, all window indexes are
/// extracted first — prefetching the target buckets — and only then are the
/// bucket additions performed, so the adds hit warm cache lines.
macro_rules! construct_map_reduce_multiexp_inner {
    ( $visibility:vis fn $name:ident ( $n_window: tt, $n_bits: tt); ) => {
        $visibility fn $name<G: CurveAffine>(
            bases: & [G],
            exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr]
        ) -> Result<<G as CurveAffine>::Projective, SynthesisError>
        {
            const WINDOW: u32 = $n_window;
            // Bucket k-1 accumulates points with window value k (0 is skipped).
            const NUM_BUCKETS: usize = (1 << WINDOW) - 1;
            const MASK: u64 = (1u64 << WINDOW) - 1u64;
            // Compile-time ceil($n_bits / $n_window).
            const NUM_RUNS: usize = ($n_bits / $n_window) + (($n_bits % $n_window > 0) as u32) as usize;
            // Cross-check the macro parameters against the actual field width.
            let mut num_runs_runtime = (<G::Engine as ScalarEngine>::Fr::NUM_BITS / WINDOW) as usize;
            if <G::Engine as ScalarEngine>::Fr::NUM_BITS % WINDOW != 0 {
                num_runs_runtime += 1;
            }
            assert_eq!(NUM_RUNS, num_runs_runtime, "invalid number of windows in runtime");
            let mut result = <G as CurveAffine>::Projective::zero();
            let mut buckets = vec![vec![<G as CurveAffine>::Projective::zero(); NUM_BUCKETS]; NUM_RUNS as usize];
            let mut bucket_indexes = [0usize; NUM_RUNS];
            for (&base, &exp) in bases.into_iter().zip(exponents.into_iter()) {
                // first we form bucket indexes (and prefetch the buckets they hit)
                let mut exp = exp;
                for (window_index, bucket_index) in bucket_indexes.iter_mut().enumerate() {
                    let index = (exp.as_ref()[0] & MASK) as usize;
                    exp.shr(WINDOW);
                    if index != 0 {
                        crate::prefetch::prefetch_l1_pointer(&buckets[window_index][index - 1]);
                    }
                    *bucket_index = index;
                }
                // second pass: perform the (now cache-warm) bucket additions
                for (window_index, &index) in bucket_indexes.iter().enumerate() {
                    if index != 0 {
                        // let mut tmp = buckets[window_index][index - 1];
                        // tmp.add_assign_mixed(&base);
                        // buckets[window_index][index - 1] = tmp;
                        buckets[window_index][index - 1].add_assign_mixed(&base);
                    }
                }
            }
            let mut skip_bits = 0;
            for b in buckets.into_iter() {
                // buckets are filled with the corresponding accumulated value, now sum
                // (running-sum reduction: acc = sum_k k * bucket[k-1])
                let mut acc = G::Projective::zero();
                let mut running_sum = G::Projective::zero();
                for exp in b.into_iter().rev() {
                    running_sum.add_assign(&exp);
                    acc.add_assign(&running_sum);
                }
                // Shift this window's contribution into its bit position.
                for _ in 0..skip_bits {
                    acc.double();
                }
                result.add_assign(&acc);
                skip_bits += WINDOW;
            }
            Ok(result)
        }
    }
}
// Instantiate the 254-bit serial inner multiexp for every supported window
// width (dispatch targets of `map_reduce_multiexp_over_fixed_window_254`).
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_7(7, 254););
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_8(8, 254););
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_9(9, 254););
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_10(10, 254););
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_11(11, 254););
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_12(12, 254););
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_13(13, 254););
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_14(14, 254););
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_15(15, 254););
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_16(16, 254););
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_17(17, 254););
construct_map_reduce_multiexp_inner!(pub fn map_reduce_multiexp_254_inner_18(18, 254););
/// EXPERIMENTAL: multiexp of several exponent sets over one shared base set,
/// with one worker per (exponent set, window) pair. The first window's worker
/// also issues L3 prefetches so later windows find bases/exponents resident.
///
/// NOTE(review): every worker's `acc` is computed and then dropped — this
/// function always returns the zero point (see the commented-out reduction at
/// the bottom). It appears to be a cache-behavior benchmark, not a usable
/// multiexp; do not call it for real results.
pub fn l3_shared_multexp<G: CurveAffine>(
    pool: &Worker,
    common_bases: & [G],
    exponents_set: &[&[<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr]],
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    // Every exponent slice must match the shared base slice in length.
    for exponents in exponents_set.iter() {
        if exponents.len() != common_bases.len() {
            return Err(SynthesisError::AssignmentMissing);
        }
    }
    const NUM_WINDOWS: usize = 22;
    const WINDOW_SIZE: u32 = 12;
    const MASK: u64 = (1u64 << WINDOW_SIZE) - 1;
    const NUM_BUCKETS: usize = 1 << WINDOW_SIZE;
    const SYNCHRONIZATION_STEP: usize = 1 << 14; // if element size is 64 = 2^7 bytes then we take around 2^21 = 2MB of cache
    // 22 windows of 12 bits must cover a 254-bit scalar.
    assert!((WINDOW_SIZE as usize) * NUM_WINDOWS >= 254);
    // Extract the 12-bit window starting at bit `start` from a scalar repr.
    // NOTE(review): `end` wraps modulo 256 — for the top window this makes
    // `word_end` wrap back to word 0, folding in low-order bits; presumably
    // harmless because those bits lie above NUM_BITS, but confirm.
    fn get_bits<Repr: PrimeFieldRepr>(el: Repr, start: usize) -> u64 {
        const WINDOW_SIZE: u32 = 12;
        const MASK: u64 = (1u64 << WINDOW_SIZE) - 1;
        let end = (start + (WINDOW_SIZE as usize)) % 256;
        let word_begin = start / 64;
        let word_end = end / 64;
        let result: u64;
        if word_begin == word_end {
            // Window falls within a single 64-bit limb.
            let shift = start % 64;
            result = (el.as_ref()[word_begin] >> shift) & MASK;
        } else {
            // Window straddles a limb boundary: stitch low and high parts.
            let shift_low = start % 64;
            let shift_high = 64 - shift_low;
            result = ((el.as_ref()[word_begin] >> shift_low) | (el.as_ref()[word_end] << shift_high)) & MASK;
        }
        result
    }
    // let mut subresults = vec![<G as CurveAffine>::Projective::zero(); num_threads];
    // `limit` leaves two elements of lookahead for the prefetches below.
    let limit = common_bases.len() - 2;
    use std::sync::Barrier;
    // One worker per (exponent set, window); all meet at each sync barrier.
    let num_threads = NUM_WINDOWS * exponents_set.len();
    let num_sync_rounds = limit / SYNCHRONIZATION_STEP;
    let mut barriers = Vec::with_capacity(num_sync_rounds);
    for _ in 0..num_sync_rounds {
        let t = Barrier::new(num_threads);
        barriers.push(t);
    }
    let barrs = &barriers;
    pool.scope(0, |scope, _| {
        for (exponents_idx, exponents) in exponents_set.iter().enumerate() {
            // first one is unrolled manually: only the window-0 worker of the
            // first exponent set issues L3 prefetches for the shared bases.
            let mut start = 0;
            if exponents_idx == 0 {
                scope.spawn(move |_| {
                    let mut barriers_it = barrs.iter();
                    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); NUM_BUCKETS];
                    let tmp = exponents[0];
                    let mut this_index = get_bits(tmp, start) as usize;
                    for i in 0..limit {
                        // Warm L3 (for the other workers) and L1 (for us).
                        unsafe { crate::prefetch::prefetch_l3(exponents.get_unchecked(i+2)) };
                        unsafe { crate::prefetch::prefetch_l3(common_bases.get_unchecked(i+1)) };
                        unsafe { crate::prefetch::prefetch_l1(exponents.get_unchecked(i+2)) };
                        unsafe { crate::prefetch::prefetch_l1(common_bases.get_unchecked(i+1)) };
                        let tmp = unsafe { *exponents.get_unchecked(i+1) };
                        let base = unsafe { *common_bases.get_unchecked(i) };
                        let next_index = get_bits(tmp, start) as usize;
                        if this_index != 0 {
                            unsafe { buckets.get_unchecked_mut(this_index-1).add_assign_mixed(&base) };
                        }
                        if next_index != 0 {
                            unsafe { crate::prefetch::prefetch_l1(buckets.get_unchecked(next_index-1)) };
                            this_index = next_index;
                        }
                        // unsafe { buckets.get_unchecked_mut(this_index).add_assign_mixed(&base) };
                        // unsafe { crate::prefetch::prefetch_l1(buckets.get_unchecked(next_index)) };
                        // this_index = next_index;
                        // i != 0 and i % SYNCHRONIZATION_STEP == 0
                        if i !=0 && (i & (SYNCHRONIZATION_STEP - 1)) == 0 {
                            barriers_it.next().unwrap().wait();
                        }
                    }
                    // buckets are filled with the corresponding accumulated value, now sum
                    // NOTE(review): `acc` is dropped here — result discarded.
                    let mut acc = G::Projective::zero();
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.into_iter().rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }
                    for _ in 0..start {
                        acc.double();
                    }
                });
            } else {
                // we do not to prefetch bases, only exponents
                scope.spawn(move |_| {
                    let mut barriers_it = barrs.iter();
                    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); NUM_BUCKETS];
                    let tmp = exponents[0];
                    let mut this_index = get_bits(tmp, start) as usize;
                    for i in 0..limit {
                        unsafe { crate::prefetch::prefetch_l3(exponents.get_unchecked(i+2)) };
                        unsafe { crate::prefetch::prefetch_l1(exponents.get_unchecked(i+2)) };
                        unsafe { crate::prefetch::prefetch_l1(common_bases.get_unchecked(i+1)) };
                        let tmp = unsafe { *exponents.get_unchecked(i+1) };
                        let base = unsafe { *common_bases.get_unchecked(i) };
                        let next_index = get_bits(tmp, start) as usize;
                        if this_index != 0 {
                            unsafe { buckets.get_unchecked_mut(this_index-1).add_assign_mixed(&base) };
                        }
                        if next_index != 0 {
                            unsafe { crate::prefetch::prefetch_l1(buckets.get_unchecked(next_index-1)) };
                            this_index = next_index;
                        }
                        // unsafe { buckets.get_unchecked_mut(this_index).add_assign_mixed(&base) };
                        // unsafe { crate::prefetch::prefetch_l1(buckets.get_unchecked(next_index)) };
                        // this_index = next_index;
                        // i != 0 and i % SYNCHRONIZATION_STEP == 0
                        if i !=0 && (i & (SYNCHRONIZATION_STEP - 1)) == 0 {
                            barriers_it.next().unwrap().wait();
                        }
                    }
                    // buckets are filled with the corresponding accumulated value, now sum
                    // NOTE(review): `acc` is dropped here — result discarded.
                    let mut acc = G::Projective::zero();
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.into_iter().rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }
                    for _ in 0..start {
                        acc.double();
                    }
                });
            }
            // Remaining windows for this exponent set: rely on the L3 warming
            // done by the window-0 worker; only L1 prefetches here.
            for _ in 1..NUM_WINDOWS {
                // no L3 prefetches here
                start += WINDOW_SIZE as usize;
                scope.spawn(move |_| {
                    let mut barriers_it = barrs.iter();
                    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); NUM_BUCKETS];
                    let tmp = exponents[0];
                    let mut this_index = get_bits(tmp, start) as usize;
                    for i in 0..limit {
                        unsafe { crate::prefetch::prefetch_l1(exponents.get_unchecked(i+2)) };
                        unsafe { crate::prefetch::prefetch_l1(common_bases.get_unchecked(i+1)) };
                        let tmp = unsafe { *exponents.get_unchecked(i+1) };
                        let base = unsafe { *common_bases.get_unchecked(i) };
                        let next_index = get_bits(tmp, start) as usize;
                        if this_index != 0 {
                            unsafe { buckets.get_unchecked_mut(this_index-1).add_assign_mixed(&base) };
                        }
                        if next_index != 0 {
                            unsafe { crate::prefetch::prefetch_l1(buckets.get_unchecked(next_index-1)) };
                            this_index = next_index;
                        }
                        // unsafe { buckets.get_unchecked_mut(this_index).add_assign_mixed(&base) };
                        // unsafe { crate::prefetch::prefetch_l1(buckets.get_unchecked(next_index)) };
                        // this_index = next_index;
                        // i != 0 and i % SYNCHRONIZATION_STEP == 0
                        if i !=0 && (i & (SYNCHRONIZATION_STEP - 1)) == 0 {
                            barriers_it.next().unwrap().wait();
                        }
                    }
                    // buckets are filled with the corresponding accumulated value, now sum
                    // NOTE(review): `acc` is dropped here — result discarded.
                    let mut acc = G::Projective::zero();
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.into_iter().rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }
                    for _ in 0..start {
                        acc.double();
                    }
                });
            }
        }
    });
    // let mut result = <G as CurveAffine>::Projective::zero();
    // for s in subresults.into_iter() {
    //     result.add_assign(&s);
    // }
    Ok(<G as CurveAffine>::Projective::zero())
}
/// Experimental multiexp where two exponent sets share a single set of bases,
/// so that concurrently running window-threads reuse base loads out of the
/// shared L3 cache.
///
/// NOTE(review): this is benchmarking scaffolding — each spawned closure
/// computes a per-window partial result and then drops it, and the function
/// always returns zero (see the commented-out reduction before the final
/// `Ok`). Do not use it for real proving.
pub fn l3_shared_multexp_with_local_access<G: CurveAffine>(
    pool: &Worker,
    common_bases: & [G],
    exponents_set: &[&[<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr]],
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    use std::sync::atomic::{AtomicUsize, Ordering};
    // Bumped in a burst after each unrolled spawn; presumably a crude way to
    // stagger thread start-up for the cache experiment — TODO confirm intent.
    static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);

    assert_eq!(exponents_set.len(), 2, "only support unroll by 2 for now");
    // Every exponent slice must pair one-to-one with the shared bases.
    for exponents in exponents_set.iter() {
        if exponents.len() != common_bases.len() {
            return Err(SynthesisError::AssignmentMissing);
        }
    }

    // Fixed windowing: 22 windows of 12 bits (264 bits >= 254-bit scalars).
    const NUM_WINDOWS: usize = 22;
    const WINDOW_SIZE: u32 = 12;
    const MASK: u64 = (1u64 << WINDOW_SIZE) - 1;
    const NUM_BUCKETS: usize = 1 << WINDOW_SIZE;

    assert!((WINDOW_SIZE as usize) * NUM_WINDOWS >= 254);

    // Extracts the 12-bit window of `el` starting at bit `start`, stitching
    // two 64-bit limbs together when the window straddles a limb boundary.
    fn get_bits<Repr: PrimeFieldRepr>(el: Repr, start: usize) -> u64 {
        const WINDOW_SIZE: u32 = 12;
        const MASK: u64 = (1u64 << WINDOW_SIZE) - 1;

        let end = (start + (WINDOW_SIZE as usize)) % 256;
        let word_begin = start / 64;
        let word_end = end / 64;

        let result: u64;

        if word_begin == word_end {
            // Window is contained in a single limb.
            let shift = start % 64;
            result = (el.as_ref()[word_begin] >> shift) & MASK;
        } else {
            // Window spans two limbs: low part from one, high part from the next.
            let shift_low = start % 64;
            let shift_high = 64 - shift_low;
            result = ((el.as_ref()[word_begin] >> shift_low) | (el.as_ref()[word_end] << shift_high)) & MASK;
        }

        result
    }

    // let mut subresults = vec![<G as CurveAffine>::Projective::zero(); num_threads];

    // The hot loops prefetch `exponents[i+2]` and read `exponents[i+1]`, so
    // iteration stops two elements short of the end of the slices.
    let limit = common_bases.len() - 2;

    pool.scope(0, |scope, _| {
        for (exponents_idx, exponents) in exponents_set.iter().enumerate() {
            // first one is unrolled manually
            let mut start = 0;
            if exponents_idx == 0 {
                scope.spawn(move |_| {
                    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); NUM_BUCKETS];
                    let tmp = exponents[0];
                    let mut this_index = get_bits(tmp, start) as usize;
                    for i in 0..limit {
                        // unsafe { crate::prefetch::prefetch_l3(exponents.get_unchecked(i+2)) };
                        // unsafe { crate::prefetch::prefetch_l3(common_bases.get_unchecked(i+1)) };
                        unsafe { crate::prefetch::prefetch_l1(exponents.get_unchecked(i+2)) };
                        unsafe { crate::prefetch::prefetch_l1(common_bases.get_unchecked(i+1)) };
                        let tmp = unsafe { *exponents.get_unchecked(i+1) };
                        let base = unsafe { *common_bases.get_unchecked(i) };
                        let next_index = get_bits(tmp, start) as usize;
                        // if this_index != 0 {
                        //     unsafe { buckets.get_unchecked_mut(this_index-1).add_assign_mixed(&base) };
                        // }

                        // if next_index != 0 {
                        //     unsafe { crate::prefetch::prefetch_l1(buckets.get_unchecked(next_index-1)) };
                        //     this_index = next_index;
                        // }
                        // Accumulate into the bucket for the current window and
                        // prefetch the bucket the next element will touch.
                        unsafe { buckets.get_unchecked_mut(this_index).add_assign_mixed(&base) };
                        unsafe { crate::prefetch::prefetch_l1(buckets.get_unchecked(next_index)) };
                        this_index = next_index;
                    }

                    // buckets are filled with the corresponding accumulated value, now sum
                    // NOTE(review): with buckets indexed by window value and bucket 0
                    // included, this reversed running-sum weights bucket w by (w + 1)
                    // rather than w; harmless here only because `acc` is discarded.
                    let mut acc = G::Projective::zero();
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.into_iter().rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }
                    // Shift this window's contribution into its bit position.
                    for _ in 0..start {
                        acc.double();
                    }
                });
            } else {
                // we do not to prefetch bases, only exponents
                scope.spawn(move |_| {
                    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); NUM_BUCKETS];
                    let tmp = exponents[0];
                    let mut this_index = get_bits(tmp, start) as usize;
                    for i in 0..limit {
                        // unsafe { crate::prefetch::prefetch_l3(exponents.get_unchecked(i+2)) };
                        unsafe { crate::prefetch::prefetch_l1(exponents.get_unchecked(i+2)) };
                        unsafe { crate::prefetch::prefetch_l1(common_bases.get_unchecked(i+1)) };
                        let tmp = unsafe { *exponents.get_unchecked(i+1) };
                        let base = unsafe { *common_bases.get_unchecked(i) };
                        let next_index = get_bits(tmp, start) as usize;
                        unsafe { buckets.get_unchecked_mut(this_index).add_assign_mixed(&base) };
                        unsafe { crate::prefetch::prefetch_l1(buckets.get_unchecked(next_index)) };
                        this_index = next_index;
                    }

                    // buckets are filled with the corresponding accumulated value, now sum
                    let mut acc = G::Projective::zero();
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.into_iter().rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }

                    for _ in 0..start {
                        acc.double();
                    }
                });
            }

            // Burst of atomic increments between spawns; the counter value
            // itself is never read back.
            for _ in 0..128 {
                let _ = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
            }
            // std::thread::sleep(std::time::Duration::from_nanos(100));

            // Remaining 21 windows of this exponent set: one thread per window.
            for _ in 1..NUM_WINDOWS {
                // no L3 prefetches here
                start += WINDOW_SIZE as usize;
                scope.spawn(move |_| {
                    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); NUM_BUCKETS];
                    let tmp = exponents[0];
                    let mut this_index = get_bits(tmp, start) as usize;
                    for i in 0..limit {
                        unsafe { crate::prefetch::prefetch_l1(exponents.get_unchecked(i+2)) };
                        unsafe { crate::prefetch::prefetch_l1(common_bases.get_unchecked(i+1)) };
                        let tmp = unsafe { *exponents.get_unchecked(i+1) };
                        let base = unsafe { *common_bases.get_unchecked(i) };
                        let next_index = get_bits(tmp, start) as usize;
                        unsafe { buckets.get_unchecked_mut(this_index).add_assign_mixed(&base) };
                        unsafe { crate::prefetch::prefetch_l1(buckets.get_unchecked(next_index)) };
                        this_index = next_index;
                    }

                    // buckets are filled with the corresponding accumulated value, now sum
                    let mut acc = G::Projective::zero();
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.into_iter().rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }

                    for _ in 0..start {
                        acc.double();
                    }
                });
            }
        }
    });

    // let mut result = <G as CurveAffine>::Projective::zero();
    // for s in subresults.into_iter() {
    //     result.add_assign(&s);
    // }

    Ok(<G as CurveAffine>::Projective::zero())
}
/// Dense multiexp driver that delegates to the manually unrolled,
/// prefetching inner kernel.
///
/// The window width is derived from the per-worker chunk length so that each
/// bucket is expected to receive roughly one addition.
#[allow(dead_code)]
pub fn dense_unrolled_multiexp_with_prefetch<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr]
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    // One scalar per base point is required.
    if bases.len() != exponents.len() {
        return Err(SynthesisError::AssignmentMissing);
    }

    // Window heuristic: ln of the expected per-thread chunk length,
    // with a fixed 3-bit floor for very small inputs.
    let window_bits = if exponents.len() >= 32 {
        let per_thread = pool.get_chunk_size(exponents.len());
        (f64::from(per_thread as u32)).ln().ceil() as u32
    } else {
        3u32
    };

    dense_multiexp_inner_unrolled_with_prefetch(pool, bases, exponents, 0, window_bits, true)
}
#[allow(dead_code)]
/// One c-bit region of a dense multiexp with an 8-wide manually unrolled
/// inner loop: each group prefetches its 8 base/exponent pairs up front and
/// the bucket the following element will touch. Workers accumulate into a
/// mutex-protected partial sum; the function then recurses into the next
/// (higher) region and combines it by doubling `c` times.
fn dense_multiexp_inner_unrolled_with_prefetch<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
    mut skip: u32,
    c: u32,
    handle_trivial: bool
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    const UNROLL_BY: usize = 8;

    use std::sync::{Mutex};
    // Perform this region of the multiexp. We use a different strategy - go over region in parallel,
    // then over another region, etc. No Arc required
    let this = {
        // Selects the c low bits once `skip` lower bits are shifted out.
        let mask = (1u64 << c) - 1u64;
        let this_region = Mutex::new(<G as CurveAffine>::Projective::zero());
        let arc = Arc::new(this_region);
        pool.scope(bases.len(), |scope, chunk| {
            for (bases, exp) in bases.chunks(chunk).zip(exponents.chunks(chunk)) {
                let this_region_rwlock = arc.clone();
                // let handle =
                scope.spawn(move |_| {
                    // bucket[j] accumulates bases whose window value is j+1;
                    // zero windows are skipped, trivial 1s go straight to `acc`.
                    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];
                    // Accumulate the result
                    let mut acc = G::Projective::zero();
                    let zero = <G::Engine as ScalarEngine>::Fr::zero().into_repr();
                    let one = <G::Engine as ScalarEngine>::Fr::one().into_repr();

                    let unrolled_steps = bases.len() / UNROLL_BY;
                    let remainder = bases.len() % UNROLL_BY;

                    let mut offset = 0;
                    for _ in 0..unrolled_steps {
                        // Pull the whole 8-element group towards the cache first. [0..7]
                        for i in 0..UNROLL_BY {
                            crate::prefetch::prefetch_l3_pointer(&bases[offset+i] as *const _);
                            crate::prefetch::prefetch_l3_pointer(&exp[offset+i] as *const _);
                        }

                        // offset + [0..6]
                        for i in 0..(UNROLL_BY-1) {
                            let this_exp = exp[offset+i];
                            // copy of the NEXT exponent, used only to prefetch its bucket
                            let mut next_exp = exp[offset+i+1];
                            let base = &bases[offset+i];
                            if this_exp != zero {
                                if this_exp == one {
                                    // Trivial exponent: counted once directly into the
                                    // accumulator, and only in the lowest region.
                                    if handle_trivial {
                                        acc.add_assign_mixed(base);
                                    }
                                } else {
                                    let mut this_exp = this_exp;
                                    this_exp.shr(skip);
                                    let this_exp = this_exp.as_ref()[0] & mask;
                                    if this_exp != 0 {
                                        buckets[(this_exp - 1) as usize].add_assign_mixed(base);
                                    }
                                }
                            }
                            {
                                // Prefetch the bucket the next element will write to.
                                next_exp.shr(skip);
                                let next_exp = next_exp.as_ref()[0] & mask;
                                if next_exp != 0 {
                                    crate::prefetch::prefetch_l3_pointer(&buckets[(next_exp - 1) as usize] as *const _);
                                }
                            }
                        }

                        // offset + 7: last element of the group, no look-ahead left.
                        let this_exp = exp[offset+(UNROLL_BY-1)];
                        let base = &bases[offset+(UNROLL_BY-1)];

                        if this_exp != zero {
                            if this_exp == one {
                                if handle_trivial {
                                    acc.add_assign_mixed(base);
                                }
                            } else {
                                let mut this_exp = this_exp;
                                this_exp.shr(skip);
                                let this_exp = this_exp.as_ref()[0] & mask;
                                if this_exp != 0 {
                                    buckets[(this_exp - 1) as usize].add_assign_mixed(base);
                                }
                            }
                        }

                        // go into next region
                        offset += UNROLL_BY;
                    }

                    // Tail that did not fill a full group, processed without prefetching.
                    for _ in 0..remainder {
                        let this_exp = exp[offset];
                        let base = &bases[offset];

                        if this_exp != zero {
                            if this_exp == one {
                                if handle_trivial {
                                    acc.add_assign_mixed(base);
                                }
                            } else {
                                let mut this_exp = this_exp;
                                this_exp.shr(skip);
                                let this_exp = this_exp.as_ref()[0] & mask;
                                if this_exp != 0 {
                                    buckets[(this_exp - 1) as usize].add_assign_mixed(base);
                                }
                            }
                        }

                        offset += 1;
                    }

                    // buckets are filled with the corresponding accumulated value, now sum
                    // (running-sum trick: bucket j, holding window value j+1, ends up
                    // counted j+1 times)
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.into_iter().rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }

                    // Fold this worker's partial result into the shared total.
                    let mut guard = match this_region_rwlock.lock() {
                        Ok(guard) => guard,
                        Err(_) => {
                            panic!("poisoned!");
                            // poisoned.into_inner()
                        }
                    };

                    (*guard).add_assign(&acc);
                });
            }
        });

        let this_region = Arc::try_unwrap(arc).unwrap();
        let this_region = this_region.into_inner().unwrap();

        this_region
    };

    skip += c;

    if skip >= <G::Engine as ScalarEngine>::Fr::NUM_BITS {
        // There isn't another region, and this will be the highest region
        return Ok(this);
    } else {
        // next region is actually higher than this one, so double it enough times
        let mut next_region = dense_multiexp_inner_unrolled_with_prefetch(
            pool, bases, exponents, skip, c, false).unwrap();

        for _ in 0..c {
            next_region.double();
        }

        next_region.add_assign(&this);

        return Ok(next_region);
    }
}
/// Dense multiexp driver for the double-buffered ("manual unrolling")
/// inner kernel; picks the window width from the per-worker chunk length.
#[allow(dead_code)]
pub fn dense_multiexp_with_manual_unrolling<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr]
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    let len = exponents.len();
    if len != bases.len() {
        return Err(SynthesisError::AssignmentMissing);
    }

    // ln(chunk length) keeps the expected additions per bucket near one;
    // clamp tiny inputs to a 3-bit window.
    let window_bits = if len < 32 {
        3u32
    } else {
        (f64::from(pool.get_chunk_size(len) as u32)).ln().ceil() as u32
    };

    dense_multiexp_with_manual_unrolling_impl(pool, bases, exponents, 0, window_bits, true)
}
#[allow(dead_code)]
/// One c-bit region of a dense multiexp, streaming points through stack-local
/// double buffers of `UNROLL_BY` elements and prefetching each destination
/// bucket one element ahead of its addition. Recurses into higher regions and
/// combines them by doubling `c` times.
///
/// Fixes over the previous revision:
/// - an exponent equal to one with `handle_trivial == false` reused the
///   previous element's bucket index instead of computing its own window;
/// - the bucket reduction seeded the accumulator with bucket 1, undercounting
///   every bucket above it by one (the running-sum must cover bucket 1 too);
/// - the last `UNROLL_BY` points of every worker chunk were never added.
fn dense_multiexp_with_manual_unrolling_impl<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
    mut skip: u32,
    c: u32,
    handle_trivial: bool
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    const UNROLL_BY: usize = 1024;

    use std::sync::{Mutex};

    // Perform this region of the multiexp: workers fold their chunks into a
    // mutex-protected partial sum, then we recurse into the next region.
    let this = {
        // Selects the c low bits once `skip` lower bits are shifted out.
        let mask = (1u64 << c) - 1u64;
        let this_region = Mutex::new(<G as CurveAffine>::Projective::zero());
        let arc = Arc::new(this_region);
        pool.scope(bases.len(), |scope, chunk| {
            for (bases, exp) in bases.chunks(chunk).zip(exponents.chunks(chunk)) {
                let this_region_rwlock = arc.clone();
                scope.spawn(move |_| {
                    // Buckets for ALL window values including 0 and 1;
                    // bucket[w] accumulates bases whose current window is w.
                    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); 1 << c];
                    let one = <G::Engine as ScalarEngine>::Fr::one().into_repr();

                    // Bucket index for an exponent: the trivial value 1 goes to
                    // bucket 1 only in the lowest region; everything else takes
                    // its c-bit window after shifting out the lower regions.
                    // Window value 0 lands in bucket 0, discarded at the end.
                    let bucket_index = |exp: &<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr| -> usize {
                        if handle_trivial && *exp == one {
                            1
                        } else {
                            let mut e = *exp;
                            e.shr(skip);
                            (e.as_ref()[0] & mask) as usize
                        }
                    };

                    let mut this_chunk_exponents = [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr::default(); UNROLL_BY];
                    let mut next_chunk_exponents = [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr::default(); UNROLL_BY];

                    let mut this_chunk_bases = [G::zero(); UNROLL_BY];
                    let mut next_chunk_bases = [G::zero(); UNROLL_BY];

                    let unrolled_steps = bases.len() / UNROLL_BY;
                    assert!(unrolled_steps >= 2);
                    let remainder = bases.len() % UNROLL_BY;
                    assert_eq!(remainder, 0);

                    // Prime the double buffer with the first two chunks.
                    let mut start_idx = 0;
                    let mut end_idx = UNROLL_BY;
                    this_chunk_exponents.copy_from_slice(&exp[start_idx..end_idx]);
                    this_chunk_bases.copy_from_slice(&bases[start_idx..end_idx]);
                    start_idx += UNROLL_BY;
                    end_idx += UNROLL_BY;
                    next_chunk_exponents.copy_from_slice(&exp[start_idx..end_idx]);
                    next_chunk_bases.copy_from_slice(&bases[start_idx..end_idx]);

                    // Each addition is delayed one step so the destination
                    // bucket's cache line can be prefetched first.
                    let mut previous_exponent_index = bucket_index(&this_chunk_exponents[0]);
                    let mut previous_base = this_chunk_bases[0];
                    crate::prefetch::prefetch_l2_pointer(&buckets[previous_exponent_index] as *const _);
                    let mut intra_chunk_idx = 1;

                    for step in 1..unrolled_steps {
                        while intra_chunk_idx < UNROLL_BY {
                            // add what was processed in a previous step
                            (&mut buckets[previous_exponent_index]).add_assign_mixed(&previous_base);

                            previous_exponent_index = bucket_index(&this_chunk_exponents[intra_chunk_idx]);
                            previous_base = this_chunk_bases[intra_chunk_idx];
                            crate::prefetch::prefetch_l2_pointer(&buckets[previous_exponent_index] as *const _);

                            intra_chunk_idx += 1;
                        }

                        // Swap buffers; refill `next` only while data remains.
                        this_chunk_bases = next_chunk_bases;
                        this_chunk_exponents = next_chunk_exponents;
                        if step + 1 < unrolled_steps {
                            start_idx += UNROLL_BY;
                            end_idx += UNROLL_BY;
                            next_chunk_exponents.copy_from_slice(&exp[start_idx..end_idx]);
                            next_chunk_bases.copy_from_slice(&bases[start_idx..end_idx]);
                        }

                        intra_chunk_idx = 0;
                    }

                    // Drain the final chunk (it was previously dropped entirely).
                    while intra_chunk_idx < UNROLL_BY {
                        (&mut buckets[previous_exponent_index]).add_assign_mixed(&previous_base);

                        previous_exponent_index = bucket_index(&this_chunk_exponents[intra_chunk_idx]);
                        previous_base = this_chunk_bases[intra_chunk_idx];
                        crate::prefetch::prefetch_l2_pointer(&buckets[previous_exponent_index] as *const _);

                        intra_chunk_idx += 1;
                    }

                    // very last addition
                    (&mut buckets[previous_exponent_index]).add_assign_mixed(&previous_base);

                    // Bucket 0 (zero windows) contributes nothing.
                    let _: Vec<_> = buckets.drain(..1).collect();

                    // Running-sum reduction over buckets 1..: counts bucket w
                    // exactly w times.
                    let mut acc = G::Projective::zero();
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.into_iter().rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }

                    // Fold this worker's partial result into the shared total.
                    let mut guard = match this_region_rwlock.lock() {
                        Ok(guard) => guard,
                        Err(_) => {
                            panic!("poisoned!");
                        }
                    };

                    (*guard).add_assign(&acc);
                });
            }
        });

        let this_region = Arc::try_unwrap(arc).unwrap();
        let this_region = this_region.into_inner().unwrap();

        this_region
    };

    skip += c;

    if skip >= <G::Engine as ScalarEngine>::Fr::NUM_BITS {
        // There isn't another region, and this will be the highest region
        return Ok(this);
    } else {
        // next region is actually higher than this one, so double it enough times
        let mut next_region = dense_multiexp_with_manual_unrolling_impl(
            pool, bases, exponents, skip, c, false).unwrap();

        for _ in 0..c {
            next_region.double();
        }

        next_region.add_assign(&this);

        return Ok(next_region);
    }
}
#[allow(dead_code)]
/// One c-bit region of a dense multiexp that stages each chunk of `CACHE_BY`
/// points on the stack, collects (bucket pointer, base) pairs, prefetches the
/// bucket lines, and only then performs the additions.
///
/// Fixes over the previous revision:
/// - the chunk loop ran `0..(unrolled_steps - 1)` and silently dropped the
///   last `CACHE_BY` points of every worker chunk; all chunks are now walked,
///   with the look-ahead base prefetch guarded on the final chunk;
/// - the bucket reduction seeded the accumulator with bucket 1, undercounting
///   every bucket above it by one.
fn dense_multiexp_with_manual_unrolling_impl_2<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
    mut skip: u32,
    c: u32,
    _handle_trivial: bool
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    // we assume that a single memory fetch is around 10-12 ns, so before any operation
    // we ideally should prefetch a memory unit for a next operation
    const CACHE_BY: usize = 1024;

    use std::sync::{Mutex};

    // Process this region over all points in parallel, then recurse into the
    // next (higher) region and combine by doubling.
    let this = {
        // Selects the c low bits once `skip` lower bits are shifted out.
        let mask = (1u64 << c) - 1u64;
        let this_region = Mutex::new(<G as CurveAffine>::Projective::zero());
        let arc = Arc::new(this_region);
        pool.scope(bases.len(), |scope, chunk| {
            for (bases, exp) in bases.chunks(chunk).zip(exponents.chunks(chunk)) {
                let this_region_rwlock = arc.clone();
                scope.spawn(move |_| {
                    // Buckets for every window value; bucket 0 collects the
                    // zero windows and is discarded before the reduction.
                    let mut buckets = vec![<G as CurveAffine>::Projective::zero(); 1 << c];

                    // Stack-local staging buffers so the hot loop reads
                    // contiguous, cache-resident data.
                    let mut exponents_chunk = [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr::default(); CACHE_BY];
                    let mut bases_chunk = [G::zero(); CACHE_BY];

                    let unrolled_steps = bases.len() / CACHE_BY;
                    assert!(unrolled_steps >= 2);
                    let remainder = bases.len() % CACHE_BY;
                    assert_eq!(remainder, 0);

                    use std::ptr::NonNull;

                    // (bucket pointer, base) pairs collected per chunk so the
                    // bucket lines can be prefetched before the additions run.
                    let mut basket_pointers_to_process = [(NonNull::< <G as CurveAffine>::Projective>::dangling(), G::zero()); CACHE_BY];
                    let basket_pointer = buckets.as_mut_ptr();

                    let mut start_idx = 0;
                    let mut end_idx = CACHE_BY;
                    // Walk ALL chunks (the last one was previously skipped).
                    for chunk_idx in 0..unrolled_steps {
                        exponents_chunk.copy_from_slice(&exp[start_idx..end_idx]);
                        bases_chunk.copy_from_slice(&bases[start_idx..end_idx]);
                        // Gather non-zero windows into pointer/base pairs.
                        let mut bucket_idx = 0;
                        for (e, b) in exponents_chunk.iter().zip(bases_chunk.iter()) {
                            let mut this_exp = *e;
                            this_exp.shr(skip);
                            let this_exp = (this_exp.as_ref()[0] & mask) as usize;
                            if this_exp != 0 {
                                let ptr = unsafe { NonNull::new_unchecked(basket_pointer.add(this_exp)) };
                                basket_pointers_to_process[bucket_idx] = (ptr, *b);
                                bucket_idx += 1;
                            }
                        }
                        // Warm the bucket lines that this chunk will touch.
                        for i in 0..bucket_idx {
                            crate::prefetch::prefetch_l1_pointer(basket_pointers_to_process[i].0.as_ptr() as *const _);
                        }
                        // Look-ahead prefetch of the next chunk's bases; guarded
                        // so the last chunk does not index past the slice.
                        if chunk_idx + 1 < unrolled_steps {
                            crate::prefetch::prefetch_l2_pointer(&bases[end_idx] as *const _);
                            crate::prefetch::prefetch_l2_pointer(&bases[end_idx+1] as *const _);
                        }
                        for i in 0..bucket_idx {
                            let (mut ptr, to_add) = basket_pointers_to_process[i];
                            let point_ref: &mut _ = unsafe { ptr.as_mut()};
                            point_ref.add_assign_mixed(&to_add);
                        }

                        start_idx += CACHE_BY;
                        end_idx += CACHE_BY;
                    }

                    // Bucket 0 (zero windows) contributes nothing.
                    let _: Vec<_> = buckets.drain(..1).collect();

                    // Running-sum reduction over buckets 1..: counts bucket w
                    // exactly w times.
                    let mut acc = G::Projective::zero();
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.into_iter().rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }

                    // Fold this worker's partial result into the shared total.
                    let mut guard = match this_region_rwlock.lock() {
                        Ok(guard) => guard,
                        Err(_) => {
                            panic!("poisoned!");
                        }
                    };

                    (*guard).add_assign(&acc);
                });
            }
        });

        let this_region = Arc::try_unwrap(arc).unwrap();
        let this_region = this_region.into_inner().unwrap();

        this_region
    };

    skip += c;

    if skip >= <G::Engine as ScalarEngine>::Fr::NUM_BITS {
        // There isn't another region, and this will be the highest region
        return Ok(this);
    } else {
        // next region is actually higher than this one, so double it enough times
        let mut next_region = dense_multiexp_with_manual_unrolling_impl_2(
            pool, bases, exponents, skip, c, false).unwrap();

        for _ in 0..c {
            next_region.double();
        }

        next_region.add_assign(&this);

        return Ok(next_region);
    }
}
/// Perform multi-exponentiation, consuming the exponent vector.
/// Returns `AssignmentMissing` when the base and exponent counts differ.
#[allow(dead_code)]
pub fn dense_multiexp_consume<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    let n = exponents.len();
    if n != bases.len() {
        return Err(SynthesisError::AssignmentMissing);
    }

    // Window width: ln(n) bits, floored at 3 for very small inputs.
    let window_bits = match n {
        0..=31 => 3u32,
        _ => (f64::from(n as u32)).ln().ceil() as u32,
    };

    dense_multiexp_inner_consume(pool, bases, exponents, window_bits)
}
/// Inner worker for `dense_multiexp_consume`: each thread walks its chunk of
/// points once per c-bit region (reusing one bucket vector across regions)
/// and sends its per-chunk result over a channel; the main thread sums them.
///
/// Fixes over the previous revision: the bucket index of the element ahead
/// was taken from the UNSHIFTED exponent (the freshly shifted copy `e` was
/// computed and then ignored), and the loop-head index for `exp[0]` never
/// shifted by `skip` — both made every region after the first read the
/// lowest window again.
fn dense_multiexp_inner_consume<G: CurveAffine>(
    pool: &Worker,
    bases: & [G],
    exponents: Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>,
    c: u32,
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
    // spawn exactly required number of threads at the time, not more
    // each thread mutates part of the exponents and walks over the same range of bases

    use std::sync::mpsc::{channel};

    let (tx, rx) = channel();

    pool.scope(bases.len(), |scope, chunk| {
        for (base, exp) in bases.chunks(chunk).zip(exponents.chunks(chunk)) {
            let tx = tx.clone();
            scope.spawn(move |_| {
                let mut skip = 0;
                let mut result = G::Projective::zero();
                // bucket[w] accumulates bases whose current window value is w;
                // bucket 0 is never added to and never summed.
                let mut buckets = vec![<G as CurveAffine>::Projective::zero(); 1 << c];
                let zero = <G::Engine as ScalarEngine>::Fr::zero().into_repr();
                // Padding entry so the look-ahead iterator covers the last element.
                let padding = Some(<G::Engine as ScalarEngine>::Fr::zero().into_repr());
                let mask: u64 = (1 << c) - 1;
                loop {
                    // Window of the first exponent for the CURRENT region.
                    let mut first = exp[0];
                    first.shr(skip);
                    let mut next_bucket_index = (first.as_ref()[0] & mask) as usize;
                    let exp_next_constant_iter = exp.iter().skip(1);

                    let mut acc = G::Projective::zero();

                    // Walk pairs of (current base, NEXT exponent): the next
                    // element's bucket is computed and prefetched one step early.
                    for (base, &next_exp_to_prefetch) in base.iter()
                        .zip(exp_next_constant_iter.chain(padding.iter()))
                    {
                        let this_bucket_index = next_bucket_index;
                        if next_exp_to_prefetch != zero {
                            let mut e = next_exp_to_prefetch;
                            e.shr(skip);
                            // Use the shifted copy (the unshifted repr was read
                            // here before, silently ignoring `skip`).
                            next_bucket_index = (e.as_ref()[0] & mask) as usize;
                            if next_bucket_index > 0 {
                                crate::prefetch::prefetch_l3_pointer(&buckets[next_bucket_index] as *const _);
                            }
                        } else {
                            next_bucket_index = 0;
                        }

                        // Add the base whose bucket line was prefetched last step.
                        if this_bucket_index > 0 {
                            buckets[this_bucket_index].add_assign_mixed(base);
                        }
                    }

                    // buckets are filled with the corresponding accumulated value,
                    // now sum: the running-sum trick counts bucket w exactly w times.
                    let mut running_sum = G::Projective::zero();
                    for exp in buckets.iter().skip(1).rev() {
                        running_sum.add_assign(&exp);
                        acc.add_assign(&running_sum);
                    }

                    // Shift this region's contribution into its bit position.
                    for _ in 0..skip {
                        acc.double();
                    }

                    result.add_assign(&acc);

                    skip += c;

                    if skip >= <G::Engine as ScalarEngine>::Fr::NUM_BITS {
                        // All regions processed: ship the chunk's total.
                        tx.send(result).unwrap();

                        break;
                    } else {
                        // Reset the buckets for the next region.
                        buckets.truncate(0);
                        buckets.resize(1 << c, <G as CurveAffine>::Projective::zero());
                    }
                }
            });
        }
    });

    // The scope has joined all workers, so every send has completed.
    let mut result = <G as CurveAffine>::Projective::zero();
    for value in rx.try_iter() {
        result.add_assign(&value);
    }

    Ok(result)
}
#[cfg(test)]
mod test {
    use super::*;

    /// Reference result: scalar-multiply every base and add everything up.
    fn naive_multiexp<G: CurveAffine>(
        bases: Arc<Vec<G>>,
        exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>
    ) -> G::Projective
    {
        assert_eq!(bases.len(), exponents.len());

        let mut acc = G::Projective::zero();

        for (base, exp) in bases.iter().zip(exponents.iter()) {
            acc.add_assign(&base.mul(*exp));
        }

        acc
    }

    /// `multiexp` must match the naive implementation on BLS12-381.
    #[test]
    fn test_new_multiexp_with_bls12() {
        use rand::{self, Rand};
        use crate::pairing::bls12_381::Bls12;
        use self::futures::executor::block_on;

        const SAMPLES: usize = 1 << 14;

        let rng = &mut rand::thread_rng();
        let v = Arc::new((0..SAMPLES).map(|_| <Bls12 as ScalarEngine>::Fr::rand(rng).into_repr()).collect::<Vec<_>>());
        let g = Arc::new((0..SAMPLES).map(|_| <Bls12 as Engine>::G1::rand(rng).into_affine()).collect::<Vec<_>>());

        let naive = naive_multiexp(g.clone(), v.clone());

        let pool = Worker::new();

        let fast = block_on(
            multiexp(
                &pool,
                (g, 0),
                FullDensity,
                v
            )
        ).unwrap();

        assert_eq!(naive, fast);
    }

    /// Dense, future-based and sparse multiexp must all agree with the
    /// naive implementation on BN254.
    #[test]
    fn test_valid_bn254_multiexp() {
        use rand::{self, Rand};
        use crate::pairing::bn256::Bn256;

        const SAMPLES: usize = 1 << 14;

        let pool = Worker::new();

        let rng = &mut rand::thread_rng();
        let v = (0..SAMPLES).map(|_| <Bn256 as ScalarEngine>::Fr::rand(rng).into_repr()).collect::<Vec<_>>();
        let g = (0..SAMPLES).map(|_| <Bn256 as Engine>::G1::rand(rng).into_affine()).collect::<Vec<_>>();

        let dense = dense_multiexp(
            &pool,
            &g,
            &v,
        ).unwrap();

        let v = Arc::new(v);
        let g = Arc::new(g);

        let naive = naive_multiexp(g.clone(), v.clone());

        assert_eq!(dense, naive);

        use self::futures::executor::block_on;

        let fast_dense = future_based_multiexp(
            &pool,
            g.clone(),
            v.clone()
        ).wait().unwrap();

        assert_eq!(naive, fast_dense);

        let fast = block_on(
            multiexp(
                &pool,
                (g, 0),
                FullDensity,
                v
            )
        ).unwrap();

        assert_eq!(naive, fast);
    }

    /// Manual benchmark: wall time of `multiexp` on 2^22 BN254 points.
    #[test]
    #[ignore]
    fn test_new_multexp_speed_with_bn256() {
        use rand::{self, Rand};
        use crate::pairing::bn256::Bn256;
        use num_cpus;

        let cpus = num_cpus::get();
        const SAMPLES: usize = 1 << 22;

        let rng = &mut rand::thread_rng();
        let v = Arc::new((0..SAMPLES).map(|_| <Bn256 as ScalarEngine>::Fr::rand(rng).into_repr()).collect::<Vec<_>>());
        let g = Arc::new((0..SAMPLES).map(|_| <Bn256 as Engine>::G1::rand(rng).into_affine()).collect::<Vec<_>>());

        let pool = Worker::new();

        use self::futures::executor::block_on;

        let start = std::time::Instant::now();

        let _fast = block_on(
            multiexp(
                &pool,
                (g, 0),
                FullDensity,
                v
            )
        ).unwrap();

        let duration_ns = start.elapsed().as_nanos() as f64;
        println!("Elapsed {} ns for {} samples", duration_ns, SAMPLES);
        let time_per_sample = duration_ns/(SAMPLES as f64);
        println!("Tested on {} samples on {} CPUs with {} ns per multiplication", SAMPLES, cpus, time_per_sample);
    }

    /// Cross-checks dense, map-reduce, buffered and sparse variants against
    /// each other, printing per-variant timings for reference.
    #[test]
    fn test_dense_multiexp_vs_new_multiexp() {
        use rand::{XorShiftRng, SeedableRng, Rand, Rng};
        use crate::pairing::bn256::Bn256;
        use num_cpus;

        // const SAMPLES: usize = 1 << 22;
        const SAMPLES: usize = 1 << 16;
        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);

        let v = (0..SAMPLES).map(|_| <Bn256 as ScalarEngine>::Fr::rand(rng).into_repr()).collect::<Vec<_>>();
        let g = (0..SAMPLES).map(|_| <Bn256 as Engine>::G1::rand(rng).into_affine()).collect::<Vec<_>>();

        println!("Done generating test points and scalars");

        let pool = Worker::new();

        let start = std::time::Instant::now();

        let dense = dense_multiexp(
            &pool, &g, &v.clone()).unwrap();

        let duration_ns = start.elapsed().as_nanos() as f64;
        println!("{} ns for dense for {} samples", duration_ns, SAMPLES);

        let start = std::time::Instant::now();

        let _map_reduce = map_reduce_multiexp_over_fixed_window(
            &pool,
            &g,
            &v,
            11
        ).unwrap();

        let duration_ns = start.elapsed().as_nanos() as f64;
        println!("{} ns for map reduce for {} samples", duration_ns, SAMPLES);

        // assert_eq!(dense, map_reduce);

        let start = std::time::Instant::now();

        let buffered = buffered_multiexp_over_fixed_window_and_buffer_size(
            &pool,
            &g,
            &v,
            11,
            64,
        ).unwrap();

        let duration_ns = start.elapsed().as_nanos() as f64;
        println!("{} ns for buffered multiexp for {} samples", duration_ns, SAMPLES);

        assert_eq!(dense, buffered);

        use self::futures::executor::block_on;

        let start = std::time::Instant::now();

        let sparse = block_on(
            multiexp(
                &pool,
                (Arc::new(g), 0),
                FullDensity,
                Arc::new(v)
            )
        ).unwrap();

        let duration_ns = start.elapsed().as_nanos() as f64;
        println!("{} ns for sparse for {} samples", duration_ns, SAMPLES);

        assert_eq!(dense, sparse);
    }

    /// Manual benchmark of the sparse `multiexp` path on BLS12-381.
    #[test]
    fn test_bench_sparse_multiexp() {
        use rand::{XorShiftRng, SeedableRng, Rand, Rng};
        use num_cpus;

        type Eng = crate::pairing::bls12_381::Bls12;

        const SAMPLES: usize = 1 << 22;
        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);

        let v = (0..SAMPLES).map(|_| <Eng as ScalarEngine>::Fr::rand(rng).into_repr()).collect::<Vec<_>>();
        let g = (0..SAMPLES).map(|_| <Eng as Engine>::G1::rand(rng).into_affine()).collect::<Vec<_>>();

        println!("Done generating test points and scalars");

        let pool = Worker::new();
        let start = std::time::Instant::now();

        let _sparse = multiexp(
            &pool,
            (Arc::new(g), 0),
            FullDensity,
            Arc::new(v)
        ).wait().unwrap();

        let duration_ns = start.elapsed().as_nanos() as f64;
        println!("{} ms for sparse for {} samples on {:?}", duration_ns/1000.0f64, SAMPLES, Eng{});
    }

    /// Manual benchmark of the consuming dense multiexp on BLS12-381.
    #[test]
    fn test_bench_dense_consuming_multiexp() {
        use rand::{XorShiftRng, SeedableRng, Rand, Rng};
        // type Eng = crate::pairing::bn256::Bn256;
        type Eng = crate::pairing::bls12_381::Bls12;
        use num_cpus;

        const SAMPLES: usize = 1 << 22;
        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);

        let v = (0..SAMPLES).map(|_| <Eng as ScalarEngine>::Fr::rand(rng).into_repr()).collect::<Vec<_>>();
        let g = (0..SAMPLES).map(|_| <Eng as Engine>::G1::rand(rng).into_affine()).collect::<Vec<_>>();

        let pool = Worker::new();

        let start = std::time::Instant::now();

        let _dense = dense_multiexp_consume(
            &pool,
            &g,
            v
        ).unwrap();

        println!("{:?} for dense for {} samples", start.elapsed(), SAMPLES);
    }

    /// Prints windowing heuristics for a given circuit size and core count.
    fn calculate_parameters(size: usize, threads: usize, bits: u32) {
        let mut chunk_len = size / threads;
        // Round the chunk length up for an inexact split. The previous
        // condition was `size / threads != 0`, which is true for any
        // size >= threads and over-counted exact splits by one.
        if size % threads != 0 {
            chunk_len += 1;
        }

        let raw_size = (f64::from(chunk_len as u32)).ln();
        // Round-to-nearest variant of the window size, printed for comparison.
        let new_window_size = if raw_size.floor() + 0.5 < raw_size {
            raw_size.ceil() as u32
        } else {
            raw_size.floor() as u32
        };
        let window_size = (f64::from(chunk_len as u32)).ln().ceil() as u32;
        let mut num_windows = bits / window_size;
        let leftover = bits % window_size;
        if leftover != 0 {
            num_windows += 1;
        }

        // Window size computed from the full size instead of the chunk size;
        // currently only used by the commented-out printout below.
        let uncompensated_window = (f64::from(size as u32)).ln().ceil() as u32;
        let mut num_uncompensated_windows = bits / uncompensated_window;
        let uncompensated_leftover = bits % uncompensated_window;
        if uncompensated_leftover != 0 {
            num_uncompensated_windows += 1;
        }

        println!("For size {} and {} cores: chunk len {}, {} windows, average window {} bits, leftover {} bits. Alternative window size = {}", size, threads, chunk_len, num_windows, window_size, leftover, new_window_size);
        // println!("Raw window size = {}", raw_size);
        // println!("Uncompensated: {} windows, arevage window {} bits, leftover {} bits", num_uncompensated_windows, uncompensated_window, uncompensated_leftover);
        // (f64::from(exponents.len() as u32)).ln().ceil() as u32
    }

    /// Dumps windowing parameters for typical BN254 proving sizes.
    #[test]
    fn test_sizes_for_bn254() {
        let sizes = vec![1<<23, 1<<24];
        let cores = vec![8, 12, 16, 24, 32, 48];
        for size in sizes {
            for &core in &cores {
                calculate_parameters(size, core, 254);
            }
        }
    }
}
/// Returns floor(log2(`num`)), i.e. the index of the highest set bit.
///
/// Panics if `num` is zero.
pub fn log2_floor(num: usize) -> u32 {
    assert!(num > 0);

    // Count how many times the value can be halved before reaching 1.
    // (The previous probe `1 << (pow + 1)` overflowed the shift — a panic in
    // debug builds — for inputs with the top bit set.)
    let mut pow = 0;
    let mut n = num >> 1;
    while n != 0 {
        pow += 1;
        n >>= 1;
    }

    pow
}
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::SynthesisError;
use crate::plonk::cs::gates::Gate;
use crate::plonk::cs::gates::Coeff;
use crate::plonk::cs::gates::Variable as PlonkVariable;
use crate::plonk::cs::gates::Index as PlonkIndex;
use crate::plonk::cs::Circuit as PlonkCircuit;
use crate::plonk::cs::ConstraintSystem as PlonkConstraintSystem;
use std::marker::PhantomData;
pub mod alternative;
// pub struct Adaptor<'a, E: Engine, CS: PlonkConstraintSystem<E> + 'a> {
// cs: &'a mut CS,
// _marker: PhantomData<E>,
// }
// impl<'a, E: Engine, CS: PlonkConstraintSystem<E> + 'a> crate::ConstraintSystem<E>
// for Adaptor<'a, E, CS>
// {
// type Root = Self;
// fn one() -> crate::Variable {
// crate::Variable::new_unchecked(crate::Index::Input(0))
// }
// fn alloc<F, A, AR>(&mut self, _: A, f: F) -> Result<crate::Variable, crate::SynthesisError>
// where
// F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
// A: FnOnce() -> AR,
// AR: Into<String>,
// {
// let var = self.cs.alloc(|| {
// f().map_err(|_| crate::SynthesisError::AssignmentMissing)
// })?;
// Ok(match var {
// PlonkVariable(PlonkIndex::Input(index)) => crate::Variable::new_unchecked(crate::Index::Input(index)),
// _ => unreachable!(),
// })
// }
// fn alloc_input<F, A, AR>(
// &mut self,
// _: A,
// f: F,
// ) -> Result<crate::Variable, crate::SynthesisError>
// where
// F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
// A: FnOnce() -> AR,
// AR: Into<String>,
// {
// let var = self.cs.alloc_input(|| {
// f().map_err(|_| crate::SynthesisError::AssignmentMissing)
// })?;
// Ok(match var {
// PlonkVariable(PlonkIndex::Aux(index)) => crate::Variable::new_unchecked(crate::Index::Aux(index)),
// _ => unreachable!(),
// })
// }
// fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
// where
// A: FnOnce() -> AR,
// AR: Into<String>,
// LA: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
// LB: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
// LC: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
// {
// /// Represents either a "true" variable or a constant
// /// auxiliary variable.
// #[derive(Copy, Clone, PartialEq, Debug, Hash, Eq)]
// enum Var
// {
// InputVar(PlonkVariable),
// ConstVar
// }
// fn convert<E: Engine>(lc: crate::LinearCombination<E>) -> Vec<(E::Fr, Var)> {
// let mut ret = Vec::with_capacity(lc.as_ref().len());
// for &(v, coeff) in lc.as_ref().iter() {
// let var = match v.get_unchecked() {
// crate::Index::Input(0) => Var::ConstVar,
// crate::Index::Input(i) => Var::InputVar(PlonkVariable(PlonkIndex::Input(i))),
// crate::Index::Aux(i) => Var::InputVar(PlonkVariable(PlonkIndex::Aux(i))),
// };
// ret.push((coeff, var));
// }
// ret
// }
// let a_terms = convert(a(crate::LinearCombination::zero()));
// let b_terms = convert(b(crate::LinearCombination::zero()));
// let c_terms = convert(c(crate::LinearCombination::zero()));
// ///first check if we are dealing with a boolean constraint
// /// we use the following heuristics:
// ///analyse comment string
// ///calculate the number of arguments in each linear combination - in boolean constraint length of each lc is at most 2
// /// this function returns true in the case of boolean constraint
// fn handle_boolean_constraint<A, AR, E: Engine>(
// la: &Vec<(E::Fr, Var)>,
// lb: &Vec<(E::Fr, Var)>,
// lc: &Vec<(E::Fr, Var)>,
// ) -> bool
// where
// A: FnOnce() -> AR,
// AR: Into<String>
// {
// return true;
// }
// fn eval_lc_short<E: Engine, CS: PlonkConstraintSystem<E>>(
// term1: (E::Fr, PlonkVariable),
// term2: (E::Fr, PlonkVariable),
// cs: &CS,
// ) -> Option<E::Fr>
// {
// let mut extra_value = E::Fr::zero();
// let mut var_value = match cs.get_value(term1.1) {
// Ok(tmp) => tmp,
// Err(_) => return None,
// };
// var_value.mul_assign(&term1.0);
// extra_value.add_assign(&var_value);
// var_value = match cs.get_value(term2.1) {
// Ok(tmp) => tmp,
// Err(_) => return None,
// };
// var_value.mul_assign(&term2.0);
// extra_value.add_assign(&var_value);
// Some(extra_value)
// }
// fn allocate_new_lc_var<E: Engine, CS: PlonkConstraintSystem<E>>(
// term1: (E::Fr, PlonkVariable),
// term2: (E::Fr, PlonkVariable),
// cs: &mut CS,
// ) -> PlonkVariable
// {
// let extra_value = eval_lc_short(term1, term2, &*cs);
// let extra_variable = cs.alloc(||
// {
// if let Some(value) = extra_value {
// Ok(value)
// } else {
// Err(SynthesisError::AssignmentMissing)
// }
// }
// ).expect("must allocate");
// cs.enforce_mul_3((term1.1, term2.1, extra_variable)).expect("must allocate");
// extra_variable
// }
// fn allocate_lc_intermediate_variables<E: Engine, CS: PlonkConstraintSystem<E>>(
// terms: Vec<(E::Fr, Var)>,
// cs: &mut CS,
// ) -> (PlonkVariable, Option<E::Fr>) {
// debug_assert!(terms.len() > 2);
// let mut const_var_found = false;
// let mut const_coeff = E::Fr::zero();
// let mut current_var : Option<(E::Fr, PlonkVariable)> = None;
// for &(coeff, var) in terms.iter() {
// match var {
// Var::ConstVar => {
// if const_var_found {
// unreachable!();
// }
// const_var_found = true;
// const_coeff = coeff;
// }
// Var::InputVar(pv) => {
// current_var = match current_var {
// None => Some((coeff, pv)),
// Some((old_coeff, old_pv)) => {
// let new_val = allocate_new_lc_var((old_coeff, old_pv), (coeff, pv), cs);
// Some((E::Fr::one(), new_val))
// }
// }
// }
// }
// }
// let var = match current_var {
// Some((_, pv)) => pv,
// None => unreachable!(),
// };
// let coef = match const_var_found{
// false => None,
// true => Some(const_coeff)
// } ;
// return (var, coef)
// }
// /// after parsing we should return one of three possible results:
// /// variable, constant or sum variable + constant
// fn parse_lc<E: Engine, CS: PlonkConstraintSystem<E>>(
// terms: Vec<(E::Fr, Var)>,
// cs: &mut CS,
// ) -> (Option<(E::Fr, PlonkVariable)>, Option<E::Fr>) {
// // there are few options
// match terms.len() {
// 0 => {
// //Every linear combination in real cs should contain at least one term!
// unreachable!();
// },
// 1 => {
// let (c_0, v_0) = terms[0];
// let result = match v_0 {
// Var::InputVar(pv) => (Some((c_0, pv)), None),
// Var::ConstVar => (None, Some(c_0)),
// };
// // forward the result
// return result;
// },
// 2 => {
// let (c_0, v_0) = terms[0];
// let (c_1, v_1) = terms[1];
// //check if one of v_0, v_1 is constant and the other is variable, or vice versa
// //the case of two constants is impossible in real cs!
// let result = match (v_0, v_1) {
// (Var::InputVar(pv), Var::ConstVar) => (Some((c_0, pv)), Some(c_1)),
// (Var::ConstVar, Var::InputVar(pv)) => (Some((c_1, pv)), Some(c_0)),
// (Var::InputVar(pv0), Var::InputVar(pv1)) => {
// let extra_variable = allocate_new_lc_var((c_0, pv0), (c_1, pv1), cs);
// (Some((E::Fr::one(), extra_variable)), None)
// }
// (Var::ConstVar, Var::ConstVar) => unreachable!(),
// };
// return result;
// }
// _ => {
// // here we need to allocate intermediate variables and output the last one
// let last_vars = allocate_lc_intermediate_variables(terms, cs);
// return (Some((E::Fr::one(), last_vars.0)), last_vars.1);
// }
// }
// }
// let a_var = parse_lc(a_terms, self.cs);
// let b_var = parse_lc(b_terms, self.cs);
// let c_var = parse_lc(c_terms, self.cs);
// /// parse result and return expr of the form: coeff * var + constant
// fn unfold_var<E: Engine, CS: PlonkConstraintSystem<E>>(
// var: (Option<(E::Fr, PlonkVariable)>, Option<(E::Fr)>),
// stub: PlonkVariable,
// cs: &mut CS,
// ) -> (E::Fr, PlonkVariable, E::Fr)
// {
// let result = match var {
// (Some((coeff, var)), Some(constant)) => (coeff, var, constant),
// (Some((coeff, var)), None) => (coeff, var, E::Fr::zero()),
// (None, Some(constant)) => (E::Fr::zero(), stub, constant),
// _ => unreachable!(),
// };
// return result;
// }
// // our final equation is of the following form
// // (x a_var + c_1) (y b_var + c_2) = (z c_var + c_3)
// // we can convert it to standard PLONK form:
// // (xy) a_var + b_var + (x c_2) a_var + (y c_1) b_var - z c_var + (c_1 c_2 - c_3) */
// let (mut x, a_var, mut c_1) : (E::Fr, PlonkVariable, E::Fr) = unfold_var(a_var, CS::ZERO, self.cs);
// let (mut y, b_var, c_2) : (E::Fr, PlonkVariable, E::Fr) = unfold_var(b_var, CS::ZERO, self.cs);
// let (mut z, c_var, mut c_3) : (E::Fr, PlonkVariable, E::Fr) = unfold_var(c_var, CS::ZERO, self.cs);
// let mut a_coef : E::Fr = x;
// a_coef.mul_assign(&y);
// x.mul_assign(&c_2);
// y.mul_assign(&c_1);
// z.negate();
// c_1.mul_assign(&c_2);
// c_3.negate();
// c_1.add_assign(&c_3);
// self.cs.new_gate((a_var, b_var, c_var), (a_coef, x, y, z, c_1));
// }
// fn push_namespace<NR, N>(&mut self, _: N)
// where
// NR: Into<String>,
// N: FnOnce() -> NR,
// {
// // Do nothing; we don't care about namespaces in this context.
// }
// fn pop_namespace(&mut self) {
// // Do nothing; we don't care about namespaces in this context.
// }
// fn get_root(&mut self) -> &mut Self::Root {
// self
// }
// }
// #[derive(Clone)]
// pub struct AdaptorCircuit<T>(pub T);
// impl<'a, E: Engine, C: crate::Circuit<E> + Clone> PlonkCircuit<E> for AdaptorCircuit<C> {
// fn synthesize<CS: PlonkConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
// let mut adaptor = Adaptor {
// cs: cs,
// _marker: PhantomData,
// };
// match self.0.clone().synthesize(&mut adaptor) {
// Err(_) => return Err(SynthesisError::AssignmentMissing),
// Ok(_) => {}
// };
// Ok(())
// }
// }<file_sep>/src/plonk/better_cs/one_shot_test_assembly.rs
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::{SynthesisError};
use std::marker::PhantomData;
use super::cs::*;
/// Test-only constraint system that records all gates and assignments so the
/// whole circuit can be checked for satisfiability in one shot.
#[derive(Debug, Clone)]
pub struct OneShotTestAssembly<E: Engine, P: PlonkConstraintSystemParams<E>> {
    // multiplication counter — unused in the visible code; kept for parity
    // with other assemblies (TODO confirm)
    m: usize,
    // total number of gates (input gates + aux gates)
    n: usize,
    num_inputs: usize,
    num_aux: usize,
    // assigned values, indexed by (1-based variable index) - 1
    input_assingments: Vec<E::Fr>,
    aux_assingments: Vec<E::Fr>,
    // each gate: (state variables, this-row coefficients, next-row coefficients)
    input_gates: Vec<(P::StateVariables, P::ThisTraceStepCoefficients, P::NextTraceStepCoefficients)>,
    aux_gates: Vec<(P::StateVariables, P::ThisTraceStepCoefficients, P::NextTraceStepCoefficients)>,
    inputs_map: Vec<usize>,
    is_finalized: bool,
    // set while a gate chain still expects a follow-up row
    next_step_leftover_from_previous_gate: Option<(E::Fr, P::NextTraceStepCoefficients)>,
    _marker: std::marker::PhantomData<P>
}
impl<E: Engine, P: PlonkConstraintSystemParams<E>> ConstraintSystem<E, P> for OneShotTestAssembly<E, P> {
    // allocate a variable
    //
    // Evaluates `value` eagerly and stores it; returns a 1-based Aux index
    // (index 0 is reserved for the dummy variable).
    fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let value = value()?;
        self.num_aux += 1;
        let index = self.num_aux;
        self.aux_assingments.push(value);
        // println!("Allocated variable Aux({}) with value {}", index, value);
        Ok(Variable(Index::Aux(index)))
    }
    // allocate an input variable
    //
    // Besides storing the assignment, this adds one input gate whose state is
    // the new input variable padded with the dummy variable.
    fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let value = value()?;
        self.num_inputs += 1;
        let index = self.num_inputs;
        self.input_assingments.push(value);
        let input_var = Variable(Index::Input(index));
        let dummy = self.get_dummy_variable();
        let vars = P::StateVariables::from_variable_and_padding(input_var, dummy);
        let mut this_step_coeffs = P::ThisTraceStepCoefficients::identity();
        this_step_coeffs.negate(); // we use -1 here to later add to the constants polynomial using + sign
        let next_step_coeffs = P::NextTraceStepCoefficients::empty();
        self.input_gates.push((vars, this_step_coeffs, next_step_coeffs));
        self.n += 1;
        Ok(input_var)
    }
    // allocate an abstract gate
    //
    // Records the gate verbatim; no evaluation happens here.
    fn new_gate(&mut self,
        variables: P::StateVariables,
        this_step_coeffs: P::ThisTraceStepCoefficients,
        next_step_coeffs: P::NextTraceStepCoefficients
    ) -> Result<(), SynthesisError> {
        self.aux_gates.push((variables, this_step_coeffs, next_step_coeffs));
        self.n += 1;
        Ok(())
    }
    // Reads the assigned value of a variable. Aux(0) is the dummy variable
    // and always reads as zero; Input(0) is never allocated and errors out.
    fn get_value(&self, var: Variable) -> Result<E::Fr, SynthesisError> {
        let value = match var {
            Variable(Index::Aux(0)) => {
                E::Fr::zero()
                // return Err(SynthesisError::AssignmentMissing);
            }
            Variable(Index::Input(0)) => {
                return Err(SynthesisError::AssignmentMissing);
            }
            Variable(Index::Input(input)) => {
                self.input_assingments[input - 1]
            },
            Variable(Index::Aux(aux)) => {
                self.aux_assingments[aux - 1]
            }
        };
        Ok(value)
    }
    fn get_dummy_variable(&self) -> Variable {
        self.dummy_variable()
    }
}
impl<E: Engine, P: PlonkConstraintSystemParams<E>> OneShotTestAssembly<E, P> {
    /// Creates an empty assembly.
    pub fn new() -> Self {
        Self {
            n: 0,
            m: 0,
            num_inputs: 0,
            num_aux: 0,
            input_assingments: vec![],
            aux_assingments: vec![],
            input_gates: vec![],
            aux_gates: vec![],
            inputs_map: vec![],
            is_finalized: false,
            next_step_leftover_from_previous_gate: None,
            _marker: std::marker::PhantomData
        }
    }
    /// Creates an empty assembly with storage preallocated for the expected
    /// numbers of input and auxiliary variables.
    pub fn new_with_size_hints(num_inputs: usize, num_aux: usize) -> Self {
        Self {
            n: 0,
            m: 0,
            num_inputs: 0,
            num_aux: 0,
            input_assingments: Vec::with_capacity(num_inputs),
            aux_assingments: Vec::with_capacity(num_aux),
            input_gates: Vec::with_capacity(num_inputs),
            aux_gates: Vec::with_capacity(num_aux),
            inputs_map: Vec::with_capacity(num_inputs),
            is_finalized: false,
            next_step_leftover_from_previous_gate: None,
            _marker: std::marker::PhantomData
        }
    }
    // return variable that is not in a constraint formally, but has some value
    fn dummy_variable(&self) -> Variable {
        Variable(Index::Aux(0))
    }
    /// True when the final gate does not chain into a non-existent next row.
    pub fn is_well_formed(&self) -> bool {
        self.next_step_leftover_from_previous_gate.is_none()
    }
    /// Total number of gates recorded so far (inputs + aux).
    pub fn num_gates(&self) -> usize {
        self.n
    }
}
impl<E: Engine> OneShotTestAssembly<E, PlonkCsWidth4WithNextStepParams> {
    /// Checks every gate: the sum of (coeff * variable) terms, the
    /// multiplication term, the constant term, and the next-row terms must be
    /// zero. When `in_a_middle` is true, the last gate's next-row terms are
    /// allowed to reference rows beyond this assembly and are skipped.
    pub fn is_satisfied(&self, in_a_middle: bool) -> bool {
        // expect a small number of inputs
        for (_i, (_vars, this_step_coeffs, next_step_coeffs)) in self.input_gates.iter().enumerate()
        {
            for c in this_step_coeffs.as_ref().iter().skip(1) {
                assert!(c.is_zero(), "input gate must contatain only one coefficient");
            }
            for c in next_step_coeffs.as_ref().iter() {
                assert!(c.is_zero(), "input gate must contatain no next step coefficients");
            }
        }
        // walk pairs of adjacent gates so next-row terms can be evaluated
        for (i, gate_pair) in self.aux_gates.windows(2).enumerate()
        {
            let this_gate = &gate_pair[0];
            let next_gate = &gate_pair[1];
            let mut this_gate_value = E::Fr::zero();
            let mut coeffs_iter = this_gate.1.as_ref().iter();
            // addition: sum coeff_i * state_var_i
            for (&this_var, this_coeff) in this_gate.0.as_ref().iter()
                .zip(&mut coeffs_iter)
            {
                let mut tmp = self.get_value(this_var).expect("must get a variable value");
                tmp.mul_assign(&this_coeff);
                this_gate_value.add_assign(&tmp);
            }
            // multiplication: q_m * a * b (next coefficient in the iterator)
            let mut tmp = self.get_value(this_gate.0.as_ref()[0]).expect("must get a variable value");
            tmp.mul_assign(&self.get_value(this_gate.0.as_ref()[1]).expect("must get a variable value"));
            tmp.mul_assign(&(&mut coeffs_iter.next().unwrap()));
            this_gate_value.add_assign(&tmp);
            // constant (final coefficient in the iterator)
            this_gate_value.add_assign(&(&mut coeffs_iter.next().unwrap()));
            // next step part — note the .rev(): next-row coefficients pair
            // with the next gate's state variables in reverse order
            // (NOTE(review): confirm this ordering against the prover)
            for (&next_var, next_step_coeffs_coeff) in next_gate.0.as_ref().iter().rev()
                .zip(this_gate.2.as_ref().iter())
            {
                let mut tmp = self.get_value(next_var).expect("must get a variable value");
                tmp.mul_assign(&next_step_coeffs_coeff);
                this_gate_value.add_assign(&tmp);
            }
            if !this_gate_value.is_zero() {
                println!("Unsatisfied at aux gate {}", i+1);
                println!("Gate {:?}", this_gate);
                // println!("A = {}, B = {}, C = {}", a_value, b_value, c_value);
                return false;
            }
        }
        // the last gate has no following row, so it is only checked when the
        // circuit is complete; its next-row coefficients must all be zero
        if !in_a_middle {
            let i = self.aux_gates.len();
            let last_gate = *self.aux_gates.last().unwrap();
            let this_gate = last_gate;
            let mut this_gate_value = E::Fr::zero();
            let mut coeffs_iter = this_gate.1.as_ref().iter();
            // addition
            for (&this_var, this_coeff) in this_gate.0.as_ref().iter()
                .zip(&mut coeffs_iter)
            {
                let mut tmp = self.get_value(this_var).expect("must get a variable value");
                tmp.mul_assign(&this_coeff);
                this_gate_value.add_assign(&tmp);
            }
            // multiplication
            let mut tmp = self.get_value(this_gate.0.as_ref()[0]).expect("must get a variable value");
            tmp.mul_assign(&self.get_value(this_gate.0.as_ref()[1]).expect("must get a variable value"));
            tmp.mul_assign(&(&mut coeffs_iter.next().unwrap()));
            this_gate_value.add_assign(&tmp);
            // constant
            this_gate_value.add_assign(&(&mut coeffs_iter.next().unwrap()));
            // next step part must be empty
            for c in this_gate.2.as_ref().iter() {
                assert!(c.is_zero(), "last gate must not wrap around");
            }
            if !this_gate_value.is_zero() {
                println!("Unsatisfied at last aux gate {}", i+1);
                println!("Gate {:?}", this_gate);
                // println!("A = {}, B = {}, C = {}", a_value, b_value, c_value);
                return false;
            }
        }
        true
    }
}
<file_sep>/src/plonk/better_better_cs/lookup_tables.rs
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{Engine};
use crate::bit_vec::BitVec;
use crate::{SynthesisError};
use std::marker::PhantomData;
use crate::worker::Worker;
use crate::plonk::domains::*;
use crate::plonk::polynomials::*;
pub use crate::plonk::cs::variable::*;
use crate::plonk::better_cs::utils::*;
use super::cs::*;
use super::data_structures::*;
/// Name under which the single-column range-check table is registered.
pub const RANGE_CHECK_SINGLE_APPLICATION_TABLE_NAME: &'static str = "Range check table for a single column";
/// Object-safe interface every lookup table must implement. The `Any` bound
/// allows hashing/equality over the concrete type id of trait objects.
pub trait LookupTableInternal<E: Engine>: Send
    + Sync
    + 'static
    + std::any::Any
    + std::fmt::Debug {
    /// Static human-readable name of the table.
    fn name(&self) -> &'static str;
    /// Number of rows in the table.
    fn table_size(&self) -> usize;
    /// Number of key columns.
    fn num_keys(&self) -> usize;
    /// Number of value columns.
    fn num_values(&self) -> usize;
    /// Whether this table may share witness columns with other tables.
    fn allows_combining(&self) -> bool;
    /// Checks that `(keys, values)` forms a row of this table.
    fn is_valid_entry(&self, keys: &[E::Fr], values: &[E::Fr]) -> bool;
    /// Looks up the value(s) for `keys`; errors if the keys are absent.
    fn query(&self, keys: &[E::Fr]) -> Result<Vec<E::Fr>, SynthesisError>;
    /// Materializes the table as one vector of entries per column.
    fn get_table_values_for_polys(&self) -> Vec<Vec<E::Fr>>;
    /// Field-element identifier derived from the table name.
    fn table_id(&self) -> E::Fr;
    /// Sorts `values` with respect to the given column.
    fn sort(&self, values: &[E::Fr], column: usize) -> Result<Vec<E::Fr>, SynthesisError>;
    /// Clones the table behind a trait object (object-safe `Clone`).
    fn box_clone(&self) -> Box<dyn LookupTableInternal<E>>;
    /// True when the column carries no meaningful data (e.g. zero padding).
    fn column_is_trivial(&self, column_num: usize) -> bool;
}
impl<E: Engine> std::hash::Hash for dyn LookupTableInternal<E> {
    // Hashes the concrete type id together with the same identifying
    // characteristics that the PartialEq impl compares, keeping the
    // Hash/Eq contract consistent.
    fn hash<H>(&self, state: &mut H) where H: std::hash::Hasher {
        self.type_id().hash(state);
        self.name().hash(state);
        self.table_size().hash(state);
        self.num_keys().hash(state);
        self.num_values().hash(state);
    }
}
impl<E: Engine> PartialEq for dyn LookupTableInternal<E> {
    /// Two trait objects are equal when they are the same concrete type and
    /// agree on every identifying characteristic.
    fn eq(&self, other: &Self) -> bool {
        if self.type_id() != other.type_id() { return false; }
        if self.name() != other.name() { return false; }
        if self.table_size() != other.table_size() { return false; }
        if self.num_keys() != other.num_keys() { return false; }
        self.num_values() == other.num_values()
    }
}
impl<E: Engine> Eq for dyn LookupTableInternal<E> {}
/// Applies a single lookup table to a specific set of columns
#[derive(serde::Serialize, serde::Deserialize)]
#[serde(bound(serialize = "dyn LookupTableInternal<E>: serde::Serialize", deserialize = "dyn LookupTableInternal<E>: serde::de::DeserializeOwned"))]
pub struct LookupTableApplication<E: Engine> {
    // static base name of this application
    name: &'static str,
    // polynomial columns the table is applied over
    apply_over: Vec<PolyIdentifier>,
    // the underlying table implementation
    table_to_apply: Box<dyn LookupTableInternal<E>>,
    // optional dynamic name override; skipped by serde (closures are not serializable)
    #[serde(skip)]
    name_generator: Option<Box<dyn (Fn() -> String) + 'static + Send + Sync>>,
    // whether this application may be combined with others over shared columns
    can_be_combined: bool
}
impl<E: Engine> std::fmt::Debug for LookupTableApplication<E> {
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // `name_generator` is intentionally omitted: closures are not Debug.
        let mut builder = formatter.debug_struct("LookupTableApplication");
        builder.field("name", &self.name);
        builder.field("apply_over", &self.apply_over);
        builder.field("table_to_apply", &self.table_to_apply);
        builder.field("can_be_combined", &self.can_be_combined);
        builder.finish()
    }
}
impl<E: Engine> PartialEq for LookupTableApplication<E> {
    /// Structural equality; `name_generator` is deliberately excluded.
    fn eq(&self, other: &Self) -> bool {
        if self.name != other.name { return false; }
        if self.apply_over != other.apply_over { return false; }
        if &self.table_to_apply != &other.table_to_apply { return false; }
        self.can_be_combined == other.can_be_combined
    }
}
impl<E: Engine> Eq for LookupTableApplication<E> {}
impl<E: Engine> LookupTableApplication<E> {
    /// Wraps an arbitrary table implementation as an application over columns.
    pub fn new<L: LookupTableInternal<E>>(
        name: &'static str,
        table: L,
        apply_over: Vec<PolyIdentifier>,
        name_generator: Option<Box<dyn (Fn() -> String) + 'static + Send + Sync>>,
        can_be_combined: bool
    ) -> Self {
        Self {
            name,
            apply_over,
            table_to_apply: Box::from(table) as Box<dyn LookupTableInternal<E>>,
            name_generator,
            can_be_combined
        }
    }
    /// Single-column range check padded to width 3.
    pub fn new_range_table_of_width_3(width: usize, over: Vec<PolyIdentifier>) -> Result<Self, SynthesisError> {
        let table = RangeCheckTableOverOneColumnOfWidth3::new(width);
        let name = RANGE_CHECK_SINGLE_APPLICATION_TABLE_NAME;
        Ok(Self {
            name: name,
            apply_over: over,
            table_to_apply: table.box_clone(),
            name_generator: None,
            can_be_combined: true
        })
    }
    /// XOR table over two `bit_width`-bit keys.
    pub fn new_xor_table(bit_width: usize, over: Vec<PolyIdentifier>) -> Result<Self, SynthesisError> {
        Self::new_binop_table::<XorBinop>(bit_width, over, "XOR table")
    }
    /// AND table over two `bit_width`-bit keys.
    pub fn new_and_table(bit_width: usize, over: Vec<PolyIdentifier>) -> Result<Self, SynthesisError> {
        Self::new_binop_table::<AndBinop>(bit_width, over, "AND table")
    }
    /// OR table over two `bit_width`-bit keys.
    pub fn new_or_table(bit_width: usize, over: Vec<PolyIdentifier>) -> Result<Self, SynthesisError> {
        Self::new_binop_table::<OrBinop>(bit_width, over, "OR table")
    }
    /// Generic constructor for any binary bit operation table.
    pub fn new_binop_table<B: Binop>(bit_width: usize, over: Vec<PolyIdentifier>, name: &'static str) -> Result<Self, SynthesisError> {
        let table = TwoKeysOneValueBinopTable::<E, B>::new(bit_width, name);
        Ok(Self {
            name: name,
            apply_over: over,
            table_to_apply: table.box_clone(),
            name_generator: None,
            can_be_combined: true
        })
    }
    /// Effective name: the generator's output when present, else the static name.
    pub fn functional_name(&self) -> String {
        if let Some(gen) = self.name_generator.as_ref() {
            gen()
        } else {
            self.name.to_string()
            // format!("{} over {:?}", self.table_to_apply.name(), self.apply_over)
        }
    }
    /// Columns this table is applied over.
    pub fn applies_over(&self) -> &[PolyIdentifier] {
        &self.apply_over
    }
    /// Both the application and the underlying table must allow combining.
    pub fn can_be_combined(&self) -> bool {
        self.can_be_combined && self.table_to_apply.allows_combining()
    }
    /// Splits `values` into keys and values per the table layout and validates.
    /// Panics if the slice length does not match the table width.
    #[track_caller]
    pub fn is_valid_entry(&self, values: &[E::Fr]) -> bool {
        let num_keys = self.table_to_apply.num_keys();
        let num_values = self.table_to_apply.num_values();
        assert_eq!(num_keys + num_values, values.len());
        let (keys, values) = values.split_at(num_keys);
        self.table_to_apply.is_valid_entry(keys, values)
    }
    pub fn table_id(&self) -> E::Fr {
        self.table_to_apply.table_id()
    }
    /// Number of rows.
    pub fn size(&self) -> usize {
        self.table_to_apply.table_size()
    }
    /// Total number of columns (keys + values).
    pub fn width(&self) -> usize {
        self.table_to_apply.num_keys() + self.table_to_apply.num_values()
    }
    pub fn get_table_values_for_polys(&self) -> Vec<Vec<E::Fr>> {
        self.table_to_apply.get_table_values_for_polys()
    }
    pub fn query(&self, keys: &[E::Fr]) -> Result<Vec<E::Fr>, SynthesisError> {
        self.table_to_apply.query(keys)
    }
    /// Borrows the underlying table as a trait object.
    pub fn as_internal(&self) -> &dyn LookupTableInternal<E> {
        self.table_to_apply.as_ref()
    }
}
/// Apply multiple tables at the same time to corresponding columns
#[derive(Debug)]
pub struct MultiTableApplication<E: Engine> {
    // static base name
    name: &'static str,
    // columns the sub-tables are applied over, in order
    apply_over: Vec<PolyIdentifier>,
    // one sub-table per column group
    tables_to_apply: Vec<Box<dyn LookupTableInternal<E>>>,
    // number of rows
    table_size: usize,
    // identifier derived from `name` in the constructor
    id: E::Fr
}
impl<E: Engine> PartialEq for MultiTableApplication<E> {
    /// Structural equality. `id` is derived from `name` in the constructor,
    /// so it is not compared separately.
    fn eq(&self, other: &Self) -> bool {
        if self.name != other.name { return false; }
        if self.apply_over != other.apply_over { return false; }
        if &self.tables_to_apply != &other.tables_to_apply { return false; }
        self.table_size == other.table_size
    }
}
impl<E: Engine> Eq for MultiTableApplication<E> {}
impl<E: Engine> MultiTableApplication<E> {
    /// Display name including the table size.
    pub fn name(&self) -> String {
        format!("Table {} of size {}", self.name, self.table_size)
    }
    /// Base name without the size suffix.
    pub fn functional_name(&self) -> String {
        self.name.to_string()
    }
    /// The same `width`-bit range table applied independently to each of
    /// three columns.
    pub fn new_range_table_of_width_3(width: usize, over: Vec<PolyIdentifier>) -> Result<Self, SynthesisError> {
        let name = "Range check table";
        let table = RangeCheckTableOverSingleColumn::new(width);
        Ok(Self {
            name,
            apply_over: over,
            tables_to_apply: vec![table.box_clone(), table.box_clone(), table.box_clone()],
            table_size: 1 << width,
            id: table_id_from_string::<E::Fr>(name)
        })
    }
    pub fn applies_over(&self) -> &[PolyIdentifier] {
        &self.apply_over
    }
    /// Splits `values` across the sub-tables and checks every slice.
    /// All sub-tables are evaluated (no short-circuit), as before.
    pub fn is_valid_entry(&self, values: &[E::Fr]) -> bool {
        assert_eq!(values.len(), 3);
        let mut remaining = values;
        let mut all_valid = true;
        for table in self.tables_to_apply.iter() {
            let (keys, tail) = remaining.split_at(table.num_keys());
            let (vals, tail) = tail.split_at(table.num_values());
            all_valid = table.is_valid_entry(keys, vals) & all_valid;
            remaining = tail;
        }
        all_valid
    }
    pub fn size(&self) -> usize {
        self.table_size
    }
    pub fn table_id(&self) -> E::Fr {
        self.id
    }
    /// Total number of columns across all sub-tables.
    pub fn width(&self) -> usize {
        self.tables_to_apply
            .iter()
            .map(|table| table.num_keys() + table.num_values())
            .sum()
    }
}
/// Lookup table asserting that a single column value lies in `[0, 2^bits)`.
#[derive(Clone)]
pub struct RangeCheckTableOverSingleColumn<E: Engine> {
    // all field elements 0..2^bits, in order
    table_entries: Vec<E::Fr>,
    // reverse map: field element -> row index
    entries_map: std::collections::HashMap<E::Fr, usize>,
    bits: usize
}
impl<E: Engine> RangeCheckTableOverSingleColumn<E> {
    /// Builds the table containing every value in `[0, 2^bits)`, together
    /// with a reverse map from field element to row index.
    pub fn new(bits: usize) -> Self {
        let size = 1usize << bits;
        let mut table_entries = Vec::with_capacity(size);
        let mut entries_map = std::collections::HashMap::with_capacity(size);
        for index in 0..size {
            let element = E::Fr::from_str(&index.to_string()).unwrap();
            table_entries.push(element);
            entries_map.insert(element, index);
        }
        Self {
            table_entries,
            entries_map,
            bits
        }
    }
}
impl<E: Engine> std::fmt::Debug for RangeCheckTableOverSingleColumn<E> {
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Entries are omitted: only the bit width is informative.
        let mut builder = formatter.debug_struct("RangeCheckTableOverSingleColumn");
        builder.field("bits", &self.bits);
        builder.finish()
    }
}
impl<E: Engine> LookupTableInternal<E> for RangeCheckTableOverSingleColumn<E> {
    fn name(&self) -> &'static str {
        RANGE_CHECK_SINGLE_APPLICATION_TABLE_NAME
    }
    fn table_size(&self) -> usize {
        debug_assert_eq!(1usize << self.bits, self.table_entries.len());
        1usize << self.bits
    }
    fn num_keys(&self) -> usize {
        1
    }
    fn num_values(&self) -> usize {
        0
    }
    fn allows_combining(&self) -> bool {
        false
    }
    /// The single key is valid iff it appears among the table entries.
    fn is_valid_entry(&self, keys: &[E::Fr], values: &[E::Fr]) -> bool {
        assert!(keys.len() == 1);
        assert!(values.len() == 0);
        self.table_entries.iter().any(|entry| entry == &keys[0])
    }
    /// No value columns: a successful query returns an empty vector.
    fn query(&self, keys: &[E::Fr]) -> Result<Vec<E::Fr>, SynthesisError> {
        assert!(keys.len() == 1);
        match self.entries_map.get(&keys[0]) {
            Some(_) => Ok(vec![]),
            None => Err(SynthesisError::Unsatisfiable),
        }
    }
    fn get_table_values_for_polys(&self) -> Vec<Vec<E::Fr>> {
        vec![self.table_entries.clone()]
    }
    fn table_id(&self) -> E::Fr {
        table_id_from_string(self.name())
    }
    fn sort(&self, _values: &[E::Fr], _column: usize) -> Result<Vec<E::Fr>, SynthesisError> {
        unimplemented!()
    }
    fn box_clone(&self) -> Box<dyn LookupTableInternal<E>> {
        Box::from(self.clone())
    }
    /// The only column carries the range-checked data, so it is never trivial.
    fn column_is_trivial(&self, column_num: usize) -> bool {
        assert!(column_num < 1);
        false
    }
}
/// Width-3 range check: the first column must be in `[0, 2^bits)`, the other
/// two columns are zero padding.
#[derive(Clone)]
pub struct RangeCheckTableOverOneColumnOfWidth3<E: Engine> {
    // all field elements 0..2^bits, in order
    table_entries: Vec<E::Fr>,
    // all-zero padding column, same length as `table_entries`
    dummy_entries: Vec<E::Fr>,
    bits: usize
}
impl<E: Engine> RangeCheckTableOverOneColumnOfWidth3<E> {
    /// Builds the range column `[0, 2^bits)` plus a zero padding column.
    pub fn new(bits: usize) -> Self {
        let size = 1usize << bits;
        let table_entries: Vec<E::Fr> = (0..size)
            .map(|index| E::Fr::from_str(&index.to_string()).unwrap())
            .collect();
        Self {
            table_entries,
            dummy_entries: vec![E::Fr::zero(); size],
            bits
        }
    }
}
impl<E: Engine> std::fmt::Debug for RangeCheckTableOverOneColumnOfWidth3<E> {
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Entries are omitted: only the bit width is informative.
        let mut builder = formatter.debug_struct("RangeCheckTableOverOneColumnOfWidth3");
        builder.field("bits", &self.bits);
        builder.finish()
    }
}
impl<E: Engine> LookupTableInternal<E> for RangeCheckTableOverOneColumnOfWidth3<E> {
    fn name(&self) -> &'static str {
        "Range check table for a single column only with width 3"
    }
    fn table_size(&self) -> usize {
        debug_assert_eq!(1usize << self.bits, self.table_entries.len());
        1usize << self.bits
    }
    fn num_keys(&self) -> usize {
        3
    }
    fn num_values(&self) -> usize {
        0
    }
    fn allows_combining(&self) -> bool {
        true
    }
    /// Valid iff the first key fits into `self.bits` bits and the two
    /// padding keys are zero.
    fn is_valid_entry(&self, keys: &[E::Fr], values: &[E::Fr]) -> bool {
        assert!(keys.len() == 3);
        assert!(values.len() == 0);
        // Fix: checking only the lowest 64-bit limb is insufficient — an
        // element such as 2^64 + 1 has a small low limb but is out of range.
        // All higher limbs of the representation must also be zero.
        let repr = keys[0].into_repr();
        let limbs = repr.as_ref();
        let mut valid = limbs[0] < (1u64 << self.bits);
        for &limb in limbs.iter().skip(1) {
            valid = valid & (limb == 0);
        }
        valid = valid & keys[1].is_zero();
        valid = valid & keys[2].is_zero();
        valid
    }
    /// No value columns: a successful query returns an empty vector.
    fn query(&self, keys: &[E::Fr]) -> Result<Vec<E::Fr>, SynthesisError> {
        assert!(keys.len() == 3);
        let is_valid = self.is_valid_entry(keys, &[]);
        if is_valid {
            return Ok(vec![]);
        } else {
            return Err(SynthesisError::Unsatisfiable);
        }
    }
    fn get_table_values_for_polys(&self) -> Vec<Vec<E::Fr>> {
        vec![self.table_entries.clone(), self.dummy_entries.clone(), self.dummy_entries.clone()]
    }
    fn table_id(&self) -> E::Fr {
        table_id_from_string(self.name())
    }
    // Underscore-prefixed parameter for consistency with the other table
    // implementations (silences the unused-variable warning).
    fn sort(&self, _values: &[E::Fr], _column: usize) -> Result<Vec<E::Fr>, SynthesisError> {
        unimplemented!()
    }
    fn box_clone(&self) -> Box<dyn LookupTableInternal<E>> {
        Box::from(self.clone())
    }
    /// Only the first column carries data; the padding columns are trivial.
    fn column_is_trivial(&self, column_num: usize) -> bool {
        assert!(column_num < 3);
        column_num != 0
    }
}
/// A binary operation over small unsigned integers, used to generate
/// two-keys-one-value lookup tables.
pub trait Binop: 'static + Clone + Copy + Send + Sync + std::fmt::Debug + PartialEq + Eq {
    /// Human-readable name of the operation.
    const NAME: &'static str;
    /// Applies the operation to two table keys.
    fn apply(x: usize, y: usize) -> usize;
}
/// Bitwise XOR operation.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct XorBinop;
impl Binop for XorBinop {
    const NAME: &'static str = "XOR binop";
    fn apply(x: usize, y: usize) -> usize {
        x ^ y
    }
}
/// Bitwise AND operation.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct AndBinop;
impl Binop for AndBinop {
    const NAME: &'static str = "AND binop";
    fn apply(x: usize, y: usize) -> usize {
        x & y
    }
}
/// Bitwise OR operation.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct OrBinop;
impl Binop for OrBinop {
    const NAME: &'static str = "OR binop";
    fn apply(x: usize, y: usize) -> usize {
        x | y
    }
}
/// Lookup table for a binary operation `B`: columns are (x, y, B(x, y)) for
/// all pairs of `bits`-bit inputs.
#[derive(Clone)]
pub struct TwoKeysOneValueBinopTable<E: Engine, B: Binop> {
    // three columns: key_0, key_1, value
    table_entries: [Vec<E::Fr>; 3],
    // fast lookup: (key_0, key_1) -> value
    table_lookup_map: std::collections::HashMap<(E::Fr, E::Fr), E::Fr>,
    bits: usize,
    name: &'static str,
    _binop_marker: std::marker::PhantomData<B>
}
impl<E: Engine, B: Binop> TwoKeysOneValueBinopTable<E, B> {
    /// Enumerates all `2^(2*bits)` input pairs, recording the three columns
    /// and a (key, key) -> value lookup map.
    pub fn new(bits: usize, name: &'static str) -> Self {
        let size = 1usize << bits;
        let mut key_0 = Vec::with_capacity(size);
        let mut key_1 = Vec::with_capacity(size);
        let mut value_0 = Vec::with_capacity(size);
        let mut table_lookup_map = std::collections::HashMap::with_capacity(size * size);
        for left in 0..size {
            for right in 0..size {
                let result = B::apply(left, right);
                let left_fe = E::Fr::from_str(&left.to_string()).unwrap();
                let right_fe = E::Fr::from_str(&right.to_string()).unwrap();
                let result_fe = E::Fr::from_str(&result.to_string()).unwrap();
                key_0.push(left_fe);
                key_1.push(right_fe);
                value_0.push(result_fe);
                table_lookup_map.insert((left_fe, right_fe), result_fe);
            }
        }
        Self {
            table_entries: [key_0, key_1, value_0],
            table_lookup_map,
            bits,
            name,
            _binop_marker: std::marker::PhantomData
        }
    }
}
impl<E: Engine, B: Binop> std::fmt::Debug for TwoKeysOneValueBinopTable<E, B> {
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Entries are omitted: bit width and operation name suffice.
        let mut builder = formatter.debug_struct("TwoKeysOneValueBinopTable");
        builder.field("bits", &self.bits);
        builder.field("binop", &B::NAME);
        builder.finish()
    }
}
impl<E: Engine, B: Binop> LookupTableInternal<E> for TwoKeysOneValueBinopTable<E, B> {
    fn name(&self) -> &'static str {
        self.name
    }
    fn table_size(&self) -> usize {
        debug_assert_eq!(1usize << (self.bits*2), self.table_entries[0].len());
        1usize << (self.bits*2)
    }
    fn num_keys(&self) -> usize {
        2
    }
    fn num_values(&self) -> usize {
        1
    }
    fn allows_combining(&self) -> bool {
        true
    }
    /// Valid iff `(keys[0], keys[1])` is present and maps to `values[0]`.
    fn is_valid_entry(&self, keys: &[E::Fr], values: &[E::Fr]) -> bool {
        assert!(keys.len() == 2);
        assert!(values.len() == 1);
        if let Some(entry) = self.table_lookup_map.get(&(keys[0], keys[1])) {
            return entry == &values[0];
        }
        false
    }
    /// Returns the single value for the key pair, or an error when the keys
    /// are out of the table's range.
    fn query(&self, keys: &[E::Fr]) -> Result<Vec<E::Fr>, SynthesisError> {
        assert!(keys.len() == 2);
        if let Some(entry) = self.table_lookup_map.get(&(keys[0], keys[1])) {
            return Ok(vec![*entry])
        }
        Err(SynthesisError::Unsatisfiable)
    }
    fn get_table_values_for_polys(&self) -> Vec<Vec<E::Fr>> {
        vec![self.table_entries[0].clone(), self.table_entries[1].clone(), self.table_entries[2].clone()]
    }
    fn table_id(&self) -> E::Fr {
        table_id_from_string(self.name())
    }
    // Fix: underscore-prefix the unused `values` parameter, consistent with
    // RangeCheckTableOverSingleColumn::sort (silences the warning).
    fn sort(&self, _values: &[E::Fr], _column: usize) -> Result<Vec<E::Fr>, SynthesisError> {
        unimplemented!()
    }
    fn box_clone(&self) -> Box<dyn LookupTableInternal<E>> {
        Box::from(self.clone())
    }
    /// All three columns carry data.
    fn column_is_trivial(&self, column_num: usize) -> bool {
        assert!(column_num < 3);
        false
    }
}
/// Derives a field-element identifier from a table name: keccak256 of the
/// name with the 4 most significant (big-endian) bytes zeroed so the result
/// always fits into the field.
pub fn table_id_from_string<F: PrimeField>(
    s: &str
) -> F {
    use crate::pairing::ff::PrimeFieldRepr;
    let mut hash = tiny_keccak::keccak256(s.as_bytes());
    for byte in hash.iter_mut().take(4) {
        *byte = 0u8;
    }
    let mut repr = F::Repr::default();
    repr.read_be(&hash[..]).unwrap();
    F::from_repr(repr).unwrap()
}
/// One lookup row as a fixed triple of field elements
/// (layout per the width-3 tables above: keys first, then values).
#[derive(Clone)]
pub struct KeyValueSet<E: Engine> {
    pub inner: [E::Fr; 3]
}
impl<E: Engine> Copy for KeyValueSet<E>{}
impl<E: Engine> KeyValueSet<E> {
    /// Wraps an explicit triple.
    pub fn new(set: [E::Fr; 3]) -> Self {
        Self { inner: set }
    }
    /// Copies a 3-element slice into a new set.
    /// Debug-asserts the slice length; `copy_from_slice` still panics on
    /// mismatch in release builds.
    pub fn from_slice(input: &[E::Fr]) -> Self {
        debug_assert_eq!(input.len(), 3);
        let mut inner = [E::Fr::zero(); 3];
        inner.copy_from_slice(input);
        Self { inner }
    }
}
impl<E: Engine> std::hash::Hash for KeyValueSet<E> {
    // Delegates to the hash of the inner triple.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.inner.hash(state);
    }
}
impl<E: Engine> std::cmp::PartialEq for KeyValueSet<E> {
    // Element-wise equality of the inner triple.
    fn eq(&self, other: &Self) -> bool {
        self.inner == other.inner
    }
}
impl<E: Engine> std::cmp::Eq for KeyValueSet<E> {}
// Lexicographic ordering over the integer representations of
// (key0, key1, value), in that priority.
impl<E: Engine> std::cmp::Ord for KeyValueSet<E> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.inner[0]
            .into_repr()
            .cmp(&other.inner[0].into_repr())
            .then_with(|| self.inner[1].into_repr().cmp(&other.inner[1].into_repr()))
            .then_with(|| self.inner[2].into_repr().cmp(&other.inner[2].into_repr()))
    }
}
// Delegates to the total order above.
impl<E: Engine> std::cmp::PartialOrd for KeyValueSet<E> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
// Manual `Debug` because `derive` would require `E: Debug`.
impl<E: Engine> std::fmt::Debug for KeyValueSet<E> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("KeyValueSet")
            .field("inner", &self.inner)
            .finish()
    }
}
/// Prover-side scratch state for the lookup argument: the challenge `eta`
/// used to fold table columns, plus the f/t/s polynomials in value form
/// (unpadded) and their monomial forms once computed.
pub(crate) struct LookupDataHolder<E: Engine> {
    // challenge used to linearly combine the table columns
    pub(crate) eta: E::Fr,
    pub(crate) f_poly_unpadded_values: Option<Polynomial<E::Fr, Values>>,
    pub(crate) t_poly_unpadded_values: Option<Polynomial<E::Fr, Values>>,
    pub(crate) t_shifted_unpadded_values: Option<Polynomial<E::Fr, Values>>,
    pub(crate) s_poly_unpadded_values: Option<Polynomial<E::Fr, Values>>,
    pub(crate) s_shifted_unpadded_values: Option<Polynomial<E::Fr, Values>>,
    // pub(crate) f_poly_monomial: Option<Polynomial<E::Fr, Coefficients>>,
    pub(crate) t_poly_monomial: Option<Polynomial<E::Fr, Coefficients>>,
    pub(crate) s_poly_monomial: Option<Polynomial<E::Fr, Coefficients>>,
    pub(crate) selector_poly_monomial: Option<Polynomial<E::Fr, Coefficients>>,
    pub(crate) table_type_poly_monomial: Option<Polynomial<E::Fr, Coefficients>>,
}
/// Evaluations of the lookup-argument polynomials at the proof opening
/// points `z` and `z*omega`, as carried in a proof.
#[derive(Clone, Copy, Debug)]
pub(crate) struct LookupQuery<E: Engine> {
    pub(crate) s_at_z_omega: E::Fr,
    pub(crate) grand_product_at_z_omega: E::Fr,
    pub(crate) t_at_z: E::Fr,
    pub(crate) t_at_z_omega: E::Fr,
    pub(crate) selector_at_z: E::Fr,
    pub(crate) table_type_at_z: E::Fr,
}<file_sep>/src/sonic/paper.rs
/// Reproduces the timing experiments from the Sonic paper: SRS generation,
/// idealized Groth16 verification (single and batched), then Sonic proof
/// creation / advice / aggregation and the three verification modes.
/// The two pairing checks intentionally assert INEQUALITY because all points
/// are placeholder generators, not a real proof.
#[test]
fn test_paper_results() {
    use crate::pairing::bls12_381::{Bls12, Fr};
    use std::time::{Instant};

    let srs_x = Fr::from_str("23923").unwrap();
    let srs_alpha = Fr::from_str("23728792").unwrap();
    println!("making srs");
    let start = Instant::now();
    let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
    println!("done in {:?}", start.elapsed());

    // Pedersen-hash preimage circuit benchmarked below.
    struct PedersenHashPreimageCircuit<'a, E: sapling_crypto::jubjub::JubjubEngine + 'a> {
        preimage: Vec<Option<bool>>,
        params: &'a E::Params,
    }

    impl<'a, E: sapling_crypto::jubjub::JubjubEngine + 'a> Clone for PedersenHashPreimageCircuit<'a, E> {
        fn clone(&self) -> Self {
            PedersenHashPreimageCircuit {
                preimage: self.preimage.clone(),
                params: self.params
            }
        }
    }

    impl<'a, E: sapling_crypto::jubjub::JubjubEngine> bellman::Circuit<E> for PedersenHashPreimageCircuit<'a, E> {
        fn synthesize<CS: bellman::ConstraintSystem<E>>(
            self,
            cs: &mut CS
        ) -> Result<(), bellman::SynthesisError>
        {
            use sapling_crypto::circuit::boolean::{AllocatedBit, Boolean};
            use sapling_crypto::circuit::pedersen_hash;

            let mut preimage = vec![];
            for &bit in self.preimage.iter() {
                preimage.push(Boolean::from(AllocatedBit::alloc(&mut* cs, bit)?));
            }
            pedersen_hash::pedersen_hash(
                &mut* cs, pedersen_hash::Personalization::NoteCommitment, &preimage, self.params)?;

            Ok(())
        }
    }

    // SHA256 preimage circuit kept for parity with the paper (not benchmarked here).
    #[derive(Clone)]
    struct SHA256PreimageCircuit {
        preimage: Vec<Option<bool>>,
    }

    impl<E: Engine> bellman::Circuit<E> for SHA256PreimageCircuit {
        fn synthesize<CS: bellman::ConstraintSystem<E>>(
            self,
            cs: &mut CS,
        ) -> Result<(), bellman::SynthesisError> {
            use sapling_crypto::circuit::boolean::{AllocatedBit, Boolean};
            use sapling_crypto::circuit::sha256::sha256_block_no_padding;

            let mut preimage = vec![];
            for &bit in self.preimage.iter() {
                preimage.push(Boolean::from(AllocatedBit::alloc(&mut *cs, bit)?));
            }
            sha256_block_no_padding(&mut *cs, &preimage)?;
            sha256_block_no_padding(&mut *cs, &preimage)?;
            sha256_block_no_padding(&mut *cs, &preimage)?;

            Ok(())
        }
    }

    {
        // Time one "idealized" Groth16 verification:
        // a 3-term Miller loop plus a final exponentiation.
        use crate::pairing::{CurveAffine};
        use crate::pairing::bls12_381::{G1Affine, G2Affine};

        let a = G1Affine::one();
        let b = G2Affine::one();
        let c = G1Affine::one();

        let alpha = G1Affine::one();
        let beta = G2Affine::one();
        let iv = G1Affine::one();
        let gamma = G2Affine::one().prepare();
        let delta = G2Affine::one().prepare();

        let alphabeta = <Bls12 as Engine>::pairing(alpha, beta);

        println!("verifying an idealized groth16 proof");
        let start = Instant::now();
        assert!(<Bls12 as Engine>::final_exponentiation(
            &<Bls12 as Engine>::miller_loop([
                (&a.prepare(), &b.prepare()),
                (&iv.prepare(), &gamma),
                (&c.prepare(), &delta),
            ].into_iter())
        ).unwrap() != alphabeta);
        println!("done in {:?}", start.elapsed());
    }

    {
        // Time a batched verification of 100 idealized Groth16 proofs:
        // one multiexp folding the C terms plus a 102-term Miller loop.
        use sonic::util::multiexp;
        use crate::pairing::{CurveAffine};
        use crate::pairing::bls12_381::{G1Affine, G2Affine};

        // e([\alpha G], [\beta H]) = e(A, B) e(IV, [\gamma] H) e(C, [\delta] H)
        let a = G1Affine::one();
        let b = G2Affine::one();
        let c = vec![G1Affine::one(); 100];
        let mut tmp = Fr::one();
        tmp.double();
        tmp = tmp.inverse().unwrap();
        let cscalars = (0..100).map(|_| {tmp.square(); tmp}).collect::<Vec<_>>();

        let alpha = G1Affine::one();
        let beta = G2Affine::one();
        let iv = G1Affine::one();
        let gamma = G2Affine::one().prepare();
        let delta = G2Affine::one().prepare();

        let alphabeta = <Bls12 as Engine>::pairing(alpha, beta);

        println!("verifying 100 idealized groth16 proofs");
        let start = Instant::now();
        let c = multiexp(
            c.iter(),
            cscalars.iter(),
        ).into_affine();

        // Build the 100 identical (A, B) terms programmatically instead of the
        // former 100 copy-pasted tuple lines; the Miller loop sees exactly the
        // same 102 terms in the same order.
        let a_prepared = a.prepare();
        let b_prepared = b.prepare();
        let iv_prepared = iv.prepare();
        let c_prepared = c.prepare();
        let mut terms: Vec<_> = vec![(&a_prepared, &b_prepared); 100];
        terms.push((&iv_prepared, &gamma));
        terms.push((&c_prepared, &delta));

        assert!(<Bls12 as Engine>::final_exponentiation(
            &<Bls12 as Engine>::miller_loop(terms.iter())
        ).unwrap() != alphabeta);
        println!("done in {:?}", start.elapsed());
    }

    {
        let samples: usize = 100;

        const NUM_BITS: usize = 384;

        let params = sapling_crypto::jubjub::JubjubBls12::new();
        let circuit = PedersenHashPreimageCircuit {
            preimage: vec![Some(true); NUM_BITS],
            params: &params
        };

        println!("creating proof");
        let start = Instant::now();
        let proof = create_proof::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &srs).unwrap();
        println!("done in {:?}", start.elapsed());

        println!("creating advice");
        let start = Instant::now();
        let advice = create_advice::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &proof, &srs);
        println!("done in {:?}", start.elapsed());

        println!("creating aggregate for {} proofs", samples);
        let start = Instant::now();
        let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect();
        let aggregate = create_aggregate::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &proofs, &srs);
        println!("done in {:?}", start.elapsed());

        {
            let mut verifier = MultiVerifier::<Bls12, _, Basic>::new(AdaptorCircuit(circuit.clone()), &srs).unwrap();
            println!("verifying 1 proof without advice");
            let start = Instant::now();
            {
                for _ in 0..1 {
                    verifier.add_proof(&proof, &[], |_, _| None);
                }
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }

        {
            let mut verifier = MultiVerifier::<Bls12, _, Basic>::new(AdaptorCircuit(circuit.clone()), &srs).unwrap();
            println!("verifying {} proofs without advice", samples);
            let start = Instant::now();
            {
                for _ in 0..samples {
                    verifier.add_proof(&proof, &[], |_, _| None);
                }
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }

        {
            let mut verifier = MultiVerifier::<Bls12, _, Basic>::new(AdaptorCircuit(circuit.clone()), &srs).unwrap();
            println!("verifying 100 proofs with advice");
            let start = Instant::now();
            {
                for (ref proof, ref advice) in &proofs {
                    verifier.add_proof_with_advice(proof, &[], advice);
                }
                verifier.add_aggregate(&proofs, &aggregate);
                assert_eq!(verifier.check_all(), true); // TODO
            }
            println!("done in {:?}", start.elapsed());
        }
    }
}
<file_sep>/src/sonic/srs/srs.rs
use crate::pairing::ff::{Field, PrimeField};
use crate::pairing::{CurveAffine, CurveProjective, Engine, Wnaf};
use std::io::{self, Read, Write};
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
/// Sonic structured reference string: powers (positive and negative) of a
/// secret `x` in both groups, with and without the `alpha` shift.
#[derive(Clone, Eq)]
pub struct SRS<E: Engine> {
    pub d: usize,

    // g^{x^0}, g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}}
    pub g_negative_x: Vec<E::G1Affine>,

    // g^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}}
    pub g_positive_x: Vec<E::G1Affine>,

    // h^{x^0}, h^{x^{-1}}, h^{x^{-2}}, ..., h^{x^{-d}}
    pub h_negative_x: Vec<E::G2Affine>,

    // h^{x^0}, h^{x^{1}}, h^{x^{2}}, ..., h^{x^{d}}
    pub h_positive_x: Vec<E::G2Affine>,

    // alpha*(g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}})
    // NOTE: no x^0 term, hence length d rather than d + 1.
    pub g_negative_x_alpha: Vec<E::G1Affine>,

    // alpha*(g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}})
    pub g_positive_x_alpha: Vec<E::G1Affine>,

    // alpha*(h^{x^0}, h^{x^{-1}}, h^{x^{-2}}, ..., h^{x^{-d}})
    pub h_negative_x_alpha: Vec<E::G2Affine>,

    // alpha*(h^{x^0}, h^{x^{1}}, h^{x^{2}}, ..., h^{x^{d}})
    pub h_positive_x_alpha: Vec<E::G2Affine>,
}
// Structural equality: every component of the reference string must match.
impl<E: Engine> PartialEq for SRS<E> {
    fn eq(&self, other: &SRS<E>) -> bool {
        if self.d != other.d {
            return false;
        }

        self.g_negative_x == other.g_negative_x
            && self.g_positive_x == other.g_positive_x
            && self.h_negative_x == other.h_negative_x
            && self.h_positive_x == other.h_positive_x
            && self.g_negative_x_alpha == other.g_negative_x_alpha
            && self.g_positive_x_alpha == other.g_positive_x_alpha
            && self.h_negative_x_alpha == other.h_negative_x_alpha
            && self.h_positive_x_alpha == other.h_positive_x_alpha
    }
}
impl<E: Engine> SRS<E> {
    /// Builds an SRS of the correct shape in which every element is the group
    /// generator. The secrets are ignored — useful only for sizing/benchmarks;
    /// proofs made against it are meaningless.
    pub fn dummy(d: usize, _: E::Fr, _: E::Fr) -> Self {
        SRS {
            d: d,
            g_negative_x: vec![E::G1Affine::one(); d + 1],
            g_positive_x: vec![E::G1Affine::one(); d + 1],

            h_negative_x: vec![E::G2Affine::one(); d + 1],
            h_positive_x: vec![E::G2Affine::one(); d + 1],

            // alpha vectors in G1 have no x^0 term, hence length d
            g_negative_x_alpha: vec![E::G1Affine::one(); d],
            g_positive_x_alpha: vec![E::G1Affine::one(); d],

            h_negative_x_alpha: vec![E::G2Affine::one(); d + 1],
            h_positive_x_alpha: vec![E::G2Affine::one(); d + 1],
        }
    }

    /// Generates the real SRS from the toxic waste `x` and `alpha`.
    /// NOTE(review): keeps the secrets in local scope only; intended for
    /// tests rather than a multi-party setup ceremony.
    pub fn new(d: usize, x: E::Fr, alpha: E::Fr) -> Self {
        // wNAF tables sized for roughly 4d scalar multiplications per group
        let mut g1 = Wnaf::new();
        let mut g1 = g1.base(E::G1::one(), d * 4);
        let mut g2 = Wnaf::new();
        let mut g2 = g2.base(E::G2::one(), d * 4);

        // Computes `num` affine points base^{cur}, base^{cur*step}, ... —
        // i.e. a geometric progression of exponents with ratio `step`.
        fn table<C: CurveAffine>(
            mut cur: C::Scalar,
            step: C::Scalar,
            num: usize,
            table: &mut Wnaf<usize, &[C::Projective], &mut Vec<i64>>,
        ) -> Vec<C> {
            let mut v = vec![];
            for _ in 0..num {
                v.push(table.scalar(cur.into_repr()));
                cur.mul_assign(&step);
            }
            // one batched inversion instead of one inversion per point
            C::Projective::batch_normalization(&mut v);
            let v = v.into_iter().map(|e| e.into_affine()).collect();
            v
        }

        let x_inv = x.inverse().unwrap();

        let mut x_alpha = x;
        x_alpha.mul_assign(&alpha);

        let mut inv_x_alpha = x_inv;
        inv_x_alpha.mul_assign(&alpha);

        SRS {
            d: d,
            g_negative_x: table(E::Fr::one(), x_inv, d + 1, &mut g1),
            g_positive_x: table(E::Fr::one(), x, d + 1, &mut g1),

            h_negative_x: table(E::Fr::one(), x_inv, d + 1, &mut g2),
            h_positive_x: table(E::Fr::one(), x, d + 1, &mut g2),

            // start at alpha*x^{+-1}: the alpha G1 vectors skip the x^0 term
            g_negative_x_alpha: table(inv_x_alpha, x_inv, d, &mut g1),
            g_positive_x_alpha: table(x_alpha, x, d, &mut g1),

            h_negative_x_alpha: table(alpha, x_inv, d + 1, &mut g2),
            h_positive_x_alpha: table(alpha, x, d + 1, &mut g2),
        }
    }
}
impl<E: Engine> SRS<E> {
    /// Serializes the SRS: `d` as a big-endian u32 followed by the eight
    /// point vectors in uncompressed encoding, in a fixed order that is
    /// mirrored exactly by `read`.
    pub fn write<W: Write>(
        &self,
        mut writer: W
    ) -> io::Result<()>
    {
        // sanity-check the invariant vector lengths before emitting any bytes
        assert_eq!(self.d + 1, self.g_negative_x.len());
        assert_eq!(self.d + 1, self.g_positive_x.len());
        assert_eq!(self.d + 1, self.h_negative_x.len());
        assert_eq!(self.d + 1, self.h_positive_x.len());
        assert_eq!(self.d, self.g_negative_x_alpha.len());
        assert_eq!(self.d, self.g_positive_x_alpha.len());
        assert_eq!(self.d + 1, self.h_negative_x_alpha.len());
        assert_eq!(self.d + 1, self.h_positive_x_alpha.len());

        writer.write_u32::<BigEndian>(self.d as u32)?;

        for g in &self.g_negative_x[..] {
            writer.write_all(g.into_uncompressed().as_ref())?;
        }

        for g in &self.g_positive_x[..] {
            writer.write_all(g.into_uncompressed().as_ref())?;
        }

        for g in &self.h_negative_x[..] {
            writer.write_all(g.into_uncompressed().as_ref())?;
        }

        for g in &self.h_positive_x[..] {
            writer.write_all(g.into_uncompressed().as_ref())?;
        }

        for g in &self.g_negative_x_alpha[..] {
            writer.write_all(g.into_uncompressed().as_ref())?;
        }

        for g in &self.g_positive_x_alpha[..] {
            writer.write_all(g.into_uncompressed().as_ref())?;
        }

        for g in &self.h_negative_x_alpha[..] {
            writer.write_all(g.into_uncompressed().as_ref())?;
        }

        for g in &self.h_positive_x_alpha[..] {
            writer.write_all(g.into_uncompressed().as_ref())?;
        }

        Ok(())
    }

    /// Deserializes an SRS written by `write`. With `checked == true` every
    /// point is validated (on curve / in subgroup); otherwise only decoded.
    /// Points at infinity are rejected in both modes.
    pub fn read<R: Read>(
        mut reader: R,
        checked: bool
    ) -> io::Result<Self>
    {
        use crate::pairing::EncodedPoint;

        // decode one G1 point, optionally with full validity checks
        let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
            let mut repr = <E::G1Affine as CurveAffine>::Uncompressed::empty();
            reader.read_exact(repr.as_mut())?;

            if checked {
                repr
                .into_affine()
            } else {
                repr
                .into_affine_unchecked()
            }
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
            .and_then(|e| if e.is_zero() {
                Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
            } else {
                Ok(e)
            })
        };

        // decode one G2 point, optionally with full validity checks
        let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
            let mut repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();
            reader.read_exact(repr.as_mut())?;

            if checked {
                repr
                .into_affine()
            } else {
                repr
                .into_affine_unchecked()
            }
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
            .and_then(|e| if e.is_zero() {
                Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
            } else {
                Ok(e)
            })
        };

        let mut g_negative_x = vec![];
        let mut g_positive_x = vec![];

        let mut h_negative_x = vec![];
        let mut h_positive_x = vec![];

        let mut g_negative_x_alpha = vec![];
        let mut g_positive_x_alpha = vec![];

        let mut h_negative_x_alpha = vec![];
        let mut h_positive_x_alpha = vec![];

        let d = reader.read_u32::<BigEndian>()? as usize;

        // read order mirrors the write order above
        {
            for _ in 0..(d+1) {
                g_negative_x.push(read_g1(&mut reader)?);
            }
            for _ in 0..(d+1) {
                g_positive_x.push(read_g1(&mut reader)?);
            }
        }

        {
            for _ in 0..(d+1) {
                h_negative_x.push(read_g2(&mut reader)?);
            }
            for _ in 0..(d+1) {
                h_positive_x.push(read_g2(&mut reader)?);
            }
        }

        // alpha G1 vectors carry only d elements (no x^0 term)
        {
            for _ in 0..d {
                g_negative_x_alpha.push(read_g1(&mut reader)?);
            }
            for _ in 0..d {
                g_positive_x_alpha.push(read_g1(&mut reader)?);
            }
        }

        {
            for _ in 0..(d+1) {
                h_negative_x_alpha.push(read_g2(&mut reader)?);
            }
            for _ in 0..(d+1) {
                h_positive_x_alpha.push(read_g2(&mut reader)?);
            }
        }

        Ok(Self {
            d: d,
            g_negative_x: g_negative_x,
            g_positive_x: g_positive_x,
            h_negative_x: h_negative_x,
            h_positive_x: h_positive_x,
            g_negative_x_alpha: g_negative_x_alpha,
            g_positive_x_alpha: g_positive_x_alpha,
            h_negative_x_alpha: h_negative_x_alpha,
            h_positive_x_alpha: h_positive_x_alpha
        })
    }
}<file_sep>/src/plonk/better_better_cs/setup/mod.rs
use super::cs::*;
use super::data_structures::*;
use crate::pairing::ff::*;
use crate::pairing::*;
use crate::plonk::polynomials::*;
use std::collections::HashMap;
use crate::plonk::domains::*;
use crate::worker::Worker;
use crate::SynthesisError;
use crate::kate_commitment::*;
use super::super::better_cs::utils::make_non_residues;
use crate::byteorder::BigEndian;
use crate::byteorder::ReadBytesExt;
use crate::byteorder::WriteBytesExt;
use std::io::{Read, Write};
use crate::plonk::better_cs::keys::*;
/// Full prover setup for a circuit `C`: all setup/selector/permutation
/// polynomials in monomial form, plus optional lookup-table polynomials.
#[derive(Clone, PartialEq, Eq)]
pub struct Setup<E: Engine, C: Circuit<E>> {
    // number of gates (domain size is the next power of two)
    pub n: usize,
    pub num_inputs: usize,
    pub state_width: usize,
    pub num_witness_polys: usize,

    pub gate_setup_monomials: Vec<Polynomial<E::Fr, Coefficients>>,
    pub gate_selectors_monomials: Vec<Polynomial<E::Fr, Coefficients>>,
    pub permutation_monomials: Vec<Polynomial<E::Fr, Coefficients>>,

    // 0 when the circuit uses no lookup tables; the Option fields below
    // are None in that case
    pub total_lookup_entries_length: usize,
    pub lookup_selector_monomial: Option<Polynomial<E::Fr, Coefficients>>,
    pub lookup_tables_monomials: Vec<Polynomial<E::Fr, Coefficients>>,
    pub lookup_table_type_monomial: Option<Polynomial<E::Fr, Coefficients>>,

    // non-residues used to shift the copy-permutation cosets
    pub non_residues: Vec<E::Fr>,

    _marker: std::marker::PhantomData<C>
}
// Manual `Debug` because `derive` would require `C: Debug`.
// Now includes every public field; `state_width`, `num_witness_polys` and
// `non_residues` were previously omitted, making dumps incomplete.
impl<E: Engine, C: Circuit<E>> std::fmt::Debug for Setup<E, C> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Setup")
            .field("n", &self.n)
            .field("num_inputs", &self.num_inputs)
            .field("state_width", &self.state_width)
            .field("num_witness_polys", &self.num_witness_polys)
            .field("gate_setup_monomials", &self.gate_setup_monomials)
            .field("gate_selectors_monomials", &self.gate_selectors_monomials)
            .field("permutation_monomials", &self.permutation_monomials)
            .field("total_lookup_entries_length", &self.total_lookup_entries_length)
            .field("lookup_selector_monomial", &self.lookup_selector_monomial)
            .field("lookup_tables_monomials", &self.lookup_tables_monomials)
            .field("lookup_table_type_monomial", &self.lookup_table_type_monomial)
            .field("non_residues", &self.non_residues)
            .finish()
    }
}
impl<E: Engine, C: Circuit<E>> Setup<E, C> {
    /// An all-empty setup, to be filled by the synthesis pipeline.
    pub fn empty() -> Self {
        Self {
            n: 0,
            num_inputs: 0,
            state_width: 0,
            num_witness_polys: 0,
            gate_setup_monomials: vec![],
            gate_selectors_monomials: vec![],
            permutation_monomials: vec![],
            total_lookup_entries_length: 0,
            lookup_selector_monomial: None,
            lookup_tables_monomials: vec![],
            lookup_table_type_monomial: None,
            non_residues: vec![],
            _marker: std::marker::PhantomData
        }
    }

    /// Serializes the setup. Field order is fixed and mirrored by `read`;
    /// sizes are written as big-endian u64.
    pub fn write<W: Write>(&self, mut writer: W) -> std::io::Result<()> {
        writer.write_u64::<BigEndian>(self.n as u64)?;
        writer.write_u64::<BigEndian>(self.num_inputs as u64)?;
        writer.write_u64::<BigEndian>(self.state_width as u64)?;
        writer.write_u64::<BigEndian>(self.num_witness_polys as u64)?;

        write_polynomials_vec(&self.gate_setup_monomials, &mut writer)?;
        write_polynomials_vec(&self.gate_selectors_monomials, &mut writer)?;
        write_polynomials_vec(&self.permutation_monomials, &mut writer)?;

        writer.write_u64::<BigEndian>(self.total_lookup_entries_length as u64)?;

        write_optional_polynomial(&self.lookup_selector_monomial, &mut writer)?;
        write_polynomials_vec(&self.lookup_tables_monomials, &mut writer)?;
        write_optional_polynomial(&self.lookup_table_type_monomial, &mut writer)?;

        write_fr_vec(&self.non_residues, &mut writer)?;

        Ok(())
    }

    /// Deserializes a setup written by `write` (same field order).
    pub fn read<R: Read>(mut reader: R) -> std::io::Result<Self> {
        use crate::pairing::CurveAffine;
        use crate::pairing::EncodedPoint;

        let n = reader.read_u64::<BigEndian>()?;
        let num_inputs = reader.read_u64::<BigEndian>()?;
        let state_width = reader.read_u64::<BigEndian>()?;
        let num_witness_polys = reader.read_u64::<BigEndian>()?;

        let gate_setup_monomials = read_polynomials_coeffs_vec(&mut reader)?;
        let gate_selectors_monomials = read_polynomials_coeffs_vec(&mut reader)?;
        let permutation_monomials = read_polynomials_coeffs_vec(&mut reader)?;

        let total_lookup_entries_length = reader.read_u64::<BigEndian>()?;

        let lookup_selector_monomial = read_optional_polynomial_coeffs(&mut reader)?;
        let lookup_tables_monomials = read_polynomials_coeffs_vec(&mut reader)?;
        let lookup_table_type_monomial = read_optional_polynomial_coeffs(&mut reader)?;

        let non_residues = read_fr_vec(&mut reader)?;

        let new = Self {
            n: n as usize,
            num_inputs: num_inputs as usize,
            state_width: state_width as usize,
            num_witness_polys: num_witness_polys as usize,
            gate_setup_monomials,
            gate_selectors_monomials,
            permutation_monomials,
            total_lookup_entries_length: total_lookup_entries_length as usize,
            lookup_selector_monomial,
            lookup_tables_monomials,
            lookup_table_type_monomial,
            non_residues,
            _marker: std::marker::PhantomData,
        };

        Ok(new)
    }
}
/// Verifier-side key: commitments to all setup polynomials of a `Setup`,
/// plus the two G2 elements (`h`, `h^x`) used in the final pairing check.
#[derive(Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct VerificationKey<E: Engine, C: Circuit<E>> {
    pub n: usize,
    pub num_inputs: usize,
    pub state_width: usize,
    pub num_witness_polys: usize,

    pub gate_setup_commitments: Vec<E::G1Affine>,
    pub gate_selectors_commitments: Vec<E::G1Affine>,
    pub permutation_commitments: Vec<E::G1Affine>,

    // lookup-related commitments; `None`/empty when the circuit uses no tables
    pub total_lookup_entries_length: usize,
    pub lookup_selector_commitment: Option<E::G1Affine>,
    pub lookup_tables_commitments: Vec<E::G1Affine>,
    pub lookup_table_type_commitment: Option<E::G1Affine>,

    pub non_residues: Vec<E::Fr>,
    // [h, h^x] from the CRS, consumed by the pairing check
    pub g2_elements: [E::G2Affine; 2],

    #[serde(skip_serializing,skip_deserializing, default)]
    #[serde(bound(serialize = ""))]
    #[serde(bound(deserialize = ""))]
    _marker: std::marker::PhantomData<C>
}
// Manual `Debug` because `derive` would require `C: Debug`.
// Now includes every public field; `state_width`, `num_witness_polys`,
// `non_residues` and `g2_elements` were previously omitted from the dump.
impl<E: Engine, C: Circuit<E>> std::fmt::Debug for VerificationKey<E, C> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("VerificationKey")
            .field("n", &self.n)
            .field("num_inputs", &self.num_inputs)
            .field("state_width", &self.state_width)
            .field("num_witness_polys", &self.num_witness_polys)
            .field("gate_setup_commitments", &self.gate_setup_commitments)
            .field("gate_selectors_commitments", &self.gate_selectors_commitments)
            .field("permutation_commitments", &self.permutation_commitments)
            .field("total_lookup_entries_length", &self.total_lookup_entries_length)
            .field("lookup_selector_commitment", &self.lookup_selector_commitment)
            .field("lookup_tables_commitments", &self.lookup_tables_commitments)
            .field("lookup_table_type_commitment", &self.lookup_table_type_commitment)
            .field("non_residues", &self.non_residues)
            .field("g2_elements", &self.g2_elements)
            .finish()
    }
}
impl<E: Engine, C: Circuit<E>> VerificationKey<E, C> {
    /// An all-empty key (zero G2 elements), to be filled e.g. by `read`.
    pub fn empty() -> Self {
        Self {
            n: 0,
            num_inputs: 0,
            state_width: 0,
            num_witness_polys: 0,
            gate_setup_commitments: vec![],
            gate_selectors_commitments: vec![],
            permutation_commitments: vec![],
            total_lookup_entries_length: 0,
            lookup_selector_commitment: None,
            lookup_tables_commitments: vec![],
            lookup_table_type_commitment: None,
            non_residues: vec![],
            g2_elements: [<E::G2Affine as pairing::CurveAffine>::zero(); 2],
            _marker: std::marker::PhantomData,
        }
    }

    /// Builds a verification key by committing (KZG, monomial basis) to every
    /// polynomial of `setup` using `crs`; copies sizes and non-residues as-is.
    pub fn from_setup(
        setup: &Setup<E, C>,
        worker: &Worker,
        crs: &Crs<E, CrsForMonomialForm>,
    ) -> Result<Self, SynthesisError> {
        let mut new = Self {
            n: setup.n,
            num_inputs: setup.num_inputs,
            state_width: setup.state_width,
            num_witness_polys: setup.num_witness_polys,
            gate_setup_commitments: vec![],
            gate_selectors_commitments: vec![],
            permutation_commitments: vec![],
            total_lookup_entries_length: setup.total_lookup_entries_length,
            lookup_selector_commitment: None,
            lookup_tables_commitments: vec![],
            lookup_table_type_commitment: None,
            non_residues: vec![],
            // [h, h^x] taken straight from the CRS
            g2_elements: [crs.g2_monomial_bases[0], crs.g2_monomial_bases[1]],
            _marker: std::marker::PhantomData,
        };

        // commit each polynomial vector into the corresponding commitment vector,
        // preserving order within each vector
        for (p, c) in vec![
            (&setup.gate_setup_monomials, &mut new.gate_setup_commitments),
            (&setup.gate_selectors_monomials, &mut new.gate_selectors_commitments),
            (&setup.permutation_monomials, &mut new.permutation_commitments),
            (&setup.lookup_tables_monomials, &mut new.lookup_tables_commitments),
        ].into_iter() {
            for p in p.iter() {
                let commitment = commit_using_monomials(p, &crs, &worker)?;
                c.push(commitment);
            }
        }

        // optional lookup polynomials are committed only when present
        if let Some(p) = setup.lookup_selector_monomial.as_ref() {
            let commitment = commit_using_monomials(p, &crs, &worker)?;
            new.lookup_selector_commitment = Some(commitment);
        }

        if let Some(p) = setup.lookup_table_type_monomial.as_ref() {
            let commitment = commit_using_monomials(p, &crs, &worker)?;
            new.lookup_table_type_commitment = Some(commitment);
        }

        new.non_residues = setup.non_residues.clone();

        // new.non_residues
        //     .extend(make_non_residues::<E::Fr>(state_width - 1));

        Ok(new)
    }

    /// Serializes the key; field order is fixed and mirrored by `read`.
    pub fn write<W: Write>(&self, mut writer: W) -> std::io::Result<()> {
        writer.write_u64::<BigEndian>(self.n as u64)?;
        writer.write_u64::<BigEndian>(self.num_inputs as u64)?;
        writer.write_u64::<BigEndian>(self.state_width as u64)?;
        writer.write_u64::<BigEndian>(self.num_witness_polys as u64)?;

        write_curve_affine_vec(&self.gate_setup_commitments, &mut writer)?;
        write_curve_affine_vec(&self.gate_selectors_commitments, &mut writer)?;
        write_curve_affine_vec(&self.permutation_commitments, &mut writer)?;

        writer.write_u64::<BigEndian>(self.total_lookup_entries_length as u64)?;

        write_optional_curve_affine(&self.lookup_selector_commitment, &mut writer)?;
        write_curve_affine_vec(&self.lookup_tables_commitments, &mut writer)?;
        write_optional_curve_affine(&self.lookup_table_type_commitment, &mut writer)?;

        write_fr_vec(&self.non_residues, &mut writer)?;

        write_curve_affine(&self.g2_elements[0], &mut writer)?;
        write_curve_affine(&self.g2_elements[1], &mut writer)?;

        Ok(())
    }

    /// Deserializes a key written by `write` (same field order).
    pub fn read<R: Read>(mut reader: R) -> std::io::Result<Self> {
        use crate::pairing::CurveAffine;
        use crate::pairing::EncodedPoint;

        let n = reader.read_u64::<BigEndian>()?;
        let num_inputs = reader.read_u64::<BigEndian>()?;
        let state_width = reader.read_u64::<BigEndian>()?;
        let num_witness_polys = reader.read_u64::<BigEndian>()?;

        let gate_setup_commitments = read_curve_affine_vector(&mut reader)?;
        let gate_selectors_commitments = read_curve_affine_vector(&mut reader)?;
        let permutation_commitments = read_curve_affine_vector(&mut reader)?;

        let total_lookup_entries_length = reader.read_u64::<BigEndian>()?;

        let lookup_selector_commitment = read_optional_curve_affine(&mut reader)?;
        let lookup_tables_commitments = read_curve_affine_vector(&mut reader)?;
        let lookup_table_type_commitment = read_optional_curve_affine(&mut reader)?;

        let non_residues = read_fr_vec(&mut reader)?;

        // h and h^x, in write order
        let h = read_curve_affine(&mut reader)?;
        let h_x = read_curve_affine(&mut reader)?;

        let new = Self {
            n: n as usize,
            num_inputs: num_inputs as usize,
            state_width: state_width as usize,
            num_witness_polys: num_witness_polys as usize,
            gate_setup_commitments,
            gate_selectors_commitments,
            permutation_commitments,
            total_lookup_entries_length: total_lookup_entries_length as usize,
            lookup_selector_commitment,
            lookup_tables_commitments,
            lookup_table_type_commitment,
            non_residues,
            g2_elements: [h, h_x],
            _marker: std::marker::PhantomData,
        };

        Ok(new)
    }
}
use super::data_structures::AssembledPolynomialStorageForMonomialForms;
impl<'a, E: Engine> AssembledPolynomialStorageForMonomialForms<'a, E> {
    /// Fills this storage with borrowed references into `setup`: per-gate
    /// setup polynomials, gate selectors (only when the circuit declares more
    /// than one gate), and copy-permutation polynomials.
    ///
    /// Relies on `setup.gate_setup_monomials` / `gate_selectors_monomials`
    /// being laid out in exactly the order produced by iterating
    /// `C::declare_used_gates()`; the `assert!`s verify both iterators are
    /// fully consumed. NOTE(review): the comment below also mentions lookup
    /// table polynomials, but they are not handled here — confirm upstream.
    pub fn extend_from_setup<C: Circuit<E>>(&mut self, setup: &'a Setup<E, C>) -> Result<(), SynthesisError> {
        // extend with gate setup polys, gate selectors, permutation polys
        // and lookup table setup polys if available
        let all_gates = C::declare_used_gates()?;
        let has_selectors = all_gates.len() > 1;
        let mut setup_gates_iter = setup.gate_setup_monomials.iter();
        for gate in all_gates.iter() {
            for &poly_id in gate.setup_polynomials().into_iter() {
                let poly_ref = setup_gates_iter.next().expect(&format!("must have gate setup poly {:?} for gate {:?} in setup", poly_id, gate));
                let proxy = PolynomialProxy::from_borrowed(poly_ref);
                self.setup_map.insert(poly_id, proxy);
            }
        }
        // every setup polynomial must have been claimed by some gate
        assert!(setup_gates_iter.next().is_none());
        if has_selectors {
            let mut selector_iter = setup.gate_selectors_monomials.iter();
            for gate in all_gates.into_iter() {
                let id = PolyIdentifier::GateSelector(gate.name());
                let poly_ref = selector_iter.next().expect(&format!("must have gate selector poly for gate {:?} in setup", gate));
                let proxy = PolynomialProxy::from_borrowed(poly_ref);
                self.gate_selectors.insert(id, proxy);
            }
            assert!(selector_iter.next().is_none());
        }
        for (idx, poly_ref) in setup.permutation_monomials.iter().enumerate() {
            let id = PolyIdentifier::PermutationPolynomial(idx);
            let proxy = PolynomialProxy::from_borrowed(poly_ref);
            self.setup_map.insert(id, proxy);
        }
        Ok(())
    }
}<file_sep>/src/plonk/cs/variable.rs
/// Represents a variable in our constraint system.
///
/// Thin newtype over [`Index`]; cheap to copy and hashable, so it can serve
/// as a map key.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct Variable(pub(crate) Index);
impl Variable {
    /// This constructs a variable with an arbitrary index.
    /// Circuit implementations are not recommended to use this:
    /// indices are normally handed out by the constraint system.
    pub fn new_unchecked(idx: Index) -> Variable {
        Variable(idx)
    }

    /// This returns the index underlying the variable.
    /// Circuit implementations are not recommended to use this.
    pub fn get_unchecked(&self) -> Index {
        self.0
    }
}
/// Represents the index of either an input variable or
/// auxillary (witness) variable; the two live in separate numberings.
#[derive(Copy, Clone, PartialEq, Debug, Hash, Eq, serde::Serialize, serde::Deserialize)]
pub enum Index {
    Input(usize),
    Aux(usize)
}<file_sep>/src/plonk/better_better_cs/verifier/mod.rs
use crate::pairing::ff::*;
use crate::pairing::{Engine, CurveAffine, CurveProjective};
use super::setup::VerificationKey;
use super::proof::{Proof, sort_queries_for_linearization};
use super::cs::*;
use super::data_structures::*;
use std::collections::HashMap;
use crate::plonk::domains::*;
use crate::SynthesisError;
use crate::plonk::commitments::transcript::*;
use crate::kate_commitment::*;
use crate::plonk::better_cs::utils::*;
use super::lookup_tables::LookupQuery;
pub const MAX_DILATION: usize = 1;
/// Verifies `proof` for circuit `C` against `vk`.
///
/// Delegates to `aggregate` to replay the transcript and fold all openings
/// into two G1 points, then performs the single pairing check
/// e(pair_with_generator, h) * e(pair_with_x, h^x) == 1.
/// Returns `Ok(false)` when either stage rejects the proof.
// NOTE: the redundant function-local `use crate::pairing::CurveAffine;`
// was removed — the trait is already imported at module level.
pub fn verify<E: Engine, C: Circuit<E>, T: Transcript<E::Fr>>(
    vk: &VerificationKey<E, C>,
    proof: &Proof<E, C>,
    transcript_params: Option<T::InitializationParameters>,
) -> Result<bool, SynthesisError> {
    let ((pair_with_generator, pair_with_x), success) = aggregate::<_, _, T>(vk, proof, transcript_params)?;
    if !success {
        return Ok(false)
    }

    let valid = E::final_exponentiation(
        &E::miller_loop(&[
            (&pair_with_generator.prepare(), &vk.g2_elements[0].prepare()),
            (&pair_with_x.prepare(), &vk.g2_elements[1].prepare())
        ])
    ).ok_or(SynthesisError::Unsatisfiable)? == E::Fqk::one();

    Ok(valid)
}
/// Error-returning assertion: maps `false` to `AssignmentMissing` instead of
/// panicking, so malformed proofs fail verification gracefully.
fn safe_assert(must_be_true: bool) -> Result<(), SynthesisError> {
    if must_be_true {
        Ok(())
    } else {
        Err(SynthesisError::AssignmentMissing)
    }
}
/// Equality variant of `safe_assert`: errors instead of panicking on mismatch.
fn safe_assert_eq<T: Eq>(a: T, b: T) -> Result<(), SynthesisError> {
    safe_assert(a==b)
}
/// Core of the verification procedure: replays the prover's Fiat-Shamir
/// transcript to derive all challenges, checks the quotient identity at the
/// evaluation point `z`, homomorphically reconstructs the linearization
/// commitment, and aggregates every queried commitment/opening pair into the
/// two G1 points of the final KZG pairing check.
///
/// Returns `((pair_with_generator, pair_with_x), success)`. When the quotient
/// consistency check at `z` fails, `success` is `false` and the returned
/// points are zero and must not be used. The statement order below mirrors the
/// prover's transcript commitments exactly — reordering breaks the challenges.
pub fn aggregate<E: Engine, C: Circuit<E>, T: Transcript<E::Fr>>(
    vk: &VerificationKey<E, C>,
    proof: &Proof<E, C>,
    transcript_params: Option<T::InitializationParameters>,
) -> Result<((E::G1Affine, E::G1Affine), bool), SynthesisError> {
    let mut transcript = if let Some(params) = transcript_params {
        T::new_from_params(params)
    } else {
        T::new()
    };
    let sorted_gates = C::declare_used_gates()?;
    let num_different_gates = sorted_gates.len();
    // Domain must be a power of two (n is the number of usable rows, so n+1 slots).
    safe_assert((vk.n+1).is_power_of_two())?;
    let required_domain_size = vk.n.next_power_of_two();
    let domain = Domain::<E::Fr>::new_for_size(required_domain_size as u64)?;
    // Phase 1: absorb public inputs and wire/witness commitments into the transcript.
    for inp in proof.inputs.iter() {
        transcript.commit_field_element(inp);
    }
    for idx in 0..vk.state_width {
        let commitment = proof.state_polys_commitments.get(idx).ok_or(SynthesisError::AssignmentMissing)?;
        commit_point_as_xy::<E, T>(&mut transcript, commitment);
    }
    for idx in 0..vk.num_witness_polys {
        let commitment = proof.witness_polys_commitments.get(idx).ok_or(SynthesisError::AssignmentMissing)?;
        commit_point_as_xy::<E, T>(&mut transcript, commitment);
    }
    // `eta` (table aggregation challenge) is only drawn when lookups are in use.
    let mut eta = E::Fr::zero();
    if vk.total_lookup_entries_length > 0 {
        eta = transcript.get_challenge();
        let commitment = proof.lookup_s_poly_commitment.as_ref().ok_or(SynthesisError::AssignmentMissing)?;
        commit_point_as_xy::<E, T>(&mut transcript, commitment);
    }
    // Phase 2: permutation-argument challenges and grand-product commitments.
    let beta_for_copy_permutation = transcript.get_challenge();
    let gamma_for_copy_permutation = transcript.get_challenge();
    let commitment = &proof.copy_permutation_grand_product_commitment;
    commit_point_as_xy::<E, T>(&mut transcript, commitment);
    let mut beta_for_lookup = None;
    let mut gamma_for_lookup = None;
    if vk.total_lookup_entries_length > 0 {
        let beta_for_lookup_permutation = transcript.get_challenge();
        let gamma_for_lookup_permutation = transcript.get_challenge();
        beta_for_lookup = Some(beta_for_lookup_permutation);
        gamma_for_lookup = Some(gamma_for_lookup_permutation);
        let commitment = proof.lookup_grand_product_commitment.as_ref().ok_or(SynthesisError::AssignmentMissing)?;
        commit_point_as_xy::<E, T>(&mut transcript, commitment);
    }
    // Phase 3: quotient-combination challenge `alpha` and its powers,
    // partitioned between gate terms, copy-permutation terms and lookup terms.
    let alpha = transcript.get_challenge();
    let mut total_powers_of_alpha_for_gates = 0;
    for g in sorted_gates.iter() {
        total_powers_of_alpha_for_gates += g.num_quotient_terms();
    }
    // println!("Have {} terms from {} gates", total_powers_of_alpha_for_gates, sorted_gates.len());
    let mut current_alpha = E::Fr::one();
    let mut powers_of_alpha_for_gates = Vec::with_capacity(total_powers_of_alpha_for_gates);
    powers_of_alpha_for_gates.push(current_alpha);
    for _ in 1..total_powers_of_alpha_for_gates {
        current_alpha.mul_assign(&alpha);
        powers_of_alpha_for_gates.push(current_alpha);
    }
    safe_assert_eq(powers_of_alpha_for_gates.len(), total_powers_of_alpha_for_gates)?;
    // Two further powers for the copy-permutation grand-product terms.
    let copy_grand_product_alphas;
    {
        current_alpha.mul_assign(&alpha);
        let alpha_0 = current_alpha;
        current_alpha.mul_assign(&alpha);
        let alpha_1 = current_alpha;
        copy_grand_product_alphas = Some([alpha_0, alpha_1]);
    }
    // And three more for the lookup grand-product terms, if lookups are used.
    let mut lookup_grand_product_alphas = None;
    if vk.total_lookup_entries_length > 0 {
        current_alpha.mul_assign(&alpha);
        let alpha_0 = current_alpha;
        current_alpha.mul_assign(&alpha);
        let alpha_1 = current_alpha;
        current_alpha.mul_assign(&alpha);
        let alpha_2 = current_alpha;
        lookup_grand_product_alphas = Some([alpha_0, alpha_1, alpha_2]);
    }
    for commitment in proof.quotient_poly_parts_commitments.iter() {
        commit_point_as_xy::<E, T>(&mut transcript, commitment);
    }
    // Phase 4: draw the evaluation point `z` and absorb claimed openings.
    let z = transcript.get_challenge();
    let z_in_domain_size = z.pow(&[required_domain_size as u64]);
    let quotient_at_z = proof.quotient_poly_opening_at_z;
    transcript.commit_field_element(&quotient_at_z);
    // first reconstruct storage of all the commitments
    let mut setup_commitments_storage = HashMap::new();
    let mut gate_selectors_commitments_storage = HashMap::new();
    {
        let mut gate_setup_polys_commitments_iter = vk.gate_setup_commitments.iter();
        if sorted_gates.len() == 1 {
            // there is no selector
            let gate = sorted_gates.last().unwrap();
            let setup_polys = gate.setup_polynomials();
            for &id in setup_polys.into_iter() {
                let commitment = *gate_setup_polys_commitments_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
                setup_commitments_storage.insert(id, commitment);
            }
        } else {
            let mut gate_selectors_polys_commitments_iter = vk.gate_selectors_commitments.iter();
            for gate in sorted_gates.iter() {
                let key = PolyIdentifier::GateSelector(gate.name());
                let commitment = *gate_selectors_polys_commitments_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
                gate_selectors_commitments_storage.insert(key, commitment);
                let setup_polys = gate.setup_polynomials();
                for &id in setup_polys.into_iter() {
                    let commitment = *gate_setup_polys_commitments_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
                    setup_commitments_storage.insert(id, commitment);
                }
            }
            // Both iterators must be fully consumed, otherwise the key has extra data.
            safe_assert(gate_selectors_polys_commitments_iter.next().is_none())?;
        }
        safe_assert(gate_setup_polys_commitments_iter.next().is_none())?;
    }
    let queries_with_linearization = sort_queries_for_linearization(&sorted_gates, MAX_DILATION);
    let mut query_values_map = std::collections::HashMap::new();
    // Collect every (commitment, opening) pair, split by evaluation point
    // (`z` vs `z*omega`) for the later batched KZG aggregation.
    let mut state_polys_openings_at_z_iter = proof.state_polys_openings_at_z.iter();
    let mut state_polys_openings_at_dilations_iter = proof.state_polys_openings_at_dilations.iter();
    let mut all_values_queried_at_z = vec![];
    let mut all_values_queried_at_z_omega = vec![];
    let mut all_commitments_queried_at_z = vec![];
    let mut all_commitments_queried_at_z_omega = vec![];
    for (dilation_value, ids) in queries_with_linearization.state_polys.iter().enumerate() {
        safe_assert(dilation_value <= MAX_DILATION)?;
        for id in ids.into_iter() {
            let poly_idx = if let PolyIdentifier::VariablesPolynomial(idx) = id {
                idx
            } else {
                unreachable!();
            };
            let commitment = *proof.state_polys_commitments.get(*poly_idx).ok_or(SynthesisError::AssignmentMissing)?;
            let value = if dilation_value == 0 {
                let value = *state_polys_openings_at_z_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
                all_values_queried_at_z.push(value);
                all_commitments_queried_at_z.push(commitment);
                value
            } else {
                // Dilated openings carry (dilation, poly index, value); cross-check
                // them against the query plan before trusting the value.
                let (dilation, state_poly_idx, value) = *state_polys_openings_at_dilations_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
                safe_assert_eq(dilation, dilation_value)?;
                safe_assert_eq(*poly_idx, state_poly_idx)?;
                safe_assert(state_poly_idx < vk.state_width)?;
                all_values_queried_at_z_omega.push(value);
                all_commitments_queried_at_z_omega.push(commitment);
                value
            };
            transcript.commit_field_element(&value);
            let key = PolynomialInConstraint::from_id_and_dilation(*id, dilation_value);
            query_values_map.insert(key, value);
        }
    }
    safe_assert(state_polys_openings_at_z_iter.next().is_none())?;
    safe_assert(state_polys_openings_at_dilations_iter.next().is_none())?;
    // Same procedure for witness polynomials.
    let mut witness_polys_openings_at_z_iter = proof.witness_polys_openings_at_z.iter();
    let mut witness_polys_openings_at_dilations_iter = proof.witness_polys_openings_at_dilations.iter();
    for (dilation_value, ids) in queries_with_linearization.witness_polys.iter().enumerate() {
        safe_assert(dilation_value <= MAX_DILATION)?;
        for id in ids.into_iter() {
            let poly_idx = if let PolyIdentifier::WitnessPolynomial(idx) = id {
                idx
            } else {
                unreachable!();
            };
            let commitment = *proof.witness_polys_commitments.get(*poly_idx).ok_or(SynthesisError::AssignmentMissing)?;
            let value = if dilation_value == 0 {
                let value = *witness_polys_openings_at_z_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
                all_values_queried_at_z.push(value);
                all_commitments_queried_at_z.push(commitment);
                value
            } else {
                let (dilation, witness_poly_idx, value) = *witness_polys_openings_at_dilations_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
                safe_assert_eq(dilation, dilation_value)?;
                safe_assert_eq(*poly_idx, witness_poly_idx)?;
                safe_assert(witness_poly_idx < vk.num_witness_polys)?;
                all_values_queried_at_z_omega.push(value);
                all_commitments_queried_at_z_omega.push(commitment);
                value
            };
            transcript.commit_field_element(&value);
            let key = PolynomialInConstraint::from_id_and_dilation(*id, dilation_value);
            query_values_map.insert(key, value);
        }
    }
    safe_assert(witness_polys_openings_at_z_iter.next().is_none())?;
    safe_assert(witness_polys_openings_at_dilations_iter.next().is_none())?;
    // Gate setup polynomial openings (never dilated).
    let mut gate_setup_openings_at_z_iter = proof.gate_setup_openings_at_z.iter();
    for (gate_idx, queries) in queries_with_linearization.gate_setup_polys.iter().enumerate() {
        for (dilation_value, ids) in queries.iter().enumerate() {
            safe_assert(dilation_value <= MAX_DILATION)?;
            for id in ids.into_iter() {
                let poly_idx = if let PolyIdentifier::GateSetupPolynomial(_, idx) = id {
                    idx
                } else {
                    unreachable!();
                };
                let commitment = *setup_commitments_storage.get(&id).ok_or(SynthesisError::AssignmentMissing)?;
                let value = if dilation_value == 0 {
                    let (gate_index, setup_poly_index, value) = *gate_setup_openings_at_z_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
                    safe_assert_eq(gate_idx, gate_index)?;
                    safe_assert_eq(*poly_idx, setup_poly_index)?;
                    all_values_queried_at_z.push(value);
                    all_commitments_queried_at_z.push(commitment);
                    value
                } else {
                    unimplemented!("gate setup polynomials can not be time dilated");
                };
                transcript.commit_field_element(&value);
                let key = PolynomialInConstraint::from_id_and_dilation(*id, dilation_value);
                query_values_map.insert(key, value);
            }
        }
    }
    safe_assert(gate_setup_openings_at_z_iter.next().is_none())?;
    // also open gate selectors
    let mut selector_values_iter = proof.gate_selectors_openings_at_z.iter();
    let mut selector_values = vec![];
    for s in queries_with_linearization.gate_selectors.iter() {
        let gate_index = sorted_gates.iter().position(|r| r == s).ok_or(SynthesisError::AssignmentMissing)?;
        let (gate_idx, value) = *selector_values_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
        safe_assert_eq(gate_index, gate_idx)?;
        transcript.commit_field_element(&value);
        let key = PolyIdentifier::GateSelector(s.name());
        // let commitment = *selector_commitments_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
        let commitment = *gate_selectors_commitments_storage.get(&key).ok_or(SynthesisError::AssignmentMissing)?;
        selector_values.push(value);
        all_values_queried_at_z.push(value);
        all_commitments_queried_at_z.push(commitment);
    }
    safe_assert(selector_values_iter.next().is_none())?;
    // copy-permutation polynomials queries
    let mut copy_permutation_polys_openings_at_z_iter = proof.copy_permutation_polys_openings_at_z.iter();
    let mut copy_permutation_polys_commitments_iter = vk.permutation_commitments.iter();
    let mut copy_permutation_queries = vec![];
    // Only the first (state_width - 1) sigma polys are opened at z;
    // the last one is folded into the linearization instead.
    for _ in 0..(vk.state_width-1) {
        let value = *copy_permutation_polys_openings_at_z_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
        transcript.commit_field_element(&value);
        copy_permutation_queries.push(value);
        all_values_queried_at_z.push(value);
        let commitment = *copy_permutation_polys_commitments_iter.next().ok_or(SynthesisError::AssignmentMissing)?;
        all_commitments_queried_at_z.push(commitment);
    }
    safe_assert(copy_permutation_polys_openings_at_z_iter.next().is_none())?;
    // copy-permutation grand product query
    let mut z_omega = z;
    z_omega.mul_assign(&domain.generator);
    // for polys below we will insert queried commitments manually into the corresponding lists
    let copy_permutation_z_at_z_omega = proof.copy_permutation_grand_product_opening_at_z_omega;
    transcript.commit_field_element(&copy_permutation_z_at_z_omega);
    if vk.total_lookup_entries_length > 0 {
        // first commit values at z, and then at z*omega
        transcript.commit_field_element(proof.lookup_t_poly_opening_at_z.as_ref().ok_or(SynthesisError::AssignmentMissing)?);
        transcript.commit_field_element(proof.lookup_selector_poly_opening_at_z.as_ref().ok_or(SynthesisError::AssignmentMissing)?);
        transcript.commit_field_element(proof.lookup_table_type_poly_opening_at_z.as_ref().ok_or(SynthesisError::AssignmentMissing)?);
        // now at z*omega
        transcript.commit_field_element(proof.lookup_s_poly_opening_at_z_omega.as_ref().ok_or(SynthesisError::AssignmentMissing)?);
        transcript.commit_field_element(proof.lookup_grand_product_opening_at_z_omega.as_ref().ok_or(SynthesisError::AssignmentMissing)?);
        transcript.commit_field_element(proof.lookup_t_poly_opening_at_z_omega.as_ref().ok_or(SynthesisError::AssignmentMissing)?);
    }
    let linearization_at_z = proof.linearization_poly_opening_at_z;
    transcript.commit_field_element(&linearization_at_z);
    // linearization is done, now perform sanity check
    // this is effectively a verification procedure
    let mut lookup_query = None;
    {
        let vanishing_at_z = evaluate_vanishing_for_size(&z, required_domain_size as u64);
        // first let's aggregate gates
        let mut t_num_on_full_domain = E::Fr::zero();
        let challenges_slice = &powers_of_alpha_for_gates[..];
        let mut all_gates = sorted_gates.clone();
        // we've suffered and linearization polynomial captures all the gates except the public input!
        {
            let mut tmp = linearization_at_z;
            // add input values
            let gate = all_gates.drain(0..1).into_iter().next().ok_or(SynthesisError::AssignmentMissing)?;
            safe_assert(gate.benefits_from_linearization())?;
            safe_assert(C::MainGate::default().into_internal() == gate)?;
            let gate = C::MainGate::default();
            let num_challenges = gate.num_quotient_terms();
            let (for_gate, _) = challenges_slice.split_at(num_challenges);
            let input_values = proof.inputs.clone();
            let mut inputs_term = gate.add_inputs_into_quotient(
                required_domain_size,
                &input_values,
                z,
                for_gate,
            )?;
            if num_different_gates > 1 {
                // With multiple gates the main gate's term is scaled by its selector at z.
                let selector_value = selector_values[0];
                inputs_term.mul_assign(&selector_value);
            }
            tmp.add_assign(&inputs_term);
            t_num_on_full_domain.add_assign(&tmp);
        }
        // now aggregate leftovers from grand product for copy permutation
        {
            // - alpha_0 * (a + perm(z) * beta + gamma)*()*(d + gamma) * z(z*omega)
            let [alpha_0, alpha_1] = copy_grand_product_alphas.expect("there must be powers of alpha for copy permutation");
            let mut factor = alpha_0;
            factor.mul_assign(&copy_permutation_z_at_z_omega);
            for idx in 0..(vk.state_width-1) {
                let key = PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(idx));
                let wire_value = query_values_map.get(&key)
                    .ok_or(SynthesisError::AssignmentMissing)?;
                let permutation_at_z = copy_permutation_queries[idx];
                let mut t = permutation_at_z;
                t.mul_assign(&beta_for_copy_permutation);
                t.add_assign(&wire_value);
                t.add_assign(&gamma_for_copy_permutation);
                factor.mul_assign(&t);
            }
            let key = PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(vk.state_width-1));
            let mut tmp = *query_values_map.get(&key)
                .ok_or(SynthesisError::AssignmentMissing)?;
            tmp.add_assign(&gamma_for_copy_permutation);
            factor.mul_assign(&tmp);
            t_num_on_full_domain.sub_assign(&factor);
            // - L_0(z) * alpha_1
            let mut l_0_at_z = evaluate_l0_at_point(required_domain_size as u64, z)?;
            l_0_at_z.mul_assign(&alpha_1);
            t_num_on_full_domain.sub_assign(&l_0_at_z);
        }
        // and if exists - grand product for lookup permutation
        {
            if vk.total_lookup_entries_length > 0 {
                let [alpha_0, alpha_1, alpha_2] = lookup_grand_product_alphas.expect("there must be powers of alpha for lookup permutation");
                let lookup_queries = LookupQuery::<E> {
                    s_at_z_omega: proof.lookup_s_poly_opening_at_z_omega.ok_or(SynthesisError::AssignmentMissing)?,
                    grand_product_at_z_omega: proof.lookup_grand_product_opening_at_z_omega.ok_or(SynthesisError::AssignmentMissing)?,
                    t_at_z: proof.lookup_t_poly_opening_at_z.ok_or(SynthesisError::AssignmentMissing)?,
                    t_at_z_omega: proof.lookup_t_poly_opening_at_z_omega.ok_or(SynthesisError::AssignmentMissing)?,
                    selector_at_z: proof.lookup_selector_poly_opening_at_z.ok_or(SynthesisError::AssignmentMissing)?,
                    table_type_at_z: proof.lookup_table_type_poly_opening_at_z.ok_or(SynthesisError::AssignmentMissing)?,
                };
                let beta_for_lookup_permutation = beta_for_lookup.ok_or(SynthesisError::AssignmentMissing)?;
                let gamma_for_lookup_permutation = gamma_for_lookup.ok_or(SynthesisError::AssignmentMissing)?;
                let mut beta_plus_one = beta_for_lookup_permutation;
                beta_plus_one.add_assign(&E::Fr::one());
                let mut gamma_beta = gamma_for_lookup_permutation;
                gamma_beta.mul_assign(&beta_plus_one);
                let expected = gamma_beta.pow([(required_domain_size-1) as u64]);
                // in a linearization we've taken terms:
                // - s(x) from the alpha_0 * Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega)))
                // - and Z(x) from - alpha_0 * Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega)) (term in full) +
                // + alpha_1 * (Z(x) - 1) * L_{0}(z) + alpha_2 * (Z(x) - expected) * L_{n-1}(z)
                // first make alpha_0 * Z(x*omega)*(\gamma*(1 + \beta) + \beta * s(x*omega)))
                let mut tmp = lookup_queries.s_at_z_omega;
                tmp.mul_assign(&beta_for_lookup_permutation);
                tmp.add_assign(&gamma_beta);
                tmp.mul_assign(&lookup_queries.grand_product_at_z_omega);
                tmp.mul_assign(&alpha_0);
                // (z - omega^{n-1}) for this part
                let last_omega = domain.generator.pow(&[(required_domain_size - 1) as u64]);
                let mut z_minus_last_omega = z;
                z_minus_last_omega.sub_assign(&last_omega);
                tmp.mul_assign(&z_minus_last_omega);
                t_num_on_full_domain.add_assign(&tmp);
                // // - alpha_1 * L_{0}(z)
                let mut l_0_at_z = evaluate_l0_at_point(required_domain_size as u64, z)?;
                l_0_at_z.mul_assign(&alpha_1);
                t_num_on_full_domain.sub_assign(&l_0_at_z);
                // // - alpha_2 * expected L_{n-1}(z)
                let mut l_n_minus_one_at_z = evaluate_lagrange_poly_at_point(required_domain_size - 1, &domain, z)?;
                l_n_minus_one_at_z.mul_assign(&expected);
                l_n_minus_one_at_z.mul_assign(&alpha_2);
                t_num_on_full_domain.sub_assign(&l_n_minus_one_at_z);
                lookup_query = Some(lookup_queries);
            }
        }
        // The main quotient identity: t(z) * Z_H(z) == aggregated numerator.
        let mut lhs = quotient_at_z;
        lhs.mul_assign(&vanishing_at_z);
        let rhs = t_num_on_full_domain;
        if lhs != rhs {
            // Signal failure without an error: the caller maps this to Ok(false).
            return Ok(((E::G1Affine::zero(), E::G1Affine::zero()), false));
        }
    }
    // now we need to reconstruct the effective linearization poly with homomorphic properties
    let linearization_commitment = {
        let mut challenges_slice = &powers_of_alpha_for_gates[..];
        let mut all_gates = sorted_gates.clone();
        let gate = all_gates.drain(0..1).into_iter().next().ok_or(SynthesisError::AssignmentMissing)?;
        safe_assert(gate.benefits_from_linearization())?;
        safe_assert(C::MainGate::default().into_internal() == gate)?;
        let gate = C::MainGate::default();
        let num_challenges = gate.num_quotient_terms();
        let (for_gate, rest) = challenges_slice.split_at(num_challenges);
        challenges_slice = rest;
        let input_values = proof.inputs.clone();
        let mut r = gate.contribute_into_linearization_commitment_for_public_inputs(
            required_domain_size,
            &input_values,
            z,
            &query_values_map,
            &setup_commitments_storage,
            for_gate,
        )?;
        let mut selectors_it = selector_values.clone().into_iter();
        if num_different_gates > 1 {
            // first multiply r by the selector value at z
            r.mul_assign(selectors_it.next().ok_or(SynthesisError::AssignmentMissing)?.into_repr());
        }
        // now proceed per gate
        for gate in all_gates.into_iter() {
            let num_challenges = gate.num_quotient_terms();
            let (for_gate, rest) = challenges_slice.split_at(num_challenges);
            challenges_slice = rest;
            if gate.benefits_from_linearization() {
                // gate benefits from linearization, so make temporary value
                let tmp = gate.contribute_into_linearization_commitment(
                    required_domain_size,
                    z,
                    &query_values_map,
                    &setup_commitments_storage,
                    for_gate,
                )?;
                let selector_value = selectors_it.next().ok_or(SynthesisError::AssignmentMissing)?;
                let mut scaled = tmp;
                scaled.mul_assign(selector_value.into_repr());
                r.add_assign(&scaled);
            } else {
                // we linearize over the selector, so take a selector and scale it
                let gate_value_at_z = gate.contribute_into_verification_equation(
                    required_domain_size,
                    z,
                    &query_values_map,
                    for_gate
                )?;
                let key = PolyIdentifier::GateSelector(gate.name());
                let gate_selector = gate_selectors_commitments_storage.get(&key).ok_or(SynthesisError::AssignmentMissing)?;
                let scaled = gate_selector.mul(gate_value_at_z.into_repr());
                r.add_assign(&scaled);
            }
        }
        safe_assert(selectors_it.next().is_none())?;
        safe_assert_eq(challenges_slice.len(), 0)?;
        // add contributions from copy-permutation and lookup-permutation
        // copy-permutation linearization comtribution
        {
            // + (a(z) + beta*z + gamma)*()*()*()*Z(x)
            let [alpha_0, alpha_1] = copy_grand_product_alphas.expect("there must be powers of alpha for copy permutation");
            // Non-residues shift cosets per column; the first column uses 1.
            let some_one = Some(E::Fr::one());
            let mut non_residues_iterator = some_one.iter().chain(&vk.non_residues);
            let mut factor = alpha_0;
            for idx in 0..vk.state_width {
                let key = PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(idx));
                let wire_value = query_values_map.get(&key)
                    .ok_or(SynthesisError::AssignmentMissing)?;
                let mut t = z;
                let non_res = non_residues_iterator.next().ok_or(SynthesisError::AssignmentMissing)?;
                t.mul_assign(&non_res);
                t.mul_assign(&beta_for_copy_permutation);
                t.add_assign(&wire_value);
                t.add_assign(&gamma_for_copy_permutation);
                factor.mul_assign(&t);
            }
            safe_assert(non_residues_iterator.next().is_none())?;
            let scaled = proof.copy_permutation_grand_product_commitment.mul(factor.into_repr());
            r.add_assign(&scaled);
            // - (a(z) + beta*perm_a + gamma)*()*()*z(z*omega) * beta * perm_d(X)
            let mut factor = alpha_0;
            factor.mul_assign(&beta_for_copy_permutation);
            factor.mul_assign(&copy_permutation_z_at_z_omega);
            for idx in 0..(vk.state_width-1) {
                let key = PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(idx));
                let wire_value = query_values_map.get(&key)
                    .ok_or(SynthesisError::AssignmentMissing)?;
                let permutation_at_z = copy_permutation_queries[idx];
                let mut t = permutation_at_z;
                t.mul_assign(&beta_for_copy_permutation);
                t.add_assign(&wire_value);
                t.add_assign(&gamma_for_copy_permutation);
                factor.mul_assign(&t);
            }
            let scaled = vk.permutation_commitments.get(vk.state_width - 1).ok_or(SynthesisError::AssignmentMissing)?.mul(factor.into_repr());
            r.sub_assign(&scaled);
            // + L_0(z) * Z(x)
            let mut factor = evaluate_l0_at_point(required_domain_size as u64, z)?;
            factor.mul_assign(&alpha_1);
            let scaled = proof.copy_permutation_grand_product_commitment.mul(factor.into_repr());
            r.add_assign(&scaled);
        }
        // lookup grand product linearization
        // due to separate divisor it's not obvious if this is beneficial without some tricks
        // like multiplication by (1 - L_{n-1}) or by (x - omega^{n-1})
        // Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega))) -
        // Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega)) == 0
        // check that (Z(x) - 1) * L_{0} == 0
        // check that (Z(x) - expected) * L_{n-1} == 0, or (Z(x*omega) - expected)* L_{n-2} == 0
        // f(x) does not need to be opened as it's made of table selector and witnesses
        // if we pursue the strategy from the linearization of a copy-permutation argument
        // then we leave something like s(x) from the Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega))) term,
        // and Z(x) from Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega)) term,
        // with terms with lagrange polys as multipliers left intact
        if vk.total_lookup_entries_length > 0 {
            let [alpha_0, alpha_1, alpha_2] = lookup_grand_product_alphas.expect("there must be powers of alpha for lookup permutation");
            let lookup_queries = lookup_query.expect("lookup data must be constructed");
            // let s_at_z_omega = lookup_queries.s_at_z_omega;
            let grand_product_at_z_omega = lookup_queries.grand_product_at_z_omega;
            let t_at_z = lookup_queries.t_at_z;
            let t_at_z_omega = lookup_queries.t_at_z_omega;
            let selector_at_z = lookup_queries.selector_at_z;
            let table_type_at_z = lookup_queries.table_type_at_z;
            let l_0_at_z = evaluate_lagrange_poly_at_point(0, &domain, z)?;
            let l_n_minus_one_at_z = evaluate_lagrange_poly_at_point(required_domain_size - 1, &domain, z)?;
            let beta_for_lookup_permutation = beta_for_lookup.ok_or(SynthesisError::AssignmentMissing)?;
            let gamma_for_lookup_permutation = gamma_for_lookup.ok_or(SynthesisError::AssignmentMissing)?;
            let mut beta_plus_one = beta_for_lookup_permutation;
            beta_plus_one.add_assign(&E::Fr::one());
            let mut gamma_beta = gamma_for_lookup_permutation;
            gamma_beta.mul_assign(&beta_plus_one);
            // (Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega))) -
            // Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega)))*(X - omega^{n-1})
            let last_omega = domain.generator.pow(&[(required_domain_size - 1) as u64]);
            let mut z_minus_last_omega = z;
            z_minus_last_omega.sub_assign(&last_omega);
            // s(x) from the Z(x*omega)*(\gamma*(1 + \beta) + s(x) + \beta * s(x*omega)))
            let mut factor = grand_product_at_z_omega; // we do not need to account for additive terms
            factor.mul_assign(&alpha_0);
            factor.mul_assign(&z_minus_last_omega);
            let scaled = proof.lookup_s_poly_commitment.ok_or(SynthesisError::AssignmentMissing)?.mul(factor.into_repr());
            r.add_assign(&scaled);
            // Z(x) from - alpha_0 * Z(x) * (\beta + 1) * (\gamma + f(x)) * (\gamma(1 + \beta) + t(x) + \beta * t(x*omega))
            // + alpha_1 * Z(x) * L_{0}(z) + alpha_2 * Z(x) * L_{n-1}(z)
            // accumulate coefficient
            let mut factor = t_at_z_omega;
            factor.mul_assign(&beta_for_lookup_permutation);
            factor.add_assign(&t_at_z);
            factor.add_assign(&gamma_beta);
            // (\gamma + f(x))
            let mut f_reconstructed = E::Fr::zero();
            let mut current = E::Fr::one();
            let eta = eta;
            // a,b,c
            safe_assert_eq(vk.state_width, 4)?;
            for idx in 0..(vk.state_width-1) {
                let key = PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(idx));
                let mut value = *query_values_map.get(&key)
                    .ok_or(SynthesisError::AssignmentMissing)?;
                value.mul_assign(&current);
                f_reconstructed.add_assign(&value);
                current.mul_assign(&eta);
            }
            // and table type
            let mut t = table_type_at_z;
            t.mul_assign(&current);
            f_reconstructed.add_assign(&t);
            f_reconstructed.mul_assign(&selector_at_z);
            f_reconstructed.add_assign(&gamma_for_lookup_permutation);
            // end of (\gamma + f(x)) part
            factor.mul_assign(&f_reconstructed);
            factor.mul_assign(&beta_plus_one);
            factor.negate(); // don't forget minus sign
            factor.mul_assign(&alpha_0);
            // Multiply by (z - omega^{n-1})
            factor.mul_assign(&z_minus_last_omega);
            // L_{0}(z) in front of Z(x)
            let mut tmp = l_0_at_z;
            tmp.mul_assign(&alpha_1);
            factor.add_assign(&tmp);
            // L_{n-1}(z) in front of Z(x)
            let mut tmp = l_n_minus_one_at_z;
            tmp.mul_assign(&alpha_2);
            factor.add_assign(&tmp);
            let scaled = proof.lookup_grand_product_commitment.ok_or(SynthesisError::AssignmentMissing)?.mul(factor.into_repr());
            r.add_assign(&scaled);
        }
        r.into_affine()
    };
    // Phase 5: draw opening-aggregation challenges `v` and `u`.
    let v = transcript.get_challenge();
    // commit proofs
    commit_point_as_xy::<E, T>(&mut transcript, &proof.opening_proof_at_z);
    commit_point_as_xy::<E, T>(&mut transcript, &proof.opening_proof_at_z_omega);
    let u = transcript.get_challenge();
    // first perform naive verification at z
    // f(x) - f(z) = q(x)(x - z) =>
    // e(f(x) - f(z)*g + z*q(x), h)*e(-q(x), h^x) == 1
    // when we aggregate we need to aggregate f(x) part (commitments) and f(z) part (values)
    let mut values_queried_at_z = vec![quotient_at_z];
    values_queried_at_z.push(linearization_at_z);
    values_queried_at_z.extend(all_values_queried_at_z);
    // Combine the quotient part commitments with powers of z^n.
    let quotient_commitment_aggregated = {
        let mut quotient_commitments_iter = proof.quotient_poly_parts_commitments.iter();
        let mut result = quotient_commitments_iter.next().ok_or(SynthesisError::AssignmentMissing)?.into_projective();
        let mut current = z_in_domain_size;
        for part in quotient_commitments_iter {
            let tmp = *part;
            let tmp = tmp.mul(current.into_repr());
            result.add_assign(&tmp);
            current.mul_assign(&z_in_domain_size);
        }
        result.into_affine()
    };
    let mut commitments_queried_at_z = vec![];
    commitments_queried_at_z.push(quotient_commitment_aggregated);
    commitments_queried_at_z.push(linearization_commitment);
    commitments_queried_at_z.extend(all_commitments_queried_at_z);
    let mut reconstructed_lookup_t_poly_commitment = None;
    if vk.total_lookup_entries_length > 0 {
        // we need to add t(x), selector(x) and table type(x)
        values_queried_at_z.push(proof.lookup_t_poly_opening_at_z.ok_or(SynthesisError::AssignmentMissing)?);
        values_queried_at_z.push(proof.lookup_selector_poly_opening_at_z.ok_or(SynthesisError::AssignmentMissing)?);
        values_queried_at_z.push(proof.lookup_table_type_poly_opening_at_z.ok_or(SynthesisError::AssignmentMissing)?);
        // use eta to reconstruct t poly aggregated commitment
        let lookup_t_poly_commitment_aggregated = {
            let mut commitments_iter = vk.lookup_tables_commitments.iter();
            let mut result = commitments_iter.next().ok_or(SynthesisError::AssignmentMissing)?.into_projective();
            let mut current = eta;
            for part in commitments_iter {
                let tmp = *part;
                let tmp = tmp.mul(current.into_repr());
                result.add_assign(&tmp);
                current.mul_assign(&eta);
            }
            result.into_affine()
        };
        reconstructed_lookup_t_poly_commitment = Some(lookup_t_poly_commitment_aggregated);
        commitments_queried_at_z.push(lookup_t_poly_commitment_aggregated);
        commitments_queried_at_z.push(vk.lookup_selector_commitment.ok_or(SynthesisError::AssignmentMissing)?);
        commitments_queried_at_z.push(vk.lookup_table_type_commitment.ok_or(SynthesisError::AssignmentMissing)?);
    }
    let mut values_queried_at_z_omega = vec![copy_permutation_z_at_z_omega];
    values_queried_at_z_omega.extend(all_values_queried_at_z_omega);
    let mut commitments_queried_at_z_omega = vec![proof.copy_permutation_grand_product_commitment];
    commitments_queried_at_z_omega.extend(all_commitments_queried_at_z_omega);
    if vk.total_lookup_entries_length > 0 {
        // we need to add s(x), grand_product(x) and t(x)
        values_queried_at_z_omega.push(proof.lookup_s_poly_opening_at_z_omega.ok_or(SynthesisError::AssignmentMissing)?);
        values_queried_at_z_omega.push(proof.lookup_grand_product_opening_at_z_omega.ok_or(SynthesisError::AssignmentMissing)?);
        values_queried_at_z_omega.push(proof.lookup_t_poly_opening_at_z_omega.ok_or(SynthesisError::AssignmentMissing)?);
        commitments_queried_at_z_omega.push(proof.lookup_s_poly_commitment.ok_or(SynthesisError::AssignmentMissing)?);
        commitments_queried_at_z_omega.push(proof.lookup_grand_product_commitment.ok_or(SynthesisError::AssignmentMissing)?);
        commitments_queried_at_z_omega.push(reconstructed_lookup_t_poly_commitment.expect("t poly for lookup must be reconstructed"));
    }
    safe_assert_eq(commitments_queried_at_z.len(), values_queried_at_z.len())?;
    safe_assert_eq(commitments_queried_at_z_omega.len(), values_queried_at_z_omega.len())?;
    // Phase 6: batch all openings with powers of `v` (the z*omega batch is
    // additionally weighted by `u` at the end).
    let mut aggregated_commitment_at_z = commitments_queried_at_z.drain(0..1).next().ok_or(SynthesisError::AssignmentMissing)?.into_projective();
    let mut aggregated_opening_at_z = values_queried_at_z.drain(0..1).next().ok_or(SynthesisError::AssignmentMissing)?;
    let mut aggregation_challenge = E::Fr::one();
    for (commitment, value) in commitments_queried_at_z.into_iter().zip(values_queried_at_z.into_iter()) {
        aggregation_challenge.mul_assign(&v);
        let scaled = commitment.mul(aggregation_challenge.into_repr());
        aggregated_commitment_at_z.add_assign(&scaled);
        // dbg!(aggregated_commitment_at_z.into_affine());
        let mut tmp = value;
        tmp.mul_assign(&aggregation_challenge);
        aggregated_opening_at_z.add_assign(&tmp);
    }
    // The power sequence of `v` continues across both batches.
    aggregation_challenge.mul_assign(&v);
    let mut aggregated_commitment_at_z_omega = commitments_queried_at_z_omega.drain(0..1).next().ok_or(SynthesisError::AssignmentMissing)?.mul(aggregation_challenge.into_repr());
    let mut aggregated_opening_at_z_omega = values_queried_at_z_omega.drain(0..1).next().ok_or(SynthesisError::AssignmentMissing)?;
    aggregated_opening_at_z_omega.mul_assign(&aggregation_challenge);
    for (commitment, value) in commitments_queried_at_z_omega.into_iter().zip(values_queried_at_z_omega.into_iter()) {
        aggregation_challenge.mul_assign(&v);
        let scaled = commitment.mul(aggregation_challenge.into_repr());
        aggregated_commitment_at_z_omega.add_assign(&scaled);
        let mut tmp = value;
        tmp.mul_assign(&aggregation_challenge);
        aggregated_opening_at_z_omega.add_assign(&tmp);
    }
    // f(x)
    let mut pair_with_generator = aggregated_commitment_at_z;
    aggregated_commitment_at_z_omega.mul_assign(u.into_repr());
    pair_with_generator.add_assign(&aggregated_commitment_at_z_omega);
    // - f(z)*g
    let mut aggregated_value = aggregated_opening_at_z_omega;
    aggregated_value.mul_assign(&u);
    aggregated_value.add_assign(&aggregated_opening_at_z);
    let tmp = E::G1Affine::one().mul(aggregated_value.into_repr());
    pair_with_generator.sub_assign(&tmp);
    // +z * q(x)
    let mut tmp = proof.opening_proof_at_z.mul(z.into_repr());
    let mut t0 = z_omega;
    t0.mul_assign(&u);
    let t1 = proof.opening_proof_at_z_omega.mul(t0.into_repr());
    tmp.add_assign(&t1);
    pair_with_generator.add_assign(&tmp);
    // rhs
    let mut pair_with_x = proof.opening_proof_at_z_omega.mul(u.into_repr());
    pair_with_x.add_assign_mixed(&proof.opening_proof_at_z);
    let mut pair_with_x = pair_with_x.into_affine();
    // Negate so the caller can use the product-of-pairings == 1 form.
    pair_with_x.negate();
    let pair_with_generator = pair_with_generator.into_affine();
    Ok(((pair_with_generator, pair_with_x), true))
}
<file_sep>/src/sonic/unhelped/wellformed_argument.rs
/// Wellformedness argument allows one to verify that some commitment was made to a multivariate polynomial of degree n,
/// with no constant term and no negative powers
use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use crate::pairing::{Engine, CurveProjective, CurveAffine};
use std::marker::PhantomData;
use crate::sonic::srs::SRS;
use crate::sonic::util::*;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
/// Prover-side state for the wellformedness argument: the polynomials
/// (as coefficient vectors over `E::Fr`) whose commitments are argued about.
#[derive(Clone)]
pub struct WellformednessArgument<E: Engine> {
    polynomials: Vec<Vec<E::Fr>>
}

/// Proof of wellformedness: `l` is the commitment to the folded polynomial
/// shifted by x^{-d}, `r` the commitment shifted by x^{d-n} (see `make_argument`).
#[derive(Clone)]
pub struct WellformednessProof<E: Engine> {
    pub l: E::G1Affine,
    pub r: E::G1Affine
}

/// Wrapper carrying a wellformedness proof as part of a larger signature of computation.
#[derive(Clone)]
pub struct WellformednessSignature<E: Engine> {
    pub proof: WellformednessProof<E>
}
impl<E: Engine> WellformednessArgument<E> {
    /// Convenience constructor: builds the argument over `all_polys` and
    /// immediately produces a proof for the pre-drawn challenges.
    pub fn create_signature(
        all_polys: Vec<Vec<E::Fr>>,
        wellformed_challenges: Vec<E::Fr>,
        srs: &SRS<E>
    ) -> WellformednessSignature<E> {
        let wellformed_argument = WellformednessArgument::new(all_polys);
        let proof = wellformed_argument.make_argument(wellformed_challenges, &srs);

        WellformednessSignature {
            proof
        }
    }

    /// Creates the argument state.
    /// Panics if `polynomials` is empty or the polynomials differ in length.
    pub fn new(polynomials: Vec<Vec<E::Fr>>) -> Self {
        assert!(polynomials.len() > 0);

        let length = polynomials[0].len();
        for p in polynomials.iter() {
            assert!(p.len() == length);
        }

        WellformednessArgument {
            polynomials: polynomials
        }
    }

    /// Commits to every polynomial over the positive-power alpha basis of the SRS,
    /// returning one commitment per polynomial (in the same order).
    pub fn commit(&self, srs: &SRS<E>) -> Vec<E::G1Affine> {
        let mut results = vec![];

        let n = self.polynomials[0].len();

        for p in self.polynomials.iter() {
            let c = multiexp(
                srs.g_positive_x_alpha[0..n].iter(),
                p.iter()
            ).into_affine();

            results.push(c);
        }

        results
    }

    /// Consumes the argument: folds all polynomials into one random linear
    /// combination using `challenges`, then commits to that combination shifted by
    /// x^{-d} (`l`) and by x^{d-n} (`r`).
    /// Panics if the number of challenges does not match the number of polynomials.
    pub fn make_argument(self, challenges: Vec<E::Fr>, srs: &SRS<E>) -> WellformednessProof<E> {
        assert_eq!(challenges.len(), self.polynomials.len());
        let mut polynomials = self.polynomials;
        let mut challenges = challenges;

        // seed the accumulator with the last polynomial scaled by its challenge
        let mut p0 = polynomials.pop().unwrap();
        let r0 = challenges.pop().unwrap();
        let n = p0.len();
        mul_polynomial_by_scalar(&mut p0[..], r0);

        let m = polynomials.len();

        // fold the remaining polynomials in: p0 += r * p
        for _ in 0..m {
            let p = polynomials.pop().unwrap();
            let r = challenges.pop().unwrap();
            mul_add_polynomials(&mut p0[..], & p[..], r);
        }

        let d = srs.d;

        // TODO: it's not necessary to have n < d, fix later
        assert!(n < d);

        // here the multiplier is x^-d, so largest negative power is -(d - 1), smallest negative power is - (d - n)
        // H^{x^k} are labeled from 0 power, so we need to use proper indexes
        let l = multiexp(
            srs.g_negative_x[(d - n)..=(d - 1)].iter().rev(),
            p0.iter()
        ).into_affine();

        // here the multiplier is x^d-n, so largest positive power is d, smallest positive power is d - n + 1
        let r = multiexp(
            srs.g_positive_x[(d - n + 1)..=d].iter(),
            p0.iter()
        ).into_affine();

        WellformednessProof {
            l: l,
            r: r
        }
    }

    /// Verifies the proof with two pairing checks: the first pins `l` against
    /// alpha * x^d, the second pins `r` against alpha * x^{n-d}. The verifier
    /// re-folds the commitments with the same challenges the prover used.
    pub fn verify(n: usize, challenges: &Vec<E::Fr>, commitments: &Vec<E::G1Affine>, proof: &WellformednessProof<E>, srs: &SRS<E>) -> bool {
        let d = srs.d;

        let alpha_x_d_precomp = srs.h_positive_x_alpha[d].prepare();
        // TODO: not strictly required
        assert!(n < d);
        let d_minus_n = d - n;
        let alpha_x_n_minus_d_precomp = srs.h_negative_x_alpha[d_minus_n].prepare();
        let mut h_prep = srs.h_positive_x[0];
        h_prep.negate();
        let h_prep = h_prep.prepare();

        // random linear combination of the commitments, matching the prover's folding
        let a = multiexp(
            commitments.iter(),
            challenges.iter(),
        ).into_affine();

        let a = a.prepare();

        let valid = E::final_exponentiation(&E::miller_loop(&[
            (&a, &h_prep),
            (&proof.l.prepare(), &alpha_x_d_precomp)
        ])).unwrap() == E::Fqk::one();

        if !valid {
            return false;
        }

        let valid = E::final_exponentiation(&E::miller_loop(&[
            (&a, &h_prep),
            (&proof.r.prepare(), &alpha_x_n_minus_d_precomp)
        ])).unwrap() == E::Fqk::one();

        if !valid {
            return false;
        }

        true
    }
}
/// Completeness: an honestly made proof over one random polynomial verifies.
#[test]
fn test_argument() {
    use crate::pairing::bls12_381::{Fr, G1Affine, G1, Bls12};
    use rand::{XorShiftRng, SeedableRng, Rand, Rng};
    use crate::sonic::srs::SRS;

    let srs_x = Fr::from_str("23923").unwrap();
    let srs_alpha = Fr::from_str("23728792").unwrap();
    // let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
    let srs = SRS::<Bls12>::new(128, srs_x, srs_alpha);

    // n = 32 coefficients, well below the SRS size d = 128 (make_argument requires n < d)
    let n: usize = 1 << 5;
    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);

    let coeffs = (1..=n).map(|_| Fr::rand(rng)).collect::<Vec<_>>();

    let argument = WellformednessArgument::new(vec![coeffs]);
    let challenges = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();

    let commitments = argument.commit(&srs);

    let proof = argument.make_argument(challenges.clone(), &srs);

    let valid = WellformednessArgument::verify(n, &challenges, &commitments, &proof, &srs);

    assert!(valid);
}
/// Soundness: a proof made over different polynomials than the committed ones
/// must NOT verify against the original commitments.
#[test]
fn test_argument_soundness() {
    use crate::pairing::bls12_381::{Fr, G1Affine, G1, Bls12};
    use rand::{XorShiftRng, SeedableRng, Rand, Rng};
    use crate::sonic::srs::SRS;

    let srs_x = Fr::from_str("23923").unwrap();
    let srs_alpha = Fr::from_str("23728792").unwrap();
    let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);

    let n: usize = 1 << 8;
    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);

    // commit to one set of random coefficients...
    let coeffs = (1..=n).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
    let argument = WellformednessArgument::new(vec![coeffs]);
    let commitments = argument.commit(&srs);

    // ...but build the proof from a fresh, unrelated set
    let coeffs = (1..=n).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
    let argument = WellformednessArgument::new(vec![coeffs]);
    let challenges = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();

    let proof = argument.make_argument(challenges.clone(), &srs);

    let valid = WellformednessArgument::verify(n, &challenges, &commitments, &proof, &srs);

    assert!(!valid);
}<file_sep>/src/plonk/fft/with_precomputation/mod.rs
use crate::ff::PrimeField;
/// Precomputed per-domain data (e.g. twiddle factors) used to speed up FFTs
/// over a fixed-size domain.
pub trait FftPrecomputations<F: PrimeField>: Send + Sync {
    /// Builds the precomputation for a domain of `size` elements.
    fn new_for_domain_size(size: usize) -> Self;
    /// Returns the precomputed field element stored at `index`.
    fn element_for_index(&self, index: usize) -> &F;
    /// The domain size this precomputation was built for.
    fn domain_size(&self) -> usize;
}
pub(crate) mod fft;<file_sep>/src/plonk/better_better_cs/redshift/poseidon_tree_hash.rs
use crate::pairing::ff::{Field, PrimeField};
use poseidon_hash::{PoseidonEngine, PoseidonHashParams, poseidon_hash};
use super::tree_hash::BinaryTreeHasher;
/// Binary Merkle-tree hasher backed by a Poseidon permutation with rate 2 and
/// a single field element of output.
pub struct PoseidonBinaryTreeHasher<'a, E: PoseidonEngine> {
    params: &'a E::Params,
}

impl<'a, E: PoseidonEngine> PoseidonBinaryTreeHasher<'a, E> {
    /// Wraps the given Poseidon parameters.
    /// Panics unless rate == 2 and output length == 1, which is what a binary
    /// (2-to-1) tree hash requires.
    pub fn new(params: &'a E::Params) -> Self {
        assert_eq!(params.rate(), 2u32);
        assert_eq!(params.output_len(), 1u32);
        Self {
            params: params
        }
    }
}

// Manual Clone: only the parameter reference is copied, avoiding the
// `E: Clone` bound a derived impl would require.
impl<'a, E: PoseidonEngine> Clone for PoseidonBinaryTreeHasher<'a, E> {
    fn clone(&self) -> Self {
        Self {
            params: self.params
        }
    }
}
use std::sync::atomic::{AtomicUsize, Ordering};
impl<'a, E: PoseidonEngine> BinaryTreeHasher<E::Fr> for PoseidonBinaryTreeHasher<'a, E> {
    type Output = E::Fr;

    /// Filler value used where no real hash has been computed.
    #[inline]
    fn placeholder_output() -> Self::Output {
        E::Fr::zero()
    }

    /// Hashes an arbitrary-length leaf. The shared COUNTER is bumped by
    /// ceil(input.len() / 2), i.e. the number of rate-2 sponge absorptions.
    fn leaf_hash(&self, input: &[E::Fr]) -> Self::Output {
        let mut num_invocations = input.len() / 2;
        if input.len() % 2 != 0 {
            num_invocations += 1;
        }
        super::tree_hash::COUNTER.fetch_add(num_invocations, Ordering::SeqCst);

        let mut as_vec = poseidon_hash::<E>(self.params, input);

        as_vec.pop().unwrap()
    }

    /// 2-to-1 node hash; `_level` is unused by this hasher.
    /// NOTE(review): `leaf_hash` counts sponge invocations, but here the counter is
    /// bumped by 2 for a 2-element input (which is one invocation by that metric) —
    /// confirm which quantity the counter is meant to track.
    fn node_hash(&self, input: &[Self::Output; 2], _level: usize) -> Self::Output {
        super::tree_hash::COUNTER.fetch_add(2, Ordering::SeqCst);

        let mut as_vec = poseidon_hash::<E>(self.params, &input[..]);

        as_vec.pop().unwrap()
    }
}<file_sep>/src/plonk/better_better_cs/data_structures.rs
use crate::pairing::ff::*;
use crate::pairing::*;
use crate::plonk::polynomials::*;
use super::cs::GateInternal;
/// Identifier of a polynomial inside the proving system's storages. Variants
/// carry either an index (numbered polynomial families) or a `'static` string
/// (named, per-gate polynomials). Equality and hashing are implemented manually
/// below.
#[derive(Copy, Clone, Debug, serde::Serialize, serde::Deserialize)]
pub enum PolyIdentifier {
    VariablesPolynomial(usize),
    WitnessPolynomial(usize),
    GateSetupPolynomial(&'static str, usize),
    GateSelector(&'static str),
    LookupSelector,
    LookupTableEntriesPolynomial(usize),
    NamedSetupPolynomial(&'static str),
    PermutationPolynomial(usize),
}
impl PartialEq for PolyIdentifier {
    /// Structural equality over variant and payload.
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (PolyIdentifier::VariablesPolynomial(a), PolyIdentifier::VariablesPolynomial(b)) => a.eq(&b),
            (PolyIdentifier::GateSetupPolynomial(a_id, a), PolyIdentifier::GateSetupPolynomial(b_id, b)) => {
                // Bug fix: both the gate name and the index must match. The previous
                // code bound `a_id`/`b_id` but never compared them (it reduced to
                // `a == b`), so setup polynomials of two different gates with equal
                // indices compared as equal — while the Hash impl hashes the string
                // id as well, violating the Hash/Eq consistency contract that
                // HashMap keys rely on.
                *a_id == *b_id && a == b
            },
            (PolyIdentifier::GateSelector(a_id), PolyIdentifier::GateSelector(b_id)) => {
                *a_id == *b_id
            },
            (PolyIdentifier::LookupSelector, PolyIdentifier::LookupSelector) => true,
            (PolyIdentifier::LookupTableEntriesPolynomial(a), PolyIdentifier::LookupTableEntriesPolynomial(b)) => a.eq(&b),
            (PolyIdentifier::PermutationPolynomial(a), PolyIdentifier::PermutationPolynomial(b)) => a.eq(&b),
            (PolyIdentifier::NamedSetupPolynomial(a_id), PolyIdentifier::NamedSetupPolynomial(b_id)) => {
                *a_id == *b_id
            },
            (PolyIdentifier::WitnessPolynomial(a), PolyIdentifier::WitnessPolynomial(b)) => a.eq(&b),
            _ => false
        }
    }
}

impl Eq for PolyIdentifier {}
impl std::hash::Hash for PolyIdentifier {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        // Every arm hashes the variant discriminant first, so identifiers with the
        // same payload but different variants (e.g. VariablesPolynomial(1) vs
        // WitnessPolynomial(1)) hash differently.
        match self {
            a @ PolyIdentifier::VariablesPolynomial(id)
            | a @ PolyIdentifier::WitnessPolynomial(id)
            | a @ PolyIdentifier::PermutationPolynomial(id)
            | a @ PolyIdentifier::LookupTableEntriesPolynomial(id) => {
                std::mem::discriminant(a).hash(state);
                state.write_usize(*id);
            }
            a @ PolyIdentifier::GateSetupPolynomial(str_id, id) => {
                // both the gate name and the index contribute to the hash
                std::mem::discriminant(a).hash(state);
                state.write(str_id.as_bytes());
                state.write_usize(*id);
            },
            a @ PolyIdentifier::GateSelector(str_id)
            | a @ PolyIdentifier::NamedSetupPolynomial(str_id) => {
                std::mem::discriminant(a).hash(state);
                state.write(str_id.as_bytes());
            },
            a @ PolyIdentifier::LookupSelector => {
                std::mem::discriminant(a).hash(state);
            }
        }
    }
}
/// Name of the setup polynomial that stores, per row, the lookup table type used.
pub const LOOKUP_TABLE_TYPE_POLYNOMIAL: &'static str = "LOOKUP_TABLE_TYPE_POLYNOMIAL";

/// Row shift ("dilation"): 0 means the current row, 1 the next row, and so on.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct TimeDilation(pub usize);

/// A polynomial reference as it appears in a constraint: which polynomial, and
/// at which row offset it is queried.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(bound( deserialize = "'de: 'static"))]
pub struct PolynomialInConstraint(pub PolyIdentifier, pub TimeDilation);
impl PolynomialInConstraint{
pub const fn from_id(id: PolyIdentifier) -> Self {
Self(id, TimeDilation(0))
}
pub const fn from_id_and_dilation(id: PolyIdentifier, dilation: usize) -> Self {
Self(id, TimeDilation(dilation))
}
pub const fn into_id_and_raw_dilation(self) -> (PolyIdentifier, usize) {
(self.0, (self.1).0)
}
}
/// Either owns a polynomial or borrows one, letting prover code treat
/// precomputed (borrowed) and freshly computed (owned) polynomials uniformly.
pub enum PolynomialProxy<'a, F: PrimeField, P: PolynomialForm> {
    Borrowed(&'a Polynomial<F, P>),
    Owned(Polynomial<F, P>),
}

impl<'a, F: PrimeField, P: PolynomialForm> PolynomialProxy<'a, F, P> {
    /// Wraps an owned polynomial.
    pub fn from_owned(poly: Polynomial<F, P>) -> Self {
        PolynomialProxy::Owned(poly)
    }

    /// Wraps a borrowed polynomial.
    pub fn from_borrowed(poly: &'a Polynomial<F, P>) -> Self {
        PolynomialProxy::Borrowed(poly)
    }

    /// Shared reference to the underlying polynomial, whichever variant holds it.
    pub fn as_ref(&self) -> &Polynomial<F, P> {
        match self {
            PolynomialProxy::Borrowed(b) => {
                &*b
            },
            PolynomialProxy::Owned(o) => {
                &o
            }
        }
    }

    /// Shared view of the raw coefficient/value slice.
    pub fn as_data_ref(&self) -> &[F] {
        match self {
            PolynomialProxy::Borrowed(b) => {
                b.as_ref()
            },
            PolynomialProxy::Owned(o) => {
                o.as_ref()
            }
        }
    }

    /// Mutable view of the raw slice. Panics (`unreachable!`) on a borrowed
    /// proxy, since the data is not owned.
    pub fn as_data_ref_mut(&mut self) -> &mut [F] {
        match self {
            PolynomialProxy::Borrowed(..) => {
                unreachable!("Can not borrow mutable for non-owned proxy")
            },
            PolynomialProxy::Owned(o) => {
                o.as_mut()
            }
        }
    }

    /// Converts into an owned polynomial, cloning if it was only borrowed.
    pub fn into_poly(self) -> Polynomial<F, P> {
        match self {
            PolynomialProxy::Borrowed(b) => {
                b.clone()
            },
            PolynomialProxy::Owned(o) => {
                o
            }
        }
    }

    /// Deep-copies into an `Owned` proxy regardless of the current variant.
    pub fn clone_as_owned(&self) -> Self {
        match self {
            PolynomialProxy::Borrowed(ref b) => {
                PolynomialProxy::Owned((*b).clone())
            },
            PolynomialProxy::Owned(o) => {
                PolynomialProxy::Owned(o.clone())
            }
        }
    }
}
pub fn clone_as_borrowed<'a, 'b: 'a, F: PrimeField, P: PolynomialForm>(
src: &'a PolynomialProxy<'b, F, P>
) -> PolynomialProxy<'a, F, P> {
match src {
PolynomialProxy::Borrowed(ref b) => {
PolynomialProxy::Borrowed(*b)
},
PolynomialProxy::Owned(ref o) => {
PolynomialProxy::Borrowed(o)
}
}
}
// impl<'a, F: PrimeField, P: PolynomialForm> Clone for PolynomialProxy<'a, F, P> {
// fn clone(&self) -> Self {
// match self {
// PolynomialProxy::Borrowed(ref b) => {
// PolynomialProxy::Borrowed(b)
// },
// PolynomialProxy::Owned(ref o) => {
// PolynomialProxy::Borrowed(o)
// }
// }
// }
// }
/// Evaluation-form (values over a domain, possibly an LDE) storage for the
/// polynomials a prover works with, keyed by `PolyIdentifier`.
pub struct AssembledPolynomialStorage<'a, E: Engine> {
    /// State (variable assignment) polynomials.
    pub state_map: std::collections::HashMap<PolyIdentifier, PolynomialProxy<'a, E::Fr, Values>>,
    /// Witness polynomials.
    pub witness_map: std::collections::HashMap<PolyIdentifier, PolynomialProxy<'a, E::Fr, Values>>,
    /// Gate setup and permutation polynomials.
    pub setup_map: std::collections::HashMap<PolyIdentifier, PolynomialProxy<'a, E::Fr, Values>>,
    /// Cache for dilated (row-shifted) queries, keyed by (id, dilation).
    pub scratch_space: std::collections::HashMap<PolynomialInConstraint, PolynomialProxy<'a, E::Fr, Values>>,
    /// Per-gate selector polynomials.
    pub gate_selectors: std::collections::HashMap<PolyIdentifier, PolynomialProxy<'a, E::Fr, Values>>,
    /// Lookup-related and other named polynomials.
    pub named_polys: std::collections::HashMap<PolyIdentifier, PolynomialProxy<'a, E::Fr, Values>>,
    /// Whether the stored values are in bitreversed order.
    pub is_bitreversed: bool,
    /// LDE blow-up factor of the stored values (1 = natural domain).
    pub lde_factor: usize
}

/// Monomial-form (coefficient) counterpart of `AssembledPolynomialStorage`.
pub struct AssembledPolynomialStorageForMonomialForms<'a, E: Engine> {
    pub state_map: std::collections::HashMap<PolyIdentifier, PolynomialProxy<'a, E::Fr, Coefficients>>,
    pub witness_map: std::collections::HashMap<PolyIdentifier, PolynomialProxy<'a, E::Fr, Coefficients>>,
    pub setup_map: std::collections::HashMap<PolyIdentifier, PolynomialProxy<'a, E::Fr, Coefficients>>,
    pub gate_selectors: std::collections::HashMap<PolyIdentifier, PolynomialProxy<'a, E::Fr, Coefficients>>,
    pub named_polys: std::collections::HashMap<PolyIdentifier, PolynomialProxy<'a, E::Fr, Coefficients>>,
}
impl<'a, E: Engine> AssembledPolynomialStorage<'a, E> {
    /// Looks up a polynomial by identifier, panicking if it was never inserted.
    /// Each variant is routed to a specific map; note that `add_setup_polys`
    /// below routes `GateSelector` differently (see the note there).
    pub fn get_poly(&self, id: PolyIdentifier) -> &Polynomial<E::Fr, Values> {
        match id {
            p @ PolyIdentifier::VariablesPolynomial(..) => {
                self.state_map.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::WitnessPolynomial(..) => {
                self.witness_map.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::GateSetupPolynomial(..) => {
                self.setup_map.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::GateSelector(..) => {
                self.gate_selectors.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::PermutationPolynomial(..) => {
                self.setup_map.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::LookupSelector => {
                self.named_polys.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::LookupTableEntriesPolynomial(..) => {
                self.named_polys.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::NamedSetupPolynomial(..) => {
                self.named_polys.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            _ => {
                // NOTE(review): every variant is matched above, so this arm is dead code.
                unreachable!()
            }
        }
    }

    /// Reads one value of a polynomial; only valid for natural-order, non-LDE storage.
    pub fn get_poly_at_step(&self, id: PolyIdentifier, step: usize) -> E::Fr {
        assert!(self.is_bitreversed == false);
        assert!(self.lde_factor == 1);
        let p = self.get_poly(id);
        p.as_ref()[step]
    }

    /// Returns the selector polynomial for `gate`, looked up by the gate's name.
    pub fn get_selector_for_gate(&self, gate: &dyn GateInternal<E>) -> &Polynomial<E::Fr, Values> {
        let gate_name = gate.name();
        let p = PolyIdentifier::GateSelector(gate_name);
        self.gate_selectors.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
    }

    /// Creates empty storage with the given layout parameters.
    pub fn new(bitreversed: bool, lde_factor: usize) -> Self {
        Self {
            state_map: std::collections::HashMap::new(),
            witness_map: std::collections::HashMap::new(),
            setup_map: std::collections::HashMap::new(),
            gate_selectors: std::collections::HashMap::new(),
            scratch_space: std::collections::HashMap::new(),
            named_polys: std::collections::HashMap::new(),
            is_bitreversed: bitreversed,
            lde_factor
        }
    }

    /// Registers borrowed setup-time polynomials under their identifiers.
    /// Panics on non-setup identifiers or a length mismatch between `ids` and `polys`.
    pub fn add_setup_polys<'b: 'a>(&mut self, ids: &[PolyIdentifier], polys: &'b [Polynomial<E::Fr, Values>]) {
        assert_eq!(ids.len(), polys.len());

        for (id, poly) in ids.iter().zip(polys.iter()) {
            let proxy = PolynomialProxy::from_borrowed(poly);
            match id {
                p @ PolyIdentifier::GateSetupPolynomial(..) => {
                    self.setup_map.insert(p.clone(), proxy);
                },
                p @ PolyIdentifier::GateSelector(..) => {
                    // NOTE(review): selectors are inserted into `setup_map` here, while
                    // `get_poly`/`get_selector_for_gate` read them from `gate_selectors` —
                    // confirm which map callers actually populate and consume.
                    self.setup_map.insert(p.clone(), proxy);
                },
                p @ PolyIdentifier::PermutationPolynomial(..) => {
                    self.setup_map.insert(p.clone(), proxy);
                },
                p @ PolyIdentifier::LookupSelector => {
                    self.named_polys.insert(p.clone(), proxy);
                },
                p @ PolyIdentifier::LookupTableEntriesPolynomial(..) => {
                    self.named_polys.insert(p.clone(), proxy);
                },
                p @ PolyIdentifier::NamedSetupPolynomial(..) => {
                    self.named_polys.insert(p.clone(), proxy);
                },
                _ => {
                    unreachable!()
                }
            }
        }
    }
}
impl<'a, E: Engine> AssembledPolynomialStorageForMonomialForms<'a, E> {
    /// Looks up a monomial-form polynomial by identifier, panicking if absent.
    pub fn get_poly(&self, id: PolyIdentifier) -> &Polynomial<E::Fr, Coefficients> {
        match id {
            p @ PolyIdentifier::VariablesPolynomial(..) => {
                self.state_map.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::WitnessPolynomial(..) => {
                self.witness_map.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::GateSetupPolynomial(..) => {
                self.setup_map.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::GateSelector(..) => {
                self.gate_selectors.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::PermutationPolynomial(..) => {
                self.setup_map.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::LookupSelector => {
                self.named_polys.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::LookupTableEntriesPolynomial(..) => {
                self.named_polys.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
            p @ PolyIdentifier::NamedSetupPolynomial(..) => {
                self.named_polys.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
            },
        }
    }

    /// Creates empty monomial-form storage.
    pub fn new() -> Self {
        Self {
            state_map: std::collections::HashMap::new(),
            witness_map: std::collections::HashMap::new(),
            setup_map: std::collections::HashMap::new(),
            gate_selectors: std::collections::HashMap::new(),
            named_polys: std::collections::HashMap::new(),
        }
    }

    /// Returns the selector polynomial for `gate`.
    /// NOTE(review): this reads from `state_map`, while the values-form storage's
    /// counterpart and `get_poly` above read selectors from `gate_selectors` —
    /// looks like a copy-paste slip, but left unchanged; confirm against the code
    /// that populates these maps before fixing.
    pub fn get_selector_for_gate(&self, gate: &dyn GateInternal<E>) -> &Polynomial<E::Fr, Coefficients> {
        let gate_name = gate.name();
        let p = PolyIdentifier::GateSelector(gate_name);
        self.state_map.get(&p).expect(&format!("poly {:?} must exist", p)).as_ref()
    }

    /// Registers borrowed setup-time polynomials under their identifiers.
    /// Panics on non-setup identifiers or a length mismatch.
    pub fn add_setup_polys<'b: 'a>(&mut self, ids: &[PolyIdentifier], polys: &'b [Polynomial<E::Fr, Coefficients>]) {
        assert_eq!(ids.len(), polys.len());

        for (id, poly) in ids.iter().zip(polys.iter()) {
            let proxy = PolynomialProxy::from_borrowed(poly);
            match id {
                p @ PolyIdentifier::GateSetupPolynomial(..) => {
                    self.setup_map.insert(p.clone(), proxy);
                },
                p @ PolyIdentifier::GateSelector(..) => {
                    self.setup_map.insert(p.clone(), proxy);
                },
                p @ PolyIdentifier::PermutationPolynomial(..) => {
                    self.setup_map.insert(p.clone(), proxy);
                },
                p @ PolyIdentifier::LookupSelector => {
                    self.named_polys.insert(p.clone(), proxy);
                },
                p @ PolyIdentifier::LookupTableEntriesPolynomial(..) => {
                    self.named_polys.insert(p.clone(), proxy);
                },
                p @ PolyIdentifier::NamedSetupPolynomial(..) => {
                    self.named_polys.insert(p.clone(), proxy);
                }
                _ => {
                    unreachable!()
                }
            }
        }
    }
}
/// Intermediate per-proof data for the lookup argument: the aggregation
/// challenge `eta` and the lookup f/t/s polynomials in value and monomial forms.
pub struct LookupDataHolder<'a, E: Engine> {
    pub eta: E::Fr,
    pub f_poly_unpadded_values: Option<Polynomial<E::Fr, Values>>,
    pub t_poly_unpadded_values: Option<PolynomialProxy<'a, E::Fr, Values>>,
    pub t_shifted_unpadded_values: Option<PolynomialProxy<'a, E::Fr, Values>>,
    pub s_poly_unpadded_values: Option<Polynomial<E::Fr, Values>>,
    pub s_shifted_unpadded_values: Option<Polynomial<E::Fr, Values>>,
    pub t_poly_monomial: Option<PolynomialProxy<'a, E::Fr, Coefficients>>,
    pub s_poly_monomial: Option<Polynomial<E::Fr, Coefficients>>,
    pub selector_poly_monomial: Option<PolynomialProxy<'a, E::Fr, Coefficients>>,
    pub table_type_poly_monomial: Option<PolynomialProxy<'a, E::Fr, Coefficients>>,
}<file_sep>/src/plonk/better_better_cs/cs.rs
use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use crate::pairing::{Engine, CurveAffine, CurveProjective};
use crate::bit_vec::BitVec;
use crate::{SynthesisError};
#[cfg(feature = "allocator")]
use std::alloc::{Allocator, Global};
use std::collections::HashMap;
use std::marker::PhantomData;
use crate::worker::Worker;
use crate::plonk::domains::*;
use crate::plonk::polynomials::*;
pub use crate::plonk::cs::variable::*;
use crate::plonk::better_cs::utils::*;
pub use super::lookup_tables::{*};
use crate::plonk::fft::cooley_tukey_ntt::*;
use super::utils::*;
pub use super::data_structures::*;
pub use super::setup::*;
pub use super::gates::main_gate_with_d_next::*;
/// Compile-time switch describing what an assembly produces during synthesis.
pub trait SynthesisMode: Clone + Send + Sync + std::fmt::Debug {
    /// Whether witness (assignment) values are generated.
    const PRODUCE_WITNESS: bool;
    /// Whether setup (selector/permutation) data is generated.
    const PRODUCE_SETUP: bool;
}

/// Mode that generates only setup data.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct SynthesisModeGenerateSetup;

/// Mode that generates only the witness.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct SynthesisModeProve;

/// Mode that generates both setup data and the witness.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct SynthesisModeTesting;

impl SynthesisMode for SynthesisModeGenerateSetup {
    const PRODUCE_WITNESS: bool = false;
    const PRODUCE_SETUP: bool = true;
}

impl SynthesisMode for SynthesisModeProve {
    const PRODUCE_WITNESS: bool = true;
    const PRODUCE_SETUP: bool = false;
}

impl SynthesisMode for SynthesisModeTesting {
    const PRODUCE_WITNESS: bool = true;
    const PRODUCE_SETUP: bool = true;
}
/// A user-defined circuit: synthesizes its constraints into a constraint system
/// and declares the custom gates it uses.
pub trait Circuit<E: Engine> {
    /// The main (arithmetic) gate type this circuit is built around.
    type MainGate: MainGate<E>;

    /// Adds all of the circuit's constraints to `cs`.
    fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError>;

    /// Gates used by the circuit; by default only the main gate.
    fn declare_used_gates() -> Result<Vec<Box<dyn GateInternal<E>>>, SynthesisError> {
        Ok(
            vec![Self::MainGate::default().into_internal()]
        )
    }
}
/// Coefficient classification for constraint terms: +1 and -1 are
/// special-cased, anything else is `Other`.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Coefficient {
    PlusOne,
    MinusOne,
    Other
}

/// Product of queried polynomials, scaled by a coefficient class.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct PolynomialMultiplicativeTerm(pub Coefficient, pub Vec<PolynomialInConstraint>);

impl PolynomialMultiplicativeTerm {
    /// Degree of the term = number of polynomial factors.
    fn degree(&self) -> usize {
        self.1.len()
    }
}

/// A request to open polynomial `id` at a (possibly dilated) point.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct PolynomialOpeningRequest {
    pub(crate) id: PolyIdentifier,
    pub(crate) dilation: TimeDilation,
}
/// Object-safe core interface of a gate: everything the prover/verifier
/// machinery needs to evaluate the gate's constraints and fold them into a proof.
pub trait GateInternal<E: Engine>: Send
    + Sync
    + 'static
    + std::any::Any
    + std::fmt::Debug
{
    /// Static, unique name of the gate (also used as a selector map key).
    fn name(&self) -> &'static str;
    /// Degree of the gate's constraint.
    fn degree(&self) -> usize;
    /// Whether public inputs can be routed through this gate.
    fn can_include_public_inputs(&self) -> bool;
    /// All polynomials (with dilations) this gate queries.
    fn all_queried_polynomials(&self) -> &'static [PolynomialInConstraint];
    /// Setup-time polynomials owned by this gate.
    fn setup_polynomials(&self) -> &'static [PolyIdentifier];
    /// State (variable) polynomials this gate constrains.
    fn variable_polynomials(&self) -> &'static [PolyIdentifier];
    /// Witness polynomials used by this gate; none by default.
    #[inline]
    fn witness_polynomials(&self) -> &'static [PolyIdentifier] {
        &[]
    }
    /// Whether the verification equation for this gate benefits from linearization.
    fn benefits_from_linearization(&self) -> bool;
    /// Polynomials the linearization is taken over.
    fn linearizes_over(&self) -> &'static [PolynomialInConstraint];
    /// Polynomials that must be opened for the linearization check.
    fn needs_opened_for_linearization(&self) -> &'static [PolynomialInConstraint];
    /// Number of independent quotient terms this gate contributes.
    fn num_quotient_terms(&self) -> usize;
    /// Evaluates the gate's constraint on one row of `poly_storage`
    /// (presumably zero when the row satisfies the gate — confirm at call sites).
    fn verify_on_row<'a>(&self, row: usize, poly_storage: &AssembledPolynomialStorage<'a, E>, last_row: bool) -> E::Fr;
    /// Prover-side: this gate's contribution to the quotient polynomial (values form).
    fn contribute_into_quotient<'a, 'b>(
        &self,
        domain_size: usize,
        poly_storage: &mut AssembledPolynomialStorage<'a, E>,
        monomials_storage: & AssembledPolynomialStorageForMonomialForms<'b, E>,
        challenges: &[E::Fr],
        omegas_bitreversed: &BitReversedOmegas<E::Fr>,
        omegas_inv_bitreversed: &OmegasInvBitreversed<E::Fr>,
        worker: &Worker
    ) -> Result<Polynomial<E::Fr, Values>, SynthesisError>;
    /// Prover-side: this gate's contribution to the linearization polynomial at `at`.
    fn contribute_into_linearization<'a>(
        &self,
        domain_size: usize,
        at: E::Fr,
        queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
        monomials_storage: & AssembledPolynomialStorageForMonomialForms<'a, E>,
        challenges: &[E::Fr],
        worker: &Worker
    ) -> Result<Polynomial<E::Fr, Coefficients>, SynthesisError>;
    /// Verifier-side: scalar contribution to the verification equation at `at`.
    fn contribute_into_verification_equation(
        &self,
        domain_size: usize,
        at: E::Fr,
        queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
        challenges: &[E::Fr],
    ) -> Result<E::Fr, SynthesisError>;
    /// Index of the setup polynomial that absorbs public inputs, if any.
    fn put_public_inputs_into_selector_id(&self) -> Option<usize>;
    /// Clones through the trait object (object-safe substitute for `Clone`).
    fn box_clone(&self) -> Box<dyn GateInternal<E>>;
    /// Verifier-side: this gate's contribution to the linearization commitment.
    fn contribute_into_linearization_commitment(
        &self,
        domain_size: usize,
        at: E::Fr,
        queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
        commitments_storage: &std::collections::HashMap<PolyIdentifier, E::G1Affine>,
        challenges: &[E::Fr],
    ) -> Result<E::G1, SynthesisError>;
}
/// Sized, clonable, hashable extension of `GateInternal` for concrete gate types.
pub trait Gate<E: Engine>: GateInternal<E>
    + Sized
    + Clone
    + std::hash::Hash
    + std::default::Default
{
    /// Upcast to the object-safe interface by reference.
    fn as_internal(&self) -> &dyn GateInternal<E> {
        self as &dyn GateInternal<E>
    }

    /// Box and upcast to the object-safe interface.
    fn into_internal(self) -> Box<dyn GateInternal<E>> {
        Box::from(self) as Box<dyn GateInternal<E>>
    }
}
use serde::{Serialize, Deserialize};
use smallvec::SmallVec;
/// Default inline capacity for per-row variable/coefficient vectors.
pub const DEFAULT_SMALLVEC_CAPACITY: usize = 8;

/// The circuit's primary arithmetic gate. Extends `Gate` with the row layout
/// (where multiplicative, linear, constant and next-step coefficients live)
/// and with public-input-aware prover/verifier contributions.
pub trait MainGate<E: Engine>: Gate<E> {
    const NUM_LINEAR_TERMS: usize;
    const NUM_VARIABLES: usize;
    const NUM_VARIABLES_ON_NEXT_STEP: usize;

    /// Coefficient slot(s) of the multiplicative term in the row layout.
    fn range_of_multiplicative_term() -> std::ops::Range<usize>;
    /// Coefficient slots of the linear terms.
    fn range_of_linear_terms() -> std::ops::Range<usize>;
    /// Coefficient slot of the constant term.
    fn index_for_constant_term() -> usize;
    /// Coefficient slots applied to next-row variables.
    fn range_of_next_step_linear_terms() -> std::ops::Range<usize>;
    /// Lowers a `MainGateTerm` into per-row variable and coefficient vectors,
    /// using `padding` for unused variable slots.
    fn format_term(instance: MainGateTerm<E>, padding: Variable) -> Result<(SmallVec<[Variable; DEFAULT_SMALLVEC_CAPACITY]>, SmallVec<[E::Fr; DEFAULT_SMALLVEC_CAPACITY]>), SynthesisError>;
    /// Like `format_term`, but keeps duplicated variables as separate entries.
    fn format_linear_term_with_duplicates(instance: MainGateTerm<E>, padding: Variable) -> Result<(SmallVec<[Variable; DEFAULT_SMALLVEC_CAPACITY]>, SmallVec<[E::Fr; DEFAULT_SMALLVEC_CAPACITY]>), SynthesisError>;
    /// Variables to write for a fully padded (dummy) row.
    fn dummy_vars_to_inscribe(dummy: Variable) -> SmallVec<[Variable; DEFAULT_SMALLVEC_CAPACITY]>;
    /// Coefficient vector used for padding rows.
    fn empty_coefficients() -> SmallVec<[E::Fr; DEFAULT_SMALLVEC_CAPACITY]>;
    /// Prover-side quotient contribution including public-input handling.
    fn contribute_into_quotient_for_public_inputs<'a, 'b>(
        &self,
        domain_size: usize,
        public_inputs: &[E::Fr],
        poly_storage: &mut AssembledPolynomialStorage<'b, E>,
        monomial_storage: & AssembledPolynomialStorageForMonomialForms<'a, E>,
        challenges: &[E::Fr],
        omegas_bitreversed: &BitReversedOmegas<E::Fr>,
        omegas_inv_bitreversed: &OmegasInvBitreversed<E::Fr>,
        worker: &Worker
    ) -> Result<Polynomial<E::Fr, Values>, SynthesisError>;
    /// Prover-side linearization contribution including public-input handling.
    fn contribute_into_linearization_for_public_inputs<'a>(
        &self,
        domain_size: usize,
        public_inputs: &[E::Fr],
        at: E::Fr,
        queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
        monomials_storage: & AssembledPolynomialStorageForMonomialForms<'a, E>,
        challenges: &[E::Fr],
        worker: &Worker
    ) -> Result<Polynomial<E::Fr, Coefficients>, SynthesisError>;
    /// Scalar correction the public inputs add to the quotient at point `at`.
    fn add_inputs_into_quotient(
        &self,
        domain_size: usize,
        public_inputs: &[E::Fr],
        at: E::Fr,
        challenges: &[E::Fr],
    ) -> Result<E::Fr, SynthesisError>;
    // fn contribute_into_verification_equation_for_public_inputs(
    //     &self,
    //     domain_size: usize,
    //     public_inputs: &[E::Fr],
    //     at: E::Fr,
    //     queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
    //     challenges: &[E::Fr],
    // ) -> Result<E::Fr, SynthesisError>;
    /// Verifier-side linearization-commitment contribution with public inputs.
    fn contribute_into_linearization_commitment_for_public_inputs(
        &self,
        domain_size: usize,
        public_inputs: &[E::Fr],
        at: E::Fr,
        queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
        commitments_storage: &std::collections::HashMap<PolyIdentifier, E::G1Affine>,
        challenges: &[E::Fr],
    ) -> Result<E::G1, SynthesisError>;
}
// Gates are hashed/compared by identity-like metadata (type id, name, degree),
// not by field contents, so boxed trait objects can serve as map keys.
impl<E: Engine> std::hash::Hash for dyn GateInternal<E> {
    fn hash<H>(&self, state: &mut H) where H: std::hash::Hasher {
        self.type_id().hash(state);
        self.name().hash(state);
        self.degree().hash(state);
    }
}

impl<E: Engine> PartialEq for dyn GateInternal<E> {
    fn eq(&self, other: &Self) -> bool {
        self.type_id() == other.type_id() &&
        self.name() == other.name() &&
        self.degree() == other.degree()
    }
}

impl<E: Engine> Eq for dyn GateInternal<E> {}

// Object-safe cloning for boxed gates, delegating to `box_clone`.
impl<E: Engine> Clone for Box<dyn GateInternal<E>> {
    fn clone(&self) -> Self {
        self.box_clone()
    }
}
/// Sum of multiplicative terms forming one linear combination in a constraint.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct LinearCombinationOfTerms(pub Vec<PolynomialMultiplicativeTerm>);

impl LinearCombinationOfTerms {
    /// Read-only view of the underlying terms.
    fn terms(&self) -> &[PolynomialMultiplicativeTerm] {
        &self.0[..]
    }
}

/// One addend of a main-gate expression: a coefficient times a product of
/// variables, a single scaled variable, or a bare constant.
#[derive(Clone, Debug)]
pub enum ArithmeticTerm<E: Engine>{
    Product(smallvec::SmallVec<[Variable; 2]>, E::Fr),
    SingleVariable(Variable, E::Fr),
    Constant(E::Fr),
}
impl<E: Engine> ArithmeticTerm<E> {
    /// Term `1 * var`.
    pub fn from_variable(var: Variable) -> Self {
        ArithmeticTerm::SingleVariable(var, E::Fr::one())
    }

    /// Term `coeff * var`.
    pub fn from_variable_and_coeff(var: Variable, coeff: E::Fr) -> Self {
        ArithmeticTerm::SingleVariable(var, coeff)
    }

    /// Constant term `coeff`.
    pub fn constant(coeff: E::Fr) -> Self {
        ArithmeticTerm::Constant(coeff)
    }

    /// Multiplies this term by one more variable, always yielding a `Product`:
    /// the existing factors (if any) are kept in order and `other` is appended.
    pub fn mul_by_variable(self, other: Variable) -> Self {
        let (mut factors, scale) = match self {
            ArithmeticTerm::Product(factors, scale) => (factors, scale),
            ArithmeticTerm::SingleVariable(var, scale) => {
                (smallvec::smallvec![var], scale)
            },
            ArithmeticTerm::Constant(scale) => {
                (smallvec::SmallVec::new(), scale)
            },
        };
        factors.push(other);

        ArithmeticTerm::Product(factors, scale)
    }

    /// Multiplies this term's coefficient by `by` in place, whatever its shape.
    pub fn scale(&mut self, by: &E::Fr) {
        let coeff = match self {
            ArithmeticTerm::Product(_, ref mut coeff)
            | ArithmeticTerm::SingleVariable(_, ref mut coeff)
            | ArithmeticTerm::Constant(ref mut coeff) => coeff,
        };
        coeff.mul_assign(by);
    }
}
// Inline capacity for term vectors; typical main-gate rows carry few terms.
const DEFAULT_SMALLVEC_CAPACITY_FOR_TERM: usize = 8;

/// Accumulator for one main-gate row: arithmetic terms plus bookkeeping used
/// for single-variable deduplication and constant-term sanity checks.
#[derive(Clone, Debug)]
pub struct MainGateTerm<E: Engine>{
    // All accumulated terms, in insertion order.
    pub(crate) terms: smallvec::SmallVec<[ArithmeticTerm<E>; DEFAULT_SMALLVEC_CAPACITY_FOR_TERM]>,
    // Maps a variable to the index of its SingleVariable term, for coefficient merging.
    pub(crate) vars_scratch: std::collections::HashMap<Variable, usize>,
    pub(crate) num_multiplicative_terms: usize,
    pub(crate) num_constant_terms: usize
}
impl<E: Engine> MainGateTerm<E> {
    /// Creates an empty accumulator.
    pub fn new() -> Self {
        Self {
            terms: smallvec::smallvec![],
            vars_scratch: std::collections::HashMap::with_capacity(DEFAULT_SMALLVEC_CAPACITY_FOR_TERM),
            num_multiplicative_terms: 0,
            num_constant_terms: 0
        }
    }

    /// NOTE(review): despite the name, this counts ALL accumulated terms,
    /// including a constant one — confirm against callers before changing.
    pub fn len_without_constant(&self) -> usize {
        self.terms.len()
    }

    /// Adds a term. A `SingleVariable` term referring to an already-present
    /// variable is merged by summing coefficients instead of being pushed again.
    pub fn add_assign(&mut self, other: ArithmeticTerm<E>) {
        match other {
            ArithmeticTerm::Product(_, _) => {
                self.num_multiplicative_terms += 1;
                self.terms.push(other);
            },
            ArithmeticTerm::SingleVariable(var, coeff) => {
                // deduplicate: merge into the existing term for this variable if any
                // (single lookup instead of the previous get + get().unwrap() pair)
                if let Some(&index) = self.vars_scratch.get(&var) {
                    match &mut self.terms[index] {
                        ArithmeticTerm::SingleVariable(_, ref mut c) => {
                            c.add_assign(&coeff);
                        },
                        _ => {
                            unreachable!()
                        }
                    }
                } else {
                    // just push and remember where this variable lives
                    self.vars_scratch.insert(var, self.terms.len());
                    self.terms.push(other);
                }
            },
            ArithmeticTerm::Constant(_) => {
                self.num_constant_terms += 1;
                self.terms.push(other);
            },
        }

        // Fix: the message previously read "must duplicate constants" — the inverse
        // of the invariant it guards (at most one constant term is allowed), and
        // inconsistent with the sub_assign methods below.
        debug_assert!(self.num_constant_terms <= 1, "must not duplicate constants");
    }

    /// Adds a term without variable deduplication: duplicates of the same
    /// variable are kept as separate terms.
    pub fn add_assign_allowing_duplicates(&mut self, other: ArithmeticTerm<E>) {
        match other {
            ArithmeticTerm::Product(_, _) => {
                self.num_multiplicative_terms += 1;
                self.terms.push(other);
            },
            ArithmeticTerm::SingleVariable(_, _) => {
                // we just push and don't even register this variable for deduplication
                self.terms.push(other);
            },
            ArithmeticTerm::Constant(_) => {
                self.num_constant_terms += 1;
                self.terms.push(other);
            },
        }

        // Fix: message inverted, as in `add_assign`.
        debug_assert!(self.num_constant_terms <= 1, "must not duplicate constants");
    }

    /// Subtracts a term: negates its coefficient and delegates to `add_assign`.
    pub fn sub_assign(&mut self, mut other: ArithmeticTerm<E>) {
        match &mut other {
            ArithmeticTerm::Product(_, ref mut coeff) => {
                coeff.negate();
            },
            ArithmeticTerm::SingleVariable(_, ref mut coeff) => {
                coeff.negate();
            },
            ArithmeticTerm::Constant(ref mut coeff) => {
                coeff.negate();
            },
        }

        self.add_assign(other);

        debug_assert!(self.num_constant_terms <= 1, "must not duplicate constants");
    }

    /// Subtracts a term without deduplication; see `add_assign_allowing_duplicates`.
    pub fn sub_assign_allowing_duplicates(&mut self, mut other: ArithmeticTerm<E>) {
        match &mut other {
            ArithmeticTerm::Product(_, ref mut coeff) => {
                coeff.negate();
            },
            ArithmeticTerm::SingleVariable(_, ref mut coeff) => {
                coeff.negate();
            },
            ArithmeticTerm::Constant(ref mut coeff) => {
                coeff.negate();
            },
        }

        self.add_assign_allowing_duplicates(other);

        debug_assert!(self.num_constant_terms <= 1, "must not duplicate constants");
    }
}
/// Fetches a (possibly dilated) polynomial from the LDE storage, panicking if
/// absent. Undilated state/witness/setup polynomials come from their dedicated
/// maps; any dilated query must already have been computed into `scratch_space`.
pub fn get_from_map_unchecked<'a, 'b: 'a, E: Engine>(
    key_with_dilation: PolynomialInConstraint,
    ldes_map: &'a AssembledPolynomialStorage<'b, E>
) -> &'a Polynomial<E::Fr, Values> {
    let (key, dilation_value) = key_with_dilation.into_id_and_raw_dilation();

    let r = if dilation_value == 0 {
        match key {
            k @ PolyIdentifier::VariablesPolynomial(..) => {
                ldes_map.state_map.get(&k).expect(&format!("Must get poly {:?} from ldes storage", &k)).as_ref()
            },
            k @ PolyIdentifier::WitnessPolynomial(..) => {
                ldes_map.witness_map.get(&k).expect(&format!("Must get poly {:?} from ldes storage", &k)).as_ref()
            },
            k @ PolyIdentifier::GateSetupPolynomial(..) => {
                ldes_map.setup_map.get(&k).expect(&format!("Must get poly {:?} from ldes storage", &k)).as_ref()
            },
            _ => {
                unreachable!();
            }
        }
    } else {
        ldes_map.scratch_space.get(&key_with_dilation).expect(&format!("Must get poly {:?} from lde storage", &key_with_dilation)).as_ref()
    };

    r
}
/// Ensures that the bitreversed LDE of the polynomial identified by
/// `key_with_dilation` is present in `ldes_map`, computing it from the
/// monomial form in `monomials_map` when necessary.
///
/// Strategy, in order of preference:
/// 1. already present (state/witness/setup map for dilation 0, otherwise
///    `scratch_space`) — nothing to do;
/// 2. the undilated LDE exists — clone-shift (rotate) it and cache the
///    rotated copy in `scratch_space`;
/// 3. otherwise run a bitreversed LDE over the monomial form, rotate it if
///    the dilation is non-zero, and insert into the appropriate map.
///
/// NOTE(review): `domain_size` is not used anywhere in this body — confirm
/// whether a size assertion against it was intended.
pub fn ensure_in_map_or_create<'a, 'b, E: Engine>(
worker: &Worker,
key_with_dilation: PolynomialInConstraint,
domain_size: usize,
omegas_bitreversed: &BitReversedOmegas<E::Fr>,
lde_factor: usize,
coset_factor: E::Fr,
monomials_map: & AssembledPolynomialStorageForMonomialForms<'a, E>,
ldes_map: &mut AssembledPolynomialStorage<'b, E>
) -> Result<(), SynthesisError> {
assert!(ldes_map.is_bitreversed);
assert_eq!(ldes_map.lde_factor, lde_factor);
let (key, dilation_value) = key_with_dilation.into_id_and_raw_dilation();
// step 1: check whether the requested (key, dilation) already exists
let mut contains_in_scratch_or_maps = false;
if dilation_value == 0 {
match key {
k @ PolyIdentifier::VariablesPolynomial(..) => {
if ldes_map.state_map.get(&k).is_some() {
contains_in_scratch_or_maps = true;
}
},
k @ PolyIdentifier::WitnessPolynomial(..) => {
if ldes_map.witness_map.get(&k).is_some() {
contains_in_scratch_or_maps = true;
}
},
k @ PolyIdentifier::GateSetupPolynomial(..) => {
if ldes_map.setup_map.get(&k).is_some() {
contains_in_scratch_or_maps = true;
}
},
_ => {
unreachable!();
}
}
} else {
// dilated polynomials live under the full dilated key in the scratch space
if ldes_map.scratch_space.get(&key_with_dilation).is_some() {
contains_in_scratch_or_maps = true;
}
};
if !contains_in_scratch_or_maps {
// optimistic case: we have already calculated value without dilation
// but now need to just rotate
let lde_without_dilation = match key {
k @ PolyIdentifier::VariablesPolynomial(..) => {
ldes_map.state_map.get(&k)
},
k @ PolyIdentifier::WitnessPolynomial(..) => {
ldes_map.witness_map.get(&k)
},
k @ PolyIdentifier::GateSetupPolynomial(..) => {
ldes_map.setup_map.get(&k)
},
_ => {
unreachable!();
}
};
let mut done = false;
// step 2: rotate the existing undilated LDE when available
let rotated = if let Some(lde) = lde_without_dilation.as_ref() {
// in the bitreversed LDE a time dilation of d corresponds to a
// shift of d * lde_factor entries
let rotation_factor = dilation_value * lde_factor;
let f = lde.as_ref().clone_shifted_assuming_bitreversed(rotation_factor, worker)?;
drop(lde);
Some(f)
} else {
None
};
// release the immutable borrow of ldes_map before inserting below
drop(lde_without_dilation);
if let Some(f) = rotated {
let proxy = PolynomialProxy::from_owned(f);
ldes_map.scratch_space.insert(key_with_dilation, proxy);
done = true;
};
if !done {
// step 3: perform LDE from the monomial form and push
let monomial = match key {
k @ PolyIdentifier::VariablesPolynomial(..) => {
monomials_map.state_map.get(&k).unwrap().as_ref()
},
k @ PolyIdentifier::WitnessPolynomial(..) => {
monomials_map.witness_map.get(&k).unwrap().as_ref()
},
k @ PolyIdentifier::GateSetupPolynomial(..) => {
monomials_map.setup_map.get(&k).unwrap().as_ref()
},
_ => {
unreachable!();
}
};
let lde = monomial.clone().bitreversed_lde_using_bitreversed_ntt(
&worker,
lde_factor,
omegas_bitreversed,
&coset_factor
)?;
let final_lde = if dilation_value != 0 {
let rotation_factor = dilation_value * lde_factor;
let f = lde.clone_shifted_assuming_bitreversed(rotation_factor, worker)?;
drop(lde);
f
} else {
lde
};
// insert back
let proxy = PolynomialProxy::from_owned(final_lde);
if dilation_value == 0 {
// undilated results go to the typed maps so step 2 can reuse them later
match key {
k @ PolyIdentifier::VariablesPolynomial(..) => {
ldes_map.state_map.insert(k, proxy);
},
k @ PolyIdentifier::WitnessPolynomial(..) => {
ldes_map.witness_map.insert(k, proxy);
},
k @ PolyIdentifier::GateSetupPolynomial(..) => {
ldes_map.setup_map.insert(k, proxy);
},
_ => {
unreachable!();
}
}
} else {
ldes_map.scratch_space.insert(key_with_dilation, proxy);
};
done = true;
}
assert!(done);
}
Ok(())
}
/// A fixed-capacity (64-slot) bitmap used for tracking occupied indexes.
pub(crate) struct SimpleBitmap(u64, usize);

impl SimpleBitmap {
    /// Creates an empty bitmap with all 64 slots free.
    pub(crate) fn new() -> Self {
        SimpleBitmap(0u64, 0)
    }

    /// Returns the index of the lowest unset bit.
    /// Does not mark the slot; call `set` to claim it.
    /// Panics when all 64 bits are already taken.
    pub(crate) fn get_next_unused(&mut self) -> usize {
        for idx in 0..64 {
            if !self.get(idx) {
                return idx;
            }
        }
        unreachable!()
    }

    /// Tests whether bit `idx` is set.
    pub(crate) fn get(&self, idx: usize) -> bool {
        (self.0 & (1u64 << idx)) != 0
    }

    /// Sets bit `idx`.
    pub(crate) fn set(&mut self, idx: usize) {
        self.0 |= 1u64 << idx;
    }
}
/// Compile-time shape of a PLONK constraint system: column counts and which
/// optional features (witness columns, custom gates, next-row access) are on.
pub trait PlonkConstraintSystemParams<E: Engine>: Sized + Copy + Clone + Send + Sync {
// number of state (variable) columns per trace row
const STATE_WIDTH: usize;
// number of witness-only columns per trace row
const WITNESS_WIDTH: usize;
const HAS_WITNESS_POLYNOMIALS: bool;
const HAS_CUSTOM_GATES: bool;
// whether gates may reference values on the next trace row
const CAN_ACCESS_NEXT_TRACE_STEP: bool;
}
use std::sync::Arc;
/// The main circuit-building interface: variable allocation, gate placement
/// (single or batched per trace step), and lookup-table management.
pub trait ConstraintSystem<E: Engine> {
type Params: PlonkConstraintSystemParams<E>;
type MainGate: MainGate<E>;
// allocate a variable
fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>;
// allocate an input variable
fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>;
/// Places a single gate on its own trace step: wraps `new_gate_in_batch`
/// in a begin/end batch pair.
fn new_single_gate_for_trace_step<G: Gate<E>>(&mut self,
equation: &G,
coefficients_assignments: &[E::Fr],
variables_assignments: &[Variable],
witness_assignments: &[E::Fr]
) -> Result<(), SynthesisError> {
self.begin_gates_batch_for_step()?;
self.new_gate_in_batch(
equation,
coefficients_assignments,
variables_assignments,
witness_assignments
)?;
self.end_gates_batch_for_step()
}
fn get_main_gate(&self) -> &Self::MainGate;
/// Formats `term` into the main gate's (variables, coefficients) layout,
/// padding with the dummy variable, and places it as a single gate.
fn allocate_main_gate(&mut self, term: MainGateTerm<E>) -> Result<(), SynthesisError> {
let (vars, coeffs) = Self::MainGate::format_term(term, Self::get_dummy_variable())?;
let mg = Self::MainGate::default();
self.new_single_gate_for_trace_step(
&mg,
&coeffs,
&vars,
&[]
)
}
fn begin_gates_batch_for_step(&mut self) -> Result<(), SynthesisError>;
fn new_gate_in_batch<G: Gate<E>>(&mut self,
equation: &G,
coefficients_assignments: &[E::Fr],
variables_assignments: &[Variable],
witness_assignments: &[E::Fr]
) -> Result<(), SynthesisError>;
fn end_gates_batch_for_step(&mut self) -> Result<(), SynthesisError>;
// place variable/witness assignments on a step without applying any gate
fn allocate_variables_without_gate(&mut self,
variables_assignments: &[Variable],
witness_assignments: &[E::Fr]
) -> Result<(), SynthesisError>;
// default: no witness available (e.g. setup-only synthesis)
fn get_value(&self, _variable: Variable) -> Result<E::Fr, SynthesisError> {
Err(SynthesisError::AssignmentMissing)
}
fn get_dummy_variable() -> Variable;
// cached, constrained zero/one constants
fn get_explicit_zero(&mut self) -> Result<Variable, SynthesisError>;
fn get_explicit_one(&mut self) -> Result<Variable, SynthesisError>;
// lookup-table registration and application
fn add_table(&mut self, table: LookupTableApplication<E>) -> Result<Arc<LookupTableApplication<E>>, SynthesisError>;
fn get_table(&self, functional_name: &str) -> Result<Arc<LookupTableApplication<E>>, SynthesisError>;
fn add_multitable(&mut self, table: MultiTableApplication<E>) -> Result<(), SynthesisError>;
fn get_multitable(&self, functional_name: &str) -> Result<Arc<MultiTableApplication<E>>, SynthesisError>;
fn apply_single_lookup_gate(&mut self, variables: &[Variable], gate: Arc<LookupTableApplication<E>>) -> Result<(), SynthesisError>;
fn apply_multi_lookup_gate(&mut self, variables: &[Variable], gate: Arc<MultiTableApplication<E>>) -> Result<(), SynthesisError>;
fn get_current_step_number(&self) -> usize;
fn get_current_aux_gate_number(&self) -> usize;
}
/// Parameters for a width-4 PLONK system with next-row access and no
/// custom gates or witness columns.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PlonkCsWidth4WithNextStepParams;
impl<E: Engine> PlonkConstraintSystemParams<E> for PlonkCsWidth4WithNextStepParams {
const STATE_WIDTH: usize = 4;
const WITNESS_WIDTH: usize = 0;
const HAS_WITNESS_POLYNOMIALS: bool = false;
const HAS_CUSTOM_GATES: bool = false;
const CAN_ACCESS_NEXT_TRACE_STEP: bool = true;
}
/// Parameters for a width-4 PLONK system with next-row access AND custom
/// gates enabled.
// `PartialEq, Eq` added for consistency with `PlonkCsWidth4WithNextStepParams`
// (purely additive, no caller impact).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PlonkCsWidth4WithNextStepAndCustomGatesParams;

impl<E: Engine> PlonkConstraintSystemParams<E> for PlonkCsWidth4WithNextStepAndCustomGatesParams {
    const STATE_WIDTH: usize = 4;
    const WITNESS_WIDTH: usize = 0;
    const HAS_WITNESS_POLYNOMIALS: bool = false;
    const HAS_CUSTOM_GATES: bool = true;
    const CAN_ACCESS_NEXT_TRACE_STEP: bool = true;
}
// Allocates a Vec either with the global allocator or, when the "allocator"
// feature is on, with a custom allocator. The feature-gated variant relies on
// a generic parameter `A: Allocator + Default` being in scope at the
// expansion site.
#[cfg(not(feature="allocator"))]
macro_rules! new_vec_with_allocator {
($capacity:expr) => {
Vec::with_capacity($capacity)
}
}
#[cfg(feature="allocator")]
macro_rules! new_vec_with_allocator {
($capacity:expr) => {
Vec::with_capacity_in($capacity, A::default())
}
}
use crate::plonk::polynomials::*;
/// Per-column storage of raw trace data keyed by `PolyIdentifier`:
/// state columns hold `Variable`s, witness and setup columns hold field
/// elements. State and setup vectors are optionally backed by a custom
/// allocator (with dedicated serde routines), witness always uses `Global`.
#[derive(Clone, serde::Serialize, serde::Deserialize)]
#[cfg_attr(feature = "allocator",serde(bound(serialize = "A: serde::Serialize", deserialize = "'de: 'static, A: serde::Deserialize<'de>")))]
#[cfg_attr(not(feature = "allocator"), serde(bound(deserialize = "'de: 'static")))]
pub struct PolynomialStorage<E: Engine, #[cfg(feature = "allocator")]A: Allocator + Default = Global> {
#[cfg(feature = "allocator")]
#[cfg_attr(feature = "allocator", serde(serialize_with = "serialize_hashmap_with_allocator"))]
#[cfg_attr(feature = "allocator", serde(deserialize_with = "deserialize_hashmap_with_allocator"))]
pub state_map: std::collections::HashMap<PolyIdentifier, Vec<Variable, A>>,
#[cfg(not(feature = "allocator"))]
pub state_map: std::collections::HashMap<PolyIdentifier, Vec<Variable>>,
pub witness_map: std::collections::HashMap<PolyIdentifier, Vec<E::Fr>>,
#[cfg(feature = "allocator")]
#[cfg_attr(feature = "allocator", serde(serialize_with = "serialize_hashmap_with_allocator"))]
#[cfg_attr(feature = "allocator", serde(deserialize_with = "deserialize_hashmap_with_allocator"))]
pub setup_map: std::collections::HashMap<PolyIdentifier, Vec<E::Fr, A>>,
#[cfg(not(feature = "allocator"))]
pub setup_map: std::collections::HashMap<PolyIdentifier, Vec<E::Fr>>,
}
// Emits the inherent impl of `PolynomialStorage` twice, once per allocator
// feature configuration, so the method bodies are written only once.
macro_rules! impl_poly_storage {
(impl PolynomialStorage $inherent:tt) => {
#[cfg(feature = "allocator")]
impl<E: Engine, A: Allocator + Default> PolynomialStorage<E, A> $inherent
#[cfg(not(feature = "allocator"))]
impl<E: Engine> PolynomialStorage<E> $inherent
};
}
impl_poly_storage! {
impl PolynomialStorage {
/// Creates an empty storage.
pub fn new() -> Self {
Self {
state_map: std::collections::HashMap::new(),
witness_map: std::collections::HashMap::new(),
setup_map: std::collections::HashMap::new(),
}
}
/// Creates a storage with the 4 state columns pre-allocated to `size`,
/// for use by a proving assembly (witness/setup maps stay empty).
pub fn new_specialized_for_proving_assembly_and_state_4(size: usize) -> Self {
// the domain must fit into the field's two-adic subgroup
assert!(size <= 1 << <E::Fr as PrimeField>::S);
let mut state_map = std::collections::HashMap::new();
for idx in 0..4{
state_map.insert(PolyIdentifier::VariablesPolynomial(idx), new_vec_with_allocator!(size));
}
Self {
state_map,
witness_map: std::collections::HashMap::new(),
setup_map: std::collections::HashMap::new(),
}
}
/// Reads a setup value at row `n` shifted by the constraint's dilation.
/// State polynomials must be read via `get_variable`; witness reads are
/// not implemented.
pub fn get_value(&self, poly: &PolynomialInConstraint, n: usize) -> Result<E::Fr, SynthesisError> {
match poly {
PolynomialInConstraint(PolyIdentifier::VariablesPolynomial(_), TimeDilation(_)) => {
unreachable!("should not try to get value of the state polynomial, get variable first instead");
},
PolynomialInConstraint(PolyIdentifier::GateSetupPolynomial(gate_descr, idx), TimeDilation(dilation)) => {
// dilation shifts the read forward in trace time
let final_index = n + dilation;
let identifier = PolyIdentifier::GateSetupPolynomial(gate_descr, *idx);
let value = *self.setup_map
.get(&identifier)
.ok_or(SynthesisError::AssignmentMissing)?
.get(final_index)
.ok_or(SynthesisError::AssignmentMissing)?;
Ok(value)
},
PolynomialInConstraint(PolyIdentifier::WitnessPolynomial(_), TimeDilation(_)) => {
unimplemented!()
},
_ => {
unreachable!();
}
}
}
/// Reads the `Variable` placed in a state column at row `n` shifted by
/// the constraint's dilation. Only valid for state polynomials.
pub fn get_variable(&self, poly: &PolynomialInConstraint, n: usize) -> Result<Variable, SynthesisError> {
match poly {
PolynomialInConstraint(PolyIdentifier::VariablesPolynomial(idx), TimeDilation(dilation)) => {
let final_index = n + dilation;
let identifier = PolyIdentifier::VariablesPolynomial(*idx);
let value = *self.state_map
.get(&identifier)
.ok_or(SynthesisError::AssignmentMissing)?
.get(final_index)
.ok_or(SynthesisError::AssignmentMissing)?;
Ok(value)
},
_ => {
unreachable!("should not try to get variable of setup or witness polynomial");
}
}
}
}
}
/// Maps each gate type to a bit vector marking the trace rows on which the
/// gate is applied.
#[derive(Clone, serde::Serialize, serde::Deserialize)]
#[serde(bound(serialize = "dyn GateInternal<E>: serde::Serialize", deserialize = "'de: 'static, dyn GateInternal<E>: serde::Deserialize<'de>"))]
pub struct GateDensityStorage<E: Engine>(pub std::collections::HashMap<Box<dyn GateInternal<E>>, BitVec>);

impl<E: Engine> Default for GateDensityStorage<E> {
    fn default() -> Self {
        // same as `new`: an empty density map
        Self::new()
    }
}

impl<E: Engine> GateDensityStorage<E> {
    /// Creates an empty per-gate density storage.
    pub fn new() -> Self {
        GateDensityStorage(std::collections::HashMap::new())
    }
}
/// Maps each gate type to its list of constant coefficients.
pub struct GateConstantCoefficientsStorage<E: Engine>(pub std::collections::HashMap<Box<dyn GateInternal<E>>, Vec<E::Fr>>);

impl<E: Engine> GateConstantCoefficientsStorage<E> {
    /// Creates an empty coefficients storage.
    pub fn new() -> Self {
        GateConstantCoefficientsStorage(std::collections::HashMap::new())
    }
}
// Assembly flavors specialized by synthesis mode (presumably: testing
// produces both setup and witness, proving only witness, setup only setup —
// confirm against the SynthesisMode definitions).
pub type TrivialAssembly<E, P, MG> = Assembly<E, P, MG, SynthesisModeTesting>;
pub type ProvingAssembly<E, P, MG> = Assembly<E, P, MG, SynthesisModeProve>;
pub type SetupAssembly<E, P, MG> = Assembly<E, P, MG, SynthesisModeGenerateSetup>;
/// The concrete constraint-system builder: accumulates gates, variable and
/// witness assignments, and lookup-table data, specialized by circuit params
/// `P`, main gate `MG`, synthesis mode `S`, and (optionally) allocator `A`.
/// NOTE: the "assingments" spelling is kept as-is — it is part of the public
/// field names.
#[derive(Clone, serde::Serialize, serde::Deserialize)]
#[cfg_attr(feature = "allocator", serde(bound(serialize = "MG: serde::Serialize, A: serde::Serialize", deserialize = "'de: 'static, MG: serde::Deserialize<'de>, A: serde::Deserialize<'de>")))]
#[cfg_attr(not(feature = "allocator"), serde(bound(serialize = "MG: serde::Serialize", deserialize = "'de: 'static, MG: serde::Deserialize<'de>")))]
pub struct Assembly<E: Engine, P: PlonkConstraintSystemParams<E>, MG: MainGate<E>, S: SynthesisMode, #[cfg(feature = "allocator")]A: Allocator + Default = Global> {
// gate data for public-input rows vs. auxiliary rows
#[cfg(feature = "allocator")]
pub inputs_storage: PolynomialStorage<E, A>,
#[cfg(not(feature = "allocator"))]
pub inputs_storage: PolynomialStorage<E>,
#[cfg(feature = "allocator")]
pub aux_storage: PolynomialStorage<E, A>,
#[cfg(not(feature = "allocator"))]
pub aux_storage: PolynomialStorage<E>,
pub num_input_gates: usize,
pub num_aux_gates: usize,
pub max_constraint_degree: usize,
pub main_gate: MG,
// values of allocated input/aux variables (only filled when the
// synthesis mode produces a witness)
pub input_assingments: Vec<E::Fr>,
#[cfg(feature = "allocator")]
#[cfg_attr(feature = "allocator", serde(serialize_with = "serialize_vec_with_allocator"))]
#[cfg_attr(feature = "allocator", serde(deserialize_with = "deserialize_vec_with_allocator"))]
pub aux_assingments: Vec<E::Fr, A>,
#[cfg(not(feature = "allocator"))]
pub aux_assingments: Vec<E::Fr>,
pub num_inputs: usize,
pub num_aux: usize,
// set between begin/end of a gate batch for the current trace row
pub trace_step_for_batch: Option<usize>,
pub is_finalized: bool,
#[serde(skip)]
pub gates: std::collections::HashSet<Box<dyn GateInternal<E>>>,
pub all_queried_polys_in_constraints: std::collections::HashSet<PolynomialInConstraint>,
// pub sorted_setup_polynomial_ids: Vec<PolyIdentifier>,
#[serde(skip)]
pub sorted_gates: Vec<Box<dyn GateInternal<E>>>,
#[serde(skip)]
pub aux_gate_density: GateDensityStorage<E>,
// cached constrained zero/one variables (see get_explicit_zero/one)
pub explicit_zero_variable: Option<Variable>,
pub explicit_one_variable: Option<Variable>,
// lookup-table bookkeeping
#[serde(skip)]
pub tables: Vec<Arc<LookupTableApplication<E>>>,
#[serde(skip)]
pub multitables: Vec<Arc<MultiTableApplication<E>>>,
pub table_selectors: std::collections::HashMap<String, BitVec>,
pub multitable_selectors: std::collections::HashMap<String, BitVec>,
pub table_ids_poly: Vec<E::Fr>,
pub total_length_of_all_tables: usize,
pub individual_table_canonical_sorted_entries: std::collections::HashMap<String, Vec<[E::Fr; 3]>>,
pub individual_table_entries_lookups: std::collections::HashMap<String, std::collections::HashMap<[E::Fr; 3], usize>>,
// per-table list of looked-up row indexes (into the canonical sorted entries)
#[cfg(feature = "allocator")]
#[cfg_attr(feature = "allocator", serde(serialize_with = "serialize_hashmap_with_allocator"))]
#[cfg_attr(feature = "allocator", serde(deserialize_with = "deserialize_hashmap_with_allocator"))]
pub individual_table_entries: std::collections::HashMap<String, Vec<u32, A>>,
#[cfg(not(feature = "allocator"))]
pub individual_table_entries: std::collections::HashMap<String, Vec<u32>>,
#[cfg(feature = "allocator")]
#[cfg_attr(feature = "allocator", serde(serialize_with = "serialize_2d_vec_with_allocator"))]
#[cfg_attr(feature = "allocator", serde(deserialize_with = "deserialize_2d_vec_with_allocator"))]
pub reusable_buffer_for_lookup_entries: Vec<Vec<u32, A>>,
#[cfg(not(feature = "allocator"))]
pub reusable_buffer_for_lookup_entries: Vec<Vec<u32>>,
pub individual_multitable_entries: std::collections::HashMap<String, Vec<Vec<E::Fr>>>,
pub known_table_ids: HashMap<String, E::Fr>,
pub known_table_names: Vec<String>,
pub num_table_lookups: usize,
pub num_multitable_lookups: usize,
_marker_p: std::marker::PhantomData<P>,
_marker_s: std::marker::PhantomData<S>,
#[cfg(feature = "allocator")]
_marker_a: std::marker::PhantomData<A>,
}
cfg_if!{
if #[cfg(feature = "allocator")]{
use serde::de::{Visitor, SeqAccess, MapAccess};
/// Serde visitor that collects a sequence into a `Vec<T, B>` using a
/// custom allocator `B`.
struct VecVisitor<T, B: Allocator> {
    m1: PhantomData<T>,
    m2: PhantomData<B>,
}

impl<T, B: Allocator> VecVisitor<T, B> {
    pub fn new() -> Self {
        VecVisitor { m1: PhantomData, m2: PhantomData }
    }
}
impl<'de, T, B> Visitor<'de> for VecVisitor<T, B>
where
    T: Deserialize<'de>,
    B: Allocator + Default,
{
    type Value = Vec<T, B>;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("a sequence")
    }

    /// Collects sequence elements into a `Vec<T, B>` allocated with `B::default()`.
    ///
    /// Fixes the original loop, which matched `Ok(..)` only: deserializer
    /// errors were silently swallowed, and `Ok(None)` (end of sequence) never
    /// terminated the loop.
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: SeqAccess<'de>,
    {
        // cap the pre-allocation so a hostile length hint cannot over-allocate
        let size_hint = std::cmp::min(seq.size_hint().unwrap_or(0), 4096);
        let mut values = Vec::with_capacity_in(size_hint, B::default());
        while let Some(value) = seq.next_element()? {
            values.push(value);
        }
        Ok(values)
    }
}
/// Serde visitor that collects a sequence of sequences into
/// `Vec<Vec<T, B>>`, with the inner vectors using allocator `B`.
struct TwoDVecVisitor<T, B: Allocator> {
    m1: PhantomData<T>,
    m2: PhantomData<B>,
}

impl<T, B: Allocator> TwoDVecVisitor<T, B> {
    pub fn new() -> Self {
        TwoDVecVisitor { m1: PhantomData, m2: PhantomData }
    }
}
impl<'de, T, B> Visitor<'de> for TwoDVecVisitor<T, B>
where
    T: Deserialize<'de>,
    B: Allocator + Default,
{
    type Value = Vec<Vec<T, B>>;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("a sequence")
    }

    /// Collects nested sequences into `Vec<Vec<T, B>>`.
    ///
    /// Fixes two defects in the original:
    /// - `while let Ok(..)` swallowed deserializer errors and did not stop
    ///   on `Ok(None)`;
    /// - the inner vector's capacity was taken from the OUTER sequence's
    ///   size hint instead of the sub-vector's actual length.
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: SeqAccess<'de>,
    {
        // cap the pre-allocation so a hostile length hint cannot over-allocate
        let size_hint = std::cmp::min(seq.size_hint().unwrap_or(0), 4096);
        let mut final_result = Vec::with_capacity(size_hint);
        while let Some(sub_vec) = seq.next_element::<Vec<T>>()? {
            // move elements into an allocator-backed vector sized exactly
            let mut values = Vec::with_capacity_in(sub_vec.len(), B::default());
            for el in sub_vec {
                values.push(el);
            }
            final_result.push(values);
        }
        Ok(final_result)
    }
}
/// Serde visitor that collects a map of sequences into
/// `HashMap<K, Vec<T, B>>`, with the value vectors using allocator `B`.
struct MapVisitor<K, T, B: Allocator> {
    m0: PhantomData<K>,
    m1: PhantomData<T>,
    m2: PhantomData<B>,
}

impl<K, T, B: Allocator> MapVisitor<K, T, B> {
    pub fn new() -> Self {
        MapVisitor { m0: PhantomData, m1: PhantomData, m2: PhantomData }
    }
}
impl<'de, K, T, B> Visitor<'de> for MapVisitor<K, T, B>
where
T: Deserialize<'de>,
K: Deserialize<'de>,
B: Allocator + Default
{
type Value = HashMap<K, Vec<T, B>>;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a 2d sequence")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let size_hint = map.size_hint();
let size_hint = std::cmp::min(size_hint.unwrap_or(0), 4096);
let mut final_map = HashMap::with_capacity(size_hint);
while let Ok(entry) = map.next_entry::<K, Vec<T>>() {
let mut values = vec![];
match entry{
Some((key, sub_vec)) => {
for el in sub_vec{
values.push(el)
}
},
None => (),
}
}
Ok(final_map)
}
}
fn serialize_vec_with_allocator<T: serde::Serialize, S, A: Allocator + serde::Serialize>(data: &Vec<T, A>, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
data.serialize(serializer)
}
fn deserialize_vec_with_allocator<'de, D, T: serde::Deserialize<'de>, A: Allocator + Default + serde::Deserialize<'de>>(deserializer: D) -> Result<Vec<T, A>, D::Error> where D: serde::Deserializer<'de> {
deserializer.deserialize_seq(VecVisitor::new())
}
fn serialize_2d_vec_with_allocator<T: serde::Serialize, S, A: Allocator + serde::Serialize>(data: &Vec<Vec<T, A>>, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
use serde::ser::SerializeSeq;
let mut seq = serializer.serialize_seq(Some(data.len()))?;
for sub_vec in data {
for el in sub_vec{
seq.serialize_element(el)?;
}
}
seq.end()
}
fn deserialize_2d_vec_with_allocator<'de, D, T: serde::Deserialize<'de>, A: Allocator + Default + serde::Deserialize<'de>>(deserializer: D) -> Result<Vec<Vec<T, A>>, D::Error> where D: serde::Deserializer<'de> {
deserializer.deserialize_seq(TwoDVecVisitor::new())
}
fn serialize_hashmap_with_allocator<K: serde::Serialize, T: serde::Serialize, S, A: Allocator + Default + serde::Serialize>(data: &HashMap<K, Vec<T, A>>, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
use serde::ser::{SerializeMap, SerializeSeq};
let mut s = serializer.serialize_map(Some(data.len()))?;
for (k, v) in data{
s.serialize_key(k)?;
for el in v.iter(){
s.serialize_value(el)?;
}
}
s.end()
}
fn deserialize_hashmap_with_allocator<'de, D,K: serde::Deserialize<'de>, T: serde::Deserialize<'de>, A: Allocator + Default>(deserializer: D) -> Result<HashMap<K, Vec<T, A>>, D::Error> where D: serde::Deserializer<'de> {
deserializer.deserialize_map(MapVisitor::new())
}
}
}
// Emits `Assembly` impl blocks (inherent or the `ConstraintSystem` trait
// impl) twice, once per allocator feature configuration, so the method
// bodies are written only once.
macro_rules! impl_assembly {
{impl Assembly $inherent:tt} => {
#[cfg(feature = "allocator")]
impl<E: Engine, P: PlonkConstraintSystemParams<E>, MG: MainGate<E>, S: SynthesisMode, A: Allocator + Default + 'static + Send + Sync> Assembly<E, P, MG, S, A> $inherent
#[cfg(not(feature = "allocator"))]
impl<E: Engine, P: PlonkConstraintSystemParams<E>, MG: MainGate<E>, S: SynthesisMode> Assembly<E, P, MG, S> $inherent
};
{impl ConstraintSystem $inherent:tt} =>{
#[cfg(feature = "allocator")]
impl<E: Engine, P: PlonkConstraintSystemParams<E>, MG: MainGate<E>, S: SynthesisMode, A: Allocator + Default + 'static + Send + Sync> ConstraintSystem<E> for Assembly<E, P, MG, S, A> $inherent
#[cfg(not(feature = "allocator"))]
impl<E: Engine, P: PlonkConstraintSystemParams<E>, MG: MainGate<E>, S: SynthesisMode> ConstraintSystem<E> for Assembly<E, P, MG, S> $inherent
}
}
impl_assembly!{
impl ConstraintSystem {
type Params = P;
type MainGate = MG;
// allocate a variable
#[inline]
fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>
{
// aux indexes are 1-based; the value closure is only evaluated when the
// synthesis mode actually produces a witness
self.num_aux += 1;
let index = self.num_aux;
if S::PRODUCE_WITNESS {
let value = value()?;
self.aux_assingments.push(value);
}
Ok(Variable(Index::Aux(index)))
}
// allocate an input variable
fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>
{
// input indexes are 1-based, same witness-gating as `alloc`
self.num_inputs += 1;
let index = self.num_inputs;
if S::PRODUCE_WITNESS {
let value = value()?;
self.input_assingments.push(value);
}
let input_var = Variable(Index::Input(index));
// each public input gets its own main-gate row of the form `-x = 0`
// (the public-input contribution is added separately by the prover)
let mut main_gate = MainGateTerm::<E>::new();
main_gate.sub_assign(ArithmeticTerm::from_variable(input_var));
let dummy = Self::get_dummy_variable();
let (variables_assignments, coefficients_assignments) = MG::format_term(main_gate, dummy).expect("must make empty padding gate");
let n = self.num_input_gates;
Self::allocate_into_storage(
&MG::default(),
&mut self.inputs_storage,
n,
&coefficients_assignments,
&variables_assignments,
&[]
)?;
self.num_input_gates += 1;
Ok(input_var)
}
#[inline]
fn get_main_gate(&self) -> &MG {
&self.main_gate
}
// opens a gate batch for the next aux trace row; must be closed by
// `end_gates_batch_for_step`
#[inline]
fn begin_gates_batch_for_step(&mut self) -> Result<(), SynthesisError> {
debug_assert!(self.trace_step_for_batch.is_none());
let n = self.num_aux_gates;
self.num_aux_gates += 1;
self.trace_step_for_batch = Some(n);
Ok(())
}
/// Places one gate on the trace row opened by `begin_gates_batch_for_step`:
/// stores its assignments and (in setup mode) marks the row in the gate's
/// density bitvector.
fn new_gate_in_batch<G: Gate<E>>(&mut self,
gate: &G,
coefficients_assignments: &[E::Fr],
variables_assignments: &[Variable],
witness_assignments: &[E::Fr]
) -> Result<(), SynthesisError> {
// check that gate is ok for config
// debug_assert!(check_gate_is_allowed_for_params::<E, P, G>(&gate), format!("supplied params do not work with gate {:?}", gate));
let n = self.trace_step_for_batch.unwrap();
// make zero-enumerated index
Self::allocate_into_storage(
gate,
&mut self.aux_storage,
n,
coefficients_assignments,
variables_assignments,
witness_assignments,
)?;
self.add_gate_into_list(gate);
if S::PRODUCE_SETUP {
if let Some(tracker) = self.aux_gate_density.0.get_mut(gate.as_internal() as &dyn GateInternal<E>) {
// pad with `false` up to row n, then mark row n as using this gate
if tracker.len() != n {
let padding = n - tracker.len();
tracker.grow(padding, false);
}
tracker.push(true);
debug_assert_eq!(n+1, tracker.len());
} else {
// first time this gate type is seen: create its density tracker
self.aux_gate_density.0.insert(gate.clone().into_internal(), BitVec::new());
let tracker = self.aux_gate_density.0.get_mut(gate.as_internal() as &dyn GateInternal<E>).unwrap();
tracker.grow(n, false);
tracker.push(true);
debug_assert_eq!(n+1, tracker.len());
}
}
Ok(())
}
/// Places variable/witness assignments on the current batch row without
/// applying any gate (the main gate's selector is recorded as `false`).
fn allocate_variables_without_gate(&mut self,
variables_assignments: &[Variable],
witness_assignments: &[E::Fr]
) -> Result<(), SynthesisError> {
let n = self.trace_step_for_batch.expect("may only be called in a batch");
// make zero-enumerated index
let empty_coefficients = Self::MainGate::empty_coefficients();
let gate = Self::MainGate::default();
Self::allocate_into_storage(
&gate,
&mut self.aux_storage,
n,
&empty_coefficients,
variables_assignments,
witness_assignments,
)?;
if S::PRODUCE_SETUP {
// record `false` in the main gate's density: row occupied, no gate applied
let apply_gate = false;
let tracker = self.aux_gate_density.0.get_mut(gate.as_internal() as &dyn GateInternal<E>).unwrap();
if tracker.len() != n {
let padding = n - tracker.len();
tracker.grow(padding, false);
}
tracker.push(apply_gate);
debug_assert_eq!(n+1, tracker.len());
}
Ok(())
}
/// Closes the batch opened by `begin_gates_batch_for_step`.
fn end_gates_batch_for_step(&mut self) -> Result<(), SynthesisError> {
debug_assert!(self.trace_step_for_batch.is_some());
let n = self.trace_step_for_batch.take().unwrap();
debug_assert_eq!(n+1, self.num_aux_gates, "invalid batch id");
Ok(())
}
#[inline]
/// Returns the assigned value of `var`, or `AssignmentMissing` when no
/// witness is produced. Aux(0) is the dummy variable and reads as zero;
/// Input(0) is invalid (indexes are 1-based).
fn get_value(&self, var: Variable) -> Result<E::Fr, SynthesisError> {
if !S::PRODUCE_WITNESS {
return Err(SynthesisError::AssignmentMissing);
}
let value = match var {
Variable(Index::Aux(0)) => {
// use crate::rand::Rng;
// let mut rng = crate::rand::thread_rng();
// let value: E::Fr = rng.gen();
// value
E::Fr::zero()
// return Err(SynthesisError::AssignmentMissing);
}
Variable(Index::Input(0)) => {
return Err(SynthesisError::AssignmentMissing);
}
Variable(Index::Input(input)) => {
// 1-based index into the assignments vector
self.input_assingments[input - 1]
},
Variable(Index::Aux(aux)) => {
self.aux_assingments[aux - 1]
}
};
Ok(value)
}
#[inline]
fn get_dummy_variable() -> Variable {
Self::dummy_variable()
}
/// Returns a variable constrained to equal zero, allocating and caching it
/// on first use.
fn get_explicit_zero(&mut self) -> Result<Variable, SynthesisError> {
    // fast path: reuse the cached constant
    if let Some(existing) = self.explicit_zero_variable {
        return Ok(existing);
    }
    let zero_value = E::Fr::zero();
    let zero_var = self.alloc(|| Ok(zero_value))?;
    // enforce zero_var - 0 == 0
    let mut constraint = MainGateTerm::new();
    constraint.add_assign(ArithmeticTerm::from_variable(zero_var));
    constraint.sub_assign(ArithmeticTerm::constant(zero_value));
    self.allocate_main_gate(constraint)?;
    self.explicit_zero_variable = Some(zero_var);
    Ok(zero_var)
}
/// Returns a variable constrained to equal one, allocating and caching it
/// on first use.
fn get_explicit_one(&mut self) -> Result<Variable, SynthesisError> {
    // fast path: reuse the cached constant
    if let Some(existing) = self.explicit_one_variable {
        return Ok(existing);
    }
    let one_value = E::Fr::one();
    let one_var = self.alloc(|| Ok(one_value))?;
    // enforce one_var - 1 == 0
    let mut constraint = MainGateTerm::new();
    constraint.add_assign(ArithmeticTerm::from_variable(one_var));
    constraint.sub_assign(ArithmeticTerm::constant(one_value));
    self.allocate_main_gate(constraint)?;
    self.explicit_one_variable = Some(one_var);
    Ok(one_var)
}
/// Registers a width-3, combinable lookup table: stores its canonically
/// sorted entries, a value->row-index lookup map, selector/id bookkeeping,
/// and returns the shared handle.
fn add_table(&mut self, table: LookupTableApplication<E>) -> Result<Arc<LookupTableApplication<E>>, SynthesisError> {
assert!(table.applies_over().len() == 3, "only support tables of width 3");
assert!(table.can_be_combined(), "can only add tables that are combinable");
assert!(!self.known_table_ids.contains_key(&table.functional_name()), "can not add a duplicate table for name {}", table.functional_name());
let table_name = table.functional_name();
let table_id = table.table_id();
let number_of_entries = table.size();
// ensure sorted format when we add table
let mut entries = Self::ensure_sorted_table(&table);
assert_eq!(entries.len(), 3);
let mut entries_as_arrays = Vec::with_capacity(entries[0].len());
let mut entries_into_table_row = std::collections::HashMap::with_capacity(entries[0].len());
// pop in reverse so columns come out as 0, 1, 2
let column_2 = entries.pop().unwrap();
let column_1 = entries.pop().unwrap();
let column_0 = entries.pop().unwrap();
// build both the row list and the row -> index lookup in one pass
for (idx, ((a, b), c)) in column_0.into_iter().zip(column_1.into_iter()).zip(column_2.into_iter()).enumerate() {
entries_as_arrays.push([a, b, c]);
entries_into_table_row.insert([a, b, c], idx);
}
let shared = Arc::from(table);
let res = shared.clone();
self.tables.push(shared);
self.individual_table_canonical_sorted_entries.insert(table_name.clone(), entries_as_arrays);
self.individual_table_entries_lookups.insert(table_name.clone(), entries_into_table_row);
// reuse a returned lookup-entry buffer when one is available
let buffer_for_current_table = if let Some(mut buffer) = self.reusable_buffer_for_lookup_entries.pop(){
buffer.clear();
buffer
}else{
// println!("allocating new buffer for table {}", table_name);
new_vec_with_allocator!(0)
};
self.individual_table_entries.insert(table_name.clone(), buffer_for_current_table);
self.known_table_names.push(table_name.clone());
self.table_selectors.insert(table_name.clone(), BitVec::new());
self.known_table_ids.insert(table_name, table_id);
self.total_length_of_all_tables += number_of_entries;
Ok(res)
}
/// Looks up a registered lookup table by its functional name.
fn get_table(&self, name: &str) -> Result<Arc<LookupTableApplication<E>>, SynthesisError> {
    self.tables
        .iter()
        .find(|t| t.functional_name() == name)
        .cloned()
        .ok_or(SynthesisError::AssignmentMissing)
}
/// Registers a multitable; panics if one with the same functional name is
/// already registered.
fn add_multitable(&mut self, table: MultiTableApplication<E>) -> Result<(), SynthesisError> {
    let table_name = table.functional_name();
    let exists = self
        .multitables
        .iter()
        .any(|t| t.functional_name() == table_name);
    assert!(exists == false);
    self.multitables.push(Arc::from(table));
    self.multitable_selectors.insert(table_name.clone(), BitVec::new());
    self.individual_table_entries.insert(table_name.clone(), new_vec_with_allocator!(0));
    Ok(())
}
/// Looks up a registered multitable by its functional name.
fn get_multitable(&self, functional_name: &str) -> Result<Arc<MultiTableApplication<E>>, SynthesisError> {
    self.multitables
        .iter()
        .find(|t| t.functional_name() == functional_name)
        .cloned()
        .ok_or(SynthesisError::AssignmentMissing)
}
/// Applies a single-application lookup-table constraint over `variables` at
/// the current trace row. Must be called inside a gates batch (the
/// `trace_step_for_batch` transaction), or it panics.
/// In setup mode: marks this row in the table's selector bitvec and records
/// the table id. In witness mode: resolves the variables' values to the row
/// index in the canonically sorted table (also validating that the row exists).
#[track_caller]
fn apply_single_lookup_gate(&mut self, variables: &[Variable], table: Arc<LookupTableApplication<E>>) -> Result<(), SynthesisError> {
let n = self.trace_step_for_batch.expect("may only add table constraint in a transaction");
// make zero-enumerated index
if S::PRODUCE_SETUP {
debug_assert!(self.tables.contains(&table));
// only combined 3-column tables are supported by this code path
assert!(table.can_be_combined() == true);
assert!(table.applies_over().len() == 3);
let table_name = table.functional_name();
let table_id = table.table_id();
// we need to:
// - mark that this table applies at this row
// - add values into the list to later on make a sorted polynomial
let tracker = self.table_selectors.get_mut(&table_name).unwrap();
// pad the selector with `false` up to the current row before marking it
if tracker.len() != n {
let padding = n - tracker.len();
tracker.grow(padding, false);
}
tracker.push(true);
debug_assert_eq!(n+1, tracker.len());
// keep track of what table is applied at what row
self.table_ids_poly.resize(n, E::Fr::zero());
self.table_ids_poly.push(table_id);
}
if S::PRODUCE_WITNESS {
let table_name = table.functional_name();
// add values for lookup table sorting later
let keys_and_values_len = table.applies_over().len();
let mut table_entries = arrayvec::ArrayVec::<_, 3>::new();
for v in variables.iter() {
let value = self.get_value(*v).unwrap();
table_entries.push(value);
}
use std::convert::TryInto;
let table_entries_as_array: [_; 3] = table_entries.into_inner().unwrap();
let entries = self.individual_table_entries.get_mut(&table_name).unwrap();
assert_eq!(variables.len(), table.applies_over().len());
// // This check is substituted by the lookup from values into index below
// let valid_entries = table.is_valid_entry(&table_entries_as_array[..keys_and_values_len]);
// assert!(valid_entries);
// if !valid_entries {
//     return Err(SynthesisError::Unsatisfiable);
// }
// map the (key, key, value) triple to its index in the canonically
// sorted table; a miss means the witness queried a nonexistent row
let row_idx = self.individual_table_entries_lookups.get(&table_name).unwrap().get(&table_entries_as_array);
assert!(row_idx.is_some(), "table most likely doesn't contain a row for {:?}", table_entries_as_array);
entries.push(*row_idx.unwrap() as u32);
}
self.num_table_lookups += 1;
Ok(())
}
/// Multi-table lookups are not supported by this assembly; always panics
/// with `unimplemented!`. Present only to satisfy the trait.
#[track_caller]
fn apply_multi_lookup_gate(&mut self, variables: &[Variable], table: Arc<MultiTableApplication<E>>) -> Result<(), SynthesisError> {
unimplemented!("not implementing multitable for now");
}
/// Returns the current total number of trace rows (input + aux gates).
fn get_current_step_number(&self) -> usize {
self.n()
}
/// Returns the number of auxiliary (non-input) gates placed so far.
fn get_current_aux_gate_number(&self) -> usize {
self.num_aux_gates
}
}
}
impl_assembly!{
impl Assembly{
/// Writes one gate's assignments (setup coefficients, state variables and
/// witness values) into `storage` at trace row `n`, padding each polynomial
/// with defaults (zero coefficients / dummy variables) if it is shorter than
/// `n`. Missing assignments are filled with zero / the dummy variable.
fn allocate_into_storage<G: Gate<E>>(
gate: &G,
#[cfg(feature = "allocator")]
storage: &mut PolynomialStorage<E, A>,
#[cfg(not(feature = "allocator"))]
storage: &mut PolynomialStorage<E>,
n: usize,
coefficients_assignments: &[E::Fr],
variables_assignments: &[Variable],
witness_assignments: &[E::Fr]
) -> Result<(), SynthesisError> {
let dummy = Self::get_dummy_variable();
let zero = E::Fr::zero();
if S::PRODUCE_SETUP {
let mut coeffs_it = coefficients_assignments.iter();
for &setup_poly in gate.setup_polynomials().into_iter() {
let poly_ref = storage.setup_map.entry(setup_poly).or_insert(new_vec_with_allocator!(0));
// pad with zeroes up to the current row before pushing
if poly_ref.len() < n {
poly_ref.resize(n, E::Fr::zero());
}
poly_ref.push(*coeffs_it.next().unwrap_or(&zero));
}
debug_assert!(coeffs_it.next().is_none(), "must consume all the coefficients for gate");
}
let mut variable_it = variables_assignments.iter();
for &var_poly in gate.variable_polynomials().into_iter() {
let poly_ref = storage.state_map.entry(var_poly).or_insert(new_vec_with_allocator!(0));
if poly_ref.len() < n {
poly_ref.resize(n, dummy);
}
// push only when the poly is exactly at row `n`, so a variable is not
// written twice if several gates in the batch share a state poly
if poly_ref.len() == n {
// we consume variable only ONCE
let var = *variable_it.next().unwrap_or(&dummy);
poly_ref.push(var);
}
}
debug_assert!(variable_it.next().is_none(), "must consume all variables for gate");
let mut witness_it = witness_assignments.iter();
for &key in gate.witness_polynomials().into_iter() {
let poly_ref = storage.witness_map.entry(key).or_insert(vec![]);
if poly_ref.len() < n {
poly_ref.resize(n, E::Fr::zero());
}
poly_ref.push(*witness_it.next().unwrap_or(&zero));
}
Ok(())
}
/// Total number of trace rows: public-input gates plus auxiliary gates.
pub fn n(&self) -> usize {
self.num_input_gates + self.num_aux_gates
}
/// Registers `gate` in the assembly's bookkeeping if it was not seen before:
/// adds it to the gate set, records every polynomial it queries, appends it
/// to the ordered gate list and updates the maximal constraint degree.
/// A gate that is already known is ignored.
fn add_gate_into_list<G: Gate<E>>(&mut self, gate: &G) {
    let already_known = self.gates.contains(gate.as_internal() as &dyn GateInternal<E>);
    if already_known {
        return;
    }
    self.gates.insert(gate.clone().into_internal());
    // remember every polynomial this gate touches in its constraints
    for &queried in gate.all_queried_polynomials().into_iter() {
        self.all_queried_polys_in_constraints.insert(queried);
    }
    self.sorted_gates.push(gate.clone().into_internal());
    // keep the running maximum of constraint degrees
    self.max_constraint_degree = self.max_constraint_degree.max(gate.degree());
}
/// Creates an empty assembly with all storages empty and the main gate
/// (`MG::default()`) pre-registered in the gate list.
pub fn new() -> Self {
let mut tmp = Self {
inputs_storage: PolynomialStorage::new(),
aux_storage: PolynomialStorage::new(),
max_constraint_degree: 0,
num_input_gates: 0,
num_aux_gates: 0,
num_inputs: 0,
num_aux: 0,
input_assingments: vec![],
aux_assingments: new_vec_with_allocator!(0),
main_gate: MG::default(),
trace_step_for_batch: None,
gates: std::collections::HashSet::new(),
all_queried_polys_in_constraints: std::collections::HashSet::new(),
aux_gate_density: GateDensityStorage::new(),
// sorted_setup_polynomial_ids: vec![],
sorted_gates: vec![],
is_finalized: false,
explicit_zero_variable: None,
explicit_one_variable: None,
tables: vec![],
multitables: vec![],
table_selectors: std::collections::HashMap::new(),
multitable_selectors: std::collections::HashMap::new(),
table_ids_poly: vec![],
total_length_of_all_tables: 0,
individual_table_canonical_sorted_entries: std::collections::HashMap::new(),
individual_table_entries_lookups: std::collections::HashMap::new(),
individual_table_entries: std::collections::HashMap::new(),
reusable_buffer_for_lookup_entries: vec![],
individual_multitable_entries: std::collections::HashMap::new(),
known_table_ids: HashMap::new(),
known_table_names: vec![],
num_table_lookups: 0,
num_multitable_lookups: 0,
_marker_p: std::marker::PhantomData,
_marker_s: std::marker::PhantomData,
#[cfg(feature = "allocator")]
_marker_a: std::marker::PhantomData,
};
// the main gate must always be the first registered gate
tmp.add_gate_into_list(&MG::default());
tmp
}
/// Creates an assembly with storages pre-sized for proving with state width 4:
/// the aux storage is pre-allocated for `domain_size` rows, the aux assignment
/// vector for `aux_size` values, and `num_lookup_tables` reusable buffers of
/// `max_num_lookup_entries` each are prepared for lookup bookkeeping. The main
/// gate (`MG::default()`) is pre-registered.
///
/// Panics if `domain_size` exceeds the available 2-adic domain of `E::Fr`.
pub fn new_specialized_for_proving_assembly_and_state_4(domain_size: usize, aux_size: usize, num_lookup_tables: usize, max_num_lookup_entries: usize) -> Self {
    assert!(domain_size <= 1 << <E::Fr as PrimeField>::S);
    let reusable_buffer_for_lookup_entries = (0..num_lookup_tables).map(|_| new_vec_with_allocator!(max_num_lookup_entries)).collect();
    let mut tmp = Self {
        inputs_storage: PolynomialStorage::new(),
        aux_storage: PolynomialStorage::new_specialized_for_proving_assembly_and_state_4(domain_size),
        max_constraint_degree: 0,
        num_input_gates: 0,
        num_aux_gates: 0,
        num_inputs: 0,
        num_aux: 0,
        input_assingments: vec![],
        aux_assingments: new_vec_with_allocator!(aux_size),
        main_gate: MG::default(),
        trace_step_for_batch: None,
        gates: std::collections::HashSet::new(),
        all_queried_polys_in_constraints: std::collections::HashSet::new(),
        aux_gate_density: GateDensityStorage::new(),
        sorted_gates: vec![],
        is_finalized: false,
        explicit_zero_variable: None,
        explicit_one_variable: None,
        tables: vec![],
        multitables: vec![],
        table_selectors: std::collections::HashMap::new(),
        multitable_selectors: std::collections::HashMap::new(),
        table_ids_poly: vec![],
        total_length_of_all_tables: 0,
        individual_table_canonical_sorted_entries: std::collections::HashMap::new(),
        individual_table_entries_lookups: std::collections::HashMap::new(),
        individual_table_entries: std::collections::HashMap::new(),
        // field init shorthand (was a redundant `name: name` binding)
        reusable_buffer_for_lookup_entries,
        individual_multitable_entries: std::collections::HashMap::new(),
        known_table_ids: HashMap::new(),
        known_table_names: vec![],
        num_table_lookups: 0,
        num_multitable_lookups: 0,
        _marker_p: std::marker::PhantomData,
        _marker_s: std::marker::PhantomData,
        #[cfg(feature = "allocator")]
        _marker_a: std::marker::PhantomData,
    };
    // the main gate must always be the first registered gate
    tmp.add_gate_into_list(&MG::default());
    tmp
}
// return variable that is not in a constraint formally, but has some value.
// `Aux(0)` is reserved for this purpose and is excluded from the
// copy-permutation (see `make_permutations`).
const fn dummy_variable() -> Variable {
Variable(Index::Aux(0))
}
/// Pads the circuit with empty gates so that `n() + 1` is a power of two and
/// the jointly sorted lookup set (queries + all table rows) fits into one
/// polynomial, then pads all setup-side selectors to the new size.
/// Idempotent: returns immediately if already finalized.
///
/// Panics if the padded size exceeds the available roots of unity for
/// `E::Fr`, or exceeds the copy-permutation capacity over `STATE_WIDTH` polys.
pub fn finalize(&mut self) {
    if self.is_finalized {
        return;
    }
    // the lookup argument (as in the paper) will make two polynomials to fit jointly sorted set
    // but in practice we fit it into one. For this it's enough for us to have
    // num_lookups + total length of tables to be smaller than the problem size,
    // so the jointly sorted set fits into 1 polynomial, and we use zeroes as padding values
    let total_number_of_table_entries = self.num_table_lookups + self.total_length_of_all_tables;
    let new_size_candidates = [(self.n() + 1).next_power_of_two() - 1, (total_number_of_table_entries + 1).next_power_of_two() - 1];
    let new_size = *new_size_candidates.iter().max().unwrap();
    assert!(
        new_size <= 1usize << E::Fr::S,
        "Padded circuit size is {}, that is larger than number of roots of unity 2^{}. Padded from {} gates and {} lookup table accesses",
        new_size,
        E::Fr::S,
        self.n(),
        total_number_of_table_entries,
    );
    assert!(
        new_size <= (1usize << E::Fr::S) / <Self as ConstraintSystem<E>>::Params::STATE_WIDTH,
        "Circuit size is {}, that is larger than number of roots of unity 2^{} for copy-permutation over {} polys. Padded from {} gates and {} lookup table accesses",
        new_size,
        E::Fr::S,
        <Self as ConstraintSystem<E>>::Params::STATE_WIDTH,
        self.n(),
        total_number_of_table_entries,
    );
    // fill the remaining rows with gate-less dummy allocations
    let dummy = Self::get_dummy_variable();
    let empty_vars = vec![dummy; <Self as ConstraintSystem<E>>::Params::STATE_WIDTH];
    let empty_witness = vec![E::Fr::zero(); <Self as ConstraintSystem<E>>::Params::WITNESS_WIDTH];
    for _ in self.n()..new_size {
        self.begin_gates_batch_for_step().unwrap();
        self.allocate_variables_without_gate(
            &empty_vars,
            &empty_witness
        ).expect("must add padding gate");
        self.end_gates_batch_for_step().unwrap();
    }
    let new_size_for_aux = new_size - self.num_input_gates;
    if S::PRODUCE_SETUP {
        // pad gate selectors
        for (_, tracker) in self.aux_gate_density.0.iter_mut() {
            tracker.grow(new_size_for_aux, false);
        }
        // pad lookup selectors
        for (_, selector) in self.table_selectors.iter_mut() {
            selector.grow(new_size_for_aux, false);
        }
        // pad special purpose table selector poly
        // NOTE(review): unlike `finalize_to_size_log_2`, this is done even when
        // there are no lookups; harmless since the poly is empty then
        self.table_ids_poly.resize(new_size_for_aux, E::Fr::zero());
    }
    assert!((self.n()+1).is_power_of_two());
    self.is_finalized = true;
}
// Caller can specify how large circuit should be artificially inflated
// if possible. Will panic if size is already too large
/// Like [`finalize`], but pads the circuit to exactly `2^size_log_2 - 1` rows.
/// Panics if the circuit (including lookup entries) already needs more rows
/// than requested, or if `size_log_2` exceeds the 2-adicity of `E::Fr`.
/// Idempotent: returns immediately if already finalized.
pub fn finalize_to_size_log_2(&mut self, size_log_2: usize) {
    if self.is_finalized {
        return;
    }
    assert!(size_log_2 <= E::Fr::S as usize);
    // the lookup argument (as in the paper) will make two polynomials to fit jointly sorted set
    // but in practice we fit it into one. It's enough for us to have
    // num_lookups + total length of tables to be smaller than the problem size,
    // so the jointly sorted set fits into 1 polynomial, and we use zeroes as padding values
    let total_number_of_table_entries = self.num_table_lookups + self.total_length_of_all_tables;
    let new_size_candidates = [(self.n() + 1).next_power_of_two() - 1, (total_number_of_table_entries + 1).next_power_of_two() - 1];
    let new_size = *new_size_candidates.iter().max().unwrap();
    assert!(
        new_size <= 1usize << E::Fr::S,
        "Padded circuit size is {}, that is larger than number of roots of unity 2^{}. Padded from {} gates and {} lookup table accesses",
        new_size,
        E::Fr::S,
        self.n(),
        total_number_of_table_entries,
    );
    assert!(
        new_size <= (1usize << E::Fr::S) / <Self as ConstraintSystem<E>>::Params::STATE_WIDTH,
        "Circuit size is {}, that is larger than number of roots of unity 2^{} for copy-permutation over {} polys. Padded from {} gates and {} lookup table accesses",
        new_size,
        E::Fr::S,
        <Self as ConstraintSystem<E>>::Params::STATE_WIDTH,
        self.n(),
        total_number_of_table_entries,
    );
    // the requested size must accommodate the minimal padded size
    let pad_to = 1 << size_log_2;
    let new_size = if new_size <= pad_to {
        pad_to - 1
    } else {
        panic!("Requested padding to size 2^{}, but circuit already contains {} gates", size_log_2, new_size)
    };
    // fill the remaining rows with gate-less dummy allocations
    let dummy = Self::get_dummy_variable();
    let empty_vars = vec![dummy; <Self as ConstraintSystem<E>>::Params::STATE_WIDTH];
    let empty_witness = vec![E::Fr::zero(); <Self as ConstraintSystem<E>>::Params::WITNESS_WIDTH];
    for _ in self.n()..new_size {
        self.begin_gates_batch_for_step().unwrap();
        self.allocate_variables_without_gate(
            &empty_vars,
            &empty_witness
        ).expect("must add padding gate");
        self.end_gates_batch_for_step().unwrap();
    }
    assert_eq!(new_size, self.n());
    let new_size_for_aux = new_size - self.num_input_gates;
    if S::PRODUCE_SETUP {
        // pad gate selectors
        for (_, tracker) in self.aux_gate_density.0.iter_mut() {
            tracker.grow(new_size_for_aux, false);
        }
        if self.num_table_lookups > 0{
            // pad lookup selectors
            for (_, selector) in self.table_selectors.iter_mut() {
                selector.grow(new_size_for_aux, false);
            }
            // pad special purpose table selector poly
            self.table_ids_poly.resize(new_size_for_aux, E::Fr::zero());
        }
    }
    assert!((self.n()+1).is_power_of_two(), "padded circuit size is not power of two. self.n() = {}", self.n());
    self.is_finalized = true;
}
/// Returns the polynomial storage owning trace row `step`: rows below
/// `num_input_gates` live in the inputs storage, the rest in the aux storage.
#[cfg(feature = "allocator")]
fn get_storage_for_trace_step(&self, step: usize) -> &PolynomialStorage<E, A> {
if step < self.num_input_gates {
&self.inputs_storage
} else {
&self.aux_storage
}
}
/// Returns the polynomial storage owning trace row `step`: rows below
/// `num_input_gates` live in the inputs storage, the rest in the aux storage.
#[cfg(not(feature = "allocator"))]
fn get_storage_for_trace_step(&self, step: usize) -> &PolynomialStorage<E> {
if step < self.num_input_gates {
&self.inputs_storage
} else {
&self.aux_storage
}
}
/// Best-effort satisfiability check: evaluates every applied auxiliary gate's
/// constraint on its trace row and reports the first violation to stdout.
/// Only runs when both setup and witness are produced (testing mode) —
/// otherwise returns `true` without checking anything.
/// NOTE(review): public-input gates are not verified yet (see TODO below).
pub fn is_satisfied(&self) -> bool {
    if !S::PRODUCE_SETUP || !S::PRODUCE_WITNESS {
        // only testing mode can run this check for now
        return true;
    }
    // an empty circuit is trivially satisfied
    if self.n() == 0 {
        return true;
    }
    // TODO: handle public inputs
    let n = self.n() - 1;
    let worker = Worker::new();
    let storage = self.make_assembled_poly_storage(&worker, false).unwrap();
    for (gate_type, density) in self.aux_gate_density.0.iter() {
        for (gate_index, is_applicable) in density.iter().enumerate() {
            if !is_applicable {
                continue;
            }
            let trace_index = self.num_input_gates + gate_index;
            // the last row is flagged so the gate can handle wrap-around queries
            let last = trace_index == n;
            let value = gate_type.verify_on_row(trace_index, &storage, last);
            if !value.is_zero() {
                println!("Unsatisfied at aux gate {} (zero enumerated)", gate_index);
                println!("Constraint value = {}", value);
                println!("Gate {:?}", gate_type.name());
                return false;
            }
        }
    }
    true
}
/// Builds the copy-permutation polynomials (sigmas), one per state column.
/// Each variable's occurrences across the trace form a cycle; the sigma
/// values encode that cycle over the cosets `k_i * {1, omega, ...}`.
/// Requires finalization and setup mode.
pub fn make_permutations(&self, worker: &Worker) -> Result<Vec<Polynomial::<E::Fr, Values>>, SynthesisError> {
assert!(self.is_finalized);
if !S::PRODUCE_SETUP {
return Err(SynthesisError::AssignmentMissing);
}
let num_gates = self.n();
let num_partitions = self.num_inputs + self.num_aux;
let num_inputs = self.num_inputs;
// in the partition number i there is a set of indexes in V = (a, b, c) such that V_j = i
let mut partitions = vec![vec![]; num_partitions + 1];
let mut poly_ids = vec![];
for i in 0..P::STATE_WIDTH {
let id = PolyIdentifier::VariablesPolynomial(i);
poly_ids.push(id);
}
// gate_idx is zero-enumerated here
for gate_idx in 0..num_gates
{
let storage = self.get_storage_for_trace_step(gate_idx);
for (state_poly_index, poly_id) in poly_ids.iter().enumerate() {
let variables_vec_ref = storage.state_map.get(&poly_id).expect("must get a variables polynomial");
// inputs storage and aux storage are separately zero-based
let storage_idx = if gate_idx < self.num_input_gates {
gate_idx
} else {
gate_idx - self.num_input_gates
};
let v = variables_vec_ref[storage_idx];
match v {
Variable(Index::Aux(0)) => {
// Dummy variables do not participate in the permutation
},
Variable(Index::Input(0)) => {
unreachable!("There must be no input with index 0");
},
Variable(Index::Input(index)) => {
let i = index; // inputs are [1, num_inputs]
// entries are (column, 1-based row)
partitions[i].push((state_poly_index, gate_idx+1));
},
Variable(Index::Aux(index)) => {
let i = index + num_inputs; // aux are [num_inputs + 1, ..]
partitions[i].push((state_poly_index, gate_idx+1));
},
}
}
}
// sanity check: partition 0 is the dummy-variable slot and must stay empty
assert_eq!(partitions[0].len(), 0);
let domain = Domain::new_for_size(num_gates as u64).expect("must have enough roots of unity to fit the circuit");
// now we need to make root at it's cosets
let domain_elements = materialize_domain_elements_with_natural_enumeration(
&domain, &worker
);
// domain_elements.pop().unwrap();
// NOTE(review): these two trait imports appear unused in this function —
// verify before removing
use crate::ff::SqrtField;
use crate::ff::LegendreSymbol;
let mut non_residues = vec![];
non_residues.push(E::Fr::one());
non_residues.extend(make_non_residues::<E::Fr>(P::STATE_WIDTH - 1));
// NOTE(review): hard-coded 4 assumes P::STATE_WIDTH == 4
assert_eq!(non_residues.len(), 4);
// initialize each sigma_i as the identity permutation on coset k_i * H
let mut sigmas = vec![];
for i in 0..P::STATE_WIDTH {
let mut sigma_i = Polynomial::from_values_unpadded(domain_elements.clone()).unwrap();
sigma_i.scale(&worker, non_residues[i]);
sigmas.push(sigma_i);
}
let mut permutations = vec![vec![]; num_partitions + 1];
// cyclic left-rotation: moves the first element to the back
fn rotate<T: Sized>(mut vec: Vec<T>) -> Vec<T> {
if vec.len() > 1 {
let mut els: Vec<_> = vec.drain(0..1).collect();
debug_assert_eq!(els.len(), 1);
// els.reverse();
vec.push(els.pop().unwrap());
}
vec
}
for (i, partition) in partitions.into_iter().enumerate().skip(1) {
// copy-permutation should have a cycle around the partition
// we do not need to permute over partitions of length 1,
// as this variable only happends in one place
if partition.len() == 1 {
continue;
}
let permutation = rotate(partition.clone());
permutations[i] = permutation.clone();
for (original, new) in partition.into_iter()
.zip(permutation.into_iter())
{
// (column_idx, trace_step_idx)
let new_zero_enumerated = new.1 - 1;
let mut new_value = domain_elements[new_zero_enumerated];
// we have shuffled the values, so we need to determine FROM
// which of k_i * {1, omega, ...} cosets we take a value
// for a permutation polynomial
new_value.mul_assign(&non_residues[new.0]);
// check to what witness polynomial the variable belongs
let place_into = &mut sigmas[original.0].as_mut();
let original_zero_enumerated = original.1 - 1;
place_into[original_zero_enumerated] = new_value;
}
}
Ok(sigmas)
}
/// Assembles full-length setup polynomial values by concatenating the input
/// gates' coefficients with the aux gates' coefficients (zero-padded to the
/// total trace length). Requires setup mode; requires finalization only when
/// `with_finalization` is set.
pub fn make_setup_polynomials(
&self,
with_finalization: bool
) -> Result<std::collections::HashMap<PolyIdentifier, Polynomial<E::Fr, Values>>, SynthesisError> {
if with_finalization {
assert!(self.is_finalized);
}
if !S::PRODUCE_SETUP {
return Err(SynthesisError::AssignmentMissing);
}
let total_num_gates = self.n();
let num_input_gates = self.num_input_gates;
let mut map = std::collections::HashMap::new();
// poly ids are taken from aux storage: every setup poly appears there
let setup_poly_ids: Vec<_> = self.aux_storage.setup_map.keys().collect();
for &id in setup_poly_ids.into_iter() {
let mut assembled_poly = vec![E::Fr::zero(); total_num_gates];
if num_input_gates != 0 {
let input_gates_coeffs = &mut assembled_poly[..num_input_gates];
input_gates_coeffs.copy_from_slice(&self.inputs_storage.setup_map.get(&id).unwrap()[..]);
}
{
// aux coefficients follow the input ones; the tail stays zero
let src = &self.aux_storage.setup_map.get(&id).unwrap()[..];
let src_len = src.len();
let aux_gates_coeffs = &mut assembled_poly[num_input_gates..(num_input_gates+src_len)];
aux_gates_coeffs.copy_from_slice(src);
}
let as_poly = Polynomial::from_values_unpadded(assembled_poly)?;
map.insert(id, as_poly);
}
Ok(map)
}
/// Produces the full proving/verification `Setup` for circuit `C`: gate setup
/// monomials, permutation monomials, gate selector monomials and (when lookups
/// are used) lookup table, selector and table-type monomials.
/// Panics if `C::declare_used_gates()` disagrees with the gates actually used
/// during synthesis. Requires finalization and setup mode.
#[track_caller]
pub fn create_setup<C: Circuit<E>>(
&self,
worker: &Worker
) -> Result<Setup<E, C>, SynthesisError> {
assert!(self.is_finalized);
assert!(S::PRODUCE_SETUP);
let claimed_gates_list = C::declare_used_gates()?;
let known_gates_list = &self.sorted_gates;
assert_eq!(&claimed_gates_list, known_gates_list, "trying to perform setup for a circuit that has different gates set from synthesized one: circuit claims {:?}, in synthesis we had {:?}", &claimed_gates_list, &known_gates_list);
// check for consistency
{
// the first declared gate must be the main gate of both the CS and the circuit
assert!(&<Self as ConstraintSystem<E>>::MainGate::default().into_internal() == &claimed_gates_list[0]);
assert!(&C::MainGate::default().into_internal() == &claimed_gates_list[0]);
// dbg!(&claimed_gates_list[0]);
// let as_any = (&claimed_gates_list[0]) as &dyn std::any::Any;
// match as_any.downcast_ref::<<Self as ConstraintSystem<E>>::MainGate>() {
//     Some(..) => {
//     },
//     None => {
//         println!("Type mismatch: first gate among used gates must be the main gate of CS");
//         // panic!("first gate among used gates must be the main gate of CS");
//     }
// }
}
let mut setup = Setup::<E, C>::empty();
setup.n = self.n();
setup.num_inputs = self.num_inputs;
setup.state_width = <Self as ConstraintSystem<E>>::Params::STATE_WIDTH;
setup.num_witness_polys = <Self as ConstraintSystem<E>>::Params::WITNESS_WIDTH;
setup.non_residues = make_non_residues::<E::Fr>(setup.state_width - 1);
let (mut setup_polys_values_map, permutation_polys) = self.perform_setup(&worker)?;
// convert every gate's setup values to monomial form, in gate order
for gate in known_gates_list.iter() {
let setup_polys = gate.setup_polynomials();
for id in setup_polys.into_iter() {
let values = setup_polys_values_map.remove(&id).expect("must contain setup poly").clone_padded_to_domain()?;
let mon = values.icoset_fft_for_generator(&worker, &E::Fr::one());
setup.gate_setup_monomials.push(mon);
}
}
for perm in permutation_polys.into_iter() {
let mon = perm.icoset_fft_for_generator(&worker, &E::Fr::one());
setup.permutation_monomials.push(mon);
}
// gate selectors are only emitted when more than one gate type is used
let gate_selector_values = self.output_gate_selectors(&worker)?;
if known_gates_list.len() > 1 {
assert_eq!(gate_selector_values.len(), known_gates_list.len(), "numbers of selectors and known gates mismatch");
}
for values in gate_selector_values.into_iter() {
let poly = Polynomial::from_values(values)?;
let mon = poly.icoset_fft_for_generator(&worker, &E::Fr::one());
setup.gate_selectors_monomials.push(mon);
}
if self.tables.len() > 0 && self.num_table_lookups > 0 {
// we have lookup tables, so add them to setup
let num_lookups = self.num_table_lookups;
setup.total_lookup_entries_length = num_lookups;
let table_tails = self.calculate_t_polynomial_values_for_single_application_tables()?;
assert_eq!(table_tails.len(), 4);
let tails_len = table_tails[0].len();
// total number of gates, Input + Aux
let size = self.n();
// table contents are placed at the very end of each t-polynomial
let copy_start = size - tails_len;
for tail in table_tails.into_iter() {
let mut values = vec![E::Fr::zero(); size];
values[copy_start..].copy_from_slice(&tail[..]);
let poly = Polynomial::from_values(values)?;
let mon = poly.icoset_fft_for_generator(&worker, &E::Fr::one());
setup.lookup_tables_monomials.push(mon);
}
let selector_for_lookup_values = self.calculate_lookup_selector_values()?;
let poly = Polynomial::from_values(selector_for_lookup_values)?;
let mon = poly.icoset_fft_for_generator(&worker, &E::Fr::one());
setup.lookup_selector_monomial = Some(mon);
let table_type_values = self.calculate_table_type_values()?;
let poly = Polynomial::from_values(table_type_values)?;
let mon = poly.icoset_fft_for_generator(&worker, &E::Fr::one());
setup.lookup_table_type_monomial = Some(mon);
}
Ok(setup)
}
/// Convenience wrapper: assembles the finalized setup polynomial values and
/// the copy-permutation polynomials in one call.
pub fn perform_setup(
    &self,
    worker: &Worker
) -> Result<
    (std::collections::HashMap<PolyIdentifier, Polynomial<E::Fr, Values>>, Vec<Polynomial<E::Fr, Values>>),
    SynthesisError
> {
    let setup_values = self.make_setup_polynomials(true)?;
    let permutations = self.make_permutations(worker)?;
    Ok((setup_values, permutations))
}
/// Produces one 0/1-valued selector vector per gate type (in `sorted_gates`
/// order), marking the rows where each gate applies. Returns an empty vector
/// when only the main gate is used (no selectors needed).
pub fn output_gate_selectors(&self, worker: &Worker) -> Result<Vec<Vec<E::Fr>>, SynthesisError> {
if self.sorted_gates.len() == 1 {
return Ok(vec![]);
}
let num_gate_selectors = self.sorted_gates.len();
let one = E::Fr::one();
let empty_poly_values = vec![E::Fr::zero(); self.n()];
let mut poly_values = vec![empty_poly_values.clone(); num_gate_selectors];
let num_input_gates = self.num_input_gates;
// first gate in sorted in a main gate and applies on public inputs
for p in poly_values[0][..num_input_gates].iter_mut() {
*p = one;
}
// fill the aux-rows portion of each selector in parallel, one chunk of
// selector vectors per worker thread
worker.scope(poly_values.len(), |scope, chunk| {
for (i, lh) in poly_values.chunks_mut(chunk)
.enumerate() {
scope.spawn(move |_| {
// we take `values_per_leaf` values from each of the polynomial
// and push them into the conbinations
let base_idx = i*chunk;
for (j, lh) in lh.iter_mut().enumerate() {
// NOTE(review): `lh` and (below) `i` shadow the outer loop
// bindings — intentional but easy to misread
let idx = base_idx + j;
let id = &self.sorted_gates[idx];
let density = self.aux_gate_density.0.get(id).unwrap();
let poly_mut_slice: &mut [E::Fr] = &mut lh[num_input_gates..];
for (i, d) in density.iter().enumerate() {
if d {
poly_mut_slice[i] = one;
}
}
}
});
}
});
Ok(poly_values)
}
/// Concatenates all single-application tables into four column vectors:
/// three columns of table entries (in canonical sorted order) plus a fourth
/// column holding each row's table id. Setup mode only.
pub fn calculate_t_polynomial_values_for_single_application_tables(&self) ->
Result<Vec<Vec<E::Fr>>, SynthesisError> {
if !S::PRODUCE_SETUP {
return Err(SynthesisError::AssignmentMissing);
}
if self.tables.len() == 0 {
return Ok(vec![])
}
// we should pass over every table and append it
// all tables must share the same width
let mut width = 0;
for table in self.tables.iter() {
if width == 0 {
width = table.width();
} else {
assert_eq!(width, table.width());
}
}
assert_eq!(width, 3, "only support tables that span over 3 polynomials for now");
let mut column_contributions = vec![vec![]; width + 1];
for table in self.tables.iter() {
let entries = self.individual_table_canonical_sorted_entries.get(&table.functional_name()).unwrap();
assert!(entries.len() == table.size(), "invalid number of elements in table {}", table.functional_name());
// these are individual column vectors, so just copy
for e in entries.iter() {
for idx in 0..3 {
column_contributions[idx].push(e[idx]);
}
}
// extend the table-id column to match the new length of the data columns
let table_id = table.table_id();
let pad_to_len = column_contributions[0].len();
column_contributions.last_mut().unwrap().resize(pad_to_len, table_id);
}
Ok(column_contributions)
}
/// Returns the table's contents as three column vectors, with rows sorted in
/// standard lexicographic order. Panics (via assert) if the table contains a
/// duplicate row.
pub fn ensure_sorted_table(table: &LookupTableApplication<E>) -> Vec<Vec<E::Fr>> {
    let columns = table.get_table_values_for_polys();
    assert_eq!(columns.len(), 3);
    let num_rows = columns[0].len();
    // collect rows, asserting that each one is unique
    let mut uniqueness_checker = std::collections::HashSet::with_capacity(num_rows);
    let mut rows = Vec::with_capacity(num_rows);
    for row_idx in 0..num_rows {
        let row = KeyValueSet::<E>::new([columns[0][row_idx], columns[1][row_idx], columns[2][row_idx]]);
        let is_unique = uniqueness_checker.insert(row);
        assert!(is_unique);
        rows.push(row);
    }
    // lexicographic order keeps downstream sorting logic simple
    rows.sort();
    // transpose the sorted rows back into per-column vectors
    let mut sorted_columns = vec![Vec::with_capacity(num_rows); 3];
    for row in rows.iter() {
        for column_idx in 0..3 {
            sorted_columns[column_idx].push(row.inner[column_idx]);
        }
    }
    sorted_columns
}
// pub fn calculate_interleaved_t_polys(&self) ->
// Result<Vec<Vec<E::Fr>>, SynthesisError> {
// assert!(self.is_finalized);
// if self.tables.len() == 0 {
// return Ok(vec![])
// }
// // we should pass over every table and append it
// let mut width = 0;
// for table in self.tables.iter() {
// if width == 0 {
// width = table.width();
// } else {
// assert_eq!(width, table.width());
// }
// }
// assert_eq!(width, 3, "only support tables that span over 3 polynomials for now");
// assert!(self.is_finalized);
// if self.tables.len() == 0 {
// return Ok(vec![]);
// }
// // total number of gates, Input + Aux
// let size = self.n();
// let aux_gates_start = self.num_input_gates;
// let mut contributions = vec![vec![E::Fr::zero(); size]; 4];
// // make it shifted for ease of rotations later one
// let mut place_into_idx = aux_gates_start + 1;
// let lookup_selector = self.calculate_lookup_selector_values()?;
// for single_application in self.tables.iter() {
// // let entries = single_application.get_table_values_for_polys();
// println!("Sorting table {}", single_application.functional_name());
// let entries = Self::ensure_sorted_table(single_application);
// assert_eq!(entries.len(), 3);
// let table_id = single_application.table_id();
// let num_entries = single_application.size();
// assert_eq!(entries[0].len(), num_entries);
// for entry_idx in 0..num_entries {
// 'inner: loop {
// if lookup_selector[place_into_idx].is_zero() {
// // we can place a table row into the poly
// for column in 0..3 {
// contributions[column][place_into_idx] = entries[column][entry_idx];
// }
// contributions[3][place_into_idx] = table_id;
// place_into_idx += 1;
// break 'inner;
// } else {
// // go for a next one
// place_into_idx += 1;
// }
// }
// }
// }
// Ok(contributions)
// }
// pub fn calculate_s_poly_contributions_from_witness(&self) ->
// Result<Vec<Vec<E::Fr>>, SynthesisError>
// {
// if self.tables.len() == 0 {
// return Ok(vec![]);
// }
// // we first form a set of all occured witness values,
// // then include table entries to the set
// // and then sort this set
// let mut kv_set_entries = vec![];
// let mut contributions_per_column = vec![vec![]; 4];
// for (_table_idx, single_application) in self.tables.iter().enumerate() {
// // copy all queries from witness
// let table_name = single_application.functional_name();
// for kv_values in self.individual_table_entries.get(&table_name).unwrap().iter() {
// let entry = KeyValueSet::<E>::from_slice(&kv_values[..3]);
// kv_set_entries.push(entry);
// }
// // copy table elements themselves
// let entries = Self::ensure_sorted_table(single_application);
// // those are full values of polynomials, so we have to virtually transpose
// let size = entries[0].len();
// for i in 0..size {
// let entry = KeyValueSet::new([entries[0][i], entries[1][i], entries[2][i]]);
// kv_set_entries.push(entry)
// }
// kv_set_entries.sort();
// // now copy backward with addition of the table id
// for kv in kv_set_entries.iter() {
// for i in 0..3 {
// contributions_per_column[i].push(kv.inner[i]);
// }
// }
// let table_id = single_application.table_id();
// let pad_to_len = contributions_per_column[0].len();
// contributions_per_column.last_mut().unwrap().resize(pad_to_len, table_id);
// kv_set_entries.truncate(0);
// }
// Ok(contributions_per_column)
// }
/// Builds the flattened contribution of witness lookups to the jointly sorted
/// "s" polynomial of the lookup argument. Per table: all queried row indexes
/// plus every canonical row index are sorted together, and each index is then
/// mapped to its collapsed row value
/// `row[0] + c*row[1] + c^2*row[2] + c^3*table_id`,
/// where `c` is `delinearization_challenge`.
pub fn calculate_s_poly_contributions_from_witness(&self, delinearization_challenge: E::Fr) ->
Result<Vec<E::Fr>, SynthesisError>
{
    if self.tables.len() == 0 {
        return Ok(vec![]);
    }
    // rows are identified by their index into the canonically sorted table,
    // so we can sort based on indexes only
    let mut kv_set_entries = vec![];
    let mut accumulated_contributions = vec![];
    for (_table_idx, single_application) in self.tables.iter().enumerate() {
        // copy all queries from witness
        let table_name = single_application.functional_name();
        for kv_index in self.individual_table_entries.get(&table_name).unwrap().iter() {
            kv_set_entries.push(*kv_index);
        }
        let table_size = self.individual_table_canonical_sorted_entries.get(&table_name).unwrap().len();
        // copy table elements themselves
        for i in 0..table_size {
            kv_set_entries.push(i as u32);
        }
        kv_set_entries.sort();
        // precompute the collapsed (delinearized) value of every canonical row
        let canonical_rows = self.individual_table_canonical_sorted_entries.get(&table_name).unwrap();
        let table_id = single_application.table_id();
        let mut table_id_contribution = table_id;
        table_id_contribution.mul_assign(&delinearization_challenge);
        let mut collapsed_rows = Vec::with_capacity(table_size);
        for row in canonical_rows.iter() {
            // Horner rule for row[0] + c * row[1] + c^2 * row[2] + c^3 * table_id
            // (dead local `value` from the original was removed here)
            let mut tmp = table_id_contribution;
            tmp.add_assign(&row[2]);
            tmp.mul_assign(&delinearization_challenge);
            tmp.add_assign(&row[1]);
            tmp.mul_assign(&delinearization_challenge);
            tmp.add_assign(&row[0]);
            collapsed_rows.push(tmp);
        }
        // emit the jointly sorted set, mapped back to field values
        for el in kv_set_entries.iter() {
            let value = collapsed_rows[*el as usize];
            accumulated_contributions.push(value);
        }
        // reuse the scratch vector for the next table
        kv_set_entries.truncate(0);
    }
    Ok(accumulated_contributions)
}
/// Produces the full-length "table type" polynomial values: zero on input
/// gates (and padding) and, on every aux gate, the id of the table applied
/// there (zero where none). Setup mode only; requires finalization.
pub fn calculate_table_type_values(
    &self
) ->
Result<Vec<E::Fr>, SynthesisError>
{
    assert!(self.is_finalized);
    if !S::PRODUCE_SETUP {
        return Err(SynthesisError::AssignmentMissing);
    }
    if self.tables.is_empty() {
        return Ok(vec![]);
    }
    let table_ids_on_aux_gates = &self.table_ids_poly;
    let num_aux_gates = self.num_aux_gates;
    assert_eq!(num_aux_gates, table_ids_on_aux_gates.len());
    // trace layout is [input gates | aux gates]; ids go into the aux region
    let full_size = self.n();
    let aux_start = self.num_input_gates;
    let aux_end = aux_start + num_aux_gates;
    let mut result = vec![E::Fr::zero(); full_size];
    result[aux_start..aux_end].copy_from_slice(table_ids_on_aux_gates);
    Ok(result)
}
/// Assembles the dense lookup selector over the whole trace: one at every
/// row where any single-application table is queried, zero elsewhere.
/// Setup mode only; requires finalization.
pub fn calculate_lookup_selector_values(
    &self
) -> Result<Vec<E::Fr>, SynthesisError> {
    assert!(self.is_finalized);
    if !S::PRODUCE_SETUP {
        return Err(SynthesisError::AssignmentMissing);
    }
    if self.tables.is_empty() {
        return Ok(vec![]);
    }
    // trace layout is [input gates | aux gates]
    let full_size = self.n();
    let aux_start = self.num_input_gates;
    let num_aux_gates = self.num_aux_gates;
    let mut selector_values = vec![E::Fr::zero(); full_size];
    // merge every table's per-row bit selector into one dense polynomial
    for table in self.tables.iter() {
        let name = table.functional_name();
        let bits = self.table_selectors.get(&name).unwrap();
        for aux_idx in 0..num_aux_gates {
            if bits[aux_idx] {
                selector_values[aux_start + aux_idx] = E::Fr::one();
            }
        }
    }
    Ok(selector_values)
}
/// Builds the three "f" polynomials of the lookup argument from the witness:
/// at every row where a table applies (per the stored bit selectors), copies
/// the queried state-poly values; all other rows stay zero.
pub fn calculate_masked_lookup_entries(
&self,
storage: &AssembledPolynomialStorage<E>
) ->
Result<Vec<Vec<E::Fr>>, SynthesisError>
{
assert!(self.is_finalized);
if self.tables.len() == 0 {
return Ok(vec![]);
}
// total number of gates, Input + Aux
let size = self.n();
let aux_gates_start = self.num_input_gates;
let num_aux_gates = self.num_aux_gates;
// input + aux gates without t-polys
let mut contributions_per_column = vec![vec![E::Fr::zero(); size]; 3];
for single_application in self.tables.iter() {
let table_id = single_application.table_id();
let table_name = single_application.functional_name();
let keys_and_values = single_application.applies_over();
let selector_bitvec = self.table_selectors.get(&table_name).unwrap();
// let num_non_empty = selector_bitvec.iter().filter(|x| *x).count();
// println!("{} lookups for table {}", num_non_empty, table_name);
assert!(selector_bitvec.len() >= num_aux_gates);
for aux_gate_idx in 0..num_aux_gates {
if selector_bitvec[aux_gate_idx] {
let global_gate_idx = aux_gate_idx + aux_gates_start;
// place value into f poly
for (idx, &poly_id) in keys_and_values.iter().enumerate() {
let value = storage.get_poly_at_step(poly_id, global_gate_idx);
contributions_per_column[idx][global_gate_idx] = value;
}
}
}
}
Ok(contributions_per_column)
}
/// Same as `calculate_masked_lookup_entries`, but masks rows using an
/// explicit selector polynomial in value form instead of the
/// per-table selector bit vectors, so it does not need setup data.
pub fn calculate_masked_lookup_entries_using_selector<'a>(
    &self,
    storage: &AssembledPolynomialStorage<E>,
    selector: &PolynomialProxy<'a, E::Fr, Values>
) ->
    Result<Vec<Vec<E::Fr>>, SynthesisError>
{
    assert!(self.is_finalized);
    if self.tables.is_empty() {
        return Ok(vec![]);
    }

    // Full trace length: input gates followed by aux gates.
    let total_rows = self.n();
    let first_aux_row = self.num_input_gates;
    let aux_rows = self.num_aux_gates;

    let selector_values = selector.as_ref().as_ref();
    let one = E::Fr::one();

    // One contribution column per key/value polynomial (3 in total).
    let mut masked_columns = vec![vec![E::Fr::zero(); total_rows]; 3];
    for application in self.tables.iter() {
        let applied_polys = application.applies_over();
        for row in 0..aux_rows {
            let trace_row = first_aux_row + row;
            // Only rows whose selector value equals one are lookup rows.
            if selector_values[trace_row] != one {
                continue;
            }
            for (column, &poly_id) in applied_polys.iter().enumerate() {
                masked_columns[column][trace_row] =
                    storage.get_poly_at_step(poly_id, trace_row);
            }
        }
    }

    Ok(masked_columns)
}
/// Produces the "s" polynomials of the lookup argument: each witness
/// column reordered so its entries appear in the same order as the
/// corresponding entries of the table column "t".
///
/// Panics (via `unwrap`) if a witness value is absent from its table,
/// which would make the lookup unsatisfiable.
///
/// Fix: removed a leftover debug `println!` that dumped the entire
/// sorted index vector for every column on every invocation.
fn sort_by_t(
    witness_entries: &Vec<Vec<E::Fr>>,
    table_entries: &Vec<Vec<E::Fr>>,
) -> Result< Vec<Vec<E::Fr>>, SynthesisError> {
    assert_eq!(witness_entries.len(), table_entries.len());
    if witness_entries.len() == 0 {
        return Ok(vec![]);
    }
    // make s = f sorted by t (elements in s appear in the same order as elements in t)
    let entries_len = table_entries[0].len();
    let witnesses_len = witness_entries[0].len();
    // Per-column reverse lookup: field element -> its index in the table.
    // Duplicate table values keep the last inserted index; any occurrence
    // yields a valid order position, so this is fine.
    let mut index_lookups_for_sorting = vec![std::collections::HashMap::with_capacity(entries_len); witness_entries.len()];
    for (idx, table) in table_entries.iter().enumerate() {
        for (entry_index, &entry_value) in table.iter().enumerate() {
            index_lookups_for_sorting[idx].insert(entry_value, entry_index);
        }
    }
    let mut column_contributions = Vec::with_capacity(witness_entries.len());
    for (idx, witness_column) in witness_entries.iter().enumerate() {
        // Map each witness value to its position in the table column.
        let mut indexes = vec![usize::max_value(); witnesses_len];
        for (witness_index, witness_value) in witness_column.iter().enumerate() {
            let reverse_lookup_index = index_lookups_for_sorting[idx].get(witness_value).unwrap();
            indexes[witness_index] = *reverse_lookup_index;
        }
        // Unstable sort suffices for plain indices and avoids the
        // allocation made by the stable sort.
        indexes.sort_unstable();
        let mut s_for_column = Vec::with_capacity(witnesses_len);
        for sorted_index in indexes.into_iter() {
            let table = &table_entries[idx];
            s_for_column.push(table[sorted_index]);
        }
        column_contributions.push(s_for_column);
    }
    Ok(column_contributions)
}
/// Materializes the per-column state (variable) assignments for the
/// full trace: public input rows first, then aux gate rows, padded
/// with zeroes up to `pad_to - 1` entries.
///
/// Returns `(state_polys, witness_polys)`; the witness part is always
/// empty here. Errors when the system does not produce witness data.
pub fn make_state_and_witness_polynomials(
    &self,
    worker: &Worker,
    with_finalization: bool
) -> Result<(Vec<Vec<E::Fr>>, Vec<Vec<E::Fr>>), SynthesisError>
{
    if with_finalization {
        assert!(self.is_finalized);
    }
    if !S::PRODUCE_WITNESS {
        return Err(SynthesisError::AssignmentMissing);
    }
    // Pre-size each column; padded to a power of two when finalized.
    let mut full_assignments = if with_finalization {
        vec![Vec::with_capacity((self.n()+1).next_power_of_two()); P::STATE_WIDTH]
    } else {
        vec![Vec::with_capacity(self.n()+1); P::STATE_WIDTH]
    };
    let pad_to = if with_finalization {
        (self.n()+1).next_power_of_two()
    } else {
        self.n()+1
    };
    let num_input_gates = self.num_input_gates;
    let num_aux_gates = self.num_aux_gates;
    // Input rows: column 0 carries the public input values, the other
    // columns are zero on those rows.
    full_assignments[0].extend_from_slice(&self.input_assingments);
    assert!(full_assignments[0].len() == num_input_gates);
    for i in 1..P::STATE_WIDTH {
        full_assignments[i].resize(num_input_gates, E::Fr::zero());
    }
    let dummy = Self::get_dummy_variable();
    // Fill the aux rows of all columns in parallel: each spawned job
    // resolves the variables of a chunk of columns to their values.
    worker.scope(full_assignments.len(), |scope, chunk| {
        for (i, lh) in full_assignments.chunks_mut(chunk)
                        .enumerate() {
            scope.spawn(move |_| {
                // resolve each variable of these columns to its
                // assigned value and append it to the column
                let base_idx = i*chunk;
                for (j, lh) in lh.iter_mut().enumerate() {
                    let idx = base_idx + j;
                    let id = PolyIdentifier::VariablesPolynomial(idx);
                    let poly_ref = self.aux_storage.state_map.get(&id).unwrap();
                    for i in 0..num_aux_gates {
                        // Unassigned slots fall back to the dummy variable.
                        let var = poly_ref.get(i).unwrap_or(&dummy);
                        let value = self.get_value(*var).unwrap();
                        lh.push(value);
                    }
                }
            });
        }
    });
    for a in full_assignments.iter() {
        assert_eq!(a.len(), self.num_input_gates + self.num_aux_gates);
    }
    // Pad every column with zeroes up to `pad_to - 1` entries.
    for p in full_assignments.iter_mut() {
        p.resize(pad_to - 1, E::Fr::zero());
    }
    for a in full_assignments.iter() {
        assert_eq!(a.len(), pad_to - 1);
    }
    Ok((full_assignments, vec![]))
}
/// Bundles the value-form polynomials into an `AssembledPolynomialStorage`:
/// state and witness columns always; setup polynomials and gate
/// selectors only when the system produces setup data.
pub fn make_assembled_poly_storage<'a>(
    &self,
    worker: &Worker,
    with_finalization: bool
) -> Result<AssembledPolynomialStorage<'a, E>, SynthesisError> {
    if with_finalization {
        assert!(self.is_finalized);
    }
    let (state_polys, witness_polys) = self.make_state_and_witness_polynomials(&worker, with_finalization)?;
    // Key state columns by their variable-polynomial index.
    let mut state_polys_map = std::collections::HashMap::new();
    for (idx, poly) in state_polys.into_iter().enumerate() {
        let key = PolyIdentifier::VariablesPolynomial(idx);
        let p = Polynomial::from_values_unpadded(poly)?;
        let p = PolynomialProxy::from_owned(p);
        state_polys_map.insert(key, p);
    }
    let mut witness_polys_map = std::collections::HashMap::new();
    for (idx, poly) in witness_polys.into_iter().enumerate() {
        let key = PolyIdentifier::WitnessPolynomial(idx);
        let p = Polynomial::from_values_unpadded(poly)?;
        let p = PolynomialProxy::from_owned(p);
        witness_polys_map.insert(key, p);
    }
    let mut setup_map = std::collections::HashMap::new();
    let mut gate_selectors_map = std::collections::HashMap::new();
    if S::PRODUCE_SETUP {
        let setup_polys_map = self.make_setup_polynomials(with_finalization)?;
        let gate_selectors = self.output_gate_selectors(&worker)?;
        // Gate selector columns come out in `sorted_gates` order.
        for (gate, poly) in self.sorted_gates.iter().zip(gate_selectors.into_iter()) {
            // let key = gate.clone();
            let key = PolyIdentifier::GateSelector(gate.name());
            let p = Polynomial::from_values_unpadded(poly)?;
            let p = PolynomialProxy::from_owned(p);
            gate_selectors_map.insert(key, p);
        }
        for (key, p) in setup_polys_map.into_iter() {
            let p = PolynomialProxy::from_owned(p);
            setup_map.insert(key, p);
        }
    }
    // Value-form storage: natural order, no LDE yet.
    let assembled = AssembledPolynomialStorage::<E> {
        state_map: state_polys_map,
        witness_map: witness_polys_map,
        setup_map: setup_map,
        scratch_space: std::collections::HashMap::new(),
        gate_selectors: gate_selectors_map,
        named_polys: std::collections::HashMap::new(),
        is_bitreversed: false,
        lde_factor: 1
    };
    Ok(assembled)
}
/// Converts value-form polynomials into monomial form via inverse FFT
/// (bitreversed NTT). State and witness maps are always converted;
/// gate selectors and setup polynomials only when `include_setup` is set.
pub fn create_monomial_storage<'a, 'b>(
    worker: &Worker,
    omegas_inv: &OmegasInvBitreversed<E::Fr>,
    value_form_storage: &'a AssembledPolynomialStorage<E>,
    include_setup: bool,
) -> Result<AssembledPolynomialStorageForMonomialForms<'b, E>, SynthesisError> {
    // Input must be plain value form: natural order, no LDE.
    assert_eq!(value_form_storage.lde_factor, 1);
    assert!(value_form_storage.is_bitreversed == false);
    let mut monomial_storage = AssembledPolynomialStorageForMonomialForms::<E>::new();
    for (&k, v) in value_form_storage.state_map.iter() {
        // Pad to the full domain, then interpolate with scaling factor one.
        let mon_form = v.as_ref().clone_padded_to_domain()?.ifft_using_bitreversed_ntt(
            &worker,
            omegas_inv,
            &E::Fr::one()
        )?;
        let mon_form = PolynomialProxy::from_owned(mon_form);
        monomial_storage.state_map.insert(k, mon_form);
    }
    for (&k, v) in value_form_storage.witness_map.iter() {
        let mon_form = v.as_ref().clone_padded_to_domain()?.ifft_using_bitreversed_ntt(
            &worker,
            omegas_inv,
            &E::Fr::one()
        )?;
        let mon_form = PolynomialProxy::from_owned(mon_form);
        monomial_storage.witness_map.insert(k, mon_form);
    }
    if include_setup {
        for (&k, v) in value_form_storage.gate_selectors.iter() {
            let mon_form = v.as_ref().clone_padded_to_domain()?.ifft_using_bitreversed_ntt(
                &worker,
                omegas_inv,
                &E::Fr::one()
            )?;
            let mon_form = PolynomialProxy::from_owned(mon_form);
            monomial_storage.gate_selectors.insert(k, mon_form);
        }
        for (&k, v) in value_form_storage.setup_map.iter() {
            let mon_form = v.as_ref().clone_padded_to_domain()?.ifft_using_bitreversed_ntt(
                &worker,
                omegas_inv,
                &E::Fr::one()
            )?;
            let mon_form = PolynomialProxy::from_owned(mon_form);
            monomial_storage.setup_map.insert(k, mon_form);
        }
    }
    Ok(monomial_storage)
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::pairing::Engine;
use crate::pairing::ff::PrimeField;
// Basic width-4 test circuit exercising only the main gate
// (including the d_next wire).
struct TestCircuit4<E:Engine>{
    _marker: PhantomData<E>
}
impl<E: Engine> Circuit<E> for TestCircuit4<E> {
    type MainGate = Width4MainGateWithDNext;
    /// Allocates a=10, b=20, c=200, d=100 and enforces:
    /// 2a - b = 0, c - a*b = 0, d - 100 = 0, gamma - b = 0,
    /// then a 2a = d_next gate compensated by a trailing b - b = 0 row.
    fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
        let a = cs.alloc(|| {
            Ok(E::Fr::from_str("10").unwrap())
        })?;
        println!("A = {:?}", a);
        let b = cs.alloc(|| {
            Ok(E::Fr::from_str("20").unwrap())
        })?;
        println!("B = {:?}", b);
        let c = cs.alloc(|| {
            Ok(E::Fr::from_str("200").unwrap())
        })?;
        println!("C = {:?}", c);
        let d = cs.alloc(|| {
            Ok(E::Fr::from_str("100").unwrap())
        })?;
        println!("D = {:?}", d);
        let one = E::Fr::one();
        let mut two = one;
        two.double();
        let mut negative_one = one;
        negative_one.negate();
        // 2a - b = 0
        let two_a = ArithmeticTerm::from_variable_and_coeff(a, two);
        let minus_b = ArithmeticTerm::from_variable_and_coeff(b, negative_one);
        let mut term = MainGateTerm::new();
        term.add_assign(two_a);
        term.add_assign(minus_b);
        cs.allocate_main_gate(term)?;
        // c - a*b == 0
        let mut ab_term = ArithmeticTerm::from_variable(a).mul_by_variable(b);
        ab_term.scale(&negative_one);
        let c_term = ArithmeticTerm::from_variable(c);
        let mut term = MainGateTerm::new();
        term.add_assign(c_term);
        term.add_assign(ab_term);
        cs.allocate_main_gate(term)?;
        // d - 100 == 0
        let hundred = ArithmeticTerm::constant(E::Fr::from_str("100").unwrap());
        let d_term = ArithmeticTerm::from_variable(d);
        let mut term = MainGateTerm::new();
        term.add_assign(d_term);
        term.sub_assign(hundred);
        cs.allocate_main_gate(term)?;
        // let gamma = cs.alloc_input(|| {
        //     Ok(E::Fr::from_str("20").unwrap())
        // })?;
        let gamma = cs.alloc(|| {
            Ok(E::Fr::from_str("20").unwrap())
        })?;
        // gamma - b == 0
        let gamma_term = ArithmeticTerm::from_variable(gamma);
        let b_term = ArithmeticTerm::from_variable(b);
        let mut term = MainGateTerm::new();
        term.add_assign(gamma_term);
        term.sub_assign(b_term);
        cs.allocate_main_gate(term)?;
        // 2a
        let mut term = MainGateTerm::<E>::new();
        term.add_assign(ArithmeticTerm::from_variable_and_coeff(a, two));
        let dummy = CS::get_dummy_variable();
        // 2a - d_next = 0 (last coefficient is the d_next one)
        let (vars, mut coeffs) = CS::MainGate::format_term(term, dummy)?;
        *coeffs.last_mut().unwrap() = negative_one;
        // here d is equal = 2a, so we need to place b there
        // and compensate it with -b somewhere before
        cs.new_single_gate_for_trace_step(&CS::MainGate::default(),
            &coeffs,
            &vars,
            &[]
        )?;
        let mut term = MainGateTerm::<E>::new();
        term.add_assign(ArithmeticTerm::from_variable(b));
        // b + 0 + 0 - b = 0
        let (mut vars, mut coeffs) = CS::MainGate::format_term(term, dummy)?;
        coeffs[3] = negative_one;
        vars[3] = b;
        cs.new_single_gate_for_trace_step(&CS::MainGate::default(),
            &coeffs,
            &vars,
            &[]
        )?;
        Ok(())
    }
}
// Test circuit mixing the main gate, a custom bit gate and
// range/XOR/AND lookup tables.
struct TestCircuit4WithLookups<E:Engine>{
    _marker: PhantomData<E>
}
impl<E: Engine> Circuit<E> for TestCircuit4WithLookups<E> {
    type MainGate = Width4MainGateWithDNext;
    // Declares both the main gate and the custom TestBitGate up front.
    fn declare_used_gates() -> Result<Vec<Box<dyn GateInternal<E>>>, SynthesisError> {
        Ok(
            vec![
                Width4MainGateWithDNext::default().into_internal(),
                TestBitGate::default().into_internal()
            ]
        )
    }
    /// Registers 2-bit range / XOR / AND tables over the first three
    /// state columns, adds a few arithmetic gates, applies one lookup
    /// per table, and finishes with a TestBitGate row.
    fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
        let columns = vec![PolyIdentifier::VariablesPolynomial(0), PolyIdentifier::VariablesPolynomial(1), PolyIdentifier::VariablesPolynomial(2)];
        let range_table = LookupTableApplication::new_range_table_of_width_3(2, columns.clone())?;
        let range_table_name = range_table.functional_name();
        let xor_table = LookupTableApplication::new_xor_table(2, columns.clone())?;
        let xor_table_name = xor_table.functional_name();
        let and_table = LookupTableApplication::new_and_table(2, columns)?;
        let and_table_name = and_table.functional_name();
        cs.add_table(range_table)?;
        cs.add_table(xor_table)?;
        cs.add_table(and_table)?;
        let a = cs.alloc(|| {
            Ok(E::Fr::from_str("10").unwrap())
        })?;
        println!("A = {:?}", a);
        let b = cs.alloc(|| {
            Ok(E::Fr::from_str("20").unwrap())
        })?;
        println!("B = {:?}", b);
        let c = cs.alloc(|| {
            Ok(E::Fr::from_str("200").unwrap())
        })?;
        println!("C = {:?}", c);
        let d = cs.alloc(|| {
            Ok(E::Fr::from_str("100").unwrap())
        })?;
        println!("D = {:?}", d);
        let e = cs.alloc(|| {
            Ok(E::Fr::from_str("2").unwrap())
        })?;
        let binary_x_value = E::Fr::from_str("3").unwrap();
        let binary_y_value = E::Fr::from_str("1").unwrap();
        let binary_x = cs.alloc(|| {
            Ok(binary_x_value)
        })?;
        let binary_y = cs.alloc(|| {
            Ok(binary_y_value)
        })?;
        let one = E::Fr::one();
        let mut two = one;
        two.double();
        let mut negative_one = one;
        negative_one.negate();
        // 2a - b = 0
        let two_a = ArithmeticTerm::from_variable_and_coeff(a, two);
        let minus_b = ArithmeticTerm::from_variable_and_coeff(b, negative_one);
        let mut term = MainGateTerm::new();
        term.add_assign(two_a);
        term.add_assign(minus_b);
        cs.allocate_main_gate(term)?;
        // c - a*b == 0
        let mut ab_term = ArithmeticTerm::from_variable(a).mul_by_variable(b);
        ab_term.scale(&negative_one);
        let c_term = ArithmeticTerm::from_variable(c);
        let mut term = MainGateTerm::new();
        term.add_assign(c_term);
        term.add_assign(ab_term);
        cs.allocate_main_gate(term)?;
        let dummy = CS::get_dummy_variable();
        // and table (gate #2 zero enumerated)
        {
            let table = cs.get_table(&and_table_name)?;
            let num_keys_and_values = table.width();
            let and_result_value = table.query(&[binary_x_value, binary_y_value])?[0];
            let binary_z = cs.alloc(|| {
                Ok(and_result_value)
            })?;
            cs.begin_gates_batch_for_step()?;
            let vars = [binary_x, binary_y, binary_z, dummy];
            cs.allocate_variables_without_gate(
                &vars,
                &[]
            )?;
            cs.apply_single_lookup_gate(&vars[..num_keys_and_values], table)?;
            cs.end_gates_batch_for_step()?;
        }
        // d - 100 == 0
        let hundred = ArithmeticTerm::constant(E::Fr::from_str("100").unwrap());
        let d_term = ArithmeticTerm::from_variable(d);
        let mut term = MainGateTerm::new();
        term.add_assign(d_term);
        term.sub_assign(hundred);
        cs.allocate_main_gate(term)?;
        let var_zero = cs.get_explicit_zero()?;
        // range table (gate #4 zero enumerated): the batched main gate
        // keeps the trace row valid while the lookup constrains `e`.
        {
            let table = cs.get_table(&range_table_name)?;
            let num_keys_and_values = table.width();
            cs.begin_gates_batch_for_step()?;
            let mut term = MainGateTerm::<E>::new();
            term.add_assign(ArithmeticTerm::from_variable_and_coeff(e, E::Fr::zero()));
            term.add_assign(ArithmeticTerm::from_variable_and_coeff(var_zero, E::Fr::zero()));
            term.add_assign(ArithmeticTerm::from_variable_and_coeff(var_zero, E::Fr::zero()));
            let (vars, coeffs) = CS::MainGate::format_linear_term_with_duplicates(term, dummy)?;
            cs.new_gate_in_batch(
                &CS::MainGate::default(),
                &coeffs,
                &vars,
                &[]
            )?;
            cs.apply_single_lookup_gate(&vars[..num_keys_and_values], table)?;
            cs.end_gates_batch_for_step()?;
        }
        // xor table (gate #5 zero enumerated)
        {
            let table = cs.get_table(&xor_table_name)?;
            let num_keys_and_values = table.width();
            let xor_result_value = table.query(&[binary_x_value, binary_y_value])?[0];
            let binary_z = cs.alloc(|| {
                Ok(xor_result_value)
            })?;
            cs.begin_gates_batch_for_step()?;
            let vars = [binary_x, binary_y, binary_z, dummy];
            cs.allocate_variables_without_gate(
                &vars,
                &[]
            )?;
            cs.apply_single_lookup_gate(&vars[..num_keys_and_values], table)?;
            cs.end_gates_batch_for_step()?;
        }
        // Custom gate row: enforces (one - 1) * one = 0 on column A.
        let one = cs.get_explicit_one()?;
        cs.new_single_gate_for_trace_step(
            &TestBitGate::default(),
            &[],
            &[one],
            &[],
        )?;
        Ok(())
    }
}
// Test circuit with many identical main gates and only a few lookups,
// so the trace is much larger than the lookup tables.
struct TestCircuit4WithLookupsManyGatesSmallTable<E:Engine>{
    _marker: PhantomData<E>
}
impl<E: Engine> Circuit<E> for TestCircuit4WithLookupsManyGatesSmallTable<E> {
    type MainGate = Width4MainGateWithDNext;
    // Only the main gate is used; lookups do not need a custom gate here.
    fn declare_used_gates() -> Result<Vec<Box<dyn GateInternal<E>>>, SynthesisError> {
        Ok(
            vec![
                Width4MainGateWithDNext::default().into_internal(),
            ]
        )
    }
    /// Registers 2-bit range / XOR / AND tables, then emits nearly 2^11
    /// copies of one arithmetic gate followed by a single lookup per table.
    fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
        let columns = vec![PolyIdentifier::VariablesPolynomial(0), PolyIdentifier::VariablesPolynomial(1), PolyIdentifier::VariablesPolynomial(2)];
        let range_table = LookupTableApplication::new_range_table_of_width_3(2, columns.clone())?;
        let range_table_name = range_table.functional_name();
        let xor_table = LookupTableApplication::new_xor_table(2, columns.clone())?;
        let xor_table_name = xor_table.functional_name();
        let and_table = LookupTableApplication::new_and_table(2, columns)?;
        let and_table_name = and_table.functional_name();
        cs.add_table(range_table)?;
        cs.add_table(xor_table)?;
        cs.add_table(and_table)?;
        let a = cs.alloc(|| {
            Ok(E::Fr::from_str("10").unwrap())
        })?;
        let b = cs.alloc(|| {
            Ok(E::Fr::from_str("20").unwrap())
        })?;
        let c = cs.alloc(|| {
            Ok(E::Fr::from_str("200").unwrap())
        })?;
        let binary_x_value = E::Fr::from_str("3").unwrap();
        let binary_y_value = E::Fr::from_str("1").unwrap();
        let binary_x = cs.alloc(|| {
            Ok(binary_x_value)
        })?;
        let binary_y = cs.alloc(|| {
            Ok(binary_y_value)
        })?;
        let mut negative_one = E::Fr::one();
        negative_one.negate();
        // Bulk of the trace: repeated copies of the same gate.
        for _ in 0..((1 << 11) - 100) {
            // c - a*b == 0
            let mut ab_term = ArithmeticTerm::from_variable(a).mul_by_variable(b);
            ab_term.scale(&negative_one);
            let c_term = ArithmeticTerm::from_variable(c);
            let mut term = MainGateTerm::new();
            term.add_assign(c_term);
            term.add_assign(ab_term);
            cs.allocate_main_gate(term)?;
        }
        let dummy = CS::get_dummy_variable();
        // and table
        {
            let table = cs.get_table(&and_table_name)?;
            let num_keys_and_values = table.width();
            let and_result_value = table.query(&[binary_x_value, binary_y_value])?[0];
            let binary_z = cs.alloc(|| {
                Ok(and_result_value)
            })?;
            cs.begin_gates_batch_for_step()?;
            let vars = [binary_x, binary_y, binary_z, dummy];
            cs.allocate_variables_without_gate(
                &vars,
                &[]
            )?;
            cs.apply_single_lookup_gate(&vars[..num_keys_and_values], table)?;
            cs.end_gates_batch_for_step()?;
        }
        let var_zero = cs.get_explicit_zero()?;
        // range table: batched zero-coefficient main gate keeps the row
        // valid while the lookup constrains `binary_y`.
        {
            let table = cs.get_table(&range_table_name)?;
            let num_keys_and_values = table.width();
            cs.begin_gates_batch_for_step()?;
            let mut term = MainGateTerm::<E>::new();
            term.add_assign(ArithmeticTerm::from_variable_and_coeff(binary_y, E::Fr::zero()));
            term.add_assign(ArithmeticTerm::from_variable_and_coeff(var_zero, E::Fr::zero()));
            term.add_assign(ArithmeticTerm::from_variable_and_coeff(var_zero, E::Fr::zero()));
            let (vars, coeffs) = CS::MainGate::format_linear_term_with_duplicates(term, dummy)?;
            cs.new_gate_in_batch(
                &CS::MainGate::default(),
                &coeffs,
                &vars,
                &[]
            )?;
            cs.apply_single_lookup_gate(&vars[..num_keys_and_values], table)?;
            cs.end_gates_batch_for_step()?;
        }
        // xor table
        {
            let table = cs.get_table(&xor_table_name)?;
            let num_keys_and_values = table.width();
            let xor_result_value = table.query(&[binary_x_value, binary_y_value])?[0];
            let binary_z = cs.alloc(|| {
                Ok(xor_result_value)
            })?;
            cs.begin_gates_batch_for_step()?;
            let vars = [binary_x, binary_y, binary_z, dummy];
            cs.allocate_variables_without_gate(
                &vars,
                &[]
            )?;
            cs.apply_single_lookup_gate(&vars[..num_keys_and_values], table)?;
            cs.end_gates_batch_for_step()?;
        }
        Ok(())
    }
}
#[test]
fn test_trivial_circuit_with_gate_agnostic_cs() {
    use crate::pairing::bn256::{Bn256, Fr};
    use crate::worker::Worker;
    // Synthesize the basic test circuit and run the setup routine.
    let mut assembly = TrivialAssembly::<Bn256, PlonkCsWidth4WithNextStepParams, Width4MainGateWithDNext>::new();
    let circuit = TestCircuit4::<Bn256> {
        _marker: PhantomData
    };
    circuit.synthesize(&mut assembly).expect("must work");
    // Only the main gate should have been used.
    assert!(assembly.gates.len() == 1);
    // println!("Assembly state polys = {:?}", assembly.storage.state_map);
    // println!("Assembly setup polys = {:?}", assembly.storage.setup_map);
    println!("Assembly contains {} gates", assembly.n());
    assembly.finalize();
    assert!(assembly.is_satisfied());
    // NOTE(review): `finalize` is called a second time here; presumably a
    // no-op on an already finalized assembly — confirm.
    assembly.finalize();
    let worker = Worker::new();
    let (_storage, _permutation_polys) = assembly.perform_setup(&worker).unwrap();
}
#[test]
fn test_setup_and_prove_custom_gate_and_tables() {
    use crate::pairing::bn256::{Bn256, Fr};
    use crate::worker::Worker;
    use crate::plonk::better_better_cs::verifier::*;
    use crate::plonk::better_better_cs::setup::VerificationKey;
    // End-to-end: setup, prove and verify the lookup circuit that also
    // uses the custom TestBitGate.
    let mut assembly = SetupAssembly::<Bn256, PlonkCsWidth4WithNextStepParams, Width4MainGateWithDNext>::new();
    let circuit = TestCircuit4WithLookups::<Bn256> {
        _marker: PhantomData
    };
    circuit.synthesize(&mut assembly).expect("must work");
    println!("Assembly contains {} gates", assembly.n());
    assert!(assembly.is_satisfied());
    assembly.finalize();
    println!("Finalized assembly contains {} gates", assembly.n());
    let worker = Worker::new();
    let setup = assembly.create_setup::<TestCircuit4WithLookups<Bn256>>(&worker).unwrap();
    // Re-synthesize into a proving assembly for proof generation.
    let mut assembly = ProvingAssembly::<Bn256, PlonkCsWidth4WithNextStepParams, Width4MainGateWithDNext>::new();
    circuit.synthesize(&mut assembly).expect("must work");
    assembly.finalize();
    let size = assembly.n().next_power_of_two();
    use crate::plonk::commitments::transcript::keccak_transcript::RollingKeccakTranscript;
    use crate::kate_commitment::*;
    // Deterministic test-only CRS.
    let crs_mons = Crs::<Bn256, CrsForMonomialForm>::crs_42(size, &worker);
    let proof = assembly.create_proof::<TestCircuit4WithLookups<Bn256>, RollingKeccakTranscript<Fr>>(
        &worker,
        &setup,
        &crs_mons,
        None
    ).unwrap();
    let vk = VerificationKey::from_setup(&setup, &worker, &crs_mons).unwrap();
    let valid = verify::<Bn256, TestCircuit4WithLookups<Bn256>, RollingKeccakTranscript<Fr>>(
        &vk,
        &proof,
        None,
    ).unwrap();
    assert!(valid);
    println!("Done!");
}
#[test]
fn test_setup_and_prove_single_gate_and_tables() {
    use crate::pairing::bn256::{Bn256, Fr};
    use crate::worker::Worker;
    use crate::plonk::better_better_cs::verifier::*;
    use crate::plonk::better_better_cs::setup::VerificationKey;
    // End-to-end: setup, prove and verify the many-gates/small-tables
    // circuit (main gate only, no custom gates).
    let mut assembly = SetupAssembly::<Bn256, PlonkCsWidth4WithNextStepParams, Width4MainGateWithDNext>::new();
    let circuit = TestCircuit4WithLookupsManyGatesSmallTable::<Bn256> {
        _marker: PhantomData
    };
    circuit.synthesize(&mut assembly).expect("must work");
    println!("Assembly contains {} gates", assembly.n());
    assert!(assembly.is_satisfied());
    assembly.finalize();
    println!("Finalized assembly contains {} gates", assembly.n());
    let worker = Worker::new();
    let setup = assembly.create_setup::<TestCircuit4WithLookupsManyGatesSmallTable<Bn256>>(&worker).unwrap();
    // Re-synthesize into a proving assembly for proof generation.
    let mut assembly = ProvingAssembly::<Bn256, PlonkCsWidth4WithNextStepParams, Width4MainGateWithDNext>::new();
    circuit.synthesize(&mut assembly).expect("must work");
    assembly.finalize();
    let size = assembly.n().next_power_of_two();
    use crate::plonk::commitments::transcript::keccak_transcript::RollingKeccakTranscript;
    use crate::kate_commitment::*;
    // Deterministic test-only CRS.
    let crs_mons = Crs::<Bn256, CrsForMonomialForm>::crs_42(size, &worker);
    let proof = assembly.create_proof::<TestCircuit4WithLookupsManyGatesSmallTable<Bn256>, RollingKeccakTranscript<Fr>>(
        &worker,
        &setup,
        &crs_mons,
        None
    ).unwrap();
    let vk = VerificationKey::from_setup(&setup, &worker, &crs_mons).unwrap();
    let valid = verify::<Bn256, TestCircuit4WithLookupsManyGatesSmallTable<Bn256>, RollingKeccakTranscript<Fr>>(
        &vk,
        &proof,
        None,
    ).unwrap();
    assert!(valid);
}
#[test]
fn test_bench_long_synthesis() {
    use crate::pairing::bn256::{Bn256, Fr};
    use crate::worker::Worker;
    use crate::plonk::better_better_cs::verifier::*;
    use crate::plonk::better_better_cs::setup::VerificationKey;
    // Benchmark-style test: synthesize the small circuit, then pad the
    // assembly up to 2^26 gates to exercise long finalization.
    let mut assembly = TrivialAssembly::<Bn256, PlonkCsWidth4WithNextStepParams, Width4MainGateWithDNext>::new();
    let circuit = TestCircuit4::<Bn256> {
        _marker: PhantomData
    };
    circuit.synthesize(&mut assembly).expect("must work");
    dbg!(&assembly.n());
    assembly.finalize_to_size_log_2(26);
}
// Minimal custom gate enforcing that column A holds a bit:
// (A - 1) * A = 0 on every row where the gate is active.
#[derive(Clone, Debug, Hash, Default)]
pub struct TestBitGate;
impl<E: Engine> GateInternal<E> for TestBitGate {
    fn name(&self) -> &'static str {
        "Test bit gate on A"
    }
    // Quadratic constraint: (A - 1) * A.
    fn degree(&self) -> usize {
        2
    }
    fn can_include_public_inputs(&self) -> bool {
        false
    }
    // Only the first variable polynomial (column A) is queried.
    fn all_queried_polynomials(&self) -> &'static [PolynomialInConstraint] {
        const A: [PolynomialInConstraint; 1] = [
            PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(0)),
        ];
        &A
    }
    fn setup_polynomials(&self) -> &'static [PolyIdentifier] {
        &[]
    }
    fn variable_polynomials(&self) -> &'static [PolyIdentifier] {
        const A: [PolyIdentifier; 1] = [
            PolyIdentifier::VariablesPolynomial(0),
        ];
        &A
    }
    // This gate is always fully opened, so linearization does not help.
    fn benefits_from_linearization(&self) -> bool {
        false
    }
    fn linearizes_over(&self) -> &'static [PolynomialInConstraint] {
        &[]
    }
    fn needs_opened_for_linearization(&self) -> &'static [PolynomialInConstraint] {
        &[]
    }
    fn num_quotient_terms(&self) -> usize {
        1
    }
    /// Evaluates the constraint on a single trace row; zero means satisfied.
    fn verify_on_row<'a>(&self, row: usize, poly_storage: &AssembledPolynomialStorage<'a, E>, _last_row: bool) -> E::Fr {
        let q_a = poly_storage.get_poly_at_step(PolyIdentifier::VariablesPolynomial(0), row);
        // (A - 1) * A
        let mut tmp = q_a;
        tmp.sub_assign(&E::Fr::one());
        tmp.mul_assign(&q_a);
        tmp
    }
    /// Computes this gate's contribution to the quotient polynomial over
    /// the coset LDE, scaled by its challenge.
    fn contribute_into_quotient<'a, 'b>(
        &self,
        domain_size: usize,
        poly_storage: &mut AssembledPolynomialStorage<'a, E>,
        monomials_storage: & AssembledPolynomialStorageForMonomialForms<'b, E>,
        challenges: &[E::Fr],
        omegas_bitreversed: &BitReversedOmegas<E::Fr>,
        _omegas_inv_bitreversed: &OmegasInvBitreversed<E::Fr>,
        worker: &Worker
    ) -> Result<Polynomial<E::Fr, Values>, SynthesisError> {
        assert!(domain_size.is_power_of_two());
        assert_eq!(challenges.len(), <Self as GateInternal<E>>::num_quotient_terms(&self));
        let lde_factor = poly_storage.lde_factor;
        assert!(lde_factor.is_power_of_two());
        assert!(poly_storage.is_bitreversed);
        let coset_factor = E::Fr::multiplicative_generator();
        // Make sure the LDEs of all queried polynomials are available.
        for &p in <Self as GateInternal<E>>::all_queried_polynomials(&self).into_iter() {
            ensure_in_map_or_create(&worker,
                p,
                domain_size,
                omegas_bitreversed,
                lde_factor,
                coset_factor,
                monomials_storage,
                poly_storage
            )?;
        }
        let ldes_storage = &*poly_storage;
        // (A - 1) * A
        let a_ref = get_from_map_unchecked(
            PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(0)),
            ldes_storage
        );
        let mut tmp = a_ref.clone();
        drop(a_ref);
        let one = E::Fr::one();
        // Apply the constraint pointwise over the LDE values.
        tmp.map(&worker,
            |el| {
                let mut tmp = *el;
                tmp.sub_assign(&one);
                tmp.mul_assign(&*el);
                *el = tmp;
            },
        );
        tmp.scale(&worker, challenges[0]);
        Ok(tmp)
    }
    fn contribute_into_linearization<'a>(
        &self,
        _domain_size: usize,
        _at: E::Fr,
        _queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
        _monomials_storage: & AssembledPolynomialStorageForMonomialForms<'a, E>,
        _challenges: &[E::Fr],
        _worker: &Worker
    ) -> Result<Polynomial<E::Fr, Coefficients>, SynthesisError> {
        unreachable!("this gate does not contribute into linearization");
    }
    /// Verifier-side counterpart: evaluates (A - 1) * A at the opening
    /// point from the queried value and scales it by the challenge.
    fn contribute_into_verification_equation(
        &self,
        _domain_size: usize,
        _at: E::Fr,
        queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
        challenges: &[E::Fr],
    ) -> Result<E::Fr, SynthesisError> {
        assert_eq!(challenges.len(), 1);
        // (A-1) * A
        let a_value = *queried_values.get(&PolynomialInConstraint::from_id(PolyIdentifier::VariablesPolynomial(0)))
            .ok_or(SynthesisError::AssignmentMissing)?;
        let mut result = a_value;
        result.sub_assign(&E::Fr::one());
        result.mul_assign(&a_value);
        result.mul_assign(&challenges[0]);
        Ok(result)
    }
    fn put_public_inputs_into_selector_id(&self) -> Option<usize> {
        None
    }
    fn box_clone(&self) -> Box<dyn GateInternal<E>> {
        Box::from(self.clone())
    }
    fn contribute_into_linearization_commitment(
        &self,
        _domain_size: usize,
        _at: E::Fr,
        _queried_values: &std::collections::HashMap<PolynomialInConstraint, E::Fr>,
        _commitments_storage: &std::collections::HashMap<PolyIdentifier, E::G1Affine>,
        _challenges: &[E::Fr],
    ) -> Result<E::G1, SynthesisError> {
        unreachable!("this gate does not contribute into linearization");
    }
}
impl<E: Engine> Gate<E> for TestBitGate {}
}
<file_sep>/src/plonk/better_better_cs/gadgets/rescue.rs
use crate::pairing::{
Engine,
};
use crate::pairing::ff::{
Field,
PrimeField,
PrimeFieldRepr,
BitIterator
};
use crate::{
SynthesisError,
};
use crate::plonk::better_better_cs::cs::{
Variable,
ConstraintSystem,
ArithmeticTerm,
MainGateTerm,
Width4MainGateWithDNextEquation,
MainGateEquation,
GateEquationInternal,
GateEquation,
LinearCombinationOfTerms,
PolynomialMultiplicativeTerm,
PolynomialInConstraint,
TimeDilation
};
use super::assignment::{
Assignment
};
use super::num::{AllocatedNum};
use crate::plonk::better_better_cs::cs::PlonkConstraintSystemParams;
/// Rescue hash gadget; rate/capacity/round parameters live as
/// associated constants on the impl below.
pub struct Rescue125<E: Engine> {
    _marker: std::marker::PhantomData<E>
}
// Represents a Rescue state element either as an allocated circuit
// number or as a known constant.
// NOTE(review): not referenced in the visible part of this file — confirm usage.
enum RescueStateSimplifier<E: Engine> {
    Number(AllocatedNum<E>),
    Constant(E::Fr)
}
/// Custom gate for the alpha = 5 s-box: holds one linear combination
/// of terms per constraint (three constraints total).
#[derive(Clone, Debug, Hash)]
pub struct Rescue5CustomGate(pub [LinearCombinationOfTerms; 3]);
impl<E: Engine> GateEquationInternal<E> for Rescue5CustomGate {
    // Each constraint multiplies at most two variable polynomials
    // (see `get_equation`), hence degree 2.
    fn degree(&self) -> usize {
        2
    }
    // Three chained constraints building x^5 from two squarings and a
    // final multiplication.
    fn num_constraints(&self) -> usize {
        3
    }
    // This gate only exposes its constraints as a set.
    fn get_constraint(&self) -> &LinearCombinationOfTerms<E> {
        unreachable!("must not try to access single constraint of Rescue alpha 5 gate");
    }
    fn get_constraints(&self) -> &[LinearCombinationOfTerms<E>] {
        &self.0[..]
    }
}
impl GateEquation for Rescue5CustomGate {
    // Width4MainGateWithDNextEquation is NOT generic, so this is fine
    // and safe since it's sync!
    /// Returns a lazily initialized, process-wide instance of this gate.
    ///
    /// NOTE(review): the write is synchronized via `Once::call_once`,
    /// but handing out shared references into a `static mut` is fragile
    /// under Rust's aliasing rules; a `lazy_static`-style wrapper would
    /// be preferable — confirm.
    fn static_description() -> &'static Self {
        static mut VALUE: Option<Rescue5CustomGate> = None;
        static INIT: std::sync::Once = std::sync::Once::new();
        unsafe {
            INIT.call_once(||{
                VALUE = Some(Rescue5CustomGate::default());
            });
            VALUE.as_ref().unwrap()
        }
    }
}
impl std::default::Default for Rescue5CustomGate {
    // Default is the canonical alpha = 5 s-box constraint set.
    fn default() -> Self {
        Self::get_equation()
    }
}
impl Rescue5CustomGate {
pub fn get_equation() -> Self {
let mut term_square: Vec<PolynomialMultiplicativeTerm> = Vec::with_capacity(2);
// constant
term_square.push(
PolynomialMultiplicativeTerm(
vec![
PolynomialInConstraint::VariablesPolynomial(0, TimeDilation(0)),
PolynomialInConstraint::VariablesPolynomial(0, TimeDilation(0))
]
)
);
term_square.push(
PolynomialMultiplicativeTerm(
vec![
PolynomialInConstraint::VariablesPolynomial(1, TimeDilation(0))
]
)
);
let mut term_quad: Vec<PolynomialMultiplicativeTerm> = Vec::with_capacity(2);
// constant
term_quad.push(
PolynomialMultiplicativeTerm(
vec![
PolynomialInConstraint::VariablesPolynomial(1, TimeDilation(0)),
PolynomialInConstraint::VariablesPolynomial(1, TimeDilation(0))
]
)
);
term_quad.push(
PolynomialMultiplicativeTerm(
vec![
PolynomialInConstraint::VariablesPolynomial(2, TimeDilation(0))
]
)
);
let mut term_fifth: Vec<PolynomialMultiplicativeTerm> = Vec::with_capacity(2);
// constant
term_fifth.push(
PolynomialMultiplicativeTerm(
vec![
PolynomialInConstraint::VariablesPolynomial(0, TimeDilation(0)),
PolynomialInConstraint::VariablesPolynomial(2, TimeDilation(0))
]
)
);
term_fifth.push(
PolynomialMultiplicativeTerm(
vec![
PolynomialInConstraint::VariablesPolynomial(3, TimeDilation(0))
]
)
);
Self([
LinearCombinationOfTerms(term_square),
LinearCombinationOfTerms(term_quad),
LinearCombinationOfTerms(term_fifth)])
}
}
impl<E: Engine> Rescue125<E> {
const RATE: usize = 2;
const CAPACITY: usize = 1;
const STATE_WIDTH: usize = Self::RATE + Self::CAPACITY;
const ALPHA: u64 = 5;
const NUM_ROUNDS_DOUBLED: usize = 44;
pub fn hash<CS: ConstraintSystem<E, MainGate = Width4MainGateWithDNextEquation>>(
cs: &mut CS,
input: &[AllocatedNum<E>]
) -> Result<AllocatedNum<E>, SynthesisError> {
assert_eq!(input.len(), Self::RATE);
assert_eq!(CS::Params::STATE_WIDTH, 4);
assert!(CS::Params::CAN_ACCESS_NEXT_TRACE_STEP);
let one = E::Fr::one();
let zero = E::Fr::zero();
let mut minus_one = one;
minus_one.negate();
let mut round_constant_placeholder = E::Fr::one();
round_constant_placeholder.double();
let dummy_var = CS::get_dummy_variable();
// with first sbox
// (input[0] + round_constant[0]) ^ [alpha_inv] -> state[0]
// (input[1] + round_constant[1]) ^ [alpha_inv] -> state[1]
// round_constant[2] ^ [alpha_inv] -> state[2]
// but we proof otherwise
// state[0] ^ 5 = (input[0] + round_constant[0])
// state[1] ^ 5 = (input[1] + round_constant[1])
// state[2] = constant
// with state width of 4 we can have
// a = state[0]
// b = state[0] ^ 2
// c = state[0] ^ 4 = b^2
// d = c * a = state[0]^5
// and BEFORE it we have a gate
// input[0] + round_constant[0] - d_next = 0
let state_0 = AllocatedNum::<E>::alloc(
cs,
|| {
Ok(E::Fr::one())
}
)?;
let state_0_squared = state_0.clone();
let state_0_quad = state_0.clone();
let state_0_fifth = state_0.clone();
let state_1 = AllocatedNum::<E>::alloc(
cs,
|| {
Ok(E::Fr::one())
}
)?;
let state_1_squared = state_1.clone();
let state_1_quad = state_1.clone();
let state_1_fifth = state_1.clone();
// input[0] + round_constant[0] - state[0]^5 = 0
cs.new_single_gate_for_trace_step(
Width4MainGateWithDNextEquation::static_description(),
&[one, minus_one, zero, zero, zero, round_constant_placeholder, zero],
&[input[0].get_variable(), state_0_fifth.get_variable(), dummy_var, dummy_var],
&[]
)?;
// now it's time for a custom gate
cs.new_single_gate_for_trace_step(
Rescue5CustomGate::static_description(),
&[],
&[state_0.get_variable(), state_0_squared.get_variable(), state_0_quad.get_variable(), state_0_fifth.get_variable()],
&[]
)?;
// same for input[1]
cs.new_single_gate_for_trace_step(
Width4MainGateWithDNextEquation::static_description(),
&[one, minus_one, zero, zero, zero, round_constant_placeholder, zero],
&[input[1].get_variable(), state_0_fifth.get_variable(), dummy_var, dummy_var],
&[]
)?;
// now it's time for a custom gate
cs.new_single_gate_for_trace_step(
Rescue5CustomGate::static_description(),
&[],
&[state_1.get_variable(), state_1_squared.get_variable(), state_1_quad.get_variable(), state_1_fifth.get_variable()],
&[]
)?;
// now apply MDS, add constants and make next sbox manually
// state_0 * coeff + state_1 * coeff + constant + round_constant
//
let mut state_0_after_mds_and_round_const = AllocatedNum::<E>::alloc(
cs,
|| {
Ok(E::Fr::one())
}
)?;
cs.new_single_gate_for_trace_step(
Width4MainGateWithDNextEquation::static_description(),
&[round_constant_placeholder, round_constant_placeholder, minus_one, zero, round_constant_placeholder, zero, zero],
&[state_0.get_variable(), state_1.get_variable(), state_0_after_mds_and_round_const.get_variable(), dummy_var],
&[]
)?;
let mut state_1_after_mds_and_round_const = AllocatedNum::<E>::alloc(
cs,
|| {
Ok(E::Fr::one())
}
)?;
cs.new_single_gate_for_trace_step(
Width4MainGateWithDNextEquation::static_description(),
&[round_constant_placeholder, round_constant_placeholder, minus_one, zero, round_constant_placeholder, zero, zero],
&[state_0.get_variable(), state_1.get_variable(), state_1_after_mds_and_round_const.get_variable(), dummy_var],
&[]
)?;
let mut state_2_after_mds_and_round_const = AllocatedNum::<E>::alloc(
cs,
|| {
Ok(E::Fr::one())
}
)?;
cs.new_single_gate_for_trace_step(
Width4MainGateWithDNextEquation::static_description(),
&[round_constant_placeholder, round_constant_placeholder, minus_one, zero, round_constant_placeholder, zero, zero],
&[state_0.get_variable(), state_1.get_variable(), state_2_after_mds_and_round_const.get_variable(), dummy_var],
&[]
)?;
for round in 1..Self::NUM_ROUNDS_DOUBLED {
let (state_0, state_1, state_2) = if round & 1 == 0 {
let state_0 = AllocatedNum::<E>::alloc(
cs,
|| {
Ok(E::Fr::one())
}
)?;
let state_0_squared = state_0.clone();
let state_0_quad = state_0.clone();
let state_1 = AllocatedNum::<E>::alloc(
cs,
|| {
Ok(E::Fr::one())
}
)?;
let state_1_squared = state_1.clone();
let state_1_quad = state_1.clone();
let state_2 = AllocatedNum::<E>::alloc(
cs,
|| {
Ok(E::Fr::one())
}
)?;
let state_2_squared = state_2.clone();
let state_2_quad = state_2.clone();
// now it's time for a custom gates cause we already had round constant added
cs.new_single_gate_for_trace_step(
Rescue5CustomGate::static_description(),
&[],
&[state_0.get_variable(), state_0_squared.get_variable(), state_0_quad.get_variable(), state_0_after_mds_and_round_const.get_variable()],
&[]
)?;
cs.new_single_gate_for_trace_step(
Rescue5CustomGate::static_description(),
&[],
&[state_1.get_variable(), state_1_squared.get_variable(), state_1_quad.get_variable(), state_1_after_mds_and_round_const.get_variable()],
&[]
)?;
cs.new_single_gate_for_trace_step(
Rescue5CustomGate::static_description(),
&[],
&[state_2.get_variable(), state_2_squared.get_variable(), state_2_quad.get_variable(), state_2_after_mds_and_round_const.get_variable()],
&[]
)?;
(state_0, state_1, state_2)
} else {
// this s-box is just making into 5th power
let state_0_squared = state_0_after_mds_and_round_const.clone();
let state_0_quad = state_0_after_mds_and_round_const.clone();
let state_0_fifth = state_0_after_mds_and_round_const.clone();
let state_1_squared = state_1_after_mds_and_round_const.clone();
let state_1_quad = state_1_after_mds_and_round_const.clone();
let state_1_fifth = state_1_after_mds_and_round_const.clone();
let state_2_squared = state_2_after_mds_and_round_const.clone();
let state_2_quad = state_2_after_mds_and_round_const.clone();
let state_2_fifth = state_2_after_mds_and_round_const.clone();
cs.new_single_gate_for_trace_step(
Rescue5CustomGate::static_description(),
&[],
&[state_0_after_mds_and_round_const.get_variable(), state_0_squared.get_variable(), state_0_quad.get_variable(), state_0_fifth.get_variable()],
&[]
)?;
cs.new_single_gate_for_trace_step(
Rescue5CustomGate::static_description(),
&[],
&[state_1_after_mds_and_round_const.get_variable(), state_1_squared.get_variable(), state_1_quad.get_variable(), state_1_fifth.get_variable()],
&[]
)?;
cs.new_single_gate_for_trace_step(
Rescue5CustomGate::static_description(),
&[],
&[state_2_after_mds_and_round_const.get_variable(), state_2_squared.get_variable(), state_2_quad.get_variable(), state_2_fifth.get_variable()],
&[]
)?;
(state_0_fifth, state_1_fifth, state_2_fifth)
};
// mds and constant
state_0_after_mds_and_round_const = AllocatedNum::<E>::alloc(
cs,
|| {
Ok(E::Fr::one())
}
)?;
cs.new_single_gate_for_trace_step(
Width4MainGateWithDNextEquation::static_description(),
&[round_constant_placeholder, round_constant_placeholder, round_constant_placeholder, minus_one, round_constant_placeholder, zero, zero],
&[state_0.get_variable(), state_1.get_variable(), state_2.get_variable(), state_0_after_mds_and_round_const.get_variable()],
&[]
)?;
state_1_after_mds_and_round_const = AllocatedNum::<E>::alloc(
cs,
|| {
Ok(E::Fr::one())
}
)?;
cs.new_single_gate_for_trace_step(
Width4MainGateWithDNextEquation::static_description(),
&[round_constant_placeholder, round_constant_placeholder, round_constant_placeholder, minus_one, round_constant_placeholder, zero, zero],
&[state_0.get_variable(), state_1.get_variable(), state_2.get_variable(), state_1_after_mds_and_round_const.get_variable()],
&[]
)?;
state_2_after_mds_and_round_const = AllocatedNum::<E>::alloc(
cs,
|| {
Ok(E::Fr::one())
}
)?;
cs.new_single_gate_for_trace_step(
Width4MainGateWithDNextEquation::static_description(),
&[round_constant_placeholder, round_constant_placeholder, round_constant_placeholder, minus_one, round_constant_placeholder, zero, zero],
&[state_0.get_variable(), state_1.get_variable(), state_2.get_variable(), state_2_after_mds_and_round_const.get_variable()],
&[]
)?;
}
Ok(state_0_after_mds_and_round_const)
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::plonk::better_better_cs::cs::*;
#[test]
fn test_trivial_circuit_with_gate_agnostic_cs() {
use crate::pairing::bn256::{Bn256, Fr};
let mut assembly = TrivialAssembly::<Bn256, PlonkCsWidth4WithNextStepParams, Width4MainGateWithDNextEquation>::new();
let before = assembly.n;
let input_0 = AllocatedNum::alloc(
&mut assembly,
|| {
Ok(Fr::one())
}
).unwrap();
let input_1 = AllocatedNum::alloc(
&mut assembly,
|| {
Ok(Fr::one())
}
).unwrap();
let _ = Rescue125::hash(&mut assembly, &vec![input_0, input_1]).unwrap();
assert!(assembly.constraints.len() == 2);
let num_gates = assembly.n - before;
println!("Single rescue r = 2, c = 1, alpha = 5 invocation takes {} gates", num_gates);
for (gate, density) in assembly.gate_density.0.into_iter() {
println!("Custom gate {:?} selector = {:?}", gate, density);
}
// println!("Assembly state polys = {:?}", assembly.storage.state_map);
// println!("Assembly setup polys = {:?}", assembly.storage.setup_map);
}
}<file_sep>/src/plonk/better_better_cs/redshift/multioracle.rs
use crate::pairing::{Engine};
use crate::pairing::ff::{PrimeField, PrimeFieldRepr};
use crate::worker::Worker;
use crate::plonk::commitments::transparent::utils::log2_floor;
use super::*;
use super::tree_hash::*;
use super::binary_tree::{BinaryTree, BinaryTreeParams};
use crate::plonk::polynomials::*;
pub struct Multioracle<'a, E: Engine, H: BinaryTreeHasher<E::Fr> > {
pub polynomial_values_refs: Vec<&'a [E::Fr]>,
pub tree: BinaryTree<E, H>
}
impl<'a, E: Engine, H: BinaryTreeHasher<E::Fr>> Multioracle<'a, E, H> {
pub fn combine_leafs<'p>(
polynomials: &'p [Polynomial<E::Fr, Values>],
num_values_from_one_poly_into_leaf: usize,
worker: &Worker
) -> Vec<Vec<&'p [E::Fr]>> {
let num_polys = polynomials.len();
let num_leafs = polynomials[0].size() / num_values_from_one_poly_into_leaf;
println!("{} leafs total", num_leafs);
assert!(num_leafs.is_power_of_two());
// we need vector (over leafs)
// of vectors(over individual polys)
// of references
let mut leaf_refs_combined: Vec<Vec<&[E::Fr]>> = vec![vec![&[]; num_polys]; num_leafs];
let poly_refs: Vec<_> = polynomials.iter().map(|el| el.as_ref()).collect();
let poly_refs_ref = &poly_refs;
println!("Start combining leafs");
worker.scope(leaf_refs_combined.len(), |scope, chunk| {
for (i, lh) in leaf_refs_combined.chunks_mut(chunk)
.enumerate() {
scope.spawn(move |_| {
// we take `values_per_leaf` values from each of the polynomial
// and push them into the conbinations
let base_idx = i*chunk;
for (j, lh) in lh.iter_mut().enumerate() {
let idx = base_idx + j;
let start = idx * num_values_from_one_poly_into_leaf;
let end = start + num_values_from_one_poly_into_leaf;
for (idx, &poly_values) in poly_refs_ref.iter().enumerate() {
let slice = &poly_values[start..end];
lh[idx] = slice;
}
}
});
}
});
println!("Done combining leafs");
leaf_refs_combined
}
pub fn new_from_polynomials(
polynomials: &'a [Polynomial<E::Fr, Values>],
tree_hasher: H,
num_values_from_one_poly_into_leaf: usize,
worker: &Worker
) -> Self {
// first make combinations of leaf values
// expect polynomials to be in bitreverse enumeration
let num_polys = polynomials.len();
let values_per_leaf = num_polys * num_values_from_one_poly_into_leaf;
println!("Placing {} values into single leaf", values_per_leaf);
let tree_params = BinaryTreeParams {
values_per_leaf: values_per_leaf
};
// we need vector (over leafs)
// of vectors(over individual polys)
// of references
let leaf_refs_combined = Self::combine_leafs(
polynomials,
num_values_from_one_poly_into_leaf,
&worker
);
println!("Start making a tree");
let tree = BinaryTree::create_from_combined_leafs(
&leaf_refs_combined,
num_polys,
tree_hasher,
&tree_params
);
println!("Done making a tree");
let poly_refs: Vec<_> = polynomials.iter().map(|el| el.as_ref()).collect();
Self {
polynomial_values_refs: poly_refs,
tree
}
}
} | 143ff9af3510738691619a23e44609b28a0793b1 | [
"Markdown",
"Rust",
"Shell"
] | 109 | Rust | matter-labs/bellman | bbac0559fdc440b2331eca1c347a30559a3dd969 | e681ee195d1963be71bf85c764b2d6849c4a49e1 |
refs/heads/master | <repo_name>HouPoc/CS331_Assignment<file_sep>/Assignment_3/main.py
import sys
from preprocess import *
from classifier import *
def main():
train = str(sys.argv[1])
preprocessed_train_file = str(sys.argv[2])
test = str(sys.argv[3])
preprocessed_test_file = str(sys.argv[4])
# generate bag of words by train data
vocabulary = sorted(get_vocabulary(train))
# preprocess data to preprocessed file
preprocessed_train = preprocess(vocabulary, train, preprocessed_train_file)
preprocessed_test = preprocess(vocabulary, test, preprocessed_test_file)
# use Naive Bayes
train_accuracy = naive_bayes(preprocessed_train, preprocessed_train)
test_accuracy = naive_bayes(preprocessed_train, preprocessed_test)
fo = open("results.txt", "wb")
fo.write("Training Data: " + preprocessed_train_file)
fo.write(" Testing Data: " + preprocessed_train_file + '\n')
fo.write("Accuracy: " + train_accuracy + '\n')
fo.write("Training Data: " + preprocessed_train_file)
fo.write(" Testing Data: " + preprocessed_test_file + '\n')
fo.write("Accuracy: " + test_accuracy + '\n')
fo.close()
if __name__ == '__main__':
main()
<file_sep>/Assignment_3/classifier.py
from preprocess import *
from math import log
import numpy
######
# Purpose: Implement Naive Bayes algorithm
# Return: Accuracy of this data set
######
def naive_bayes(train_data, test_data):
P_class = []
distribution = []
training_phase(train_data, distribution, P_class)
return testing_phase(test_data, distribution, P_class)
######
# Purpose: Calculate distribution of train data
# Return: None
######
def training_phase(matrix, distribution, P_class):
num_C1 = 0.
for i in matrix:
if i[-1] == 1:
num_C1 += 1.
num_C0 = len(matrix) - num_C1
P_class.append(num_C0 / len(matrix)) # P(C=0)
P_class.append(num_C1 / len(matrix)) # P(C=1)
size = len(matrix[0]) - 1
for i in range(size):
num_A0C0 = 0.
num_A1C0 = 0.
num_A0C1 = 0.
num_A1C1 = 0.
result = []
for row in matrix:
if row[-1] == 0:
if row[i] == 0:
num_A0C0 += 1.
else:
num_A1C0 += 1.
else:
if row[i] == 0:
num_A0C1 += 1.
else:
num_A1C1 += 1.
# Dirichlet Priors
result.append((num_A0C0 + 1.)/(num_C0 + 2.)) # P(A=0|C=0)
result.append((num_A1C0 + 1.)/(num_C0 + 2.)) # P(A=1|C=0)
result.append((num_A0C1 + 1.)/(num_C1 + 2.)) # P(A=0|C=1)
result.append((num_A1C1 + 1.)/(num_C1 + 2.)) # P(A=1|C=1)
distribution.append(result)
######
# Purpose: Predict classlabel of each instance in test data according to distribution
# Return: Accuracy of this data set
######
def testing_phase(data, distribution, P_class):
answer_class_label = []
test_class_label = []
correct = 0.
for features in data:
PC0 = 0.
PC1 = 0.
for i in range(len(features)-1):
if (features[i] == 1):
PC0 = PC0 + log(distribution[i][1])
PC1 = PC1 + log(distribution[i][3])
else:
PC0 = PC0 + log(distribution[i][0])
PC1 = PC1 + log(distribution[i][2])
PC0 = PC0 + log(P_class[0])
PC1 = PC1 + log(P_class[1])
if PC0 > PC1:
test_class_label = 0
else:
test_class_label = 1
if features[-1] == test_class_label:
correct += 1.
return "{0:.2f}%".format(correct / len(data) * 100)<file_sep>/Assignment_1/refer.py
import sys
import operator
class state_node ():
def __init__(self, left = None, right = None, parent = None, f_value = None):
self.left_bank = left
self.right_bank = right
self.parent = parent
self.f_value = f_value
def goal_test(current, goal):
for i in range(3):
if current.left_bank[i] != goal.left_bank[i] or current.right_bank[i] != goal.right_bank[i]:
return False
return True
def check_balance(action):
if action.left_bank[0] >= 0 and action.left_bank[1] >= 0 and action.right_bank[0] >= 0 and action.right_bank[1] >= 0:
if (action.left_bank[0] >= action.left_bank[1] or action.left_bank[0]==0) and (action.right_bank[0] >= action.right_bank[1] or action.right_bank[0]==0):
return True
return False
def child_node(current):
children = []
m_l = current.left_bank[0]
c_l = current.left_bank[1]
b_l = current.left_bank[2]
m_r = current.right_bank[0]
c_r = current.right_bank[1]
b_r = current.right_bank[2]
if b_l == 1:
# Put one missionary in the boat
new_child = state_node ([m_l-1,c_l,b_l-1], [m_r+1,c_r,b_r+1], current)
if check_balance(new_child):
children.append(new_child)
# Put two missionaries in the boat
new_child = state_node ([m_l-2,c_l,b_l-1], [m_r+2,c_r,b_r+1], current)
if check_balance(new_child):
children.append(new_child)
# Put one cannibal in the boat
new_child = state_node ([m_l,c_l-1,b_l-1], [m_r,c_r+1,b_r+1], current)
if check_balance(new_child):
children.append(new_child)
# Put one cannibal and one missionary in the boat
new_child = state_node ([m_l-1,c_l-1,b_l-1], [m_r+1,c_r+1,b_r+1], current)
if check_balance(new_child):
children.append(new_child)
# Put two cannibals in the boat
new_child = state_node ([m_l,c_l-2,b_l-1], [m_r,c_r+2,b_r+1], current)
if check_balance(new_child):
children.append(new_child)
else:
# Put one missionary in the boat
new_child = state_node([m_l+1,c_l,b_l+1], [m_r-1,c_r,b_r-1], current)
if check_balance(new_child):
children.append(new_child)
# Put two missionaries in the boat
new_child = state_node ([m_l+2,c_l,b_l+1], [m_r-2,c_r,b_r-1], current)
if check_balance(new_child):
children.append(new_child)
# Put one cannibal in the boat
new_child = state_node ([m_l,c_l+1,b_l+1], [m_r,c_r-1,b_r-1], current)
if check_balance(new_child):
children.append(new_child)
# Put one cannibal and one missionary in the boat
new_child = state_node([m_l+1,c_l+1,b_l+1], [m_r-1,c_r-1,b_r-1], current)
if check_balance(new_child):
children.append(new_child)
# Put two cannibals in the boat
new_child = state_node([m_l,c_l+2,b_l+1], [m_r,c_r-2,b_r-1], current)
if check_balance(new_child):
children.append(new_child)
return children
def read_source (file):
f = open(file,'r')
state = []
for line in f:
if line is not None:
state.append(map(int, line.split(',')))
return state_node(state[0],state[1])
def is_not_in (target, collection):
for element in collection:
if goal_test (target, element):
return False
return True
def path (terminate_state):
node = terminate_state
route = []
while node is not None:
route.insert(0,(node.left_bank, node.right_bank))
node = node.parent
return route
def BFS(initial, goal, expand):
frontier = [initial]
explored = {}
while len(frontier) != 0:
current = frontier.pop(0)
explored[create_key(current)] = current
expand.append(current)
child_nodes = child_node(current)
for child in child_nodes:
if goal_test(child, goal):
return child
key = create_key(child)
if is_not_in(child, frontier) and (not explored.has_key(key)):
frontier.append(child)
return False
def DFS(initial, goal, expand):
frontier = [initial]
explored = {}
while len(frontier) != 0:
current = frontier.pop(0)
if goal_test(current, goal):
return current
explored[create_key(current)] = current
expand.append(current)
child_nodes = child_node(current)
i = 0
for child in child_nodes:
key = create_key(child)
if is_not_in(child, frontier) and (not explored.has_key(key)):
frontier.insert(i, child)
i += 1
return False
def IDDFS(initial, goal, expand):
for depth in range(100000):
explored = {}
frontier = [initial]
result = R_DLS(initial, goal, depth, explored, expand, frontier)
if result != 'cutoff':
return result
def R_DLS(current, goal, limit, explored, expand, frontier):
temp = []
key = create_key(current)
explored[key] = current
if len(frontier) != 0:
frontier.pop(0)
if goal_test(current, goal):
return current
elif limit == 0:
return 'cutoff'
else:
cutoff_occurred = False
expand.append(current)
child_nodes = child_node(current)
i = 0
for child in child_nodes:
key = create_key(child)
if is_not_in(child, frontier) and (not explored.has_key(key)):
frontier.insert(i, child)
temp.insert(i, child)
i += 1
for child in temp:
result = R_DLS(child, goal, limit - 1, explored, expand, frontier)
if result == 'cutoff':
cutoff_occurred = True
elif result is not None:
return result
if cutoff_occurred:
return 'cutoff'
else:
return None
def A_STAR(initial, goal, expand):
explored = {}
initial.f_value = 0 + heuristic(initial, goal)
frontier = [initial]
while len(frontier) != 0:
current = frontier.pop(0)
if goal_test(current, goal):
return current
expand.append(current)
explored[create_key(current)] = current
child_nodes = child_node(current)
for child in child_nodes:
key = create_key(child)
if is_not_in(child, frontier) and (not explored.has_key(key)):
store_priority_list(child, initial, goal, frontier)
return False
def store_priority_list(node, initial, goal, frontier):
node.f_value = path_cost(node) + heuristic(node, goal)
frontier.append(node)
frontier.sort(key = operator.attrgetter('f_value'))
def heuristic(node, goal):
return (goal.left_bank[0] - node.left_bank[0]) + (goal.left_bank[1] - node.left_bank[1])
def path_cost(node):
return len(path(node)) - 1
def create_key(node):
return str(node.left_bank[0]) + str(node.left_bank[1]) + str(node.left_bank[2]) + str(node.right_bank[0]) + str(node.right_bank[1]) + str(node.right_bank[2])
def out_solution (file, path, expand):
f = open(file, 'w')
if len(path) != 0:
for state in path:
f.write('Left Bank: ' + str(state[0][0]) + ' missionaries, ' + str(state[0][1]) + ' cannibal, '+ str(state[0][2]) + ' boat ' + ' Right Bank: ' + str(state[1][0]) + ' missionaries, ' + str(state[1][1]) + ' cannibals, ' + str(state[1][2]) + ' boat\n')
f.write('The number of nodes on solution is ' + str(len(path)) + '\n')
else:
f.write('No solution found\n')
f.write('The number of nodes expanded is ' + str(expand) + '\n')
f.close()
<file_sep>/Assignment_1/makefile
1_1:
python main.py start1.txt goal1.txt bfs 1_1sl.txt
1_2:
python main.py start2.txt goal2.txt bfs 1_2sl.txt
1_3:
python main.py start3.txt goal3.txt bfs 1_3sl.txt
2_1:
python main.py start1.txt goal1.txt dfs 2_1sl.txt
2_2:
python main.py start2.txt goal2.txt dfs 2_2sl.txt
2_3:
python main.py start3.txt goal3.txt dfs 2_3sl.txt
3_1:
python main.py start1.txt goal1.txt iddfs 3_1sl.txt
3_2:
python main.py start2.txt goal2.txt iddfs 3_2sl.txt
3_3:
python main.py start3.txt goal3.txt iddfs 3_3sl.txt
4_1:
python main.py start1.txt goal1.txt astar 4_1sl.txt
4_2:
python main.py start2.txt goal2.txt astar 4_2sl.txt
4_3:
python main.py start3.txt goal3.txt astar 4_3sl.txt
<file_sep>/Assignment_2/MinimaxPlayer.cpp
/*
* MinimaxPlayer.cpp
*
* Created on: Apr 17, 2015
* Author: wong
*/
#include <iostream>
#include <assert.h>
#include <climits>
#include "MinimaxPlayer.h"
using std::vector;
MinimaxPlayer::MinimaxPlayer(char symb) :
Player(symb) {
}
MinimaxPlayer::~MinimaxPlayer() {
}
void MinimaxPlayer::get_move(OthelloBoard* b, int& col, int& row) {
// To be filled in by you
OthelloBoard temp_b = clone_OthelloBoard(*b);
Action act = alpha_beta_search(temp_b, this->get_symbol());
col = act.column;
row = act.row;
}
Action MinimaxPlayer::alpha_beta_search(OthelloBoard b, char symbol){
int value, result_value = INT_MAX, alpha = INT_MIN, beta = INT_MAX;
Action_List act_list = successors(b, symbol);
Action a, result;
for(int i = 0; i < act_list.size; i++){
OthelloBoard temp_b = clone_OthelloBoard(b);
a = act_list.act[i];
temp_b.play_move(a.column, a.row, symbol);
value = max_value(temp_b, alpha, beta, symbol);
if(value < result_value)
result = a;
result_value = value;
}
return result;
}
int MinimaxPlayer::max_value(OthelloBoard b, int& alpha, int& beta, char symbol){
symbol = change_symbol(symbol);
if((terminal_test(b, symbol)))
return unility(b);
int v = INT_MIN;
Action_List act_list = successors(b, symbol);
Action a;
for(int i = 0; i < act_list.size; i++){
OthelloBoard temp_b = clone_OthelloBoard(b);
a = act_list.act[i];
temp_b.play_move(a.column, a.row, symbol);
v = max(v, min_value(temp_b, alpha, beta, symbol));
if(v >= beta)
return v;
alpha = max(alpha, v);
}
return v;
}
int MinimaxPlayer::min_value(OthelloBoard b, int& alpha, int& beta, char symbol){
symbol = change_symbol(symbol);
if((terminal_test(b, symbol)))
return unility(b);
int v = INT_MAX;
Action_List act_list = successors(b, symbol);
Action a;
for(int i = 0; i < act_list.size; i++){
OthelloBoard temp_b = clone_OthelloBoard(b);
a = act_list.act[i];
temp_b.play_move(a.column, a.row, symbol);
v = min(v, max_value(temp_b, alpha, beta, symbol));
if(v <= alpha)
return v;
beta = min(beta, v);
}
return v;
}
bool MinimaxPlayer::terminal_test(OthelloBoard b, char symbol){
return !b.has_legal_moves_remaining(symbol);
}
int MinimaxPlayer::unility(OthelloBoard b){
return b.count_score(b.get_p1_symbol()) - b.count_score(b.get_p2_symbol());
}
Action_List MinimaxPlayer::successors(OthelloBoard b, char symbol){
Action_List act_list;
act_list.size = 0;
act_list.capacity = 5;
act_list.act = (Action*)malloc(5*sizeof(Action));
for (int c = 0; c < b.get_num_cols(); c++) {
for (int r = 0; r < b.get_num_rows(); r++) {
if(b.is_legal_move(c, r, symbol)){
act_list.act[act_list.size].column = c;
act_list.act[act_list.size].row = r;
act_list.size += 1;
if(act_list.size == act_list.capacity){
act_list.act = expand_capacity(act_list);
act_list.capacity = 2 * act_list.capacity;
}
}
}
}
return act_list;
}
int MinimaxPlayer::max(int num1, int num2){
if(num1 > num2)
return num1;
else
return num2;
}
int MinimaxPlayer::min(int num1, int num2){
if(num1 > num2)
return num2;
else
return num1;
}
Action* MinimaxPlayer::expand_capacity(Action_List act_list){
int c = act_list.capacity;
Action* act = (Action*)malloc(2*c*sizeof(Action));
for(int i = 0; i < act_list.size; i++)
act[i] = act_list.act[i];
return act;
}
char MinimaxPlayer::change_symbol(char symbol){
if(symbol == 'X')
return 'O';
else
return 'X';
}
OthelloBoard MinimaxPlayer::clone_OthelloBoard(OthelloBoard b){
OthelloBoard temp(b.get_num_cols(), b.get_num_rows(), b.get_p1_symbol(), b.get_p2_symbol());
for(int i = 0; i < b.get_num_cols(); i++)
for(int j = 0; j < b.get_num_rows(); j++)
temp.set_cell(i, j, b.get_cell(i, j));
return temp;
}
MinimaxPlayer* MinimaxPlayer::clone() {
MinimaxPlayer* result = new MinimaxPlayer(symbol);
return result;
}<file_sep>/README.md
# CS331_Assignment
Implement Assignments
<file_sep>/Assignment_1/main.py
from refer import *
from time import clock
def main():
initial_file = str(sys.argv[1])
goal_file = str(sys.argv[2])
mode = str(sys.argv[3])
output_file = str(sys.argv[4])
initial = read_source(initial_file)
goal = read_source(goal_file)
expand = []
if mode == 'bfs':
result = BFS(initial, goal, expand)
elif mode == 'dfs':
result = DFS(initial, goal, expand)
elif mode == 'iddfs':
result = IDDFS(initial, goal, expand)
elif mode == 'astar':
result = A_STAR(initial, goal, expand)
else:
print 'Error'
if len(path(result)) != 0:
print 'The path is:'
for state in path(result):
print 'left bank: ' + str(state[0]) + ' ' + 'right bank:' + str(state[1])
print 'The number of nodes on solution is ' + str(len(path(result)))
else:
print 'No solution found!'
print 'The number of nodes expanded is ' + str(len(expand))
out_solution(output_file, path(result), len(expand))
if __name__ == '__main__':
main()
<file_sep>/Assignment_3/makefile
all:
python main.py trainingSet.txt preprocessed_train.txt testSet.txt preprocessed_test.txt
clean:
rm -rf preprocessed_train.txt preprocessed_test.txt results.txt<file_sep>/Assignment_3/preprocess.py
import string
import re
import numpy as np
######
# Purpose: Create bag of words based on data in input file
# Return: A list including words from input file.
######
def get_vocabulary(input_file):
vocabulary = []
stop_words = read_list("stop_word_list.txt")
f = open(input_file, 'r')
data_set = f.readlines()
for sentence in data_set:
sentence = filtrate_sentence(sentence)
for word in sentence:
if filtrate_word(word, vocabulary, stop_words):
vocabulary.append(word)
f.close()
return vocabulary
######
# Purpose: Build feature vectors and output them to preprcoess file
# Return: None
######
def preprocess(vocabulary, input_file, out_file):
class_label = []
feature_vectors = make_feature_vector(input_file, vocabulary, class_label)
return generate_out_file(out_file, feature_vectors, vocabulary, class_label)
######
# Purpose: Filtrate some words which are repeated or stop words.
# Return: Boolean value
######
def filtrate_word(word, vocabulary, stop_words):
if(word in vocabulary):
return False
if(word in stop_words):
return False
else:
return True
######
# Purpose: Filtrate a sentence such as removing punctuation and digit
# Return: A list including all words in this sentence
######
def filtrate_sentence(sentence):
sentence = sentence.lower()
sentence = re.sub(r"what's", "what is ", sentence)
sentence = re.sub(r"\'s", " ", sentence)
sentence = re.sub(r"\'ve", " have ", sentence)
sentence = re.sub(r"can't", "cannot ", sentence)
sentence = re.sub(r"couldn't", " could not ", sentence)
sentence = re.sub(r"n't", " not ", sentence)
sentence = re.sub(r"i'm", "i am ", sentence)
sentence = re.sub(r"\'re", " are ", sentence)
sentence = re.sub(r"\'d", " would ", sentence)
sentence = re.sub(r"\'ll", " will ", sentence)
sentence = re.sub(r",", " ", sentence)
sentence = "".join(l for l in sentence if l not in string.punctuation)
sentence = "".join([l for l in sentence if not l.isdigit()])
return sentence.split()
######
# Purpose: Build feature vectors
# Return: A matrix including all feature vectors and class labels
######
def make_feature_vector(input_file, vocabulary, class_label):
f = open(input_file, 'r')
data_set = f.readlines()
matrix = []
for sentence in data_set:
temp = sentence.split()
class_label.append(int(temp[-1]))
sentence = filtrate_sentence(sentence)
row = []
for j in range(len(vocabulary)):
word = vocabulary[j]
if(word in sentence):
row.append(1)
else:
row.append(0)
matrix.append(row)
f.close()
return matrix
######
# Purpose: Output feature vectors to a preprocess file
# Return: return a 2D array including preprocessed data
######
def generate_out_file(out_file, matrix, vocabulary, class_label):
preprocessed_data = []
v_temp = vocabulary[:]
v_temp.append('classlabel')
for index in range(len(matrix)):
matrix[index].append(class_label[index])
preprocessed_data.append(matrix[index])
with open (out_file, 'w') as fo:
fo.write(','.join(str(i) for i in v_temp))
fo.write('\n')
for index in range(len(preprocessed_data)):
fo.write(','.join(str(i) for i in preprocessed_data[index]))
fo.write('\n')
return preprocessed_data
######
# Purpose: Read other files including words we want to filtrate
# Return: A list including words we want to filtrate
######
def read_list(input_file):
words = []
f = open(input_file, 'r')
word_set = f.readlines()
for word in word_set:
word = word[:-1]
words.append(word)
f.close
return words<file_sep>/Assignment_3/README.txt
Team Member: <NAME>, <NAME>
1. Don't remove "stop_word_list.txt" from this folder
2. I have provided makefile and you can just type "make all" to run our program
3. The program will generate "preprocessed_train.txt", "preprocessed_test.txt", and "results.txt" automatically | 52ac901c6b608090f4f2032c257d4ccefb03a1da | [
"Markdown",
"Makefile",
"Python",
"Text",
"C++"
] | 10 | Python | HouPoc/CS331_Assignment | 0ac38d304c902cecb5c74e67c53c3ac155f06dab | 252159c16852538e8d0bbcd7794b9352fcbb1089 |
refs/heads/master | <repo_name>adinda17/cuacaku<file_sep>/app/src/main/java/com/androdocs/weatherapp/Main2Activity.java
package com.androdocs.weatherapp;
import androidx.appcompat.app.AppCompatActivity;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.TextView;
/**
 * Menu screen that routes the user to one of three weather-condition
 * screens: cerah (sunny), hujan (rain), or panas (hot).
 */
public class Main2Activity extends AppCompatActivity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main2);

        // Each category TextView simply opens its matching activity.
        setupNavigation(R.id.cerah, CerahActivity.class);
        setupNavigation(R.id.hujan, HujanActivity.class);
        setupNavigation(R.id.panas, PanasActivity.class);
    }

    /**
     * Wires a click listener on the view with the given id that starts the
     * given activity. Replaces three copy-pasted listener stanzas whose
     * comments still referred to NumbersActivity/FamilyActivity from the
     * tutorial this code was adapted from.
     *
     * @param viewId id of the clickable TextView in activity_main2
     * @param target activity class to launch when the view is clicked
     */
    private void setupNavigation(int viewId, final Class<?> target) {
        TextView view = (TextView) findViewById(viewId);
        view.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                startActivity(new Intent(Main2Activity.this, target));
            }
        });
    }
}
<file_sep>/app/src/main/java/com/androdocs/weatherapp/PanasActivity.java
package com.androdocs.weatherapp;
import androidx.appcompat.app.AppCompatActivity;
import android.content.Intent;
import android.net.Uri;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.CheckBox;
import android.widget.EditText;
import android.widget.ImageButton;
import android.widget.TextView;
import android.widget.Toast;
import java.text.NumberFormat;
public class PanasActivity extends AppCompatActivity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_panas);
//image button intent
ImageButton laguPanas = (ImageButton) findViewById(R.id.lagu_panas);
laguPanas.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Toast.makeText(PanasActivity.this, "clicked", Toast.LENGTH_LONG).show();
Intent Lpanas = new Intent(PanasActivity.this, LpanasActivity.class);
startActivity(Lpanas);
}
});
}
private Button btnBack;
int quantity = 0;
public void increment(View view){
if (quantity == 100){
//show an error message as a toast
Toast.makeText(this,"ga kelebihan nih?", Toast.LENGTH_SHORT ).show();
//exit this method early because nothing to do
return;
}
quantity = quantity + 1;
displayQuantity(quantity);
}
public void decrement(View view){
if (quantity == 1){
//show an error message as a toast
Toast.makeText(this,"masa gaada sih", Toast.LENGTH_SHORT ).show();
//exit this method early because nothing to do
return;
}
quantity = quantity - 1;
displayQuantity(quantity);
}
private void displayQuantity(int numberOfPeople) {
TextView quantityTextView = (TextView) findViewById(
R.id.quantity_text_view);
quantityTextView.setText("" + numberOfPeople);
}
public void submitKirim(View view) {
// Get user's name
EditText nameField = (EditText) findViewById(R.id.name_field);
// Editable nameEditable = nameField.getText();
String name = nameField.getText().toString();
// Get user's cerita
EditText ceritaField = (EditText) findViewById(R.id.cerita_field);
// Editable ceritaEditable = ceritaField.getText();
String cerita = ceritaField.getText().toString();
// yang bikin happy
CheckBox familyCheckBox = (CheckBox) findViewById(R.id.family_cb);
boolean hasFamily = familyCheckBox.isChecked();
CheckBox pacarCheckBox = (CheckBox) findViewById(R.id.pcr_cb);
boolean hasPacar = pacarCheckBox.isChecked();
CheckBox strangerCheckBox = (CheckBox) findViewById(R.id.st_cb);
boolean hasStranger = strangerCheckBox.isChecked();
CheckBox temanCheckBox = (CheckBox) findViewById(R.id.tmn_cb);
boolean hasTeman = temanCheckBox.isChecked();
// Calculate the price
int price = calculatePrice(hasFamily, hasPacar , hasStranger , hasTeman);
// Display the order summary on the screen
String priceMessage = createOrderSummary(name, cerita, price, hasFamily, hasPacar , hasStranger , hasTeman);
// Use an intent to launch an email app.
// Send the order summary in the email body.
Intent intent = new Intent(Intent.ACTION_SENDTO);
intent.setData(Uri.parse("mailto:")); // only email apps should handle this
intent.putExtra(Intent.EXTRA_SUBJECT, "Catatan kemarahan mu " + name);
intent.putExtra(Intent.EXTRA_TEXT,priceMessage );
if (intent.resolveActivity(getPackageManager()) != null) {
startActivity(intent);
finish();
}
}
/**
* Calculates the price of the order.
*
* @param addFamily is whether or not we should include pempek in the price
* @param addPacar is whether or not we should include gandus in the price
* @param addStranger is whether or not we should include bongkol in the price
* @param addTeman is whether or not we should include risol in the price
* @return total poin bahagia mu price
*/
private int calculatePrice(boolean addFamily, boolean addPacar , boolean addStranger , boolean addTeman) {
// First calculate the price of one appetizer
int basePrice = 5;
// If the family made user marah, add 10
if (addFamily) {
basePrice = basePrice + 10;
}
// If the pacar made user happy, add 10
if (addPacar) {
basePrice = basePrice + 10;
}
// If the stranger made user happy, add 10000
if (addStranger) {
basePrice = basePrice + 10;
}
// If the teman made user happy, add 10000
if (addTeman) {
basePrice = basePrice + 10;
}
// Calculate the total poin bahagia mu price by multiplying by the quantity
return quantity * basePrice;
}
/**
* Create summary of the order.
*
* @param name on the order
* @param price of the order
* @param addFamily is whether or not to add pempek
* @param addPacar is whether or not to add kue gandus
* @param addStranger is whether or not to add bongkol
* @param addTeman is whether or not to add risol
* @return text summary
*/
private String createOrderSummary(String name, String cerita, int price,boolean addFamily, boolean addPacar , boolean addStranger , boolean addTeman) {
String priceMessage = getString(R.string.order_summary_name, name);
priceMessage += "\n" + getString(R.string.order_summary_family3, addFamily);
priceMessage += "\n" + getString(R.string.order_summary_pacar3, addPacar);
priceMessage += "\n" + getString(R.string.order_summary_stranger3, addStranger);
priceMessage += "\n" + getString(R.string.order_summary_teman3, addTeman);
priceMessage += "\n" + getString(R.string.order_summary_quantity3, quantity);
priceMessage += "\n" + getString(R.string.order_summary_cerita, cerita);
priceMessage += "\n" + getString(R.string.order_summary_price3,
NumberFormat.getCurrencyInstance().format(price));
priceMessage += "\n" + getString(R.string.thank_you);
return priceMessage;
}
}
| 330968120cf0911b3065cedb0f7ea8dc9ecd1768 | [
"Java"
] | 2 | Java | adinda17/cuacaku | ac6cc407b951c5f22e89e55c51ffe66582b44509 | e2cd5f1c538b622f48167454ad811f60776d7ec4 |
refs/heads/master | <file_sep>import React from "react";
import Body from "./Body";
import { render, cleanup, act, waitFor } from '@testing-library/react';
import { createMemoryHistory } from 'history';
import { Router } from "react-router-dom";
import { CookiesProvider, Cookies } from 'react-cookie';
import { AMAZON_LOGIN_COOKIE } from "Constants";
let cookies;

// Home page with a valid LWA access-token cookie -> logged-in view.
it("renders logged-in view correctly when user is on home page and is authenticated", () => {
  cookies.set(AMAZON_LOGIN_COOKIE, 'access token is present', { path: '/' });
  const { asFragment } = renderWithRouter(<Body />, cookies, { route: "/" });
  expect(asFragment()).toMatchSnapshot();
});

// Home page without the cookie -> login-failed view.
it("renders login-failed view correctly when user is on home page and is not authenticated", () => {
  const { asFragment } = renderWithRouter(<Body />, cookies, { route: "/" });
  expect(asFragment()).toMatchSnapshot();
});

it("renders login-failed view when user navigates to /access_denied", () => {
  const { asFragment } = renderWithRouter(<Body />, cookies, { route: "/access_denied" });
  expect(asFragment()).toMatchSnapshot();
});

// Simulates LWA redirecting back with an implicit-grant response in the URI
// fragment; waitFor lets the async token handling settle before snapshotting.
it("renders logged-in view when user navigates to /authresponse and user logged in successfully", async () => {
  const lwaResponseHash = "#access_token=some_access_token&expires_in=30";
  const { asFragment } = renderWithRouter(<Body />, cookies, { route: "/authresponse" + lwaResponseHash });
  await waitFor(() => {
    expect(asFragment()).toMatchSnapshot();
  });
});

it("renders login-failed view when user navigates to /authresponse and user failed to login", async () => {
  const lwaResponseHash = "#";
  // NOTE(review): the route below evaluates to "/authresponse##" (the "#" is
  // appended twice). Possibly intended as "/authresponse" + lwaResponseHash;
  // confirm which form the failure path should exercise.
  const { asFragment } = renderWithRouter(<Body />, cookies, { route: "/authresponse#" + lwaResponseHash });
  await waitFor(() => {
    expect(asFragment()).toMatchSnapshot();
  });
});

it("renders 404 for unknown paths", () => {
  const { asFragment } = renderWithRouter(
    <Body />, cookies,
    { route: "/a/random/path" }
  );
  expect(asFragment()).toMatchSnapshot();
});

/**
 * Renders the given component inside a memory-backed Router and a
 * CookiesProvider so tests control both the current route and the cookie
 * jar. Returns the testing-library render result plus the history object.
 */
function renderWithRouter(
  component,
  cookies,
  {
    route = '/',
    history = createMemoryHistory({ initialEntries: [route] }),
  } = {}
) {
  return {
    ...render(
      <Router history={history}>
        <CookiesProvider cookies={cookies}>
          {component}
        </CookiesProvider>
      </Router >),
    history
  }
}

// Fresh cookie jar per test; the LWA cookie is scrubbed afterwards so tests
// cannot leak authentication state into each other.
beforeEach(() => { cookies = new Cookies() });

afterEach(() => {
  cleanup();
  act(() => cookies.remove(AMAZON_LOGIN_COOKIE, { path: '/' }));
});
<file_sep>const paths = Object.freeze({
EVENTS: "/v20160207/events",
DIRECTIVES: "/v20160207/directives"
});
const urls = Object.freeze({
NA: "https://avs-alexa-na.amazon.com",
EU: "https://avs-alexa-eu.amazon.com",
FE: "https://avs-alexa-fe.amazon.com"
});
module.exports = {
urls: urls,
paths: paths
};
<file_sep>import React from "react";
import "./PageNotFoundScreen.css";
export default function PageNotFoundScreen() {
return (
<div id="pagenotfound-screen">
<h1> 404 </h1>
<h1> This is just a placeholder. We might want to have a link here to let the user try logging in again or go home. </h1>
</div>
);
}
<file_sep>import React from "react";
import ReactLoading from "react-loading";
import "./LoadingAnimation.css";
export default function LoadingAnimation(props) {
return (
<div id="loading-animation">
<ReactLoading type={props.type} color={props.color} />
</div>
);
}
<file_sep>import React from "react";
import { shallow, mount } from "enzyme";
import clone from "clone";
import { List, fromJS } from "immutable";
import { ChatFeed, Message } from "monkas-chat";
import ChatWindow from "./ChatWindow";
import { cannedErrorResponses, customErrorCodes } from "CannedErrorResponses";
import { Cookies } from 'react-cookie';
import { AMAZON_LOGIN_COOKIE } from "Constants";
import {
mockSendTextMessageEventFunction,
mockAlexaSuccessResponses
} from "AVSGateway";
import { chatters, chatterIds } from "Chatters";
const CHATFEED_CONTAINER_HEIGHT = 234;
const CHATFEED_CONTAINER_HEIGHT_DEFAULT = 0;

// Stubs Element#getBoundingClientRect so every element reports the given
// height (jsdom has no real layout engine).
const setHeightElement = function (height) {
  Element.prototype.getBoundingClientRect = jest.fn(() => {
    return {
      height: height
    };
  });
};

// Replace AVSGateway with its manual mock for all tests in this file.
jest.mock("AVSGateway");

let chatWindow;
let chatWindowInstance;
let originalState;
let preventDefaultSpy;
let cookies;

beforeEach(() => {
  mockSendTextMessageEventFunction.mockClear();
  cookies = new Cookies();
  preventDefaultSpy = jest.fn();
  // WrappedComponent bypasses the HOC wrappers so props can be injected.
  chatWindow = shallow(<ChatWindow.WrappedComponent cookies={cookies} />);
  chatWindowInstance = chatWindow.instance();
  // Deep copy so later assertions can diff against the pristine state.
  originalState = clone(chatWindowInstance.state);
});

it("renders correctly without crashing", () => {
  expect(chatWindow).toMatchSnapshot();
});

it("passes height of container to ChatFeed component", () => {
  // Set `height` element.
  setHeightElement(CHATFEED_CONTAINER_HEIGHT);

  const wrapper = mount(<ChatWindow.WrappedComponent cookies={cookies} />, {});
  const chatFeed = wrapper.find(ChatFeed);
  expect(chatFeed.length).toBe(1);
  expect(chatFeed.prop("maxHeight")).toBe(CHATFEED_CONTAINER_HEIGHT);
  wrapper.unmount();

  //Reset `height` element
  setHeightElement(CHATFEED_CONTAINER_HEIGHT_DEFAULT);
});

it("persists a given message in state when pushMessage is called", () => {
  const numberOfMessagesAlreadyInState = originalState.messages.length;
  expect(originalState.messages.length).toBe(numberOfMessagesAlreadyInState);

  const userId = chatterIds.USER;
  const user = chatters.get(userId);
  const message = "test message";
  const expectedMessage = new Message({
    id: userId,
    message,
    senderName: user.name
  });

  chatWindowInstance.pushMessage(userId, message);

  const finalState = chatWindowInstance.state;
  const finalMessages = finalState.messages;
  expect(finalMessages.length).toBe(numberOfMessagesAlreadyInState + 1);
  expect(finalMessages[numberOfMessagesAlreadyInState]).toEqual(
    expectedMessage
  );
});

it("handles gracefully when pushMessage is called with an unknown user", () => {
  const invalidUserid = 1000; // valid values are just 0 and 1
  chatWindowInstance.pushMessage(invalidUserid, "test message");
  const finalState = chatWindowInstance.state;
  // Nothing about the state should have changed.
  expect(finalState).toEqual(originalState);
});

it("handles gracefully when pushMessage is called with an empty or null message", () => {
  const userid = 1;
  const emptyMessage = "";
  chatWindowInstance.pushMessage(userid, emptyMessage);
  let finalState = chatWindowInstance.state;
  // Nothing about the state should have changed.
  expect(finalState).toEqual(originalState);

  let nullMessage;
  chatWindowInstance.pushMessage(userid, nullMessage);
  finalState = chatWindowInstance.state;
  // Nothing about the state should have changed.
  expect(finalState).toEqual(originalState);
});

test("that when a user submits the form, we do not call AVSGateway if the user is not authenticated and redirect the user away from the protected chat screen.", () => {
  const history = { push: jest.fn() };
  cookies = new Cookies();
  const mockCookiesRemove = cookies.remove = jest.fn();
  const chatWindow = mount(<ChatWindow.WrappedComponent history={history} cookies={cookies} />);
  const chatWindowInstance = chatWindow.instance();

  const userRequestToAlexa = "a dummy user request";
  // NOTE(review): chatterIds declares only USER and ALEXA, so
  // chatterIds.YOU is undefined and chatters.get(undefined) yields
  // undefined here. Probably chatterIds.USER was intended — confirm.
  chatWindowInstance.setState({
    userRequestToAlexa: userRequestToAlexa,
    curr_user: chatters.get(chatterIds.YOU)
  });

  chatWindow
    .find("form")
    .simulate("submit", { preventDefault: preventDefaultSpy });

  // Unauthenticated: no AVS call, cookie cleared, redirect to /access_denied.
  expect(mockSendTextMessageEventFunction).not.toHaveBeenCalled();

  expect(mockCookiesRemove).toHaveBeenCalledTimes(1);
  expect(mockCookiesRemove.mock.calls[0][0]).toBe(AMAZON_LOGIN_COOKIE);
  expect(mockCookiesRemove.mock.calls[0][1]).toBe(undefined);
  expect(mockCookiesRemove.mock.calls[0][2]).toStrictEqual({
    maxAge: 0,
    secure: false,
    path: "/"
  });

  expect(history.push).toHaveBeenCalledTimes(1);
  expect(history.push).toHaveBeenCalledWith('/access_denied');
});

it("handles the user's form submission with request to Alexa and populates the state with the user request and Alexa's response", done => {
  const alexaId = chatterIds.ALEXA;
  const alexa = chatters.get(alexaId);
  const expectedAlexaResponses = mockAlexaSuccessResponses.map(
    alexaResponse =>
      new Message({
        id: alexaId,
        message: alexaResponse,
        senderName: alexa.name,
        avatar: alexa.avatar
      })
  );

  testOnUserRequestToAlexaSubmitHandling(expectedAlexaResponses, done);
});

it("handles the case when AVS throws an error in response to a user request. We should populate the state with the user request and a canned response.", done => {
  // mock the AVSGateway to throw an error.
  mockSendTextMessageEventFunction.mockImplementation(() =>
    Promise.reject(
      new Error(cannedErrorResponses.get(customErrorCodes.UNKNOWN_ERROR))
    )
  );

  const alexaId = chatterIds.ALEXA;
  const alexa = chatters.get(alexaId);
  const expectedAlexaResponse = List.of(
    new Message({
      id: alexaId,
      message: cannedErrorResponses.get(customErrorCodes.UNKNOWN_ERROR),
      senderName: alexa.name,
      avatar: alexa.avatar
    })
  );

  testOnUserRequestToAlexaSubmitHandling(expectedAlexaResponse, done);
});

it("handles gracefully when the input form is submitted with a null or empty request string", () => {
  let nullUserRequestToAlexa;
  chatWindowInstance.setState({
    userRequestToAlexa: nullUserRequestToAlexa
  });

  chatWindow
    .find("UserRequestToAlexaForm")
    .simulate("submit", { preventDefault: preventDefaultSpy });

  const finalState = chatWindowInstance.state;
  // Verify that preventDefault() is being called.
  expect(preventDefaultSpy).toHaveBeenCalledTimes(1);
  // Nothing about the state should have changed.
  expect(finalState).toEqual(originalState);
});

it("handles the user's input as they are typing their request (before submission)", () => {
  chatWindowInstance.setState({
    userRequestToAlexa: "some initial value",
    curr_user: 1
  });
  const expectedUserRequestToAlexa = "a dummy user request";
  const event = {
    target: { value: expectedUserRequestToAlexa },
    preventDefault: preventDefaultSpy
  };

  chatWindow.find("UserRequestToAlexaForm").simulate("change", event);

  const finalState = chatWindowInstance.state;
  const finalUserRequestToAlexa = finalState.userRequestToAlexa;
  // Verify that preventDefault() is being called.
  expect(preventDefaultSpy).toHaveBeenCalledTimes(1);
  expect(finalUserRequestToAlexa).toEqual(expectedUserRequestToAlexa);
});

/**
 * Helper method to test the interaction with Alexa. Will simulate a user request and
 * verify that the expected messages are populated into the state.
 * Authenticates first by planting the LWA cookie, submits the form, then
 * asserts (asynchronously, via setImmediate) that the user's message and
 * all of Alexa's responses landed in component state.
 * @param {Message} expectedAlexaResponses The expected responses from Alexa to be verified
 * against.
 */
const testOnUserRequestToAlexaSubmitHandling = (
  expectedAlexaResponses,
  done
) => {
  const access_token = "a dummy access token";
  cookies.set(AMAZON_LOGIN_COOKIE, access_token, { path: '/' });

  const chatWindow = mount(<ChatWindow.WrappedComponent cookies={cookies} />);
  const chatWindowInstance = chatWindow.instance();
  const originalState = clone(chatWindowInstance.state);
  const numberOfMessagesAlreadyInState = originalState.messages.length;

  const userRequestToAlexa = "a dummy user request";
  const userId = chatterIds.USER;
  const user = chatters.get(userId);
  chatWindowInstance.setState({
    userRequestToAlexa: userRequestToAlexa,
    curr_user: userId
  });

  chatWindow
    .find("form")
    .simulate("submit", { preventDefault: preventDefaultSpy });

  expect(preventDefaultSpy).toHaveBeenCalledTimes(1);
  expect(mockSendTextMessageEventFunction).toHaveBeenCalledTimes(1);
  expect(mockSendTextMessageEventFunction).toHaveBeenCalledWith(
    userRequestToAlexa,
    access_token
  );

  const expectedUserMessage = new Message({
    id: userId,
    message: userRequestToAlexa,
    senderName: user.name
  });

  // The component updates state after the AVS promise resolves; defer the
  // assertions until after the current task queue drains.
  setImmediate(() => {
    const finalState = chatWindowInstance.state;
    const finalMessages = finalState.messages;

    // We should have added userMessage and all of Alexa's responses.
    const numberOfExpectedAlexaResponses = expectedAlexaResponses.size;
    const numberOfNewMessagesToGoIntoState = 1 + numberOfExpectedAlexaResponses;
    expect(finalMessages.length).toBe(
      numberOfMessagesAlreadyInState + numberOfNewMessagesToGoIntoState
    );
    expect(finalMessages[numberOfMessagesAlreadyInState]).toEqual(
      expectedUserMessage
    );
    expect(
      fromJS(finalMessages.slice(numberOfMessagesAlreadyInState + 1))
    ).toEqual(expectedAlexaResponses);

    expect(chatWindowInstance.state.userRequestToAlexa).toEqual("");
    done();
  });
};

afterEach(() => {
  chatWindow.unmount();
});
<file_sep>import React from "react";
import { shallow } from "enzyme";
import RightPanel from "./RightPanel";
// Smoke test: RightPanel renders and matches its stored snapshot.
it("renders RightPanel without crashing", () => {
  const wrapper = shallow(<RightPanel />);
  expect(wrapper).toMatchSnapshot();
  wrapper.unmount();
});
<file_sep>import { extractAlexaTextResponses as parser } from "SpeakDirectiveParser";
import IllegalArgumentError from "errors/IllegalArgumentError";
import testData from "./test-data/multipart-response-test-data";
// --- Invalid-input cases: each must raise IllegalArgumentError. ---

it("throws an error if an empty string is passed as input", () => {
  const input = "";
  testIllegalArgumentHandling(input);
});

it("throws an error if undefined is passed as input", () => {
  let input;
  testIllegalArgumentHandling(input);
});

it("handles the invalid case where the multi-part message has only one part.", () => {
  testIllegalArgumentHandling(testData.multi_part_with_just_one_part.rawData);
});

it("handles the invalid case where the multi-part message has no body in the first part.", () => {
  testIllegalArgumentHandling(testData.multi_part_with_no_body.rawData);
});

it("handles the invalid case where the directive in the AVS response is not a well formatted json.", () => {
  testIllegalArgumentHandling(testData.directive_not_valid_json.rawData);
});

it("handles the invalid case where the directive in the AVS response is well formatted json but doesn't contain the 'directive' key.", () => {
  testIllegalArgumentHandling(
    testData.directive_key_doesnt_exist_in_avs_directive.rawData
  );
});

it("handles the invalid case where the directive in the AVS response is well formatted json but doesn't contain the 'payload' key.", () => {
  testIllegalArgumentHandling(
    testData.payload_key_doesnt_exist_in_avs_directive.rawData
  );
});

it("handles the invalid case where the directive in the AVS response is well formatted json but doesn't contain the 'caption' key.", () => {
  testIllegalArgumentHandling(
    testData.caption_key_doesnt_exist_in_avs_directive.rawData
  );
});

it("handles the invalid case where the directive in the AVS response is well formatted json but doesn't contain the 'header' key.", () => {
  testIllegalArgumentHandling(
    testData.header_key_doesnt_exist_in_avs_directive.rawData
  );
});

it("handles the invalid case where the directive in the AVS response is well formatted json but doesn't contain the 'name' key.", () => {
  testIllegalArgumentHandling(
    testData.name_key_doesnt_exist_in_avs_directive.rawData
  );
});

it("handles the invalid case where the caption is in an invalid format.", () => {
  testIllegalArgumentHandling(
    testData.caption_invalid_format.rawData
  );
});

// --- Valid-input cases: parser output is compared to the fixture's
// expected List of Alexa responses. ---

it("does not include non-text parts in Alexa's response", () => {
  const testObject = testData.multi_part_with_different_content_types;
  const alexaTextResponses = parser(testObject.rawData);
  expect(alexaTextResponses).toEqual(testObject.alexaResponses);
});

it("handles the case where Alexa's text response is broken into more than one part", () => {
  const testObject = testData.multi_part_with_just_three_parts;
  const alexaTextResponses = parser(testObject.rawData);
  expect(alexaTextResponses).toEqual(testObject.alexaResponses);
});

it("handles the case where there are non-Speak directives.", () => {
  const testObject = testData.non_speak_directives;
  const alexaTextResponses = parser(testObject.rawData);
  expect(alexaTextResponses).toEqual(testObject.alexaResponses);
});

it("extracts Alexa's response in the happy case", () => {
  const testObject = testData.happy_case;
  const alexaTextResponses = parser(testObject.rawData);
  expect(alexaTextResponses).toEqual(testObject.alexaResponses);
});

it("handles gracefully when Alexa doesn't say anything in her webvtt response. For ex, when user says 'stop'", () => {
  const testObject = testData.happy_case_when_alexa_responds_with_empty_webvtt_message;
  const alexaTextResponses = parser(testObject.rawData);
  expect(alexaTextResponses.size).toBe(0);
});

it("handles gracefully when Alexa returns a caption type that is not supported", () => {
  const testObject = testData.happy_case_unknown_caption_type;
  const alexaTextResponses = parser(testObject.rawData);
  expect(alexaTextResponses.size).toBe(0);
});

/**
 * Verifies that the given input results in an {IllegalArgumentError} when parsed.
 * @param {String} input The multi part response string that needs to be parsed.
 */
const testIllegalArgumentHandling = input => {
  expect(() => {
    parser(input);
  }).toThrow(IllegalArgumentError);
};
<file_sep>import React from "react";
import { Map } from "immutable";
import AlexaRingIcon from "Icons/AlexaRingIcon";
// Stable ids for the two chat participants.
const chatterIds = Object.freeze({
  USER: 0,
  ALEXA: 1
});

// Display metadata per chatter id.
// NOTE(review): the ALEXA entry defines only `avatar` — unlike USER it has
// no `name`, so consumers reading `chatters.get(ALEXA).name` get undefined.
// Confirm whether the omission is intentional.
const chatters = Map([
  [chatterIds.USER, { name: "You", avatar: undefined }],
  [
    chatterIds.ALEXA,
    {
      avatar: <AlexaRingIcon />
    }
  ]
]);

export { chatterIds, chatters };
<file_sep>import React from "react";
import "./LoginFailedScreen.css";
export default function LoginFailedScreen() {
return (
<div id="loginfailed-screen">
<h1> Login Failed </h1>
<h1> This is just a placeholder. We might want to redirect the user to the login page or at least have a link here to let the user try again or go home. </h1>
</div>
);
}
<file_sep>// Happy case EventProcessed directive
{
  // Raw multipart HTTP body with a single JSON part: the "EventProcessed"
  // directive AVS returns after successfully handling an event. The
  // template content is intentionally left at column 0 — it is literal data.
  const happy_case = String.raw`--------abcde123
Content-Type: application/json; charset=UTF-8
{"directive":{"header":{"namespace":"Alexa","name":"EventProcessed","messageId":"{{STRING}}","eventCorrelationToken":"{{STRING}}"},"payload":{}}}
`;
  exports.happy_case = happy_case;
}
/**
 * Style overrides handed to monkas-chat's ChatFeed: larger message text plus
 * distinct background colors for Alexa's and the user's bubbles.
 */
const bubbleStyles = {
  text: {
    fontSize: 20
  },
  chatbubble: {
    background: "#00ACE0"
  },
  userBubble: {
    background: "#646A72"
  }
};

export default { bubbleStyles };
<file_sep>import IllegalArgumentError from "errors/IllegalArgumentError";
import { hasIn, getIn, fromJS } from "immutable";
import util from "util";
import httpMessageParser from "http-message-parser";
import webvtt from "node-webvtt";
// Content-Type of the JSON (directive) parts in an AVS multipart response;
// used to separate directives from binary audio parts.
const TEXT_PART_CONTENT_TYPE = `application/json; charset=UTF-8`;

/**
 * This method parses the multi-part AVS responses and extracts the
 * strings representing Alexa's responses from 'Speak' directives.
 * It strips away other information like binary audio data, ssml
 * tags etc in the 'Speak' directives.
 *
 * @param {String} alexaRawResponse The multi-part response from AVS. This
 * should not be empty or undefined.
 *
 * @returns an immutable List of text responses from Alexa. All the
 * responses will be valid strings but a response can be empty if Alexa
 * chooses to say nothing.
 *
 * @throws IllegalArgumentError if Alexa's response couldn't be parsed out
 * of the input. The failure could be because the input is not well formatted
 * or doesn't contain the right fields to fetch Alexa's response.
 */
export function extractAlexaTextResponses(alexaRawResponse) {
  if (!alexaRawResponse) {
    throw new IllegalArgumentError(
      "The response to be parsed cannot be empty. Input: " + alexaRawResponse
    );
  }

  const parsedResponse = httpMessageParser(alexaRawResponse);
  // TODO: !parsedResponse condition isn't tested because mocking httpMessageParser responses turned
  // out to be more involved than expected. While it is safe for now because the library appears to
  // always return a response that contains a body, it needs to be tested.
  if (!parsedResponse || !parsedResponse.multipart) {
    throw new IllegalArgumentError(
      "Given raw response is not a valid multi-part message. Input: " +
      alexaRawResponse
    );
  }

  // TODO: part.headers and part.body being undefined or null isn't tested because
  // mocking httpMessageParser responses turned out to be more involved than expected.
  // While it is safe for now because the library appears to always return a response
  // that contains a body, it needs to be tested.
  // Keep only the JSON parts with a body; audio/binary parts are dropped.
  const textParts = parsedResponse.multipart
    .filter(
      part =>
        TEXT_PART_CONTENT_TYPE === getIn(part, ["headers", "Content-Type"]) &&
        part.body
    )
    .map(part => part.body);

  const alexaResponses = [];
  for (let part of textParts) {
    let avsDirective;
    try {
      avsDirective = JSON.parse(part);
    } catch (error) {
      throw new IllegalArgumentError(
        `Given directive couldn't be parsed to a JSON object. Input: " ${part.toString()}
        StackTrace:
        ${util.inspect(error, { showHidden: true, depth: null })}`
      );
    }

    _validateDirective(avsDirective);

    // Directives that are not Speak directives (for ex, ExpectSpeech** directive) will be skipped for now. Tracking
    // item to handle ExpectSpeech directives - https://github.com/s-maheshbabu/silent-alexa/issues/62
    // ** https://developer.amazon.com/docs/alexa-voice-service/speechrecognizer.html#expectspeech
    if (!_isSpeakDirective(avsDirective)) {
      console.log(
        "A non-Speak directive was encountered. Skipping the directive. " +
        util.inspect(avsDirective, { showHidden: true, depth: null })
      );
      continue;
    }
    // _extractCaption returns an array of caption lines; spread appends
    // each one individually.
    alexaResponses.push(..._extractCaption(avsDirective));
  }
  return fromJS(alexaResponses);
}
/**
 * This method parses the captions in the Speak directives. This can handle
 * both WEBVTT type captions and plain string captions. Any other types of
 * captions will be ignored.
 *
 * @param avsDirective The avsDirective containing the captions.
 *
 * @returns an array of text responses from Alexa. All the responses will be
 * valid strings but the array can be empty if Alexa chooses to say nothing
 * or the caption type is unsupported.
 *
 * @throws IllegalArgumentError if Alexa's response couldn't be parsed out
 * of the input. The failure could be because the input is not well formatted
 * or doesn't contain the right fields to fetch Alexa's captions.
 */
function _extractCaption(avsDirective) {
  if (hasIn(avsDirective, ["directive", "payload", "caption", "type"])) {
    // Only WEBVTT captions are supported; anything else is skipped.
    if (avsDirective.directive.payload.caption.type !== "WEBVTT") {
      console.warn("An unexpected captions type. Input: " +
        `${util.inspect(avsDirective, { showHidden: true, depth: null })}`);
      return [];
    }

    const rawCaptionContent = avsDirective.directive.payload.caption.content;
    let captions;
    try {
      captions = webvtt.parse(rawCaptionContent);
    } catch (error) {
      console.error(`${util.inspect(error, { showHidden: true, depth: null })}`)
      // Bug fix: the error message previously inspected `captions`, which is
      // always undefined when parse() throws. Report the offending raw
      // caption content instead.
      throw new IllegalArgumentError(
        "Given captions are not a valid WEBVTT captions. Input: " +
        `${util.inspect(rawCaptionContent, { showHidden: true, depth: null })}`
      );
    }

    // One response per cue; may be empty when Alexa says nothing.
    return captions.cues.map(cue => cue.text);
  } else if (hasIn(avsDirective, ["directive", "payload", "caption"])) {
    // Legacy format: the caption is a plain string.
    return [avsDirective.directive.payload.caption];
  }

  throw new IllegalArgumentError(
    "Given Speak directive doesn't contain the expected path directive.payload.caption.type. Input: " +
    `${util.inspect(avsDirective, { showHidden: true, depth: null })}`
  );
}
/**
 * Asserts that the parsed AVS directive declares its type at
 * directive.header.name.
 *
 * @throws IllegalArgumentError when that path is absent.
 */
function _validateDirective(avsDirective) {
  if (hasIn(avsDirective, ["directive", "header", "name"])) {
    return;
  }
  throw new IllegalArgumentError(
    "Given directive doesn't declare a type at directive.header.name. Input: " +
      `${util.inspect(avsDirective, { showHidden: true, depth: null })}`
  );
}
// True only for SpeechSynthesizer "Speak" directives.
function _isSpeakDirective(avsDirective) {
  const { name } = avsDirective.directive.header;
  return name === "Speak";
}
<file_sep>import React from "react";
import "./RightPanel.css";
export default function RightPanel() {
return (
<div id="rightpanel">
<div className="panel-body">Right side content</div>
</div>
);
}
// Placeholder text shown in the chat input before the user types.
export const DEFAULT_PLACEHOLDER_FOR_USER_REQUEST_STRING =
  "Type your request for Alexa..";

// Name of the cookie that holds the LoginWithAmazon access token.
export const AMAZON_LOGIN_COOKIE = "amazon_Login_accessToken";

// AVS product (device) identifier registered for this application.
export const PRODUCT_ID = "Silent_Alexa";

// LoginWithAmazon OAuth2 client id.
export const CLIENT_ID = "amzn1.application-oa2-client.b7bd09596de34d37a2403a712f137e22";
<file_sep>import React from "react";
import IllegalArgumentError from "errors/IllegalArgumentError";
import Cookies from "js-cookie";
import util from "util";
export const AuthContext = React.createContext();
// NOTE(review): this duplicates the AMAZON_LOGIN_COOKIE constant declared in
// Constants.js — consider importing it so the two values cannot drift.
export const AMAZON_LOGIN_COOKIE = "amazon_Login_accessToken";

// TODO This class is using the presence of AMAZON_LOGIN_COOKIE to mean that the user
// is authenticated. The assumption is that Cookies.get won't return expired cookies.
// Is it true? If not, we should check for expiration of cookies.

/**
 * Context provider exposing LWA auth helpers to the component tree:
 * persisting the LWA authorization response, checking/fetching the access
 * token, and clearing it on logout.
 */
export default ({ children }) => {
  const defaultContext = {
    // Persists the LoginWithAmazon authorization response as a cookie.
    setLWAResponse: lwaResponse => {
      _persist(lwaResponse);
    },
    // "Authenticated" == the LWA cookie is present (see TODO above).
    isAuthenticated: () => Cookies.get(AMAZON_LOGIN_COOKIE) !== undefined,
    getAccessToken: () => {
      return Cookies.get(AMAZON_LOGIN_COOKIE);
    },
    clear: () => {
      Cookies.remove(AMAZON_LOGIN_COOKIE);
    }
  };

  return (
    <AuthContext.Provider value={defaultContext}>
      {children}
    </AuthContext.Provider>
  );
};
/**
 * Stores the LWA access token in a cookie whose lifetime matches the token.
 *
 * js-cookie's `expires` option is measured in days, so `expires_in`
 * (seconds) is divided by 86400 to convert. With LWA's typical one-hour
 * tokens the cookie therefore lives ~1 hour — which presumably answers the
 * inline TODO below: the cookie expiry mirrors the token expiry (confirm).
 *
 * @throws IllegalArgumentError when the response is missing or lacks
 *   access_token/expires_in.
 */
const _persist = lwaResponse => {
  const numberOfSecondsInADay = 86400;
  if (lwaResponse && lwaResponse.access_token && lwaResponse.expires_in) {
    Cookies.set(AMAZON_LOGIN_COOKIE, lwaResponse.access_token, {
      expires: lwaResponse.expires_in / numberOfSecondsInADay, // TODO Why are cookies expiring in an hour?
      secure: false // TODO: Change localhost to also use https and then change this to true.
    });
  } else {
    const serializedLWAResponse = util.inspect(lwaResponse, {
      showHidden: true,
      depth: null
    });
    throw new IllegalArgumentError(
      `LoginWithAmazon Authorization response is undefined or
      doesnt have access_token/expires_in.
      lwaResponse: ${serializedLWAResponse}`
    );
  }
};
<file_sep>const { Map } = require("immutable");
/**
* We need to show human readable error messages to users when Alexa responds
* with error codes or we encounter internal/unknown errors.
* This is a mapping from Alexa/Custom error codes to canned responses.
* @link https://developer.amazon.com/docs/alexa-voice-service/exceptions.html
*/
// TODO: Change the place holder error responses to real ones.
// AVS exception codes mapped to the fallback text shown to the user in
// place of a real Alexa response.
const cannedErrorResponses = Map({
  INVALID_REQUEST_EXCEPTION: "a canned response for INVALID_REQUEST_EXCEPTION",
  UNAUTHORIZED_REQUEST_EXCEPTION:
    "a canned response for UNAUTHORIZED_REQUEST_EXCEPTION",
  UNSUPPORTED_MEDIA_TYPE: "a canned response for UNSUPPORTED_MEDIA_TYPE",
  THROTTLING_EXCEPTION: "a canned response for THROTTLING_EXCEPTION",
  INTERNAL_SERVICE_EXCEPTION:
    "a canned response for INTERNAL_SERVICE_EXCEPTION",
  "N/A": "a canned response for N/A Exception",
  UNKNOWN_ERROR:
    "a canned response for UNKNOWN_ERROR. This shouldn't ever happen. Don't show it as an Alexa bubble."
});

// Error codes minted client-side (not returned by AVS).
const customErrorCodes = Object.freeze({
  UNKNOWN_ERROR: "UNKNOWN_ERROR"
});

module.exports = {
  cannedErrorResponses: cannedErrorResponses,
  customErrorCodes: customErrorCodes
};
<file_sep>import React from "react";
import HeaderFlatButton from "HeaderFlatButton/HeaderFlatButton";
import { useCookies } from "react-cookie";
import { AMAZON_LOGIN_COOKIE, PRODUCT_ID } from "Constants";
// TODO: Logic for assigning 'deviceSerialNumber' needs to be revisited.
const DSN = "12345";
// LWA options to request implicit grant.
export const options = Object.freeze({
scope: ["alexa:all", "profile"],
scope_data: {
"alexa:all": {
productID: PRODUCT_ID,
productInstanceAttributes: { deviceSerialNumber: DSN }
}
},
popup: false
});
// Redirect path to handle the response from LoginWithAmazon
export const REDIRECT_PATH = "/authresponse";
export default function LoginControl() {
const [cookies, removeCookie] = useCookies([AMAZON_LOGIN_COOKIE]);
if (cookies[AMAZON_LOGIN_COOKIE] !== undefined) {
return <HeaderFlatButton label="Logout" onClick={() =>
clearAccessTokens(removeCookie)
} />;
} else {
return <HeaderFlatButton label="Login" onClick={handleLogin} />;
}
}
function clearAccessTokens(removeCookie) {
removeCookie(AMAZON_LOGIN_COOKIE, undefined, {
maxAge: 0,
secure: false, // TODO: Change localhost to also use https and then change this to true.
path: "/"
});
}
function handleLogin() {
// The authorization service will redirect the user-agent to the redirect path
// which will contain an authorization response as a URI fragment
window.amazon.Login.authorize(
options,
window.location.origin + REDIRECT_PATH
);
}
<file_sep>import React from "react";
import { render, cleanup, fireEvent, act } from '@testing-library/react';
import MuiThemeProvider from "material-ui/styles/MuiThemeProvider";
import { CookiesProvider, Cookies } from 'react-cookie';
import { AMAZON_LOGIN_COOKIE } from "Constants";
import {
options,
REDIRECT_PATH,
default as LoginControl
} from "./LoginControl";
const ORIGIN_PATH = "http://localhost:3000";
const mockLWAModule = jest.fn();
beforeEach(() => {
jest.resetAllMocks();
Object.defineProperty(window, "amazon", {
value: { Login: { authorize: mockLWAModule } },
writable: true
});
});
let cookies;
it("renders LoginControl with LoginButton component when the user is not authenticated", () => {
const { asFragment } = renderWithCookies(<LoginControl />, cookies);
expect(asFragment(<LoginControl />)).toMatchSnapshot();
});
it("renders LoginControl with LogoutButton component when the user is authenticated", () => {
cookies.set(AMAZON_LOGIN_COOKIE, 'access token is present', { path: '/' });
const { asFragment } = renderWithCookies(<LoginControl />, cookies);
expect(asFragment(<LoginControl />)).toMatchSnapshot();
});
it("verifies that login button calls the lwa authorizatin procedure", () => {
delete global.window.location;
global.window = Object.create(window);
Object.defineProperty(window, 'location', {
value: {
origin: ORIGIN_PATH
},
configurable: true
});
const { getByText } = renderWithCookies(<LoginControl />, cookies);
const loginButton = getByText("Login");
fireEvent.click(loginButton);
expect(mockLWAModule).toHaveBeenCalledTimes(1);
expect(mockLWAModule.mock.calls[0][0]).toBe(options);
expect(mockLWAModule.mock.calls[0][1]).toBe(ORIGIN_PATH + REDIRECT_PATH);
// Cleanup
delete global.window.location;
});
it("verifies that authentication info is cleared when logout button is clicked", () => {
cookies.set(AMAZON_LOGIN_COOKIE, 'access token is present', { path: '/' });
const { getByText } = renderWithCookies(<LoginControl />, cookies);
const logoutButton = getByText("Logout");
fireEvent.click(logoutButton);
expect(cookies.get(AMAZON_LOGIN_COOKIE)).toBe(undefined);
});
// TODO: Probably adapt custom render to make this easier across all test files.
// https://testing-library.com/docs/react-testing-library/setup#custom-render
const renderWithCookies = (component, cookies) => {
return {
...render(
<CookiesProvider cookies={cookies}>
<MuiThemeProvider>
{component}
</MuiThemeProvider>
</CookiesProvider>)
}
}
beforeEach(() => { cookies = new Cookies(); });
afterEach(() => {
cleanup();
act(() => cookies.remove(AMAZON_LOGIN_COOKIE, { path: '/' }));
});
<file_sep>import React from "react";
import FlatButton from "material-ui/FlatButton";
import "./HeaderFlatButton.css";
export default function HeaderFlatButton(props) {
return (
<FlatButton
className="header-flat-button"
label={props.label}
onClick={() => props.onClick()}
/>
);
}
<file_sep>import React, { useEffect, useState } from "react";
import { Redirect } from "react-router-dom";
import queryString from "query-string";
import util from "util";
import { hasIn } from "immutable";
import { useCookies } from "react-cookie";
import AVSGateway from "AVSGateway";
import LoadingAnimation from "LoadingAnimation/LoadingAnimation";
import { withRouter } from 'react-router-dom';
import { AMAZON_LOGIN_COOKIE } from "Constants";
const avs = new AVSGateway();
/*
This component handles the LWAResponse and takes one of the following actions -
1. Validate the LWAResponse and if it is not valid, route the user to an access_denied page.
2. Try to post AddOrUpdateReportEvent and if it fails, route the user to an access_denied page. While
posting the AddOrUpdateReportEvent, we render a component to indicate to the user that they need to wait.
3. In the happy case, save the LWAResponse access token to Cookies and route the user to the home screen.
*/
function LoginHandler(props) {
const [, setCookie] = useCookies([AMAZON_LOGIN_COOKIE]);
const [isAddOrUpdateReportEventPosted, setIsAddOrUpdateReportEventPosted] = useState(undefined);
useEffect(() => {
const asyncCallback = async () => {
if (!isLWAResponseValid(lwaResponse)) return;
setIsAddOrUpdateReportEventPosted(await avs.sendAddOrUpdateReportEvent(lwaResponse.access_token));
}
asyncCallback();
}, []);
let lwaResponse;
// Parses the query string to fetch the login with amazon response object
if (hasIn(props, ["location", "hash"])) {
lwaResponse = queryString.parse(props.location.hash, { parseNumbers: true });
}
// If the LWAResponse itself is invalid, nothing else matters. We can't talk to Alexa anymore.
// Even when LWAResponse is valid, if the attempt to post the AddOrUpdateReportEvent failed,
// we should talk to Alexa as per AVS documentation.
if (!isLWAResponseValid(lwaResponse) || isAddOrUpdateReportEventPosted === false) {
return <Redirect to="/access_denied" />;
}
// This is the state where we are still in the process of posting AddOrUpdateReportEvent.
if (isAddOrUpdateReportEventPosted === undefined) {
return <LoadingAnimation type="bars" color="red" />
}
// AddOrUpdateReportEvent was posted successfully.
setCookie(AMAZON_LOGIN_COOKIE, lwaResponse.access_token, {
maxAge: lwaResponse.expires_in, // TODO Why are cookies expiring in an hour?
secure: false, // TODO: Change localhost to also use https and then change this to true.
path: "/"
});
return <Redirect to="/" />;
}
/**
* @returns true if LoginWithAmazon response is valid
* false, otherwise.
*/
function isLWAResponseValid(lwaResponse) {
if (
!lwaResponse ||
!lwaResponse.access_token ||
!lwaResponse.expires_in ||
lwaResponse.expires_in < 0
) {
console.log(
"Encountered an error on login: " +
util.inspect(lwaResponse, { showHidden: true, depth: null })
);
return false;
}
return true;
}
export default withRouter(LoginHandler);
<file_sep>import React from "react";
import App from "./App";
import { render, cleanup, fireEvent, waitFor } from '@testing-library/react';
import { createMemoryHistory } from 'history';
import { Router } from "react-router-dom";
import '@testing-library/jest-dom/extend-expect';
import { CookiesProvider, Cookies } from 'react-cookie';
import { AMAZON_LOGIN_COOKIE } from "Constants";
afterEach(cleanup);
it("integration test to verify the actions to be taken when a currently authenticated user clicks on the logout button while on the chat page.", async () => {
let cookies = new Cookies();
cookies.set(AMAZON_LOGIN_COOKIE, 'access token is present', { path: '/' });
const { getByText, getByRole, queryByText, queryByRole } = renderWithProviders(<App />, cookies);
const logoutButton = getByText("Logout");
const chatInputTextBox = getByRole("textbox");
expect(logoutButton).toBeInTheDocument();
expect(chatInputTextBox).toBeInTheDocument();
fireEvent.click(logoutButton);
await waitFor(() => {
expect(queryByText("Logout")).not.toBeInTheDocument();
expect(queryByRole("textbox")).not.toBeInTheDocument();
expect(queryByText("Login")).toBeInTheDocument();
expect(getByText("WELCOME TO <NAME>", { exact: false })).toBeInTheDocument();
})
});
function renderWithProviders(
component,
cookies,
{
route = '/',
history = createMemoryHistory({ initialEntries: [route] }),
} = {}
) {
return {
...render(
<Router history={history}>
<CookiesProvider cookies={cookies}>
{component}
</CookiesProvider>
</Router>),
history
}
}
<file_sep>import React from "react";
import TextField from "material-ui/TextField";
import MuiThemeProvider from "material-ui/styles/MuiThemeProvider";
import { DEFAULT_PLACEHOLDER_FOR_USER_REQUEST_STRING } from "Constants";
/*
The input component where user's type in their requests for Alexa
*/
const UserRequestToAlexaForm = props => {
return (
<form onSubmit={e => props.onSubmit(e)}>
<MuiThemeProvider>
<TextField
id="user-request-to-alexa-text-field"
hintText={DEFAULT_PLACEHOLDER_FOR_USER_REQUEST_STRING}
value={props.value}
onChange={e => props.onChange(e)}
/>
</MuiThemeProvider>
</form>
);
};
export default UserRequestToAlexaForm;
<file_sep>class IllegalArgumentError extends Error {
constructor(message) {
super(message);
this.message = message;
this.name = "IllegalArgumentError";
}
}
export default IllegalArgumentError;
<file_sep>import { List } from "immutable";
export const mockAlexaSuccessResponses = List.of(
"mock alexa success response 1",
"mock alexa success response 2",
"mock alexa success response 3"
);
// Mock implementation that always resolves to a successful response from AVS.
export const mockSendTextMessageEventFunction = jest.fn(() =>
Promise.resolve(mockAlexaSuccessResponses)
);
// Mock implementation that always resolves to a successful response from AVS.
export const mockSendAddOrUpdateReportEventFunction = jest.fn(() => Promise.resolve(true)
);
const mockAVSGateway = jest.fn(() => {
return {
sendTextMessageEvent: mockSendTextMessageEventFunction,
sendAddOrUpdateReportEvent: mockSendAddOrUpdateReportEventFunction
};
});
export default mockAVSGateway;
<file_sep>import React from "react";
import "./WelcomeScreen.css";
export default function WelcomeScreen() {
return (
<div id="welcome-screen">
<h1> WELCOME TO <NAME> (Under Construction) </h1>
<h1> Login to get started </h1>
</div>
);
}
<file_sep>import React from "react";
import { shallow } from "enzyme";
import WelcomeScreen from "./WelcomeScreen";
it("renders without crashing", () => {
const wrapper = shallow(<WelcomeScreen />);
expect(wrapper).toMatchSnapshot();
wrapper.unmount();
});
<file_sep>import { urls, paths } from "AVSEndPoints";
import { cannedErrorResponses, customErrorCodes } from "CannedErrorResponses";
import { cannedResponses } from "CannedResponses";
import IllegalArgumentError from "errors/IllegalArgumentError";
import { extractAlexaTextResponses as parser } from "SpeakDirectiveParser";
import { hasIn, List } from "immutable";
import uuid from "uuid/v4";
import util from "util";
import { PRODUCT_ID, CLIENT_ID } from "Constants";
const sprintf = require("sprintf-js").sprintf;
const AVS_REQUEST_BODY = `--silent-alexa-http-boundary
Content-Disposition: form-data; name="metadata"
Content-Type: application/json; charset=UTF-8
%s`;
// TODO: Once the region setting is made configurable by the user, the URLS
// need to ge generated as against hard coded.
export const EVENTS_URL = urls.NA + paths.EVENTS;
/**
* Manages interactions with AVS
*/
export default class AVSGateway {
/**
* Sends the TextMessage event to AVS and extracts Alexa's responses.
*
* @param {String} userRequestToAlexa The request string that the user typed
* as a request for Alexa. This should not be empty or undefined.
* @param {String} accessToken The access token to communicate with AVS. This
* should not be empty of undefined.
*
* @returns A list of text responses from Alexa. Will never return undefined or
* empty list. If an error happens while communicating to AVS or while parsing
* the responses, a canned human-readable error message is returned.
*
* @throws IllegalArgumentError if the input is missing or invalid.
*/
async sendTextMessageEvent(userRequestToAlexa, accessToken) {
if (!userRequestToAlexa || !accessToken) {
throw new IllegalArgumentError(
`The request string to Alexa or the access token cannot be empty.
RequestString: ${userRequestToAlexa}
AccessToken: ${accessToken}`
);
}
const textMessageEvent = JSON.stringify(
this.buildTextMessageEvent(userRequestToAlexa)
);
const requestOptions = this.buildTextMessageFetchRequestOptions(
textMessageEvent,
accessToken
);
let isOk = false;
let payload;
await fetch(EVENTS_URL, requestOptions)
.then(response => {
if (response.ok) {
isOk = true;
return response.text();
} else {
return response.json();
}
})
.then(data => {
payload = data;
})
.catch(error => {
isOk = false; // We obtained a successful response but couldn't parse the body, probably because it was malformed.
console.log(util.inspect(error, { showHidden: true, depth: null }));
});
if (isOk) {
try {
let textResponsesFromAlexa;
if (payload) textResponsesFromAlexa = parser(payload);
if (
!textResponsesFromAlexa ||
// We don't anticipate Alexa to return a response in multiple parts where the first part is empty
// but the other parts aren't. So, the moment we see that the first part is empty, it is safe to
// ignore all other parts (which probably don't exist).
!textResponsesFromAlexa.get(0)
)
textResponsesFromAlexa = List.of(
cannedResponses.EMPTY_RESPONSE_FROM_ALEXA
);
return textResponsesFromAlexa;
} catch (error) {
console.error(
"Encountered an error while trying to parse the speak directive from AVS." +
util.inspect(error, { showHidden: true, depth: null })
);
}
}
return List.of(this.convertErrorToHumanReadableMessage(payload));
}
/**
* Sends the AddOrUpdateReport event to AVS. This event is meant to proactively
* inform Alexa about the endpoint.
*
* @returns true if the event has been successfully processed by Alexa and
* false otherwise. Alexa requires that no other events should be sent if this
* event is not processed successfully.
*/
// TODO: We should probably retry three times before propagating failures.
async sendAddOrUpdateReportEvent(accessToken) {
const addOrUpdateReportEvent = JSON.stringify(
this.buildAddOrUpdateReportEvent(accessToken)
);
const requestOptions = this.buildAddOrUpdateReportEventFetchRequestOptions(
addOrUpdateReportEvent,
accessToken
);
let isOk = false;
await fetch(EVENTS_URL, requestOptions)
.then(response => {
if (response.ok) {
isOk = true;
}
}).catch(error => {
console.log(util.inspect(error, { showHidden: true, depth: null }));
});;
return isOk;
}
convertErrorToHumanReadableMessage(errorPayload) {
let errorCode;
if (hasIn(errorPayload, ["payload", "code"]))
errorCode = errorPayload.payload.code;
return cannedErrorResponses.get(
errorCode,
// default value to return if errorCode doesn't exist
cannedErrorResponses.get(customErrorCodes.UNKNOWN_ERROR)
);
}
buildTextMessageFetchRequestOptions(textMessageEvent, accessToken) {
const data = sprintf(AVS_REQUEST_BODY, textMessageEvent);
return {
body: data,
headers: {
Authorization: "Bearer " + accessToken,
"content-type":
"multipart/form-data; boundary=silent-alexa-http-boundary"
},
cache: "no-store", // Alexa often responds differently to the same request and so we don't want to cache anything.
method: "POST"
};
}
buildTextMessageEvent(requestString) {
return {
event: {
header: {
namespace: "Text",
name: "TextMessage",
messageId: uuid()
},
payload: {
textMessage: requestString
}
}
};
}
buildAddOrUpdateReportEventFetchRequestOptions(addOrUpdateReportEvent, accessToken) {
const data = sprintf(AVS_REQUEST_BODY, addOrUpdateReportEvent);
return {
body: data,
headers: {
Authorization: "Bearer " + accessToken,
"content-type":
"multipart/form-data; boundary=silent-alexa-http-boundary"
},
cache: "no-store", // Alexa often responds differently to the same request and so we don't want to cache anything.
method: "POST"
};
}
buildAddOrUpdateReportEvent(accessToken) {
// TODO: Logic for assigning 'deviceSerialNumber' needs to be revisited.
const DSN = "12345";
return {
event: {
header: {
namespace: "Alexa.Discovery",
name: "AddOrUpdateReport",
payloadVersion: "3",
messageId: uuid(),
eventCorrelationToken: uuid()
},
payload: {
scope: {
type: "BearerToken",
token: accessToken
},
endpoints: [
{
endpointId: `${CLIENT_ID}::${PRODUCT_ID}::${DSN}`,
registration: {
productId: PRODUCT_ID,
deviceSerialNumber: DSN
},
manufacturerName: "Silent Voice Assistants",
description: "Interact with voice assistants without having to talk to them.",
friendlyName: "<NAME>",
displayCategories: ["COMPUTER", "LAPTOP", "TABLET"],
capabilities: [
{
type: "AlexaInterface",
interface: "SpeechSynthesizer",
version: "1.3"
}
],
connections: [
{
type: "UNKNOWN",
value: DSN //TODO: Is it reasonable to use a DSN here?
}
]
}
]
}
}
};
}
}
<file_sep>import React from "react";
import { shallow } from "enzyme";
import ChatBubble from "./ChatBubble";
it("renders correctly (snapshot testing)", () => {
const wrapper = shallow(<ChatBubble />);
expect(wrapper).toMatchSnapshot();
wrapper.unmount();
});
<file_sep>import React from "react";
import { render, cleanup } from '@testing-library/react';
import LoginFailedScreen from "./LoginFailedScreen";
afterEach(cleanup);
it("renders without crashing", () => {
const { asFragment } = render(<LoginFailedScreen />);
expect(asFragment(<LoginFailedScreen />)).toMatchSnapshot();
});
<file_sep>import React from "react";
import ReactDOM from "react-dom";
import "index.css";
import App from "App/App";
import AuthContext from "auth/AuthContextProvider";
import { BrowserRouter as Router } from "react-router-dom";
import registerServiceWorker from "registerServiceWorker";
import { CookiesProvider } from 'react-cookie';
ReactDOM.render(
<Router>
<CookiesProvider>
<AuthContext>
<App />
</AuthContext>
</CookiesProvider>
</Router>,
document.getElementById("root")
);
registerServiceWorker();
| 86ee12acb07d9a8f7e27de4b5ff512f72bc5ea5d | [
"JavaScript"
] | 30 | JavaScript | s-maheshbabu/silent-alexa | 8b03db144607b4ad20f417c5762f9e648a46ed29 | ab29735ef9ac06a5854ab29ca200721beb9e8ddc |
refs/heads/master | <repo_name>EdibleNickname/rpc-frame<file_sep>/pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.can</groupId>
<artifactId>rpc-frame</artifactId>
<packaging>pom</packaging>
<version>1.0-SNAPSHOT</version>
<modules>
<module>rpc-provider</module>
<module>rpc-provider-impl</module>
<module>rpc-service</module>
<module>rpc-dao</module>
<module>rpc-common</module>
</modules>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
<!-- spring -->
<spring.version>5.0.5.RELEASE</spring.version>
<!-- dubbo 服务-->
<dubbo.version>2.6.1</dubbo.version>
<zookeeper.version>3.5.3-beta</zookeeper.version>
<curator.version>4.0.1</curator.version>
<kryo.version>2.24.0</kryo.version>
<kryo.serializers.version>0.42</kryo.serializers.version>
<!--database-->
<mybatis.version>3.4.6</mybatis.version>
<mybatis.spring.version>1.3.2</mybatis.spring.version>
<druid.version>1.1.9</druid.version>
<mysql.connector.version>5.1.46</mysql.connector.version>
<!--jackson-->
<jackson.version>2.9.5</jackson.version>
<!-- rabbitMq -->
<sprong.amqp.version>2.0.3.RELEASE</sprong.amqp.version>
<!--lombok-->
<lombok.version>1.16.20</lombok.version>
<!--logback-->
<logback.version>1.3.0-alpha4</logback.version>
<log4j.over.slf4j.version>1.8.0-beta1</log4j.over.slf4j.version>
<!--testng-->
<testng.version>6.14.3</testng.version>
</properties>
<dependencyManagement>
<dependencies>
<!-- 项目自身依赖 start -->
<dependency>
<groupId>com.can</groupId>
<artifactId>rpc-provider</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.can</groupId>
<artifactId>rpc-provider-impl</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.can</groupId>
<artifactId>rpc-service</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.can</groupId>
<artifactId>rpc-dao</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.can</groupId>
<artifactId>rpc-common</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<!-- 项目自身依赖 end -->
<!--spring start-->
<!-- 核心基础包-->
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context-support</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-expression</artifactId>
<version>${spring.version}</version>
</dependency>
<!-- 数据接入 -->
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-orm</artifactId>
<version>${spring.version}</version>
</dependency>
<!--切面和事务-->
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-aspects</artifactId>
<version>${spring.version}</version>
</dependency>
<!--测试-->
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<!--spring end -->
<!-- dubbo服务 start -->
<!--dubbo核心包-->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>dubbo</artifactId>
<version>${dubbo.version}</version>
<exclusions>
<exclusion>
<groupId>org.springframework</groupId>
<artifactId>spring-expression</artifactId>
</exclusion>
<exclusion>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- zookeeper-->
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>${zookeeper.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- curator -->
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- kryo序列化 -->
<dependency>
<groupId>com.esotericsoftware.kryo</groupId>
<artifactId>kryo</artifactId>
<version>${kryo.version}</version>
</dependency>
<dependency>
<groupId>de.javakaffee</groupId>
<artifactId>kryo-serializers</artifactId>
<version>${kryo.serializers.version}</version>
<exclusions>
<exclusion>
<groupId>org.objenesis</groupId>
<artifactId>objenesis</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- dubbo服务 end -->
<!--日志 start-->
<!-- logback -->
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
<exclusions>
<exclusion>
<groupId>com.sun.mail</groupId>
<artifactId>javax.mail</artifactId>
</exclusion>
</exclusions>
</dependency>
<!--适配器: 将log4j的日志委托给slf4j框架-->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>log4j-over-slf4j</artifactId>
<version>${log4j.over.slf4j.version}</version>
</dependency>
<!--日志 end-->
<!--数据库 start-->
<!-- mybatis -->
<dependency>
<groupId>org.mybatis</groupId>
<artifactId>mybatis</artifactId>
<version>${mybatis.version}</version>
</dependency>
<!-- mybatis-spring -->
<dependency>
<groupId>org.mybatis</groupId>
<artifactId>mybatis-spring</artifactId>
<version>${mybatis.spring.version}</version>
</dependency>
<!--druid连接池-->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<!--mysql连接驱动-->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
</dependency>
<!--数据库 end-->
<!-- jackson start -->
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<!-- jackson end -->
<!-- rabbitmq -->
<dependency>
<groupId>org.springframework.amqp</groupId>
<artifactId>spring-rabbit</artifactId>
<version>${sprong.amqp.version}</version>
<exclusions>
<exclusion>
<groupId>org.springframework</groupId>
<artifactId>spring-web</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- 工具包 start -->
<!--lombok-->
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>${lombok.version}</version>
<scope>provided</scope>
</dependency>
<!-- testng -->
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<version>${testng.version}</version>
<scope>test</scope>
</dependency>
<!--工具包 end-->
</dependencies>
</dependencyManagement>
<build>
<plugins>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.7.0</version>
<configuration>
<source>${maven.compiler.source}</source>
<target>${maven.compiler.target}</target>
</configuration>
</plugin>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.20.1</version>
<configuration>
<skipTests>true</skipTests>
</configuration>
</plugin>
</plugins>
<resources>
<resource>
<directory>src/main/resources</directory>
<filtering>true</filtering>
</resource>
</resources>
</build>
<profiles>
<profile>
<id>dev</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<build>
<filters>
<filter>../dev.properties</filter>
</filters>
</build>
</profile>
</profiles>
</project><file_sep>/rpc-dao/src/main/java/com/can/dao/UserInfoDao.java
package com.can.dao;
import com.can.model.UserInfoDO;
/**
* Description:
*
* @Author LCN
* @Date 2018-04-24 下午 08:20
*/
public interface UserInfoDao {
int deleteByPrimaryKey(Long id);
int insert(UserInfoDO record);
int insertSelective(UserInfoDO record);
UserInfoDO selectByPrimaryKey(Long id);
int updateByPrimaryKeySelective(UserInfoDO record);
int updateByPrimaryKey(UserInfoDO record);
}
<file_sep>/rpc-provider-impl/src/test/java/com/can/impl/MessageProducerTest.java
package com.can.impl;
import com.can.queue.producer.MessageProducer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.testng.AbstractTestNGSpringContextTests;
import org.testng.annotations.Test;
/**
* Description: 测试rabbit的发送消息功能
*
* @Author LCN
* @Date 2018-04-25 上午 08:35
*/
@ContextConfiguration(locations = {"classpath:applicationContext-base.xml"})
public class MessageProducerTest extends AbstractTestNGSpringContextTests {
@Autowired
private MessageProducer messageProducer;
@Test
public void testSendMessage() {
messageProducer.sendMessage("rpc-frame");
}
}
<file_sep>/rpc-common/src/main/java/com/can/json/JsonUtil.java
package com.can.json;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
/**
* Description: JSON工具类
*
* @Author LCN
* @Date 2018-04-12 上午 11:22
*/
public class JsonUtil {
private static JsonUtil mJosnUtil = null;
private ObjectMapper mapper = null;
public static JsonUtil getInstance() {
if(mJosnUtil == null){
synchronized (JsonUtil.class) {
if(mJosnUtil == null){
mJosnUtil = new JsonUtil();
mJosnUtil.mapper = new ObjectMapper();
}
}
}
return mJosnUtil;
}
public String writeJson(Object entity){
String str = "";
try {
str = mapper.disableDefaultTyping().writeValueAsString(entity);
} catch (JsonGenerationException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return str;
}
public <T> T readJson(String jsonStr, Class<T> T){
T obj = null;
try {
obj = mapper.readValue(jsonStr, T);
} catch (JsonParseException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return obj;
}
}
<file_sep>/rpc-provider/src/main/java/com/can/provider/UserInfoProvider.java
package com.can.provider;
import com.can.request.UserInfoRequest;
import com.can.response.Response;
import com.can.response.dto.UserInfoDto;
/**
* Description:
*
* @Author LCN
* @Date 2018-04-24 下午 08:49
*/
public interface UserInfoProvider {
Response<Boolean> addUser(UserInfoRequest request);
Response<UserInfoDto> getUser(Long id);
}
<file_sep>/rpc-provider-impl/src/test/java/com/can/impl/UserInfoProviderImplTest.java
package com.can.impl;
import com.can.json.JsonUtil;
import com.can.provider.UserInfoProvider;
import com.can.request.UserInfoRequest;
import com.can.response.Response;
import com.can.response.dto.UserInfoDto;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.testng.AbstractTestNGSpringContextTests;
import org.testng.annotations.Test;
import javax.annotation.Resource;
/**
* Description:
*
* @Author LCN
* @Date 2018-04-24 下午 08:43
*/
@ContextConfiguration(locations = {"classpath:applicationContext-base.xml"})
public class UserInfoProviderImplTest extends AbstractTestNGSpringContextTests {
private Logger log = LoggerFactory.getLogger(UserInfoProviderImplTest.class);
@Resource
private UserInfoProvider userInfoProvider;
@Test
public void testAddUser() {
UserInfoRequest request = new UserInfoRequest();
request.setUserAge(23);
request.setUserName("LCX");
Response<Boolean> response = userInfoProvider.addUser(request);
log.info("返回结果为---------------->{}", JsonUtil.getInstance().writeJson(response));
}
@Test
public void testGetUser() {
Long id = 9L;
Response<UserInfoDto> response = userInfoProvider.getUser(id);
log.info("返回结果为---------------->{}", JsonUtil.getInstance().writeJson(response));
}
}
<file_sep>/rpc-provider-impl/pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>rpc-frame</artifactId>
<groupId>com.can</groupId>
<version>1.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>rpc-provider-impl</artifactId>
<name>rpc-provider-impl</name>
<url>http://www.example.com</url>
<dependencies>
<dependency>
<groupId>com.can</groupId>
<artifactId>rpc-service</artifactId>
</dependency>
<dependency>
<groupId>com.can</groupId>
<artifactId>rpc-provider</artifactId>
</dependency>
<!-- dubbo start -->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>dubbo</artifactId>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
</dependency>
<dependency>
<groupId>com.esotericsoftware.kryo</groupId>
<artifactId>kryo</artifactId>
</dependency>
<dependency>
<groupId>de.javakaffee</groupId>
<artifactId>kryo-serializers</artifactId>
</dependency>
<!--dubbo end -->
<dependency>
<groupId>org.springframework.amqp</groupId>
<artifactId>spring-rabbit</artifactId>
</dependency>
<!--测试-->
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
</dependency>
</dependencies>
</project>
<file_sep>/rpc-provider/src/main/java/com/can/response/Response.java
package com.can.response;
import java.io.Serializable;
/**
* Description:
*
* @Author LCN
* @Date 2018-04-24 下午 04:23
*/
public class Response<T> implements Serializable {
private T result;
private String msg = "200";
public T getResult() {
return result;
}
public void setResult(T result) {
this.result = result;
}
public String getMsg() {
return msg;
}
public void setMsg(String msg) {
this.msg = msg;
}
}<file_sep>/README.md
# rpc-frame
rpc服务调用框架
## 数据库
1. 数据库名称
数据库名称: rpc-db
2. sql语句
```sql
SET NAMES utf8mb4;
SET FOREIGN_KEY_CHECKS = 0;
-- ----------------------------
-- Table structure for userinfo
-- ----------------------------
DROP TABLE IF EXISTS `userinfo`;
CREATE TABLE `userinfo` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '物理主键',
`user_name` varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL COMMENT '用户名',
`user_age` int(3) DEFAULT NULL COMMENT '用户年龄',
PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8 COLLATE = utf8_general_ci ROW_FORMAT = Dynamic;
SET FOREIGN_KEY_CHECKS = 1;
```
## 计划
>1. 对MQ的支持
>2. Redis
>3. hibernate-validator 进行参数验证
| 9ebebb28b4d12e28087694be96ab7267457cb64f | [
"Markdown",
"Java",
"Maven POM"
] | 9 | Maven POM | EdibleNickname/rpc-frame | 89f5965bcb8c46a8c56342e32b23a935115e6c42 | 01a20c8dcbcaa6fb9ba4f284d3ad455e2c4d5e29 |
refs/heads/master | <file_sep> db.collection('comments', function(err, collection) {
collection.insert(wine, {safe:true}, function(err, result) {
if (err) {
res.send({'error':'An error has occurred'});
} else {
console.log('Success: ' + JSON.stringify(result[0]));
res.send(req.body);
}
});
});<file_sep>Wines = new Meteor.Collection('comments');
if (Meteor.isClient) {
Template.selected.positives = function(){
return Wines.find({}, { sort: { time: -1 }});
}
}
<file_sep><?php
$mongo = new Mongo(/*"localhost:3001"*/);
$chat = $mongo->chat;
$messages = $chat->messages;
$message = array(
'name' => 'Amit',
'message' => 'Hello World!!!!',
'date' => new MongoDate(/*time()*/)
);
$messages->insert($message);
?><file_sep>
$(function(){
// $('#select_link').click(function(e){
// e.preventDefault();
// console.log('select_link clicked');
// var data = {};
// data.name = "ramu";
// data.message = "tttytyt";
// $.ajax({
// type: 'POST',
// data: JSON.stringify(data),
// contentType: 'application/json',
// url: 'http://localhost:3000/endpoint',
// success: function(data) {
// console.log('success');
// console.log(JSON.stringify(data));
// }
// });
// });
// }
var $iFrame=$('#yoyo');
$('.bobmarley').hover(){
var src= this.href /* not sure of source*/
$iFrame.show().attr('src', src);
},function(){
$iFrame.hide()
};
}
);
| 785f25289c027a3efa07b92dc9633ec37a70b7bf | [
"JavaScript",
"PHP"
] | 4 | JavaScript | aggchaitanya/LiveFeed | 6593a35e7c53caf95508329f0f51bc6d1bf10edf | 8e0c154b77861c8268568883d8f82dffc1f253bc |
refs/heads/master | <file_sep>import React, { Component } from 'react';
import './App.css';
import ShowData from './ShowData';
import firebase, { auth, provider } from './firebase.js';
class App extends Component {
componentDidMount() {
auth.onAuthStateChanged((user) => {
if (user) {
this.setState({ user });
}
});
}
constructor() {
super();
this.state = {
currentItem: '',
username: '',
user: null
};
this.handleClick = this.handleClick.bind(this);
this.login = this.login.bind(this); // <-- add this line
this.logout = this.logout.bind(this); // <-- add this line
}
handleClick(e) {
e.preventDefault();
const itemsRef = firebase.database().ref('items');
const item = {
title: this.state.currentItem,
user: this.state.username
}
itemsRef.push(item);
this.setState({
currentItem: '',
username: ''
});
}
handleChange(e) {
/* ... */
}
logout() {
auth.signOut()
.then(() => {
this.setState({
user: null
});
});
}
login() {
auth.signInWithPopup(provider)
.then((result) => {
const user = result.user;
this.setState({
user
});
});
}
render() {
return (
<div className='app'>
<header>
<div className="wrapper">
<div className="title">
<h1>GetMyPassword</h1>
</div>
<div className="buttons">
{this.state.user ?
<button onClick={this.logout}>Log Out</button>
:
<button onClick={this.login}>Log In</button>
}
</div>
</div>
</header>
<div className='container'>
{this.state.user ?
<ShowData/>
:
<div className="notlogged">sss</div>
}
</div>
</div>
);
}
}
export default App;
| 1c104eb486c41de4ea18347e3428cb00e574e771 | [
"JavaScript"
] | 1 | JavaScript | edbeej/ethicalhacking | 6fa3149f7b134a8a7ed767d01cc6b9b5b8765a48 | c861c520c6f2e56ed47639dcfc65edc416630e26 |
refs/heads/master | <repo_name>aaronrtrevino12/ruby-object-attributes-lab-v-000<file_sep>/lib/person.rb
class Person
def name=(persons_name)
@name = persons_name
end
def name
@name
end
def job=(persons_job)
@job = persons_job
end
def job
@job
end
end
beyonce = Person.new
beyonce.name = "Beyonce"
beyonce.job = "Singer"
puts beyonce.name
puts beyonce.job
| 7f9e01f2265d9031c6e18ac3673d459517880e4e | [
"Ruby"
] | 1 | Ruby | aaronrtrevino12/ruby-object-attributes-lab-v-000 | acd0dc93e19834fab46ca19f13c06a871268a887 | 09e92d39a0303543668b16b4b6ed3cfc078f90b1 |
refs/heads/master | <file_sep># ButtonBlockAction
Wraps UIButton actions into blocks. Built using the composition pattern.
<file_sep>//
// ButtonBlockAction.swift
//
// Created by <NAME> on 20/12/2017.
// Copyright <NAME> 2017
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
import Foundation
import UIKit
final class ButtonBlockAction {
fileprivate weak var button: UIButton?
fileprivate var blocksPerAction: [UIControlEvents.RawValue: (_ button: UIButton?)->()] = [:]
init(button: UIButton) {
self.button = button
button.addTarget(self, action: #selector(touchDown), for: .touchDown)
button.addTarget(self, action: #selector(touchUpOutside), for: .touchUpOutside)
button.addTarget(self, action: #selector(touchUpInside), for: .touchUpInside)
button.addTarget(self, action: #selector(touchCancel), for: .touchCancel)
}
deinit {
self.button?.removeTarget(self, action: #selector(touchDown), for: .touchDown)
self.button?.removeTarget(self, action: #selector(touchUpOutside), for: .touchUpOutside)
self.button?.removeTarget(self, action: #selector(touchUpInside), for: .touchUpInside)
self.button?.removeTarget(self, action: #selector(touchCancel), for: .touchCancel)
}
@objc func touchDown() {
if let block = self.blocksPerAction[UIControlEvents.touchDown.rawValue] {
block(self.button)
}
}
@objc func touchUpOutside() {
if let block = self.blocksPerAction[UIControlEvents.touchUpOutside.rawValue] {
block(self.button)
}
}
@objc func touchUpInside() {
if let block = self.blocksPerAction[UIControlEvents.touchUpInside.rawValue] {
block(self.button)
}
}
@objc func touchCancel() {
if let block = self.blocksPerAction[UIControlEvents.touchCancel.rawValue] {
block(self.button)
}
}
func setBlockAction(for controlEvents: UIControlEvents, _ block: @escaping (_ button: UIButton?)->()) {
self.blocksPerAction[controlEvents.rawValue] = block
}
}
<file_sep>//: A UIKit based Playground for presenting user interface
import UIKit
import PlaygroundSupport
class MyViewController : UIViewController {
var buttonActions: ButtonBlockAction?
override func loadView() {
let view = UIView()
view.backgroundColor = .white
let label = UILabel()
label.frame = CGRect(x: 150, y: 200, width: 200, height: 20)
label.text = "Hello World!"
label.textColor = .black
view.addSubview(label)
self.view = view
let buttonFrame = CGRect(x: 150, y: 240, width: 100, height: 40)
let button = UIButton(frame: buttonFrame)
button.backgroundColor = .red
button.setTitle("press me", for: .normal)
self.view.addSubview(button)
self.buttonActions = ButtonBlockAction(button: button)
self.buttonActions?.setBlockAction({
print("button pressed")
}, for: .touchUpInside)
}
@objc func buttonPress() {
print("button pressed")
}
}
// Present the view controller in the Live View window
let controller = MyViewController()
PlaygroundPage.current.liveView = controller
| cd43d728e6b8d4fa7a2102c1b85f6e077b0e0feb | [
"Markdown",
"Swift"
] | 3 | Markdown | greggjaskiewicz/ButtonBlockAction | 800dd82cf7287c93782814044fe8babbaf874076 | 32fb052807a3542df5165b86bb72756fef782cb9 |
refs/heads/master | <file_sep>package com.udacity.firebase.shoppinglistplusplus.ui.login;
import android.app.ProgressDialog;
import android.content.Intent;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.util.Log;
import android.view.Menu;
import android.view.View;
import android.widget.EditText;
import android.widget.LinearLayout;
import android.widget.Toast;
import com.google.android.gms.tasks.OnCompleteListener;
import com.google.android.gms.tasks.Task;
import com.google.firebase.auth.AuthResult;
import com.google.firebase.auth.FirebaseAuth;
import com.google.firebase.database.DataSnapshot;
import com.google.firebase.database.DatabaseError;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.database.ServerValue;
import com.google.firebase.database.ValueEventListener;
import com.udacity.firebase.shoppinglistplusplus.R;
import com.udacity.firebase.shoppinglistplusplus.model.User;
import com.udacity.firebase.shoppinglistplusplus.ui.BaseActivity;
import com.udacity.firebase.shoppinglistplusplus.utils.Constants;
import java.util.HashMap;
import java.util.Map;
/**
* Represents Sign up screen and functionality of the app
*/
public class CreateAccountActivity extends BaseActivity {
private static final String LOG_TAG = CreateAccountActivity.class.getSimpleName();
private ProgressDialog mAuthProgressDialog;
private EditText mEditTextUsernameCreate, mEditTextEmailCreate, mEditTextPasswordCreate;
private DatabaseReference mFirebaseRef;
private FirebaseAuth mAuth;
private String mName,mEmail,mPassword;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_create_account);
/**
* Link layout elements from XML and setup the progress dialog
*/
//setting up firebase refference
mFirebaseRef = FirebaseDatabase.getInstance().getReference();
mAuth = FirebaseAuth.getInstance();
initializeScreen();
}
/**
* Override onCreateOptionsMenu to inflate nothing
*
* @param menu The menu with which nothing will happen
*/
@Override
public boolean onCreateOptionsMenu(Menu menu) {
return true;
}
/**
* Link layout elements from XML and setup the progress dialog
*/
public void initializeScreen() {
mEditTextUsernameCreate = (EditText) findViewById(R.id.edit_text_username_create);
mEditTextEmailCreate = (EditText) findViewById(R.id.edit_text_email_create);
mEditTextPasswordCreate = (EditText) findViewById(R.id.edit_text_password_create);
LinearLayout linearLayoutCreateAccountActivity = (LinearLayout) findViewById(R.id.linear_layout_create_account_activity);
initializeBackground(linearLayoutCreateAccountActivity);
/* Setup the progress dialog that is displayed later when authenticating with Firebase */
mAuthProgressDialog = new ProgressDialog(this);
mAuthProgressDialog.setTitle(getResources().getString(R.string.progress_dialog_loading));
mAuthProgressDialog.setMessage(getResources().getString(R.string.progress_dialog_creating_user_with_firebase));
mAuthProgressDialog.setCancelable(false);
}
/**
* Open LoginActivity when user taps on "Sign in" textView
*/
public void onSignInPressed(View view) {
Intent intent = new Intent(CreateAccountActivity.this, LoginActivity.class);
intent.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK | Intent.FLAG_ACTIVITY_CLEAR_TASK);
startActivity(intent);
finish();
}
/**
* Create new account using Firebase email/password provider
*/
public void onCreateAccountPressed(View view) {
mEmail = mEditTextEmailCreate.getText().toString();
mName = mEditTextUsernameCreate.getText().toString();
mPassword = mEditTextPasswordCreate.getText().toString();
boolean validEmail = isEmailValid(mEmail);
boolean validUserName = isUserNameValid(mName);
boolean validPassword = isPasswordValid(mPassword);
if (!validEmail || !validUserName || !validPassword) return;
mAuthProgressDialog.show();
mAuth.createUserWithEmailAndPassword(mEmail, mPassword).addOnCompleteListener(this, new OnCompleteListener<AuthResult>() {
@Override
public void onComplete(@NonNull Task<AuthResult> task) {
if (task.isSuccessful()) {
mAuthProgressDialog.dismiss();
Log.i(LOG_TAG, getString(R.string.log_message_auth_successful));
String uid = task.getResult().getUser().getUid();
createUserInFirebaseHelper(uid);
Toast.makeText(CreateAccountActivity.this,"Account Created",Toast.LENGTH_SHORT).show();
} else {
Toast.makeText(CreateAccountActivity.this, "Authentication failed.",
Toast.LENGTH_SHORT).show();
Log.d(LOG_TAG, getString(R.string.log_error_occurred) + task.getException());
mAuthProgressDialog.dismiss();
/* Display the appropriate error message
if (firebaseError.getCode() == FirebaseError.EMAIL_TAKEN) {
mEditTextEmailCreate.setError(getString(R.string.error_email_taken));
} else {
showErrorToast(task.getException();
*/
}
}
});
}
/**
* Creates a new user in Firebase from the Java POJO
*/
private void createUserInFirebaseHelper(String uid) {
final DatabaseReference userLocation = FirebaseDatabase.getInstance().getReferenceFromUrl(Constants.FIREBASE_URL_USERS).child(uid);
/**
* See if there is already a user (for example, if they already logged in with an associated
* Google account.
*/
userLocation.addListenerForSingleValueEvent(new ValueEventListener() {
@Override
public void onDataChange(DataSnapshot dataSnapshot) {
/* If there is no user, make one */
if (dataSnapshot.getValue() == null) {
/* Set raw version of date to the ServerValue.TIMESTAMP value and save into dateCreatedMap */
HashMap<String, Object> timestampJoined = new HashMap<>();
timestampJoined.put(Constants.FIREBASE_PROPERTY_TIMESTAMP, ServerValue.TIMESTAMP);
User newUser = new User(mName, mEmail, timestampJoined);
userLocation.setValue(newUser);
}
}
@Override
public void onCancelled(DatabaseError firebaseError) {
Log.d(LOG_TAG, getString(R.string.log_error_occurred) + firebaseError.getMessage());
}
});
}
private boolean isEmailValid(String email) {
boolean isGoodEmail =
(email != null && android.util.Patterns.EMAIL_ADDRESS.matcher(email).matches());
if (!isGoodEmail) {
mEditTextEmailCreate.setError(String.format(getString(R.string.error_invalid_email_not_valid),
email));
return false;
}
return isGoodEmail;
}
private boolean isUserNameValid(String userName) {
if (userName.equals("")) {
mEditTextUsernameCreate.setError(getResources().getString(R.string.error_cannot_be_empty));
return false;
}
return true;
}
private boolean isPasswordValid(String password) {
if (password.length() < 6) {
mEditTextPasswordCreate.setError(getResources().getString(R.string.error_invalid_password_not_valid));
return false;
}
return true;
}
/**
* Show error toast to users
*/
private void showErrorToast(String message) {
Toast.makeText(CreateAccountActivity.this, message, Toast.LENGTH_LONG).show();
}
}
<file_sep>package com.udacity.firebase.shoppinglistplusplus.ui.activeListDetails;
/**
* Created by shinu on 7/18/2017.
*/
import android.app.Dialog;
import android.os.Bundle;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.database.ServerValue;
import com.udacity.firebase.shoppinglistplusplus.R;
import com.udacity.firebase.shoppinglistplusplus.model.ShoppingList;
import com.udacity.firebase.shoppinglistplusplus.utils.Constants;
import java.util.HashMap;
/**
* Lets user edit the list name for all copies of the current list
*/
public class EditListNameDialogFragment extends EditListDialogFragment {
private static final String LOG_TAG = ActiveListDetailsActivity.class.getSimpleName();
String mListName;
/**
* Public static constructor that creates fragment and passes a bundle with data into it when adapter is created
*/
public static EditListNameDialogFragment newInstance(ShoppingList shoppingList,String listId,String encodedEmail) {
EditListNameDialogFragment editListNameDialogFragment = new EditListNameDialogFragment();
Bundle bundle = EditListDialogFragment.newInstanceHelper(shoppingList, R.layout.dialog_edit_list,listId,encodedEmail);
// TODO add any values you need here from the shopping list to make this change.
// Once you put a value in the bundle, it available to you in onCreate
bundle.putString(Constants.KEY_LIST_NAME,shoppingList.getListName());
editListNameDialogFragment.setArguments(bundle);
return editListNameDialogFragment;
}
/**
* Initialize instance variables with data from bundle
*/
@Override
public void onCreate(Bundle savedInstanceState) {
// TODO Extract any arguments you put in the bundle when the newInstance method
// created the dialog. You can store these in an instance variable so that they
// are available to you.
super.onCreate(savedInstanceState);
mListName=getArguments().getString(Constants.KEY_LIST_NAME);
}
@Override
public Dialog onCreateDialog(Bundle savedInstanceState) {
/** {@link EditListDialogFragment#createDialogHelper(int)} is a
* superclass method that creates the dialog
**/
Dialog dialog = super.createDialogHelper(R.string.positive_button_edit_item);
// TODO You can use the helper method in the superclass I made (EditListDialogFragment)
// called helpSetDefaultValueEditText. This will allow you to set what text the
// user sees when the dialog opens.
helpSetDefaultValueEditText(mListName);
return dialog;
}
/**
* Changes the list name in all copies of the current list
*/
protected void doListEdit() {
// TODO Do the actual edit operation here.
// Remember, you need to update the timestampLastChanged for
// the shopping list.
String mInputListName=mEditTextForList.getText().toString();
if(!mInputListName.equals("")) {
if (mListName != null&&mListId!=null) {
if (!mInputListName.equals(mListName)) {
DatabaseReference refShoppingList = FirebaseDatabase.getInstance().getReferenceFromUrl(Constants.FIREBASE_URL_ACTIVE_LIST);
HashMap<String, Object> updatedProps = new HashMap<>();
updatedProps.put(Constants.KEY_LIST_NAME, mInputListName);
HashMap<String, Object> changedTimesSTamp = new HashMap<>();
changedTimesSTamp.put(Constants.FIREBASE_PROPERTY_TIMESTAMP, ServerValue.TIMESTAMP);
updatedProps.put(Constants.FIREBASE_PROPERTY_TIMESTAMP_LAST_CHANGED, changedTimesSTamp);
refShoppingList.updateChildren(updatedProps);
}
}
}
}
}
<file_sep>apply plugin: 'com.android.application'
android {
compileSdkVersion 25
buildToolsVersion "25.0.1"
defaultConfig {
applicationId "com.udacity.firebase.shoppinglistplusplus"
minSdkVersion 21
targetSdkVersion 25
versionCode 1
versionName "1.0"
multiDexEnabled true
}
buildTypes {
release {
minifyEnabled false
proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
}
debug {
}
}
buildTypes.each{
it.buildConfigField 'String','UNIQUE_FIREBASE_ROOT_URL', UniqueFirebaseRootUrl
}
packagingOptions {
exclude 'META-INF/LICENSE'
exclude 'META-INF/LICENSE-FIREBASE.txt'
exclude 'META-INF/NOTICE'
}
}
dependencies {
compile fileTree(dir: 'libs', include: ['*.jar'])
compile 'com.android.support:multidex:1.0.1'
compile 'com.android.support:appcompat-v7:25.3.1'
compile 'com.android.support:design:25.3.1'
compile 'com.android.support:support-v4:25.3.1'
compile 'com.android.support:cardview-v7:25.3.1'
/* For Google Play Services */
compile 'com.google.android.gms:play-services:11.2.0'
// compile 'com.google.android.gms:play-services-auth:9.8.0'
// compile 'com.firebase:firebase-client-android:2.3.1+'
compile 'com.firebaseui:firebase-ui:2.2.0'
compile 'com.google.firebase:firebase-auth:11.2.0'
compile 'com.google.firebase:firebase-core:11.2.0'
compile 'com.google.firebase:firebase-database:11.2.0'
compile group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.8.4'
}
apply plugin: 'com.google.gms.google-services'<file_sep>package com.udacity.firebase.shoppinglistplusplus.ui.activeLists;
import android.content.Intent;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.ListView;
import android.widget.TextView;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.udacity.firebase.shoppinglistplusplus.R;
import com.udacity.firebase.shoppinglistplusplus.model.ShoppingList;
import com.udacity.firebase.shoppinglistplusplus.ui.activeListDetails.ActiveListDetailsActivity;
import com.udacity.firebase.shoppinglistplusplus.utils.Constants;
import com.udacity.firebase.shoppinglistplusplus.utils.Utils;
import java.util.Date;
/**
* A simple {@link Fragment} subclass that shows a list of all shopping lists a user can see.
* Use the {@link ShoppingListsFragment#newInstance} factory method to
* create an instance of this fragment.
*/
public class ShoppingListsFragment extends Fragment {
private ListView mListView;
//private TextView mTextViewListName;
//private TextView mTextViewListOwner;
//private TextView mTextViewEditTime;
private ActiveListAdapter mActiveListAdapter;
public ShoppingListsFragment() {
/* Required empty public constructor */
}
/**
* Create fragment and pass bundle with data as it's arguments
* Right now there are not arguments...but eventually there will be.
*/
public static ShoppingListsFragment newInstance() {
ShoppingListsFragment fragment = new ShoppingListsFragment();
Bundle args = new Bundle();
fragment.setArguments(args);
return fragment;
}
/**
* Initialize instance variables with data from bundle
*/
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
if (getArguments() != null) {
}
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
/**
* Initalize
* UI elements
*/
View rootView = inflater.inflate(R.layout.fragment_shopping_lists, container, false);
initializeScreen(rootView);
DatabaseReference activeListsRef = FirebaseDatabase.getInstance().getReferenceFromUrl(Constants.FIREBASE_URL_ACTIVE_LISTS);
mActiveListAdapter = new ActiveListAdapter(getActivity(), ShoppingList.class, R.layout.single_active_list, activeListsRef);
mListView.setAdapter(mActiveListAdapter);
mListView.setOnItemClickListener(new AdapterView.OnItemClickListener() {
@Override
public void onItemClick(AdapterView<?> adapterView, View view, int position, long l) {
ShoppingList selectedList =mActiveListAdapter.getItem(position);
if(selectedList!=null){
Intent intent = new Intent(getActivity(),ActiveListDetailsActivity.class);
String listId = mActiveListAdapter.getRef(position).getKey();
intent.putExtra(Constants.KEY_LIST_ID,listId);
startActivity(intent);
//finish();
}
}
});
return rootView;
}
@Override
public void onDestroy() {
super.onDestroy();
mActiveListAdapter.cleanup();
}
/**
* Link listView elements from XML
*/
private void initializeScreen(View rootView) {
mListView = (ListView) rootView.findViewById(R.id.list_view_active_lists);
}
} | b43bed2ae71b95f9142069d9010cf91cb6927266 | [
"Java",
"Gradle"
] | 4 | Java | shubhamgarg12/shoppingplus | 738c3f41a90d1588d7699b84a44bb0b61eac4d37 | b77aa494ad205274cd9de8dd1170cf2fa1a657b2 |
refs/heads/master | <repo_name>snd/dejavu<file_sep>/installation-and-usage.md
# installation
checkout:
```
cd ~/workspace
git clone https://github.com/worldveil/dejavu.git
cd dejavu
```
install python 3:
```
brew install python3
```
create virtualenv for python3:
```
virtualenv -p python3 .
```
activate virtualenv:
```
source bin/activate
```
```
brew install portaudio
pip3 install pyaudio
```
```
brew install ffmpeg
pip3 install pydub
```
```
pip3 install numpy
pip3 install scipy
pip3 install matplotlib
```
install x11 which is needed by mathplotlib from http://xquartz.macosforge.org/landing/
```
brew install mysql
pip3 install mysqlclient
```
```
pip3 install git+git://github.com/WarrenWeckesser/wavio.git
```
```
mysql.server start
mysql -u root
```
in mysql:
```
CREATE DATABASE IF NOT EXISTS dejavu;
```
# usage
this should now run without errors:
```
python3 example.py
```
inspect the database that was filled with songs and fingerprints by `example.py`:
```
mysql -u root dejavu
```
in mysql:
```
show tables;
show columns from songs;
show columns from fingerprints;
select * from songs;
select count(*) from fingerprints;
select count(*) from fingerprints where song_id = 1;
```
to clear the database:
```
delete from songs;
delete from fingerprints;
```
```
python3 run_tests.py --secs 5 --temp ./temp_audio --log-file ./results/dejavu-test.log --padding 8 --seed 42 --results ./results ./mp3
```
<file_sep>/recognize.py
import sys
import warnings
warnings.filterwarnings("ignore")
import argparse
import timeit
from dejavu import Dejavu
from dejavu.timer import Timer
from dejavu.recognize import FileRecognizer
parser = argparse.ArgumentParser()
parser.add_argument("file", help="the file to recognize")
parser.add_argument(
"-s",
"--secs",
help="how many seconds to fingerprint for recognition",
type=int)
args = parser.parse_args()
# load config from a JSON file (or anything outputting a python dictionary)
config = {
"database": {
"host": "127.0.0.1",
"user": "root",
"passwd": "",
"db": "dejavu"
}
}
if args.secs:
config["fingerprint_limit"] = args.secs
if __name__ == '__main__':
# create a Dejavu instance
djv = Dejavu(config)
# Recognize audio from a file
print("start recognizing")
with Timer("djv.recognize") as t:
song = djv.recognize(FileRecognizer, args.file)
print("From file we recognized: %s\n" % song)
<file_sep>/dejavu/timer.py
import time
class Timer(object):
def __init__(self, description=None):
self.description = description
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.s = self.end - self.start
self.ms = self.s * 1000
if self.description:
print('elapsed time %s: %f s' % (self.description, self.s))
| 1a20f5b5a5bcddc93fb2e0710e79833f4c737ada | [
"Markdown",
"Python"
] | 3 | Markdown | snd/dejavu | edcf8af18b4d62b15bbb7bc59aa9e85b9b4cf1f6 | 5c02c2079fbf25ce9df95c863db1c67b0975ef34 |
refs/heads/master | <repo_name>ZedanDredal/CaptchaDa_z<file_sep>/README.md
# CaptchaDa
## Presentation
Haha, i know, that look useless for most of people getting there, because of all current systems already available across the web who have this feature, such as the **ReCaptcha of google**, but i wanted to made Mine, *Any libraries/framework/API/EndPoint needed*! A from scratch system of captcha to protectyour web pages and your forms!
This is the Captcha i used on my project [AntiDDOS-System](https://github.com/sanix-darker/antiddos-system).
## How to use it
CaptchaDa have 2 modes:
- You can get only the image of the captcha:
You just need to call CaptchaDa.php as a source file
Ex:
```html
<img src="CaptchaDa.php">
```
or
- You can get the complete form verification by requesting and pass the parameter _?getbox=_
Of course, in the server, you have the current $_SESSION Code value to compare with what the user enter:
```php
// The generated code is saved in this current variable
$_SESSION['CAPTCHADA'] = $code;
```
## How it's works
CaptchaDa use the *GD* library in PHP to generate and image of caracter in randomly colors and position, so that it will be completly difficult for robots to pass forms.
## Rendering
<img src="render.png">
## Author
- [Sanix-darker](https://github.com/sanix-darker)<file_sep>/CaptchaDa.php
<?php
/**
* Author: <NAME>
* [CaptchaDa description]
* @param [type] $width [description]
* @param [type] $height [description]
* @param [type] $codelenght [description]
* @param [type] $space_between_caracter [description]
*/
function CaptchaDa($width,$height,$codelenght, $space_between_caracter){
if(!isset($space_between_caracter))
$space_between_caracter = 10;
if(!isset($_SESSION))
session_start();
$liste = md5('134679ACEFGHIJLMNPRTUVWXY');
$code = '';
$image = @imagecreate($width, $height) or die('Impossible d\'initializer GD');
for( $i=0; $i<$space_between_caracter; $i++ )
imageline($image,mt_rand(0,$width), mt_rand(0,$height),mt_rand(0,$width), mt_rand(0,$height),imagecolorallocate($image, mt_rand(230,255),mt_rand(230,255),mt_rand(230,255)));
for( $i=0, $x=0; $i<$codelenght; $i++ )
{
$charactere = substr($liste, rand(0, strlen($liste)-1), 1);
$x += $space_between_caracter + mt_rand(0,$space_between_caracter);
imagechar($image, mt_rand(3,4), $x, mt_rand(4,20), $charactere,
imagecolorallocate($image, mt_rand(0,155), mt_rand(0,155), mt_rand(0,155)));
$code .= ($charactere);
}
header('Content-Type: image/jpeg');
imagejpeg($image);
imagedestroy($image);
$_SESSION['CAPTCHADA'] = $code;
}
function getBox(){
echo '<form action=""><img style="width: 100%;" src="CaptchaDa.php"><br><input type="text" name="CAPTCHADA" placeholder="Write the Code here and press Enter." style="width: 100%;"></form>';
}
(isset($_REQUEST['getbox']))? getBox(): CaptchaDa(120,50,7,10);
?><file_sep>/index.php
<span>CaptchaDa</span>
<div class="CaptchaDa" id="cd<?=md5(rand(0,999999)."".rand(0,999999))?>" data-id="1" data-width="" data-height="" data-length="" data-lang="fr" style="width: 260px;"></div>
<script type="text/javascript">
var xh;
if (window.XMLHttpRequest)
xh = new XMLHttpRequest();
else if (window.ActiveXObject)
xh = new ActiveXObject('Microsoft.XMLHTTP');
else
alert('JavaScript : Ce navigateur ne supporte pas les objets XMLHttpRequest...');
xh.open('GET','CaptchaDa.php?getbox=true',true);
xh.onreadystatechange = function()
{
if(xh.readyState == 4){
document.getElementsByClassName('CaptchaDa')[0].innerHTML = xh.responseText;
}
}
xh.send(null);
</script>
| 3819d1ad8b99cf5f4503d4b8872ebc38ea8757df | [
"Markdown",
"PHP"
] | 3 | Markdown | ZedanDredal/CaptchaDa_z | a009953992782efc864a9f32265add94325c9e54 | 5ad7acbf5162158458bde1b7d17d77dddcff3d77 |
refs/heads/master | <file_sep>// document.addEventListener('DOMContentLoaded', init, false);
// function init(){
// function message () {
// alert("Hello!");
// }
// var button = document.getElementsByClassName('container');
// button.addEventListener('click', message, true);
// }
// var body = document.querySelector('body');
// body.addEventListener('mouseover', function(){
// if(event.target.tagName.toLowerCase() === 'input'){
// this.style.backgroundColor = "red";
// }
// });
// var publications = document.getElementsByClassName('publications');
// publications.addEventListener('mouseover', function(){
// publications.style.backgroundColor = "green";
// });
// document.querySelector('body').addEventListener('click', function(event) {
// console.log('Clicked body');
// if (event.target.className.toLowerCase() === 'card-body') {
// console.log('Clicked card-body');
// // do your action on your 'li' or whatever it is you're listening for
// event.target.classList.add('hoverResearchItem');
// }
// event.stopPropagation();
// });
/* --- for research publication items --- */
// var cardBody = document.querySelectorAll('.card-body');
// for(var i=0; i<cardBody.length; i++){
// cardBody[i].addEventListener('mouseover', function(){
// this.classList.add('hoverResearchItem');
// });
// cardBody[i].addEventListener('mouseout', function(){
// this.classList.remove('hoverResearchItem');
// });
// }
// body.addEventListener('mouseover', function(){
// this.style.backgroundColor = "red";
// });
// document.querySelector('body').addEventListener('click', function(event) {
// if (event.target.className === 'food') {
// event.target.classList.add('rice');
// }
// event.stopPropagation();
// });
<file_sep>var express = require("express"),
mongoose = require("mongoose"),
bodyParser = require("body-parser"),
methodOverride = require("method-override"),
Publication = require("./models/publications"),
Project = require("./models/projects"),
User = require("./models/user"),
upload = require("./models/storage"),
seedDB = require("./seed"),
expressSanitizer = require("express-sanitizer"),
passport = require("passport"),
LocalStrategy = require("passport-local"),
flash = require("connect-flash");
var app = express();
/* packages for adding uploading files (particularly images) in our projects */
var path = require("path"),
crypto = require("crypto"),
multer = require("multer"),
GridFsStorage = require("multer-gridfs-storage"),
Grid = require("gridfs-stream");
// const { MongoClient } = require("mongodb");
// const uri = process.env.BLOGDATATBASEURL;
// MongoClient.connect(uri,{ useNewUrlParser: true });
/* old connection approach*/
// mongoose.connect('mongodb://localhost/blogDB');
// Primary mongoose connection used by all models; URI comes from the environment.
mongoose.connect(process.env.BLOGDATATBASEURL, { useNewUrlParser: true });
// console.log('database -----\n' + process.env.BLOGDATATBASEURL);
/* -- mongo connection set -- new way -- also adding gridfs --*/
// Mongo URI
// console.log('mongo uri');
// const mongoURI = 'mongodb://localhost/blogDB';
const mongoURI = process.env.BLOGDATATBASEURL;
// console.log(process.env.BLOGDATATBASEURL);
// Create mongo connection
// NOTE(review): this opens a SECOND connection to the same database; it is
// used only by gridfs-stream below, while mongoose.connect above serves the models.
const conn = mongoose.createConnection(mongoURI);
// init gfs
// `gfs` stays undefined until the connection emits 'open'; route handlers
// that use it implicitly assume startup has finished before requests arrive.
let gfs;
// connect, and ensure it is openned before assigning gfs
conn.once('open', () => {
// initiation stream
// Bind gridfs-stream to the 'uploads' GridFS bucket (collections
// uploads.files / uploads.chunks).
gfs = Grid(conn.db, mongoose.mongo);
gfs.collection('uploads');
});
app.use(bodyParser.json());
/*------------------------------------------------------------*/
app.use(bodyParser.urlencoded({extended: true}));
app.set('view engine', 'ejs');
// app.use(express.static('public'));
app.use(express.static(__dirname + "/public"));
app.use(methodOverride("_method"));
// Adds my publications to the database - only required once
// seedDB();
app.use(expressSanitizer());
// using connect-flash for flash messages
app.use(flash());
/********PASSPORT Configuration*********/
app.use(require("express-session")({
secret: "Regina is the precious",
resave: false,
saveUninitialized : false
}));
app.use(passport.initialize());
app.use(passport.session());
passport.use(new LocalStrategy(User.authenticate()));
passport.serializeUser(User.serializeUser());
passport.deserializeUser(User.deserializeUser());
// specifying a middle to send current user to all routes so that
// login, logout, and sign up buttons can be displayed based on the
// login status of the user, i.e., if they are logged in or not
app.use(function(req, res, next){
// adding users to all ejs (html) templates
res.locals.currentUser = req.user;
//Adding flash messages to all ejs (html) templates
res.locals.error = req.flash("error");
res.locals.success = req.flash("success");
next();
});
// @route GET /image/:filename
// @desc Display image with the given filename
app.get('/image/:filename', (req, res)=>{
gfs.files.findOne({filename: req.params.filename}, (err, file)=>{
if(!file || file.length === 0){
return res.status(404).json({
err: 'No file exists'
});
}
// console.log(file.contentType);
// check if file is an image
if(file.contentType === "image/jpeg" || file.contentType === "image/png"|| file.contentType === 'image/jpeg'){
const readstream = gfs.createReadStream(file.filename);
readstream.pipe(res);
} else {
// var valueType = typeof file.contentType;
res.status(404).json({
err: 'Not an image',
type: file.contentType,
typeOf: typeof file.contentType,
imgDetails: file
});
}
});
});
// @route POST /upload
// @desc uploads file to db
// app.post('/upload', upload.single('file'), (req, res)=>{
// // res.json({file: req.file});
// res.redirect('/');
// })
/* adding routes */
var researchRoutes = require("./routes/research");
var projectRoutes = require("./routes/project");
var experienceRoutes = require("./routes/experience");
var awardRoutes = require("./routes/award");
var indexRoutes = require("./routes/index");
app.use("/", researchRoutes);
app.use("/", projectRoutes);
app.use("/", experienceRoutes);
app.use("/", awardRoutes );
app.use("/", indexRoutes);
app.listen(process.env.PORT, process.env.IP, function(){
console.log('Blog App Server listening');
})<file_sep>var mongoose = require("mongoose");
var publicationSchema = new mongoose.Schema({
title: String,
authors: String,
venue: String,
type: String,
year: String,
abstract: String,
url: String,
doi: String,
rank: String,
created: {type: Date, default: Date.now}
});
module.exports = mongoose.model('Publication', publicationSchema);<file_sep>var mongoose = require("mongoose");
var projectSchema = new mongoose.Schema({
title: String,
content: String,
// image: String,
images: [String],
// images: [{
// image: String
// }],
status: String,
hide: String,
summary: String,
created: {type: Date, default: Date.now}
});
module.exports = mongoose.model('Project', projectSchema);<file_sep>
var request = require("request");
var Publication = require("./models/publications");
/* --- using dblp API to retrieve my publication information --- */
function seedDB(){
var source = "http://dblp.org/search/publ/api?q=wondoh+john&format=json";
request(source, function(error, response, body){
if(!error && response.statusCode==200){
var data = JSON.parse(body);
var dataArray = data.result.hits.hit;
for(var i=0; i<dataArray.length; i++){
var dataObject = {
title: dataArray[i].info.title,
authors: buildString(dataArray[i].info.authors.author),
venue: dataArray[i].info.venue,
year: dataArray[i].info.year,
type: dataArray[i].info.type,
doi: dataArray[i].info.doi,
url: dataArray[i].info.url
};
Publication.create(dataObject, function(err, createdPublication) {
if(err){
console.log(err);
} else {
console.log(createdPublication);
}
});
}
}
});
}
function buildString(stringArray){
var newString = '';
for(var i=0; i<stringArray.length;i++){
if(i === (stringArray.length-1)){
newString += stringArray[i];
} else {
newString += stringArray[i] +', ';
}
}
return newString;
}
// console.log('building string');
// var strArray = ['John', 'Wondoh', 'Kofi'];
// console.log(buildString(strArray));
module.exports = seedDB;
/*===================================================================*/
/* Here for testing */
/*===================================================================*/
// var publicationArray =[
// {
// title: 'Dynamic temporal constraints in business processes.',
// authors: '<NAME>, <NAME>, <NAME>',
// venue: 'ACSW',
// year: '2017',
// type: 'Conference and Workshop Papers',
// doi: '10.1145/3014812.3014848',
// url: 'https://dblp.org/rec/conf/acsw/WondohGS17',
// },
// {
// title: 'Utilising bitemporal information for business process contingency management.',
// authors: '<NAME>, <NAME>, <NAME>',
// venue: 'ACSW',
// year: '2016',
// type: 'Conference and Workshop Papers',
// doi: '10.1145/2843043.2843045',
// url: 'https://dblp.org/rec/conf/acsc/WondohGS16',
// },
// {
// title: 'Contingency Management for Event-Driven Business Processes.',
// authors: '<NAME>, <NAME>, <NAME>',
// venue: 'OTM Conferences',
// year: '2017',
// type: 'Conference and Workshop Papers',
// doi: '10.1007/978-3-319-69462-7_21',
// url: 'https://dblp.org/rec/conf/otm/WondohGS17',
// },
// {
// title: 'Propagation of Event Content Modification in Business Processes.',
// authors: '<NAME>, <NAME>, <NAME>',
// venue: 'ICSOC',
// year: '2016',
// type: 'Conference and Workshop Papers',
// doi: '10.1007/978-3-319-46295-0_5',
// url: 'https://dblp.org/rec/conf/icsoc/WondohGS16',
// },
// {
// title: 'Bitemporal Support for Business Process Contingency Management.',
// authors: '<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>',
// venue: 'ER Workshops',
// year: '2015',
// type: 'Conference and Workshop Papers',
// doi: '10.1007/978-3-319-25747-1_11',
// url: 'https://dblp.org/rec/conf/er/WondohGGRSS15',
// }
// ];<file_sep># Personal Website
This project is my personal website where I have entries about my projects, research, work experience and awards. It uses Bootstrap, Express, and Passport as the main frameworks. It supports Authentication, Authorization, and it is a full CRUD RESTful web app. Nodejs is used on the backend. It has been deployed.
This project is very useful for most people who want to have a website and want to be able to manage it without having to look at the HTML. It gives you the flexibility to work on your website like a social media platform.
## Authentication and Authorization
Since I am the only user required to make changes to the site, there is no need for a signup button. I, therefore, only included a login in a button that authenticates me as the owner of the website. After I login in, I can make changes to the entries on the website. These include:
* Changing my profile picture
* Adding projects and research items
* Editing added entries
* Deleting added entries
For authentication, I used the passport package and for the method of authentication, I used the local strategy.
## Pictures
The profile picture and project pictures are stored in the database using multer-gridfs-storage. This allows us to store files in our mongo database without a size restriction.
## APIs
I used the dblp API to seed the initial entries for my research papers in the database. This API does not provide the abstract of the paper or the ranking of the conference. These are added later. The following is the API link to my research papers: http://dblp.org/search/publ/api?q=john+wondoh&format=json
I also create an API to stream my photos so that they can be displayed on the website. This API is used for displaying the profile picture and the pictures for each project.
<file_sep>var express = require("express");
var router = express.Router();
var upload = require("../models/storage");
// var fs = require('fs');
// const fileUpload = require('express-fileupload');
// router.use(fileUpload());// use express-fileupload as default parser for multipart/form-data encoding
// var Experience = require("../models/projects");
var expressSanitizer = require("express-sanitizer");
router.use(expressSanitizer());
var passport = require("passport");
var User = require("../models/user");
// var Picture = require("../models/picture");
/*--------------------------------------------------*/
var mongoose = require("mongoose"),
path = require("path"),
crypto = require("crypto"),
multer = require("multer"),
GridFsStorage = require("multer-gridfs-storage"),
Grid = require("gridfs-stream");
// Mongo URI
// const mongoURI = 'mongodb://localhost/blogDB';
// mongoose.connect(process.env.BLOGDATATBASEURL);
const mongoURI = process.env.BLOGDATATBASEURL;
// Create mongo connection
const conn = mongoose.createConnection(mongoURI);
// init gfs
let gfs;
conn.once('open', () => {
// initiation stream
gfs = Grid(conn.db, mongoose.mongo);
gfs.collection('uploads');
});
/*--------------------------------------------------*/
/********* Routes *************/
// Home page route
router.get('/', function(req, res){
var photo;
User.find({}, function(err, foundUsers){
if(err){
console.log(err);
} else {
if(foundUsers.length !== 0){
if(foundUsers[0].photo !=null){
photo = foundUsers[0].photo;
} else {
photo = false;
}
}
return res.render('home', {photo: photo});
}
});
});
// old version
// router.get('/', function(req, res){
// gfs.files.find().toArray((err, files)=>{
// //check if files exist
// if(!files || files.length === 0){
// res.render('home', {file: false});
// } else {
// // map is a high level JS array -- you can learn more about this
// files.map(file => {
// if(file.contentType ==="image/jpeg" || file.contentType === "image/png"){
// file.isImage = true;
// } else {
// file.isImage = false;
// }
// });
// for(var i=files.length-1; i >= 0; i--){
// if(files[i].isImage){
// return res.render('home', {file: files[i]});
// }
// }
// res.render('home', {files: files});
// }
// });
// });
// profile pictutre post
router.post('/upload', upload.single('file'), (req, res)=>{
// res.json({file: req.file});
// console.log(req.file);
if(req.user.photo){
console.log('removing old photo')
gfs.remove({_id: req.user.photo, root: 'uploads'}, (err, gridStore)=>{
if(err){
console.log(err);
// return res.status(404).json({
// err: err
// });
}
})
}
req.user.photo = req.file.filename;
req.user.save();
res.redirect('/');
});
// setting up local user registration
function userSignUp(username, password){
console.log('function called');
var newUser = new User({username: username});
User.register(newUser, password, function(err, user){
console.log('in user.register');
if(err){
console.log('Error occurred while creating new user');
console.log(err);
} else{
console.log('New User is being created');
passport.authenticate('local')(function(){
console.log(user);
});
}
});
}
// console.log('We are in the index route');
// userSignUp('johnwondoh', 'blaque2010');
// console.log('after sign up' );
// login route
router.get('/login', function(req, res){
res.render('login');
});
//handling login
router.post('/login', passport.authenticate('local',
{
successRedirect: '/',
failureRedirect: '/login'
}), function(req, res){
});
// handling logout
router.get('/logout', function(req, res) {
req.logout();
// req.flash("success", "logged you out");
res.redirect('/');
});
// // temporary image route
// router.get('/image', function(req, res) {
// Picture.find({}, function(err, foundPictures){
// if(err){
// console.log(err);
// } else {
// res.render('image', {pictures: foundPictures});
// }
// });
// // res.render('image');
// });
// // temporary post request
// router.post('/image', function(req, res){
// var picture = new Picture;
// // console.log(req.files);
// // console.log(req.body.file);
// // picture.img.data = req.files.data;
// picture.img.data = fs.readFileSync(req.files.data);
// picture.img.contentType = 'image/png';
// picture.save(function (err, savedPicture){
// if (err) {
// console.log('----------------------------------------');
// console.log('An Error Occurred While Saving the Image');
// console.log('----------------------------------------');
// console.log(err);
// res.redirect('/');
// } else {
// console.log('----------------------------------------');
// console.log(' Image was Saved Successfully');
// console.log('----------------------------------------');
// console.log(savedPicture);
// res.redirect('/');
// }
// });
// });
/*=====================================================================================*/
/*--
mongoose.connection.on('open', function () {
console.error('mongo is open');
// empty the collection
Picture.remove(function (err) {
if (err) throw err;
console.error('removed old docs');
// store an img in binary in mongo
var a = new A;
a.img.data = fs.readFileSync(imgPath);
a.img.contentType = 'image/png';
a.save(function (err, a) {
if (err) throw err;
console.error('saved img to mongo');
// start a demo server
var server = express.createServer();
server.get('/', function (req, res, next) {
A.findById(a, function (err, doc) {
if (err) return next(err);
res.contentType(doc.img.contentType);
res.send(doc.img.data);
});
});
server.on('close', function () {
console.error('dropping db');
mongoose.connection.db.dropDatabase(function () {
console.error('closing db connection');
mongoose.connection.close();
});
});
server.listen(3333, function (err) {
var address = server.address();
console.error('server listening on http://%s:%d', address.address, address.port);
console.error('press CTRL+C to exit');
});
process.on('SIGINT', function () {
server.close();
});
});
});
});
--*/
/*=====================================================================================*/
module.exports = router; | 83764752dc8dc68edf220e87376b7584cb7b0d62 | [
"JavaScript",
"Markdown"
] | 7 | JavaScript | johnwondoh/PersonalWebDev | a15fc3e61750ed70d5af9ceddc3cae569259c4a8 | 1d22bcb70ec07a6443cddcf007833f76ba294f8c |
refs/heads/master | <repo_name>22edwardr/kokoroball<file_sep>/insertarPublicacion.php
<?php
include 'conexion.php';
session_start();
$titulo=$_POST['titulo'];
$descripcion=$_POST['descripcion'];
if($titulo==NULL || $descripcion == NULL ){
if($titulo==NULL){
header('Location: index.php?mensaje=1');
}
if($descripcion == NULL){
header('Location: index.php?mensaje=2');
}
}
else{
$sql = "SET time_zone ='-5:00'";
if($conexion->query($sql) === FALSE){
echo "Contactese con el administrador del sistema error al realizar operacion en base de datos: ". $sql . "<br>" . $conexion->error;
}
$sql = "INSERT INTO publicacion(titulo,descripcion,fecha,id_usuario) VALUES ('".$titulo."','".$descripcion."',now(),'".$_SESSION['id']."')";
if($conexion->query($sql) === TRUE){
header('Location: index.php?mensaje=3');
} else {
echo "Contactese con el administrador del sistema error al realizar operacion en base de datos: ". $sql . "<br>" . $conexion->error;
}
}
mysqli_close($conexion);
?><file_sep>/modificarPlaylist.php
<?php
include 'conexion.php';
include 'sesion.php';
$id=$_POST['id'];
$nombre=$_POST['nombre'];
$descripcion=$_POST['descripcion'];
$link=$_POST['link'];
$accion=$_POST['accion'];
$playlist = $_POST['playlist'];
if($accion !="Eliminar"){
if($nombre == "" || $link==""){
header('Location: playlist.php?playlist='.$playlist.'&mensaje=4');
}
$link = youtube_parse_youtube_id($link);
if($link == false || strlen($link) != 11){
header('Location: playlist.php?playlist='.$playlist.'&mensaje=5');
}
}
$sql = "SET time_zone ='-5:00'";
if($conexion->query($sql) === FALSE){
echo "Contactese con el administrador del sistema error al realizar operacion en base de datos: ". $sql . "<br>" . $conexion->error;
}else{
$mensaje=0;
if($accion=="Guarda"){
$sql = "UPDATE playlist SET nombre='".$nombre."', descripcion='".$descripcion."',link='".$link."',fecha=now(),id_usuario='".$_SESSION['id']."' WHERE id=".$id;
$mensaje=1;
}
else if($accion=="Inserta"){
$sql ="INSERT INTO playlist(nombre,descripcion,link,fecha,id_usuario,lista_playlist) VALUES ('".$nombre."','".$descripcion."','".$link."',now(),'".$_SESSION['id']."','".$playlist."')";
$mensaje=2;
}
else{
$sql= "DELETE FROM playlist WHERE id=".$id;
$mensaje=3;
}
if($conexion->query($sql) === TRUE){
header('Location: playlist.php?playlist='.$playlist.'&mensaje='.$mensaje);
} else {
echo "Contactese con el administrador del sistema error al realizar operacion en base de datos". $sql . "<br>" . $conexion->error;
}
}
mysqli_close($conexion);
function youtube_parse_youtube_id( $data )
{
if( strlen($data) == 11 )
{
return $data;
}
preg_match( "/^.*(youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=|\&v=)([^#\&\?]*).*/", $data, $matches);
return isset($matches[2]) ? $matches[2] : false;
}
?> <file_sep>/lista_playlist.php
<?php
include 'sesion.php';
include 'conexion.php';
require_once 'i18n.init.php';
$sql = 'SELECT lp.id,lp.nombre,lp.descripcion,lp.id_usuario,lp.fecha FROM lista_playlist lp order by RAND()';
?>
<html lang="es">
<head>
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
<link rel="stylesheet" href="css/bootstrap.min.css"/>
<link rel="stylesheet" href="css/bootstrap-theme.min.css"/>
<link rel="stylesheet" href="css/styles.css"/>
<style>
a:hover {
color: white;
}
/* selected link */
a:active {
color: white;
}
a{
color: white;
}
</style>
<link rel="icon" type="image/png" sizes="32x32" href="images/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="images/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="images/favicon-16x16.png">
<title>:v</title>
<header id="header">
</header>
</head>
<body>
<div class="container-fluid">
<div class="row">
<div class="col-md-2 hidden-xs hidden-sm fondo-lado">
<div class="fondo-lado sidebar-nav-fixed pull-right affix">
<img src="images/delDESTINO.png" alt="delDESTINO" class="img-responsive">
</div>
</div>
<div class="col-md-10 espacio-contenido">
<div id="mensaje" style="display:none;" class="alert alert-dismissible" role="alert">
<strong><?php echo L::Mensaje; ?></strong><div style="display:inline;" id="mensajeTexto"></div>
</div>
<div align="center">
<button type="submit" id="irAInsertar" data-toggle="modal" data-target="#myModal" class="btn btn-success" ><?php echo L::Inserta_una_nueva_lista; ?></button>
<br/>
<br/>
</div>
<?php
if($result = mysqli_query($conexion, $sql)){
if(mysqli_num_rows($result) > 0){
$count = 1;?>
<table class="table table-responsive">
<thead style="background-color: #303030;
color: white;">
<tr align="center">
<td><?php echo L::Numero; ?></td>
<td><?php echo L::Nombre; ?></td>
<td><?php echo L::Descripcion; ?></td>
<td><?php echo L::Fecha; ?></td>
<td><?php echo L::Vamonos; ?></td>
<td><?php echo L::Cambiale_si_quieres; ?></td>
</tr>
</thead>
<?php
while($row2 = mysqli_fetch_array($result)){?>
<form action="modificar_lista_playlist.php" method="POST">
<?php if($row2['id_usuario'] == 1){?>
<tr align="center" style="background-color: #843D65;
color: white;">
<?php
}
else{
?>
<tr align="center" style="background-color: #7D4C75;
color: white;">
<?php
}?>
<td><?php echo $count;?></td>
<td id="nombreCelda<?php echo $row2['id']; ?>"><?php echo $row2['nombre']; ?></td>
<td id="descripcionCelda<?php echo $row2['id']; ?>"><?php echo $row2['descripcion']; ?></td>
<td><?php echo $row2['fecha']; ?> </td>
<td><button type="button" id="irAPlaylist<?php echo $row2['id'];?>" class="btn btn-primary"><?php echo L::Vamonos; ?></button></td>
<td><button type="button" class="btn btn-warning" id="editar<?php echo $row2['id'];?>" data-toggle="modal" data-target="#myModal"><?php echo L::Cambiale_we; ?></button>
<input type="hidden" name="id" value="<?php echo $row2['id']; ?>"/>
<button type="submit" name="accion" onclick="return confirmar();" value="Eliminar" class="btn btn-danger"><?php echo L::Quitalo_YA; ?></button></td>
</tr>
</form>
<?php
$count++;
}
}
}
?>
<!--<tr class="table-success">.-->
<!--<tr class="table-primary">-->
</table>
<!-- Trigger the modal with a button -->
<div id="myModal" class="modal fade" role="dialog">
<div class="modal-dialog">
<!-- Modal content-->
<div class="modal-content">
<form action="modificar_lista_playlist.php" method="POST">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal">×</button>
<h2 class="letra-negra modal-title "><?php echo L::Datos_de_la_lista; ?></h2>
</div>
<div class="modal-body">
<input type="hidden" name="id" id="idPlaylist" value=""/>
<label class="letra-negra" for="nombre"><?php echo L::Nombre; ?></label>
<input id="nombre" type="text" name="nombre" class="form-control" maxlength="50"/>
<br/>
<label class="letra-negra" for="descripcion"><?php echo L::Descripcion; ?></label>
<input id="descripcion" type="text" name="descripcion" class="form-control" maxlength="100"/>
<br/>
</div>
<div class="modal-footer">
<button type="submit" id="guardar" class="btn btn-success" name="accion" value=""><?php echo L::Guardar; ?></button>
<button type="button" class="btn btn-danger" data-dismiss="modal"><?php echo L::Cancelar; ?></button>
</div>
</form>
</div>
</div>
</div>
</div>
</div>
</div>
<script src="js/jquery-3.2.1.min.js"> </script>
<script src="js/bootstrap.min.js"> </script>
<script type="text/javascript">
$(function(){
$('#header').load('header.php',function(){
document.getElementById("nombreUsuario").innerHTML = '<?php echo $_SESSION['name'] ?> <span class="caret"></span>';
document.getElementById("playlist").className += "active";
$('.cambioIdioma').each(function(index){
$(this).attr('href',$(this).attr('href') + '<?php echo basename($_SERVER['PHP_SELF']); ?>' +location.search)
});
//class="active"
});
var codigoMensaje = null;
tmp = [];
location.search.substr(1).split("&").forEach(function (item) {
tmp = item.split("=");
if (tmp[0] === "mensaje")
codigoMensaje = decodeURIComponent(tmp[1]);
});
if(codigoMensaje != ''){
if(codigoMensaje == '1'){
document.getElementById("mensaje").className += " alert-success";
document.getElementById('mensajeTexto').innerHTML = '<?php echo L::Sugoi; ?>';
}
if(codigoMensaje == '2'){
document.getElementById("mensaje").className += " alert-success";
document.getElementById('mensajeTexto').innerHTML = '<?php echo L::Se_la_metiste_con_toda; ?>';
}
if(codigoMensaje=='3'){
document.getElementById("mensaje").className += " alert-success";
document.getElementById('mensajeTexto').innerHTML = '<?php echo L::No_dejaste_nada; ?>';
}
if(codigoMensaje=='4'){
document.getElementById("mensaje").className += " alert-danger";
document.getElementById('mensajeTexto').innerHTML = '<?php echo L::Que_GRAN_Fallo; ?>';
}
}
$('button').each(function(){
if($( this ).attr('id')){
if($( this ).attr('id').match(/editar/) ) {
$( this ).click(function(){
var video = $(this).attr('id').replace("editar","");
document.getElementById('idPlaylist').value= video;
document.getElementById('nombre').value = document.getElementById('nombreCelda'+video).innerText
document.getElementById('descripcion').value = document.getElementById('descripcionCelda'+video).innerText
document.getElementById('guardar').value="Guarda";
});
}
if($( this ).attr('id').match(/irAPlaylist/) ) {
$( this ).click(function(){
var playlist = $(this).attr('id').replace("irAPlaylist","");
var win = window.open("playlist.php?playlist="+playlist);
});
}
if($( this ).attr('id')== "irAInsertar"){
document.getElementById('guardar').value="Inserta";
}
}
});
});
function confirmar() {
return confirm('<?php echo L::Esta_seguro_que_quiere_eliminar_esta_lista; ?>');
}
</script>
</body>
</html><file_sep>/mobile/actualizar_ganador.php
<?php
if($_SERVER["REQUEST_METHOD"]=='POST'){
require_once('base.php');
$estado = $_POST['estado'];
$intentos = $_POST['intentos'];
$id = $_POST['id'];
$query = "UPDATE loteria SET ganador='$estado',intentos='$intentos' WHERE id='$id' ";
$final = mysqli_query($con, $query);
if(!$final){
echo 'No se pudo probar el resultado';
}else{
if($estado=='N'){
echo 'Lo siento kokoro no esta vez';
}elseif($estado=='S'){
echo 'Tenemos un ganador!!!!!!!!!, reclamale a tu kokoro tu premio';
}elseif($estado=='F'){
echo 'Tenemos un ganador con FUUUUUUUAAAAAAA!!!!!!!!!,reclamale a tu kokoro tu premio';
}
}
mysqli_close($con);
}
?><file_sep>/galeria.php
<?php
include 'sesion.php';
require_once 'i18n.init.php';
?>
<html lang="es">
<head>
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
<link rel="stylesheet" href="css/bootstrap.min.css"/>
<link rel="stylesheet" href="css/bootstrap-theme.min.css"/>
<link rel="stylesheet" href="css/styles.css"/>
<link rel="icon" type="image/png" sizes="32x32" href="images/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="images/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="images/favicon-16x16.png">
<title>:v</title>
<header id="header">
</header>
</head>
<body>
<div class="container-fluid">
<div class="row">
<div class="col-md-2 hidden-xs hidden-sm fondo-lado">
<div class="fondo-lado sidebar-nav-fixed pull-right affix">
<img src="images/delDESTINO.png" alt="delDESTINO" class="img-responsive">
</div>
</div>
<div class="col-md-10 espacio-contenido">
</div>
</div>
</div>
<script src="js/jquery-3.2.1.min.js"> </script>
<script src="js/bootstrap.min.js"> </script>
<script type="text/javascript">
$(function(){
$('#header').load('header.php',function(){
document.getElementById("nombreUsuario").innerHTML = '<?php echo $_SESSION['name'] ?> <span class="caret"></span>';
document.getElementById("galeria").className += "active";
$('.cambioIdioma').each(function(index){
$(this).attr('href',$(this).attr('href') + '<?php echo basename($_SERVER['PHP_SELF']); ?>' +location.search)
});
//class="active"
});
});
</script>
</body>
</html><file_sep>/index.php
<?php
include 'sesion.php';
include 'conexion.php';
require_once 'i18n.init.php';
$sql = 'SELECT publicacion.id,descripcion,fecha,titulo,nombre FROM publicacion JOIN usuario ON usuario.id=publicacion.id_usuario order by fecha desc,id desc';
?>
<html lang="es">
<head>
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
<link rel="stylesheet" href="css/bootstrap.min.css"/>
<link rel="stylesheet" href="css/bootstrap-theme.min.css"/>
<link rel="stylesheet" href="css/styles.css"/>
<link rel="icon" type="image/png" sizes="32x32" href="images/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="images/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="images/favicon-16x16.png">
<title>:v</title>
<header id="header">
</header>
</head>
<body>
<div class="container-fluid">
<div class="row">
<div class="col-md-2 hidden-xs hidden-sm fondo-lado">
<div class="fondo-lado sidebar-nav-fixed pull-right affix">
<img src="images/delDESTINO.png" alt="delDESTINO" class="img-responsive">
</div>
</div>
<div class="col-md-10 espacio-contenido">
<div id="mensaje" style="display:none;" class="alert alert-dismissible" role="alert">
<strong><?php echo L::Mensaje; ?> </strong><div style="display:inline;" id="mensajeTexto"></div>
</div>
<form action="insertarPublicacion.php" method="POST">
<div class="form-group">
<label for="titulo"><?php echo L::Aqui_Un_Titulo_Genial; ?></label>
<input id="titulo" type="text" name="titulo" class="form-control" maxlength="50"/>
</div>
<div class="form-group">
<label for="descripcion"><?php echo L::Escribe_lo_que_quieras_Amor; ?></label>
<textarea id="descripcion" name="descripcion" class="form-control" rows="5" maxlength="1000"></textarea>
</div>
<div class="form-group">
<button type="submit" class="btn btn-primary"><?php echo L::Mandalo; ?></button>
<button type="button" id="noti" class="btn btn-danger"><?php echo L::Notificale_a_tu_kokoro; ?></button>
</div>
</form>
<?php
if($result = mysqli_query($conexion, $sql)){
if(mysqli_num_rows($result) > 0){
while($row = mysqli_fetch_array($result)){?>
<form action="modificarPublicacion.php" method="POST">
<br/>
<div class="form-group" style="padding-bottom: 3%;">
<div class="col-md-4">
<input id="titulo<?php echo $row['id'];?>" type="text" name="titulo" class="form-control" value="<?php echo $row['titulo'];?>" maxlength="50" disabled/>
</div>
<div class="col-md-4">
<input id="fecha<?php echo $row['id'];?>" type="text" name="fecha" class="form-control" value="<?php echo $row['fecha'];?>" maxlength="50" disabled/>
</div>
<div class="col-md-4">
<input id="usuario<?php echo $row['id'];?>" type="text" name="usuario" class="form-control" value="<?php echo $row['nombre'];?>" maxlength="50" disabled/>
</div>
</div>
<div class="form-group">
<input type="hidden" name="id" value="<?php echo $row['id'];?>"/>
<textarea style="display:none;" id="descripcion<?php echo $row['id'];?>" name="descripcion" class="form-control" rows="5" maxlength="1000" ><?php echo $row['descripcion'];?></textarea>
</div>
<div class="form-group">
<button style="display:none;" type="submit" id="guardar<?php echo $row['id'];?>" class="btn btn-success" name="accion" value="Guarda"><?php echo L::Guardar; ?></button>
<button style="display:none;" type="submit" id="eliminar<?php echo $row['id'];?>" class="btn btn-danger" name="accion" value="Elimina"><?php echo L::Eliminar; ?></button>
</div>
</form>
<br/>
<div id="html<?php echo $row['id'];?>" contenteditable="false"><?php echo $row['descripcion'];?></div>
<br/>
<a id="editar<?php echo $row['id'];?>" href="#titulo<?php echo $row['id'];?>" class="btn btn-warning"><?php echo L::Cual_editado_papu_V; ?></a>
<?php
}
}
}
mysqli_close($conexion);
?>
</div>
</div>
</div>
<script src="js/jquery-3.2.1.min.js"> </script>
<script src="js/bootstrap.min.js"> </script>
<script type="text/javascript">
// Page bootstrap: injects the shared header, shows the status banner selected
// by the ?mensaje= query parameter, and wires up the per-post edit toggles.
$(function(){
// Load the shared navbar, then personalise it for this page and user.
$('#header').load('header.php',function(){
document.getElementById("nombreUsuario").innerHTML = '<?php echo $_SESSION['name'] ?> <span class="caret"></span>';
document.getElementById("index").className += "active";
// Make each language-switch link return to the current page (query included).
$('.cambioIdioma').each(function(index){
$(this).attr('href',$(this).attr('href') + '<?php echo basename($_SERVER['PHP_SELF']); ?>' +location.search)
});
});
// ?mensaje=N picks which feedback banner to show (1/2 = error, 3/4 = success).
var codigoMensaje = location.search.substr(1).split("?");
codigoMensaje = codigoMensaje[0].replace("mensaje=","");
if(codigoMensaje != ''){
document.getElementById('mensaje').style.display ='block' ;
if(codigoMensaje == '1'){
document.getElementById("mensaje").className += " alert-danger";
document.getElementById('mensajeTexto').innerHTML = '<?php echo L::No_seas_we_v_Te_falta_el_titulo; ?>';
}
if(codigoMensaje == '2'){
document.getElementById("mensaje").className += " alert-danger";
document.getElementById('mensajeTexto').innerHTML = '<?php echo L::Estas_bien_pendejo_ni_publicacion_le_hiciste; ?>';
}
if(codigoMensaje=='3'){
document.getElementById("mensaje").className += " alert-success";
document.getElementById('mensajeTexto').innerHTML = '<?php echo L::Todo_bonito; ?>';
}
if(codigoMensaje=='4'){
document.getElementById("mensaje").className += " alert-success";
document.getElementById('mensajeTexto').innerHTML = '<?php echo L::Oki; ?>';
}
}
// Shortcut to the Firebase console (used for sending notifications).
$('#noti').click(function(){
var win = window.open("https://console.firebase.google.com/", '_blank');
win.focus();
});
// Edit-mode toggle: anchors with id "editar<postId>" switch between the
// rendered post ("html<postId>") and the editable textarea plus save/delete
// buttons for that post.
$('a').each(function(){
if($( this ).attr('id')){
if($( this ).attr('id').match(/editar/) ) {
$( this ).click(function(){
var publicacion = $(this).attr('id').replace("editar","");
if(document.getElementById('descripcion'+publicacion).style.display == 'block'){
// Leave edit mode: hide the editor controls, show the rendered post.
document.getElementById('descripcion'+publicacion).style.display ='none' ;
document.getElementById('guardar'+publicacion).style.display ='none' ;
document.getElementById('eliminar'+publicacion).style.display ='none' ;
document.getElementById('html'+publicacion).style.display ='block' ;
$('#titulo'+publicacion).prop("disabled", true);
}
else{
// Enter edit mode: show the editor controls, hide the rendered post.
document.getElementById('descripcion'+publicacion).style.display ='block' ;
document.getElementById('guardar'+publicacion).style.display ='inline' ;
document.getElementById('eliminar'+publicacion).style.display ='inline' ;
document.getElementById('html'+publicacion).style.display ='none' ;
$('#titulo'+publicacion).prop("disabled", false);
}
});
}
}
});
});
</script>
</body>
</html>
<?php
// Session guard, included by every protected page: redirects anonymous
// visitors to the login view and exposes the default $playlist JSON-ish blob.
session_start();
if(!(isset($_SESSION['name']) && isset($_SESSION['id']))){
    header('Location: loginVista.php');
    // FIX: stop execution after the redirect header. Without exit the
    // including page kept running and emitted its content to the
    // unauthenticated client.
    exit;
}
$playlist = "[{title:'Paramore-Decode',url:'https://www.youtube.com/watch?v=RvnkAtWcKYg'},{title:'Silverchair-Ana song',url:'https://www.youtube.com/watch?v=zNK_r2QAXAo'}]";
?>
<?php
// Mobile API login: matches email/password against the usuario table and
// returns the matching rows as JSON ({"response":[...]}).
if($_SERVER['REQUEST_METHOD']=='POST'){
    require_once('base.php');
    $email=$_POST['email'];
    // FIX(review): the original read $_POST['<PASSWORD>'], which is a
    // redaction artifact and can never match a real form field; 'password'
    // is the assumed field name — confirm against the mobile client.
    $password=$_POST['password'];
    // SECURITY(review): passwords appear to be compared in clear text inside
    // SQL; they should be hashed (password_hash/password_verify).
    $statement = mysqli_prepare($con, "SELECT id, nombre,correo FROM usuario WHERE correo=? AND contrasena =?");
    mysqli_stmt_bind_param($statement, "ss", $email,$password);
    mysqli_stmt_execute($statement);
    mysqli_stmt_store_result($statement);
    mysqli_stmt_bind_result($statement,$id,$nombre,$correo);
    $response = array();
    while(mysqli_stmt_fetch($statement)){
        $response[] = array("nombre"=>$nombre,"id"=>$id,"correo"=>$correo);
    }
    // Release the statement before emitting the response.
    mysqli_stmt_close($statement);
    echo json_encode(array('response'=>$response));
}
?>
<?php
// Mobile API: returns, as JSON, the newest still-open lottery round — the
// latest row that has attempts left and no winner yet (at most one row).
if($_SERVER['REQUEST_METHOD']=='POST'){
require_once('base.php');
// Force UTF-8 so accented text in 'mensaje'/'palabra' survives the round trip.
mysqli_query($con,'SET CHARACTER SET utf8');
$statement = mysqli_prepare($con, "SELECT id,valor,mensaje,valor_maximo,palabra,intentos FROM loteria WHERE intentos>0 AND ganador='N' ORDER BY fecha DESC LIMIT 1");
mysqli_stmt_execute($statement);
mysqli_stmt_store_result($statement);
mysqli_stmt_bind_result($statement,$id,$valor,$mensaje,$valor_maximo,$palabra,$intentos);
$response = array();
// NOTE(review): the JSON key "ganador" carries the 'valor' column —
// presumably the winning value; confirm the naming against the client.
while(mysqli_stmt_fetch($statement)){
$response[] = array("id"=>$id,"ganador"=>$valor,"mensaje"=>$mensaje,"maximo"=>$valor_maximo,"palabra"=>$palabra,"intentos"=>$intentos);
}
echo json_encode(array('response'=>$response));
}
?>
<?php
// Test script: fetches user #2's device token and pushes a test FCM
// notification to it.
require "base.php";
global $con;
$query = "SELECT cell_token from usuario where id=2";
$result = mysqli_query($con, $query);
$number_of_rows = mysqli_num_rows($result);
$tokens = array();
// FIX: the original tested ">= 0", which is always true and therefore a
// meaningless guard; only collect tokens when the query returned rows.
if($number_of_rows > 0) {
    while ($row = mysqli_fetch_assoc($result)) {
        $tokens[] = $row['cell_token'];
    }
}
$message = array("message" => "Kokorooo ");
$message_status = send_notification($tokens, $message);

/**
 * POSTs a data message to the FCM legacy HTTP endpoint.
 *
 * @param array $tokens  Device registration ids.
 * @param array $message Data payload (becomes the "data" key of the request).
 * @return string|false  Raw response body from FCM (result of curl_exec).
 */
function send_notification ($tokens, $message)
{
    $url = 'https://fcm.googleapis.com/fcm/send';
    $fields = array(
        'registration_ids' => $tokens,
        'data' => $message
    );
    // SECURITY(review): the FCM server key is hard-coded in source; it is now
    // public and should be rotated and moved to configuration.
    $headers = array(
        'Authorization:key = AIzaSyBEyFo4jaGGIdyyoJOadxzU1Lzl15dOtmE',
        'Content-Type: application/json'
    );
    $ch = curl_init();
    curl_setopt($ch, CURLOPT_URL, $url);
    curl_setopt($ch, CURLOPT_POST, true);
    curl_setopt($ch, CURLOPT_HTTPHEADER, $headers);
    curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
    // SECURITY(review): TLS verification is disabled, exposing the request to
    // MITM; kept as-is to preserve behaviour on this host, but it should be
    // re-enabled once the host's CA bundle is fixed.
    curl_setopt ($ch, CURLOPT_SSL_VERIFYHOST, 0);
    curl_setopt($ch, CURLOPT_SSL_VERIFYPEER, false);
    curl_setopt($ch, CURLOPT_POSTFIELDS, json_encode($fields));
    $result = curl_exec($ch);
    if ($result === FALSE) {
        die('Curl failed: ' . curl_error($ch));
    }
    curl_close($ch);
    echo $result;
    return $result;
}
?>
<?php
// Bootstraps the i18n library: translations live in lang/lang_{LANGUAGE}.json,
// compiled language classes are cached under langcache/, and Spanish ('es')
// is the fallback language. Exposes the generated L:: constants to includers.
require_once 'i18n.class.php';
$i18n = new i18n('lang/lang_{LANGUAGE}.json', 'langcache/', 'es');
$i18n->init();
?>
<?php
// Shared top navigation bar. Requires an authenticated session (sesion.php)
// and the translation constants (i18n.init.php). Loaded into #header via AJAX.
include 'sesion.php';
include 'i18n.init.php';
?>
<nav class="navbar navbar-inverse margin-0 navbar-fixed-top">
<div class="container-fluid">
<!-- Hamburger toggle shown on small screens -->
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar1" aria-expanded="false">
<span class="sr-only"><?php echo L::Navegacion; ?> </span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
</div>
<div class="collapse navbar-collapse" id="navbar1">
<!-- Main sections -->
<ul class="nav navbar-nav">
<li id="index"><a id="muro" href="index.php"><?php echo L::Muro; ?> </a></li>
<li id="playlist"><a href="lista_playlist.php"><?php echo L::Playlist; ?> </a></li>
<li id="galeria"><a href="galeria.php"><?php echo L::Galeria; ?> </a></li>
</ul>
<!-- Language switches (the current page is appended to 'pag' by index.php's
     footer script) and the user dropdown -->
<ul class="nav navbar-nav navbar-right">
<li><a class="cambioIdioma" href="<?php echo 'cambiarIdioma.php?lang=es&pag='; ?>"><img src="images/mexicoflag.png" alt="Español"></a></li>
<li><a class="cambioIdioma" href="<?php echo 'cambiarIdioma.php?lang=en&pag='; ?>"><img src="images/britishflag.png" alt="Español"></a></li>
<li><a class="cambioIdioma" href="<?php echo 'cambiarIdioma.php?lang=ja&pag='; ?>"><img src="images/japanflag.png" alt="Español"></a></li>
<li class="dropdown">
<!-- #nombreUsuario is filled in client-side with the session user's name -->
<a id="nombreUsuario" href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false"></a>
<ul class="dropdown-menu">
<li><a href="#" id="texto"><?php echo L::Modificar_Perfil; ?> </a></li>
<li><a href="cerrarSesion.php" id="cerrarSesion"><?php echo L::Cerrar_Sesion; ?> </a></li>
</ul>
</li>
</ul>
</div>
</div>
</nav>
<file_sep>/modificarPublicacion.php
<?php
// Updates or deletes a wall post (publicacion) depending on the submitted
// action, then redirects back to the wall.
include 'conexion.php';
$id=$_POST['id'];
$descripcion=$_POST['descripcion'];
$titulo=$_POST['titulo'];
$accion=$_POST['accion'];
if($accion=="Guarda"){
    $sql = "SET time_zone ='-5:00'";
    if($conexion->query($sql) === FALSE){
        echo "Contactese con el administrador del sistema error al realizar operacion en base de datos: ". $sql . "<br>" . $conexion->error;
    }
    // SECURITY FIX: the original concatenated POST data straight into the SQL
    // string, allowing SQL injection; use a prepared statement instead.
    $stmt = $conexion->prepare("UPDATE publicacion SET descripcion=?, titulo=?, fecha= now() WHERE id=?");
    $stmt->bind_param("ssi", $descripcion, $titulo, $id);
}
else{
    // Any action other than "Guarda" deletes the post.
    $stmt = $conexion->prepare("DELETE FROM publicacion WHERE id=?");
    $stmt->bind_param("i", $id);
}
if($stmt->execute()){
    header('Location: index.php?mensaje=4');
    exit;
} else {
    echo "Contactese con el administrador del sistema error al realizar operacion en base de datos" . "<br>" . $conexion->error;
}
mysqli_close($conexion);
?> 
<?php
// Stores the requested UI language in the session, then sends the user back
// to the page they came from (or to the wall).
include 'sesion.php';
if(isset($_GET['lang']))
    $_SESSION['lang'] = $_GET['lang'];
if(isset($_GET['pag']))
    // SECURITY FIX: the original passed the raw 'pag' parameter to the
    // Location header, an open-redirect vector. basename() strips any path or
    // host component while preserving "page.php?query" values built by the
    // header's language links.
    header('Location: '.basename($_GET['pag']));
else
    header('Location: index.php');
// Stop execution after issuing the redirect header.
exit;
?>
<?php
// Web login: validates email/password against the usuario table, starts the
// session on success, and redirects with an error code otherwise.
include 'conexion.php';
$usuario=$_POST['usuario'];
$contrasena=$_POST['contrasena'];
if ($usuario==NULL || $contrasena==NULL) {
    header('Location: loginVista.php?mensaje=1');
    exit;
}
else{
    // SECURITY FIX: the original interpolated $usuario into the SQL string,
    // allowing SQL injection (and hence authentication bypass); use a
    // prepared statement instead.
    $stmt = mysqli_prepare($conexion, "SELECT correo,contrasena,nombre,id FROM usuario WHERE correo=?");
    mysqli_stmt_bind_param($stmt, "s", $usuario);
    mysqli_stmt_execute($stmt);
    mysqli_stmt_bind_result($stmt, $correo, $password, $nombre, $id);
    // FIX: the original left $password undefined when no row matched, which
    // raised notices and compared against an unset variable.
    $found = mysqli_stmt_fetch($stmt);
    mysqli_stmt_close($stmt);
    // SECURITY(review): passwords are stored and compared in clear text;
    // migrate to password_hash()/password_verify().
    if($found && $password==$contrasena){
        session_start();
        $_SESSION['name']=$nombre;
        $_SESSION['id']=$id;
        header('Location: index.php');
    }
    else {
        header('Location: loginVista.php?mensaje=2');
    }
    exit;
}
?>
<?php
// Creates, updates or deletes a playlist (deleting its songs first when
// removing), then redirects back to the playlist overview with a status code:
// 1 = updated, 2 = inserted, 3 = deleted, 4 = missing name.
include 'conexion.php';
include 'sesion.php';
$id=$_POST['id'];
$nombre=$_POST['nombre'];
$descripcion=$_POST['descripcion'];
$accion=$_POST['accion'];
if($accion !="Eliminar" && ($nombre == "")){
    // A name is mandatory for insert/update.
    header('Location: lista_playlist.php?mensaje=4');
    exit;
}
else{
    $sql = "SET time_zone ='-5:00'";
    if($conexion->query($sql) === FALSE){
        echo "Contactese con el administrador del sistema error al realizar operacion en base de datos: ". $sql . "<br>" . $conexion->error;
    }else{
        $mensaje=0;
        $ok = false;
        // SECURITY FIX: the original built every query by concatenating POST
        // data into the SQL string, allowing SQL injection; all queries now
        // use prepared statements.
        if($accion=="Guarda"){
            $stmt = $conexion->prepare("UPDATE lista_playlist SET nombre=?, descripcion=?, fecha=now(), id_usuario=? WHERE id=?");
            $stmt->bind_param("ssii", $nombre, $descripcion, $_SESSION['id'], $id);
            $ok = $stmt->execute();
            $mensaje=1;
        }
        else if($accion=="Inserta"){
            $stmt = $conexion->prepare("INSERT INTO lista_playlist(nombre,descripcion,fecha,id_usuario) VALUES (?,?,now(),?)");
            $stmt->bind_param("ssi", $nombre, $descripcion, $_SESSION['id']);
            $ok = $stmt->execute();
            $mensaje=2;
        }
        else{
            // Delete the list's songs first, then the list itself.
            $stmt = $conexion->prepare("DELETE FROM playlist WHERE lista_playlist=?");
            $stmt->bind_param("i", $id);
            if($stmt->execute()){
                $stmt = $conexion->prepare("DELETE FROM lista_playlist WHERE id=?");
                $stmt->bind_param("i", $id);
                $ok = $stmt->execute();
                $mensaje=3;
            }
        }
        if($ok){
            header('Location: lista_playlist.php?mensaje='.$mensaje);
            exit;
        } else {
            echo "Contactese con el administrador del sistema error al realizar operacion en base de datos" . "<br>" . $conexion->error;
        }
    }
}
mysqli_close($conexion);
?> 
<?php
// Opens the MySQL connection used by the mobile API endpoints ($con).
// SECURITY(review): host/user/password are committed in source control; move
// them to configuration outside the web root and rotate the password.
$con = mysqli_connect("fdb15.biz.nf","2238231_kokoball","pikazard2108");
if (!$con) {
// NOTE(review): mysqli_error() expects the link argument here;
// mysqli_connect_error() is the correct call for connection failures.
die("No se ha podido conectar con base de datos ".mysqli_error());
}
mysqli_select_db($con,"2238231_kokoball");
?>
<?php
// Shared MySQL connection for the web application ($conexion).
// SECURITY(review): production credentials are committed in source control;
// move them out of the repository and rotate the password.
$conexion = mysqli_connect("fdb15.biz.nf","2238231_kokoball","pikazard2108");
if (!$conexion) {
// NOTE(review): mysqli_error() expects the link argument here;
// mysqli_connect_error() is the correct call for connection failures.
die("No se ha podido conectar con base de datos ".mysqli_error());
}
mysqli_select_db($conexion,"2238231_kokoball");
// Commented-out localhost variant kept for local development:
/*$conexion = mysqli_connect("localhost","root","");
if (!$conexion) {
die("No se ha podido conectar con base de datos ".mysqli_error());
}
mysqli_select_db($conexion,"2238231_kokoball");*/
/*
fdb15.biz.nf
pikazard2108
*/
?>
"PHP"
] | 18 | PHP | 22edwardr/kokoroball | 3249f07da47e3287f531fd89ebbf9654b4ff250f | 103e8e308f1051eaed72e16e572200a26d32308b |
refs/heads/master | <repo_name>niisara/angular-observable<file_sep>/src/app/next.component.ts
import { Component, Input } from '@angular/core';
import { Observable } from 'rxjs';
@Component({
selector: 'next',
template: `
<h1>Next</h1>
`,
styles: [
`
h1 {
font-family: Lato;
}
`
]
})
export class NextComponent {
  /**
   * Demo: builds a cold Observable that synchronously emits 1..10 and logs
   * each value on subscription.
   */
  ngOnInit() {
    // Subscriber function: pushes 1..10 to the observer, then completes.
    const countOneToTen = (observer) => {
      // FIX: use block-scoped `let` instead of `var` for the loop index.
      for (let i = 1; i <= 10; i++) {
        observer.next(i);
      }
      // FIX: the original never called complete(), so subscribers of this
      // finite sequence were never told that it had ended.
      observer.complete();
      return { unsubscribe() {} };
    };
    const sqnc = new Observable(countOneToTen);
    sqnc.subscribe(x => {
      console.log(x);
    });
  }
}
| ea88c6a0639d934a99804d93685270c79ea4079d | [
"TypeScript"
] | 1 | TypeScript | niisara/angular-observable | e64c35c6592ea13166b4b16ac5a3c4167f3acf94 | a2499402a50e3a5f793f87a3c17bd76401dfb682 |
refs/heads/master | <repo_name>Sleicreider/ShaderTest<file_sep>/ShaderTest/Classes/MenuScene.h
//
// MenuScene.h
// ShaderTest
//
// Created by <NAME> on 29.06.15.
//
//
#include "FrameworkScene.h"
#ifndef __ShaderTest__MenuScene__
#define __ShaderTest__MenuScene__
// Menu scene: shows a background sprite and a test sprite with a custom GLSL
// shader applied (see MenuScene.cpp). Built on the project's FrameworkScene.
class MenuScene : public FrameworkScene
{
public:
MenuScene();
virtual ~MenuScene();
// Cocos2d boot pattern: wraps a MenuScene layer in a new Scene.
static Scene* createScene();
virtual bool init();
CREATE_FUNC(MenuScene);
private:
protected:
// Per-frame hook inherited from FrameworkScene; currently empty.
virtual void Tick(float delta);
};
#endif /* defined(__ShaderTest__MenuScene__) */
<file_sep>/ShaderTest/Classes/SpecialSprite.cpp
//
// SpecialSprite.cpp
// Hazelnut
//
// Created by <NAME> on 21.01.15.
//
//
#include "SpecialSprite.h"
// Starts with the scale animation's lower bound (0.6), growing, and the
// animation switched off; registers for per-frame update() callbacks.
SpecialSprite::SpecialSprite()
: currScale(0.6)
, onIncrease(true)
, withAnimation(false)
{
this->scheduleUpdate();
}
SpecialSprite::~SpecialSprite()
{
}
// Standard cocos2d two-phase init; delegates to Node::init().
bool SpecialSprite::init()
{
if(!Node::init())
{
return false;
}
return true;
}
// Factory: builds an autoreleased SpecialSprite from an image file, or
// returns nullptr when allocation or texture loading fails.
SpecialSprite* SpecialSprite::CreateSprite(const std::string& filename)
{
    auto* result = new (std::nothrow) SpecialSprite();
    if (result == nullptr || !result->initWithFile(filename))
    {
        CC_SAFE_DELETE(result);
        return nullptr;
    }
    result->autorelease();
    return result;
}
// Per-frame animation step. SCALE pulses the sprite between 0.6 and 1.0;
// ROTATE spins it 2 degrees per frame, wrapping at 360.
// NOTE(review): `delta` is ignored, so both animations run at a
// frame-rate-dependent speed — confirm whether that is intended.
void SpecialSprite::update(float delta)
{
if(withAnimation)
{
if(sprite_animation_ == SCALE)
{
// Grow or shrink by a fixed per-frame step...
if(onIncrease)
{
currScale += 0.02;
}
else
{
currScale -= 0.02;
}
// ...and reverse direction at the 0.6 / 1.0 bounds.
if(onIncrease && currScale >= 1.0)
{
onIncrease = false;
}
else if(!onIncrease && currScale <= 0.6)
{
onIncrease = true;
}
this->cocos2d::Node::setScale(currScale);
}
else if(sprite_animation_ == ROTATE)
{
current_rotation_value_ += 2.0;
if(current_rotation_value_ >= 360)
{
current_rotation_value_ = 0;
}
this->setRotation(current_rotation_value_);
}
}
}
// Resets rotation and scale to their rest values.
// NOTE(review): scale is reset to 1.0 while the constructor and the SCALE
// animation's lower bound use 0.6 — confirm which is intended.
void SpecialSprite::Reset()
{
current_rotation_value_ = 0.0;
currScale = 1.0;
setScale(currScale);
setRotation(current_rotation_value_);
}
<file_sep>/ShaderTest/Classes/AppDelegate.cpp
#include "AppDelegate.h"
#include "MenuScene.h"
USING_NS_CC;
// Nothing to set up or tear down; lifecycle work happens in the
// applicationDidFinishLaunching / background / foreground hooks below.
AppDelegate::AppDelegate() {
}
AppDelegate::~AppDelegate()
{
}
//if you want a different context,just modify the value of glContextAttrs
//it will takes effect on all platforms
// Requests an RGBA8888 color buffer with a 24-bit depth and 8-bit stencil
// buffer for the GL context on every platform.
void AppDelegate::initGLContextAttrs()
{
//set OpenGL context attributions,now can only set six attributions:
//red,green,blue,alpha,depth,stencil
GLContextAttrs glContextAttrs = {8, 8, 8, 8, 24, 8};
GLView::setGLContextAttrs(glContextAttrs);
}
// If you want to use packages manager to install more packages,
// don't modify or remove this function
// Placeholder required by the cocos2d packages manager; intentionally empty.
static int register_all_packages()
{
return 0; //flag for packages manager
}
// Device-resolution entry: a reference screen size and the asset directory
// authored for it.
typedef struct tagResource
{
cocos2d::Size size;
char directory[100];
}Resource;
// Resolution tiers from smallest to largest target device.
static Resource smallResource = { cocos2d::Size(480, 320), "iphone" };
static Resource mediumResource = { cocos2d::Size(1024, 768), "ipad" };
static Resource largeResource = { cocos2d::Size(2048, 1536), "ipadhd" };
static Resource largeResourceS5 = { cocos2d::Size(1920, 1080), "galaxyS5" };
// All layout is authored against this 1920x1080 design resolution.
static cocos2d::Size designResolutionSize = cocos2d::Size(1920, 1080);
// App entry point: sets up the GL view, picks the asset tier (MID/HI) and
// content scale factor for the real screen size, and boots the menu scene.
bool AppDelegate::applicationDidFinishLaunching() {
    // initialize director
    auto director = Director::getInstance();
    auto glview = director->getOpenGLView();
    if(!glview) {
        glview = GLViewImpl::create("My Game");
        director->setOpenGLView(glview);
    }

#if (CC_TARGET_PLATFORM == CC_PLATFORM_WIN32 || CC_TARGET_PLATFORM == CC_PLATFORM_MAC)
    // Desktop builds simulate a fixed device resolution (iPhone4 here);
    // change this line to test other device sizes.
    glview->setFrameSize(960, 640); //iPhone4
#endif

    // All layout is authored at 1920x1080; fixed-width scaling preserves the
    // design width on every aspect ratio.
    glview->setDesignResolutionSize(designResolutionSize.width, designResolutionSize.height, kResolutionFixedWidth);

    Size frameSize = glview->getFrameSize();
    CCLOG("Display = %f x %f",frameSize.width,frameSize.height);

    // NOTE(review): the original first chose a content scale factor from the
    // iPad/S5 resource table and then unconditionally overwrote it before the
    // MID/HI selection below; that dead branch chain (and the unused aspect
    // ratio) has been removed — only the selection below ever took effect.
    std::string path = "";
#ifdef ETC1
    path = "ETC1/";
#else
    path = "PNG/";
#endif
    std::vector<std::string> search_paths;
    if(frameSize.width <= 1280)
    {
        // Small screens load the MID assets authored for 540p.
        search_paths.push_back(path + "MID");
        FileUtils::getInstance()->setSearchResolutionsOrder(search_paths);
        director->setContentScaleFactor(540/designResolutionSize.height);
    }
    else
    {
        // Everything larger loads the HI assets authored for 1080p.
        search_paths.push_back(path + "HI");
        FileUtils::getInstance()->setSearchResolutionsOrder(search_paths);
        director->setContentScaleFactor(1080/designResolutionSize.height);
    }

    // turn on display FPS
    director->setDisplayStats(true);
    // set FPS. the default value is 1.0/60 if you don't call this
    director->setAnimationInterval(1.0 / 60);

    register_all_packages();

    // create a scene. it's an autorelease object
    auto scene = MenuScene::createScene();
    // run
    director->runWithScene(scene);
    return true;
}
// This function will be called when the app is inactive. When comes a phone call,it's be invoked too
// This function will be called when the app is inactive. When comes a phone call,it's be invoked too
void AppDelegate::applicationDidEnterBackground() {
Director::getInstance()->stopAnimation();
// if you use SimpleAudioEngine, it must be pause
// SimpleAudioEngine::getInstance()->pauseBackgroundMusic();
}
// this function will be called when the app is active again
// this function will be called when the app is active again
void AppDelegate::applicationWillEnterForeground() {
Director::getInstance()->startAnimation();
// if you use SimpleAudioEngine, it must resume here
// SimpleAudioEngine::getInstance()->resumeBackgroundMusic();
}
<file_sep>/ShaderTest/Classes/FStateMachine.h
//
// FStateMachine.h
// Hazelnut
//
// Created by <NAME> on 25.03.15.
//
//
#ifndef __Hazelnut__FStateMachine__
#define __Hazelnut__FStateMachine__
#include <cocos2d.h>
#include <unordered_map>
#include "ITickable.h"
#include <vector>
USING_NS_CC;
#pragma warn message("STATE MACHINE RUNS EVEN IF THE CLASS WHO CALLS IT DOESNT EXIST? OR ISN'T RUNNING TEST PLS!")
// Minimal finite-state machine driven by member-function pointers: states are
// registered by string id against one owner instance of T, and Tick() invokes
// the handler of the current state each frame.
template <typename T> class FStateMachine : public ITickable
{
private:
// state id -> per-frame handler (a member function of T taking the delta).
std::unordered_map<std::string,void (T::*)(float)> state_map_;
// Single owner instance the handlers are invoked on (last one passed to
// Register wins — all states share it).
T* instance_;
std::string current_state_;
std::string previous_state_;
public:
FStateMachine()
: current_state_("")
, previous_state_("")
{
}
virtual ~FStateMachine();
void Register(const std::string& id,T* instance,void(T::*func)(float));
void SetState(const std::string& id);
std::string& GetCurrentState();
std::string& GetPreviousState();
private:
virtual void Tick(float delta);
};
template<class T>
inline
FStateMachine<T>::~FStateMachine()
{
}
// Dispatches to the current state's handler; does nothing while no state has
// been registered or set.
// NOTE(review): at() throws when current_state_ names an unregistered state —
// the commented-out find() condition below hints at an intended guard.
template<typename T>
inline
void FStateMachine<T>::Tick(float delta)
{
if(state_map_.size() == 0 || current_state_ == "") { return; } //|| (state_map_.find(current_state_) == state_map_.end())
(instance_->*state_map_.at(current_state_))(delta);
}
// Registers (or replaces) a state's handler and records the owner instance.
template<typename T>
inline
void FStateMachine<T>::Register(const std::string& id,T* instance,void(T::*func)(float))
{
instance_ = instance;
state_map_[id] = func;
}
// Switches to a new state, remembering the one we left.
template<typename T>
inline
void FStateMachine<T>::SetState(const std::string& id)
{
std::string tmp = current_state_;
current_state_ = id;
previous_state_ = tmp;
}
template<typename T>
inline
std::string& FStateMachine<T>::GetCurrentState()
{
return current_state_;
}
template<typename T>
inline
std::string& FStateMachine<T>::GetPreviousState()
{
return previous_state_;
}
<file_sep>/ShaderTest/Classes/FUtil.h
//
// FUtil.h
// Hazelnut
//
// Created by <NAME> on 19.06.15.
//
//
#ifndef __Hazelnut__FUtil__
#define __Hazelnut__FUtil__
#include <cocos2d.h>
USING_NS_CC;
// Scene-graph helpers: pause/resume and hide/show a node together with all
// of its descendants.
namespace FUtil
{
// Null-safe entry point; logs an error and returns on a nullptr node.
void SetActiveAndVisible(Node* node, bool active_and_visible);
// Recursive worker used by SetActiveAndVisible.
void NodeRecursion(Node* node, bool active_and_visible);
};
#endif /* defined(__Hazelnut__FUtil__) */
<file_sep>/ShaderTest/Classes/FUtil.cpp
//
// FUtil.cpp
// Hazelnut
//
// Created by <NAME> on 19.06.15.
//
//
#include "FUtil.h"
// Null-safe wrapper: applies the visible/paused state to `node` and its whole
// subtree, logging an error instead of dereferencing a nullptr.
void FUtil::SetActiveAndVisible(Node* node, bool active_and_visible)
{
    if(node != nullptr)
    {
        NodeRecursion(node, active_and_visible);
    }
    else
    {
        CCLOGERROR("SetActiveAndVisible didn't work, node is a nullptr");
    }
}
// Applies the requested state to `node` itself, then walks every child so the
// whole subtree ends up visible+running or hidden+paused.
void FUtil::NodeRecursion(Node* node, bool active_and_visible)
{
    if(active_and_visible)
    {
        node->setVisible(true);
        node->resume();
    }
    else
    {
        node->setVisible(false);
        node->pause();
    }
    for (auto* child : node->getChildren())
    {
        NodeRecursion(child, active_and_visible);
    }
}
<file_sep>/ShaderTest/Classes/FrameworkScene.h
#ifndef FRAMEWORKLAYER_H
#define FRAMEWORKLAYER_H
#include <cocos2d.h>
#include <CocosGUI.h>
#include "AppEnum.h"
#include "SimpleAudioEngine.h"
#include "FTimeframe.h"
#include "FStateMachine.h"
#define STRINGISE_IMPL(x) #x
#define STRINGISE(x) STRINGISE_IMPL(x)
#define FORCEINLINE __attribute__((always_inline))
#if (CC_TARGET_PLATFORM == CC_PLATFORM_WIN32)
# define FORCEINLINE __forceinline
#else
# define FORCEINLINE __attribute__((always_inline))
#endif
// Use: #pragma message WARN("My message")
#if CC_TARGET_PLATFORM == CC_PLATFORM_WIN32
# define FILE_LINE_LINK __FILE__ "(" STRINGISE(__LINE__) ") : "
# define WARN(exp) (FILE_LINE_LINK "WARNING: " exp)
#else//__GNUC__ - may need other defines for different compilers
# define WARN(exp) ("WARNING: " exp)
#endif
//class Gamepopup;
class FrameworkButton;
USING_NS_CC;
typedef void(*touchListenerFunction)(void);
// Base layer for the game's scenes: owns named world sprites and labels,
// manages FrameworkButtons and touch dispatch, and drives registered
// ITickables plus the subclass's Tick() every frame.
class FrameworkScene : public Layer
{
public:
FrameworkScene();
virtual ~FrameworkScene();
// Recursively pauses/hides or resumes/shows a node and all descendants.
// NOTE(review): duplicates FUtil::NodeRecursion — consider delegating.
void NodeRecursion(Node* node, bool active_and_visible)
{
if(!active_and_visible)
{
node->pause();
node->setVisible(false);
}
else
{
node->resume();
node->setVisible(true);
}
auto& children = node->getChildren();
for (size_t i = 0; i < children.size(); i++)
{
NodeRecursion(children.at(i), active_and_visible);
}
}
/**
* Creates Button in the FrameworkScene at given width and height. Button might hold a sprite,
* but can also be used without a texture/sprite for touch areas.
*
* @param width Width of the button.
* @param height Height of the button.
* @param sprite Sprite to give the button a texture, not necessarily needed.
* @return FrameworkButton* Returns a FrameworkButton*.
*/
FrameworkButton* CreateButton(int width, int height, Sprite* sprite = nullptr);
/**
* Creates A Object/Sprite for the Scene/World. The Object will be stored in a World Container and can be
* Retrieved anywhere inside the scene by id.
* Should be used for persistend/long living objects.
*
* @param id Is the id inside the scene/world.
* @param file_name Is the file name of the Sprite in the Resource folder
* @param object_position Vec2 value which sets the x,y position of the object
* @return Sprite* Returns a pointer of the created object/sprite.
*/
#pragma message WARN("TODO: USE FRAMEWORKSPRITE, MAP NOT NEEDED JUST RETURN POINTER, AND CREATE NODE* AND NOT SPRITE*")
#pragma message WARN("TODO: CREATE WORLD OBJECT NOT NEEDED! redesign this usage, just use Sprite* sprite, we do not need to keep track of sprite in scene, they are destroyed anyway. SAME FOR TEXT ETC., just provide different ::create(parameter1,parameter2) versions")
Sprite* CreateWorldObject(const std::string& id,const std::string& file_name, const Vec2& object_position);
/**
* Creates a Object/Sprite which will not be stored in a scene/world container, must be manually removed from the scene.
* Should be used for short living objects.
* @param Node* Node which should be added to the scene.
*/
void CreateObject(Node* Node);
/**
* Creates a text in the scene, which will be stored in a container by id.
* The Text can be retrieved through container by id with GetTextObject(const std::string& id);
*
* @param id String value which sets the id of the text object in the text container.
* @param pos_x X position of the text.
* @param pos_y Y position of the text.
* @param font_size Font size of the text.
* @param ttf_file TrueTypeFont(TTF) file name in the resource folder.
* @param color Color3B type which sets the color of the text in RGB.
*/
#pragma message WARN("TODO: CREATED TEXT SHOULD BE A ONNE TIME FUNCTION FOR EACH TEXT, PROVIDE A SETTEXT FOR EACH TEXT OBJECT")
#pragma message WARN("TODO: PROVIDE TEXT SHOULD NOT BE PROCESSED ALL THE TIME / EACH FRAME")
#pragma message WARN("TODO: MAYBE PROVIDE OBJECT FOR THE TEXT AND RETURN IT HERE ----- NEW COCOS NODE TEXT OBJECT OR LABEL? Label* CreateText(..)")
#pragma message WARN("TODO: MAP MAYBE NOT NEEDED!")
Label* CreateText(const std::string& id, const std::string& text, const float pos_x, const float pos_y, const int fontSize,const std::string& ttfFile,const Color3B& color);
/**
* Removes the text from the scene/world by id.
* @param id Text id inside the container.
*/
#pragma message WARN("TODO: CHECK FOR NULL")
void RemoveText(const std::string& id);
// Null-safe recursive pause/hide or resume/show of a node subtree.
void SetActiveAndVisible(Node* node, bool active);
// Bool IsActiveAndVisible(Node* node);
/**
* Returns the text from the text container by the given id
* @return Label* Text from the text container in the scene.
*/
Label* GetTextObject(const std::string& id);
/**
* Returns the object which was created with CreateWorldObject by the given id.
* @return Sprite* Sprite which was added to the scene as world object.
*/
Sprite* GetWorldObject(const std::string& id);
/**
* Updates all FrameworkButtons in the FrameworkScene. Checks for touches on active buttons.
* Is used by the FrameworkScene only, it is not necessary to call this function.
*/
#pragma message WARN("TODO:MAYBE PRIVATE")
void UpdateButtons();
// Accumulates delta time; true once `checkpoint` seconds have elapsed.
bool updateDelayControl(float delta, float checkpoint);
bool isDelayControlEnabled();
void setDelayControlEnabled(bool value);
// Registers an object whose Tick() is driven by this scene's update().
void AddTickable(ITickable& tickable);
void AddButton(FrameworkButton* button)
{
vec_buttons_.push_back(button);
}
private:
/**
* Updates FrameworkScene and it's objects. Items are FrameworkObjects and the Tick(float) function.
*/
virtual void update(float delta);
/**
* Triggers when a touch in the scene happened.
*/
virtual bool onTouchBegan(Touch* touches, Event* event);
/**
* Triggers when touch in the scene is moving while being pressed.
*/
virtual void onTouchMoved(Touch* touche, Event* event);
/**
* Triggers when touch in the scene is released.
*/
virtual void onTouchEnded(Touch* touche, Event* event);
/**
* Triggers when the touch never will reach its release state in the scene.
*/
virtual void onTouchCancelled(Touch* touche, Event* event);
private:
Map<std::string, Sprite*> map_world_objects_; /**< Container which holds pointers to all objects which are created as world object in the scene by string id */
Map<std::string, Label*> map_texts_; /**< Container which holds pointers to all texts which are created in the scene by string id */
std::vector<FrameworkButton*> vec_buttons_; /**< Container which holds pointers to all buttons which are created in the scene by string id*/
std::vector<ITickable*> vec_tickables_; /**< Objects ticked every frame by update() */
#pragma message WARN("TODO: MAP_INACTIVE_BUTTONS NOT USED RIGHT NOW")
// std::vector<FrameworkButton*> map_inactive_buttons;
bool bTouchHappened_; /**< True when a button is touched */
int touch_x_; /**< Current touch x position */
int touch_y_; /**< Current touch y position */
bool bReleaseHappened_; /**< True when a button is released */
bool bWasPressed_; /**< True for one tick when a button touch ended/is released*/
int higher_z_order_; /**< Helper variable for z_order comparison */
int curr_target_z_order_; /**< Z Order of the currently observed button */
#pragma message WARN("TODO: button_click_ MAYBE NOT NEEDED")
bool bButtonClick_; /**< True if a touch happened */
FrameworkButton* active_button_; /**< The pressed button */
// Delay Control
float sumDelta; /**< Accumulated delta time for updateDelayControl */
void initDelayControl();
bool delayControlEnabled;
protected:
/**
* Updates the Scene and it's object every frame.
*/
virtual void Tick(float delta);
};
// Adds a short-lived node directly to the scene graph (not tracked by id).
inline void FrameworkScene::CreateObject(Node* node) { this->addChild(node); }

// Lookup helpers; at(id) asserts/faults on unknown ids.
inline Sprite* FrameworkScene::GetWorldObject(const std::string& id) { return map_world_objects_.at(id); }

inline Label* FrameworkScene::GetTextObject(const std::string& id) { return map_texts_.at(id); }

// Pauses/hides or resumes/shows `node` and its whole subtree; logs and
// returns on a nullptr. (Large blocks of commented-out experimental code
// were removed here — behaviour is unchanged.)
inline void FrameworkScene::SetActiveAndVisible(Node* node, bool active_and_visible)
{
    if(node == nullptr) { CCLOGERROR("SetActiveAndVisible didn't work, node is a nullptr"); return; }
    NodeRecursion(node, active_and_visible);
}

// Registers an object whose Tick() will be driven by this scene's update().
inline void FrameworkScene::AddTickable(ITickable &tickable) { vec_tickables_.push_back(&tickable); }

// Removes a label from the scene graph and forgets its id.
// NOTE(review): at(id) faults on unknown ids — see the "CHECK FOR NULL"
// pragma on the declaration.
inline void FrameworkScene::RemoveText(const std::string &id)
{
    this->removeChild(map_texts_.at(id));
    map_texts_.erase(id);
}
#endif // FRAMEWORKLAYER_H<file_sep>/ShaderTest/Classes/SpecialSprite.h
//
// SpecialSprite.h
// Hazelnut
//
// Created by <NAME> on 21.01.15.
//
//
#ifndef __Hazelnut__SpecialSprite__
#define __Hazelnut__SpecialSprite__
#include <stdio.h>
#include <cocos2d.h>
USING_NS_CC;
// Sprite with a built-in per-frame animation: either a scale "pulse"
// (0.6 to 1.0 and back) or a continuous rotation, toggled via WithAnimation().
class SpecialSprite : public Sprite
{
public:
SpecialSprite();
virtual ~SpecialSprite();
// Which animation update() should drive.
enum EAnimation {SCALE,ROTATE};
// Factory: autoreleased sprite loaded from an image file (nullptr on failure).
static SpecialSprite* CreateSprite(const std::string& filename);
// Enables/disables the animation and selects which one runs.
void WithAnimation(bool active, EAnimation sprite_animation);
/**
* Resets the SpecialSprite including its animation and animation values.
*/
void Reset();
CREATE_FUNC(SpecialSprite);
private:
float currScale; // current scale factor for the SCALE animation
bool onIncrease; // SCALE direction: true while growing
bool withAnimation; // master switch checked in update()
float current_rotation_value_; // current angle in degrees for ROTATE
EAnimation sprite_animation_; // which animation is selected
protected:
virtual bool init();
virtual void update(float delta);
};
inline void SpecialSprite::WithAnimation(bool active,EAnimation sprite_animation) { withAnimation = active; sprite_animation_ = sprite_animation;}
#endif /* defined(__Hazelnut__SpecialSprite__) */
<file_sep>/ShaderTest/Classes/MenuScene.cpp
//
// MenuScene.cpp
// ShaderTest
//
// Created by <NAME> on 29.06.15.
//
//
#include "MenuScene.h"
#include <cocos2d.h>
USING_NS_CC;
// No members of its own; all setup happens in init().
MenuScene::MenuScene()
{
}
MenuScene::~MenuScene()
{
}
// Cocos2d boot pattern: wraps a freshly created MenuScene layer in a Scene.
Scene* MenuScene::createScene()
{
    auto menu_scene = Scene::create();
    auto menu_layer = MenuScene::create();
    menu_scene->addChild(menu_layer);
    return menu_scene;
}
// Builds the scene: a centred background, plus a test sprite whose rendering
// goes through the custom test.vsh / test.fsh shader pair.
// (A commented-out duplicate shader-setup block was removed; behaviour is
// unchanged.)
bool MenuScene::init()
{
    if(!Layer::init())
    {
        return false;
    }
    // Background, centred on the 1920x1080 design resolution.
    Sprite* background = Sprite::create("HelloWorld.png");
    background->setPosition(1920/2,1080/2);
    addChild(background);

    // Test sprite that will receive the custom shader.
    Sprite* b = Sprite::create("CloseSelected.png");
    b->setPosition(1920 / 2, 1080 / 2);
    addChild(b);

    // Compile and link the custom shader program.
    // NOTE(review): `glp` is created with raw `new` and never released/
    // autoreleased here — confirm setShaderProgram takes ownership in this
    // cocos2d version, otherwise this leaks.
    GLProgram* glp = new GLProgram();
    glp->initWithVertexShaderFilename("Shaders/test.vsh","Shaders/test.fsh");
    glp->addAttribute(GLProgram::ATTRIBUTE_NAME_POSITION, GLProgram::VERTEX_ATTRIB_POSITION);
    glp->addAttribute(GLProgram::ATTRIBUTE_NAME_COLOR, GLProgram::VERTEX_ATTRIB_COLOR);
    glp->addAttribute(GLProgram::ATTRIBUTE_NAME_TEX_COORD, GLProgram::VERTEX_ATTRIB_TEX_COORD);
    glp->link();
    glp->updateUniforms();
    b->setShaderProgram(glp);

    return true;
}
// Per-frame tick callback; currently unused.
void MenuScene::Tick(float delta)
{
}
| 39db393e884a1df973d4a114756a8ac2e6713f07 | [
"C++"
] | 9 | C++ | Sleicreider/ShaderTest | 1e979605ca64350b4a93c75890976b2293994203 | ef0cd9f648015f6fd9036a8fd334942283c50b3e |
refs/heads/master | <repo_name>Cmd0r/fontAwesomeFX-finder<file_sep>/src/main/java/fr/cmdor/ViewerController.java
package fr.cmdor;
import de.jensd.fx.glyphs.GlyphIcon;
import de.jensd.fx.glyphs.GlyphIcons;
import de.jensd.fx.glyphs.emojione.EmojiOne;
import de.jensd.fx.glyphs.emojione.EmojiOneView;
import de.jensd.fx.glyphs.fontawesome.FontAwesomeIcon;
import de.jensd.fx.glyphs.fontawesome.FontAwesomeIconView;
import de.jensd.fx.glyphs.icons525.Icons525;
import de.jensd.fx.glyphs.icons525.Icons525View;
import de.jensd.fx.glyphs.materialdesignicons.MaterialDesignIcon;
import de.jensd.fx.glyphs.materialdesignicons.MaterialDesignIconView;
import de.jensd.fx.glyphs.materialicons.MaterialIcon;
import de.jensd.fx.glyphs.materialicons.MaterialIconView;
import de.jensd.fx.glyphs.octicons.OctIcon;
import de.jensd.fx.glyphs.octicons.OctIconView;
import de.jensd.fx.glyphs.weathericons.WeatherIcon;
import de.jensd.fx.glyphs.weathericons.WeatherIconView;
import javafx.event.ActionEvent;
import javafx.fxml.FXML;
import javafx.fxml.Initializable;
import javafx.scene.control.*;
import javafx.scene.effect.ColorAdjust;
import javafx.scene.effect.GaussianBlur;
import javafx.scene.input.Clipboard;
import javafx.scene.input.ClipboardContent;
import javafx.scene.input.KeyCode;
import javafx.scene.layout.FlowPane;
import java.net.URL;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.ResourceBundle;
/**
* Created on 08/09/2018.
*
* @author <NAME>
*/
public class ViewerController implements Initializable {

    // All icon libraries, keyed by their GlyphIcon view class; insertion order
    // matches the order of the accordion panes built in generateFontsPane().
    private final LinkedHashMap<Class<? extends GlyphIcon>, Collection<GlyphIcons>> fonts = new LinkedHashMap<>();

    @FXML
    private Accordion libraries;
    @FXML
    private Slider sizeSlider;
    @FXML
    private Label sizeLabel;
    @FXML
    private SplitMenuButton actionBtn;
    @FXML
    private TextField search;
    @FXML
    private MaterialDesignIconView searchIcon;
    @FXML
    private FlowPane searchResultPane;

    // Blur + darken effect applied to the libraries pane while searching.
    private GaussianBlur blur;
    // Icon last clicked by the user; copied to the clipboard by the action button.
    private GlyphIcons selectedIcon;
    private float iconPrefSize = 24.0F;
    private int flowPrefGap = 12;

    /**
     * Wires up all UI behaviour: size slider, clipboard-copy action button,
     * live search with blur effect, and builds the icon library panes.
     */
    @Override
    public void initialize(URL location, ResourceBundle resources) {
        blur = new GaussianBlur();
        ColorAdjust colorAdjust = new ColorAdjust();
        colorAdjust.setBrightness(-0.3D);
        this.blur.setInput(colorAdjust);
        // Slider resizes every icon in every pane to the chosen pixel size.
        this.sizeSlider.valueProperty().addListener((o, old, n) -> {
            this.sizeLabel.setText(String.format("%.3f px.", n.floatValue()));
            this.iconPrefSize = n.floatValue();
            this.libraries.getPanes().forEach(p ->
                    ((FlowPane) ((ScrollPane) p.getContent()).getContent()).getChildren().forEach(i -> {
                        ((GlyphIcon<?>) i).setGlyphSize(this.iconPrefSize);
                    }));
        });
        // Clicking the action button copies the selected icon's enum name.
        this.actionBtn.setOnMouseClicked(e -> {
            if (this.selectedIcon != null) {
                ClipboardContent cc = new ClipboardContent();
                cc.putString(selectedIcon.name());
                Clipboard.getSystemClipboard().setContent(cc);
            }
        });
        // Typing in the search box toggles the overlay pane and blur, swaps the
        // magnify/close icon, and refreshes the result list on every keystroke.
        this.search.textProperty().addListener((o, old, n) -> {
            if (n.equals(old)) return;
            this.searchResultPane.getParent().getParent().getParent().setVisible(!n.isEmpty());
            this.libraries.setEffect(n.isEmpty() ? null : this.blur);
            this.searchIcon.setGlyphName(
                    n.isEmpty() ? MaterialDesignIcon.MAGNIFY.name() : MaterialDesignIcon.CLOSE_CIRCLE.name());
            this.search(n);
        });
        // ESC clears the search field (which also hides the overlay via the listener above).
        this.search.setOnKeyPressed(e -> {
            if (e.getCode() == KeyCode.ESCAPE) this.search.setText("");
        });
        // Clicking the close-circle icon clears the search field.
        this.searchIcon.setOnMouseClicked(e -> {
            if (this.searchIcon.getGlyphName().equals(MaterialDesignIcon.CLOSE_CIRCLE.name())) {
                this.search.setText("");
            }
        });
        this.generateFontsPane();
    }

    @FXML
    private void changeAction(ActionEvent e) {
        // TODO
    }

    /**
     * Registers every supported icon library in {@link #fonts} and builds one
     * accordion pane per library.
     */
    private void generateFontsPane() {
        fonts.put(EmojiOneView.class, Arrays.asList(EmojiOne.values()));
        this.createFontView(EmojiOne.values(), EmojiOneView.class, "EmojiOne");
        fonts.put(FontAwesomeIconView.class, Arrays.asList(FontAwesomeIcon.values()));
        this.createFontView(FontAwesomeIcon.values(), FontAwesomeIconView.class, "FontAwesome");
        fonts.put(Icons525View.class, Arrays.asList(Icons525.values()));
        this.createFontView(Icons525.values(), Icons525View.class, "Icon 525");
        fonts.put(MaterialDesignIconView.class, Arrays.asList(MaterialDesignIcon.values()));
        this.createFontView(MaterialDesignIcon.values(), MaterialDesignIconView.class, "Material Design");
        fonts.put(MaterialIconView.class, Arrays.asList(MaterialIcon.values()));
        this.createFontView(MaterialIcon.values(), MaterialIconView.class, "Material");
        fonts.put(OctIconView.class, Arrays.asList(OctIcon.values()));
        this.createFontView(OctIcon.values(), OctIconView.class, "Oct");
        fonts.put(WeatherIconView.class, Arrays.asList(WeatherIcon.values()));
        this.createFontView(WeatherIcon.values(), WeatherIconView.class, "Weather");
    }

    /**
     * Builds a scrollable FlowPane holding one icon view per enum constant and
     * adds it to the accordion under the given title.
     *
     * @param e    all icon constants of one library
     * @param v    the GlyphIcon view class used to render that library
     * @param name pane title shown in the accordion
     */
    private <E extends Enum<E> & GlyphIcons> void createFontView(E[] e, Class<? extends GlyphIcon> v, String name) {
        FlowPane p = new FlowPane();
        p.setHgap(this.flowPrefGap);
        p.setVgap(this.flowPrefGap);
        Arrays.asList(e).forEach(i -> {
            p.getChildren().add(this.createIconView(i, v));
        });

        ScrollPane s = new ScrollPane(p);
        s.setFitToHeight(true);
        s.setFitToWidth(true);

        this.libraries.getPanes().add(new TitledPane(name, s));
    }

    /**
     * Instantiates a single icon view for the given glyph, falling back to a
     * FontAwesome question mark if reflection fails. Clicking an icon selects
     * it and shows its name on the action button; hovering shows a tooltip.
     *
     * @param i the glyph to render
     * @param v the view class to instantiate reflectively
     * @return the configured icon node
     */
    private GlyphIcon<?> createIconView(GlyphIcons i, Class<? extends GlyphIcon> v) {
        GlyphIcon<?> viewer;
        try {
            viewer = v.newInstance();
            viewer.setGlyphName(i.name());
        } catch (InstantiationException | IllegalAccessException e) {
            viewer = new FontAwesomeIconView(FontAwesomeIcon.QUESTION_CIRCLE);
        }
        viewer.setGlyphSize(this.iconPrefSize);
        viewer.setOnMouseClicked(e -> {
            this.selectedIcon = i;
            this.actionBtn.setText(i.name());
        });
        Tooltip.install(viewer, new Tooltip(i.name()));
        return viewer;
    }

    /**
     * Rebuilds the search-result pane with every icon (from all libraries)
     * whose enum name contains the given text, case-insensitively.
     *
     * @param text the raw search string; blank input is ignored
     */
    private void search(final String text) {
        if (text == null || text.isEmpty()) return;
        final String searchText = text.toLowerCase();
        this.searchResultPane.getChildren().clear();
        this.fonts.forEach((v, icons) -> {
            icons.forEach(i -> {
                if (i.name().toLowerCase().contains(searchText)) {
                    this.searchResultPane.getChildren().add(this.createIconView(i, v));
                }
            });
        });
    }
}
<file_sep>/README.md
# fontAwesomeFX-finder
**Presentation**
A simple and efficient JavaFX viewer for the FontAwesomeFX library by [Jerady](https://bitbucket.org/Jerady).
This library lets you use many different icon fonts in your JavaFX project (such as FontAwesome, EmojiOne, Icon 525, ...).
Examples:
<img src="main_screen.png" alt="main view">
<img src="search_screen.png" alt="search example">
**Details**
This project is also a good example of a Maven + Java 8 + JavaFX setup, including the Maven configuration needed for FontAwesomeFX.
**TODO**
- [ ] Improve performances
- [ ] Add copy actions (like, copy for fxml, copy for class, ...)
| 15dcbe5ffa9ffbdd1761587f2413400ec0341335 | [
"Markdown",
"Java"
] | 2 | Java | Cmd0r/fontAwesomeFX-finder | 66f5c250d5ec76aea7573df3cd646076b376cdcf | 5dbd413be848b7c082d3dcaccf3584c7245b5525 |
refs/heads/master | <file_sep>import torch
from torch.optim.optimizer import Optimizer, required
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from torch.nn import Parameter
class ResBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
nn.init.xavier_uniform_(self.conv1.weight.data, 1.)
nn.init.xavier_uniform_(self.conv2.weight.data, 1.)
self.model = nn.Sequential(
nn.BatchNorm2d(in_channels),
nn.LeakyReLU(0.02),
# nn.Upsample(scale_factor=2),
self.conv1,
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(0.02),
self.conv2,
nn.BatchNorm2d(out_channels)
)
self.bypass = nn.Sequential()
def forward(self, x):
return self.model(x) + self.bypass(x)
class ResBlockTranspose(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResBlockTranspose, self).__init__()
self.conv1 = nn.ConvTranspose2d(
in_channels, out_channels, 3, 1, padding=1)
self.conv2 = nn.ConvTranspose2d(
out_channels, out_channels, 3, 1, padding=1)
nn.init.xavier_uniform_(self.conv1.weight.data, 1.)
nn.init.xavier_uniform_(self.conv2.weight.data, 1.)
self.model = nn.Sequential(
nn.BatchNorm2d(in_channels),
nn.LeakyReLU(0.02),
# nn.Upsample(scale_factor=2),
self.conv1,
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(0.02),
self.conv2,
nn.BatchNorm2d(out_channels)
)
self.bypass = nn.Sequential()
def forward(self, x):
return self.model(x) + self.bypass(x)
class GeneratorZ1(nn.Module):
    # First-level HALI encoder: image -> intermediate latent z1.
    # Downsamples twice (the two stride-2 convs) and outputs 2*zd1 channels
    # (presumably mean/log-var halves for reparameterization — TODO confirm
    # against the caller's `reparameterize`).
    def __init__(self, zd=128, ch=1, zd1=64):
        super().__init__()
        self.lr = nn.LeakyReLU(0.02)
        self.c1 = SpectralNorm(nn.Conv2d(ch, zd1 // 4, 3, 1, 1))
        self.c2 = SpectralNorm(nn.Conv2d(zd1 // 4, zd1 // 2, 3, 2, 1))  # /2 spatial
        self.rn1 = ResBlock(zd1 // 2, zd1 // 2)
        self.rn2 = ResBlock(zd1 // 2, zd1 // 2)
        self.c3 = SpectralNorm(nn.Conv2d(zd1 // 2, zd1, 3, 2, 1))  # /2 spatial
        self.bn3 = nn.BatchNorm2d(zd1)
        self.c4 = SpectralNorm(nn.Conv2d(zd1, zd1 * 2, 3, 1, 1))

    def forward(self, x):
        x = self.lr(self.c1(x))
        x = self.c2(x)
        x = self.rn1(x)
        x = self.rn2(x)
        x = self.bn3(self.c3(x))
        x = self.c4(x)
        return x
class GeneratorZ2(nn.Module):
    # Second-level HALI encoder: intermediate latent z1 -> top latent z2.
    # Output has 2*zd channels (presumably mean/log-var pairs — TODO confirm).
    def __init__(self, zd=128, zd1=64):
        super().__init__()
        self.c1 = SpectralNorm(nn.Conv2d(zd1, zd // 2, 3, 2))  # stride-2, no padding
        self.bn1 = nn.BatchNorm2d(zd // 2)
        self.lr = nn.LeakyReLU(0.02)
        self.c2 = SpectralNorm(nn.Conv2d(zd // 2, zd, 3, 1, 1))
        self.rn1 = ResBlock(zd, zd)
        self.c3 = SpectralNorm(nn.Conv2d(zd, zd * 2, 3, 1))  # no padding: shrinks by 2

    def forward(self, x):
        x = self.lr(self.bn1(self.c1(x)))
        x = self.c2(x)
        x = self.rn1(x)
        x = self.c3(x)
        return x
class GeneratorX1(nn.Module):
    # First-level HALI decoder: top latent z2 -> intermediate feature map.
    # Uses transposed convolutions only (no upsampling layers).
    def __init__(self, zd=128, ch=1, zd1=64):
        super().__init__()
        self.rl = nn.LeakyReLU(0.02)
        self.conv1 = SpectralNorm(nn.ConvTranspose2d(zd, zd, 3, 1))
        self.rn1 = ResBlockTranspose(zd, zd)
        self.conv2 = SpectralNorm(nn.ConvTranspose2d(zd, zd, 3, 1))
        self.bn2 = nn.BatchNorm2d(zd)
        self.conv3 = SpectralNorm(nn.ConvTranspose2d(zd, zd1 * 2, 3, 1))
        self.bn3 = nn.BatchNorm2d(zd1 * 2)
        self.conv4 = SpectralNorm(nn.ConvTranspose2d(zd1 * 2, zd1 * 2, 2, 1))
        # NOTE(review): conv5 is registered (so it appears in state_dict) but is
        # never used in forward() — likely leftover; removing it would change
        # checkpoint compatibility, so it is kept as-is.
        self.conv5 = SpectralNorm(nn.ConvTranspose2d(zd1 * 2, zd1 * 2, 2, 1))

    def forward(self, x):
        x = self.rl(self.conv1(x))
        x = self.rn1(x)
        x = self.rl(self.bn2(self.conv2(x)))
        x = self.rl(self.bn3(self.conv3(x)))
        x = self.conv4(x)
        return x
class GeneratorX2(nn.Module):
    # Second-level HALI decoder: intermediate latent z1 -> image in [-1, 1]
    # (tanh output). Upsamples once via bilinear interpolation.
    def __init__(self, zd=128, ch=1, zd1=64):
        super().__init__()
        self.rl = nn.LeakyReLU(0.02)
        self.conv1 = SpectralNorm(nn.ConvTranspose2d(zd1, zd1, 3, 1, 1))
        self.rn1 = ResBlockTranspose(zd1, zd1)
        self.conv2 = SpectralNorm(nn.ConvTranspose2d(zd1, zd1, 3, 2, 1))
        self.bn2 = nn.BatchNorm2d(zd1)
        self.conv3 = SpectralNorm(nn.ConvTranspose2d(zd1, zd1 // 2, 3, 1, 1))
        self.bn3 = nn.BatchNorm2d(zd1 // 2)
        self.conv4 = SpectralNorm(nn.ConvTranspose2d(zd1 // 2, ch, 2, 1))
        self.tanh = nn.Tanh()

    def forward(self, x):
        x = self.rl(self.conv1(x))
        # NOTE(review): the same residual block is applied twice in a row;
        # possibly intentional weight sharing, possibly a copy-paste slip —
        # confirm against the authors' intent before changing.
        x = self.rn1(x)
        x = self.rn1(x)
        x = nn.functional.interpolate(
            x, mode='bilinear', scale_factor=2, align_corners=False)
        x = self.rl(self.bn2(self.conv2(x)))
        x = self.rl(self.bn3(self.conv3(x)))
        x = self.tanh(self.conv4(x))
        return x
class GeneratorX1_interpolate(nn.Module):
    # Variant of GeneratorX1 that upsamples with bilinear interpolation
    # between size-preserving transposed convs (three x2 upsamples total).
    def __init__(self, zd=128, ch=1, zd1=64):
        super().__init__()
        self.rl = nn.LeakyReLU(0.02)
        self.conv1 = SpectralNorm(nn.ConvTranspose2d(zd, zd, 3, 1, 1))
        self.rn1 = ResBlockTranspose(zd, zd)
        self.conv2 = SpectralNorm(nn.ConvTranspose2d(zd, zd, 3, 1, 1))
        self.bn2 = nn.BatchNorm2d(zd)
        self.conv3 = SpectralNorm(nn.ConvTranspose2d(zd, zd1 * 2, 3, 1, 1))
        self.bn3 = nn.BatchNorm2d(zd1 * 2)
        self.conv4 = SpectralNorm(
            nn.ConvTranspose2d(zd1 * 2, zd1 * 2, 3, 1, 1))

    def forward(self, x):
        x = self.rl(self.conv1(x))
        x = nn.functional.interpolate(
            x, mode='bilinear', scale_factor=2, align_corners=False)
        x = self.rn1(x)
        x = self.rl(self.bn2(self.conv2(x)))
        x = nn.functional.interpolate(
            x, mode='bilinear', scale_factor=2, align_corners=False)
        x = self.rl(self.bn3(self.conv3(x)))
        x = nn.functional.interpolate(
            x, mode='bilinear', scale_factor=2, align_corners=False)
        x = self.conv4(x)
        return x
class GeneratorX2_interpolate(nn.Module):
    # Variant of GeneratorX2 using interpolation-based upsampling; final
    # tanh maps the output image to [-1, 1].
    def __init__(self, zd=128, ch=1, zd1=64):
        super().__init__()
        self.rl = nn.LeakyReLU(0.02)
        self.conv1 = SpectralNorm(nn.ConvTranspose2d(zd1, zd1, 3, 1, 1))
        self.rn1 = ResBlockTranspose(zd1, zd1)
        self.conv2 = SpectralNorm(nn.ConvTranspose2d(zd1, zd1, 3, 1, 1))
        self.bn2 = nn.BatchNorm2d(zd1)
        self.conv3 = SpectralNorm(nn.ConvTranspose2d(zd1, zd1 // 2, 3, 1, 1))
        self.bn3 = nn.BatchNorm2d(zd1 // 2)
        self.conv4 = SpectralNorm(nn.ConvTranspose2d(zd1 // 2, ch, 3, 1, 1))
        self.tanh = nn.Tanh()

    def forward(self, x):
        x = self.rl(self.conv1(x))
        x = nn.functional.interpolate(
            x, mode='bilinear', scale_factor=2, align_corners=False)
        # NOTE(review): rn1 is applied twice in a row (same weights); confirm
        # whether this repetition is intentional.
        x = self.rn1(x)
        x = self.rn1(x)
        x = nn.functional.interpolate(
            x, mode='bilinear', scale_factor=2, align_corners=False)
        x = self.rl(self.bn2(self.conv2(x)))
        x = self.rl(self.bn3(self.conv3(x)))
        x = self.tanh(self.conv4(x))
        return x
class GeneratorX1_convolve(nn.Module):
    # Variant of GeneratorX1 that grows the spatial size purely through
    # unpadded transposed convolutions (no interpolation).
    def __init__(self, zd=128, ch=1, zd1=64):
        super().__init__()
        self.rl = nn.LeakyReLU(0.02)
        self.conv1 = SpectralNorm(nn.ConvTranspose2d(zd, zd, 3, 1))
        self.rn1 = ResBlockTranspose(zd, zd)
        self.conv2 = SpectralNorm(nn.ConvTranspose2d(zd, zd, 3, 1))
        self.bn2 = nn.BatchNorm2d(zd)
        self.conv3 = SpectralNorm(nn.ConvTranspose2d(zd, zd1 * 2, 3, 1))
        self.bn3 = nn.BatchNorm2d(zd1 * 2)
        self.conv4 = SpectralNorm(nn.ConvTranspose2d(zd1 * 2, zd1 * 2, 2, 1))

    def forward(self, x):
        x = self.rl(self.conv1(x))
        x = self.rn1(x)
        x = self.rl(self.bn2(self.conv2(x)))
        x = self.rl(self.bn3(self.conv3(x)))
        x = self.conv4(x)
        return x
class GeneratorX2_convolve(nn.Module):
    # Variant of GeneratorX2 that upsamples with stride-2 transposed convs
    # instead of interpolation; tanh output in [-1, 1].
    def __init__(self, zd=128, ch=1, zd1=64):
        super().__init__()
        self.rl = nn.LeakyReLU(0.02)
        self.conv1 = SpectralNorm(nn.ConvTranspose2d(zd1, zd1, 3, 2, 1))
        self.rn1 = ResBlockTranspose(zd1, zd1)
        self.conv2 = SpectralNorm(nn.ConvTranspose2d(zd1, zd1, 3, 2, 1))
        self.bn2 = nn.BatchNorm2d(zd1)
        self.conv3 = SpectralNorm(nn.ConvTranspose2d(zd1, zd1 // 2, 3, 1))
        self.bn3 = nn.BatchNorm2d(zd1 // 2)
        self.conv4 = SpectralNorm(nn.ConvTranspose2d(zd1 // 2, ch, 2, 1))
        self.tanh = nn.Tanh()

    def forward(self, x):
        x = self.rl(self.conv1(x))
        # NOTE(review): rn1 is applied twice in a row (same weights); confirm
        # whether this repetition is intentional.
        x = self.rn1(x)
        x = self.rn1(x)
        x = self.rl(self.bn2(self.conv2(x)))
        x = self.rl(self.bn3(self.conv3(x)))
        x = self.tanh(self.conv4(x))
        return x
class Discriminator(nn.Module):
    """HALI joint discriminator over (x, z1, z2).

    Composed of three sub-networks: Dx embeds the image, Dz1 joins that
    embedding with z1, and Dxz scores the result joined with z2. Small
    Gaussian noise (std 0.001) is added to the image and to the Dx
    embedding as regularization.
    """

    def __init__(self, ch=3, ch2=128, zd=256, zd1=64):
        super(Discriminator, self).__init__()
        self.Dx = DiscriminatorX(zd=zd, ch=ch, zd1=zd1)
        self.Dz1 = DiscriminatorZ1(zd=zd, ch=ch2, zd1=zd1)
        self.Dxz = DiscriminatorXZ(zd=zd)

    def forward(self, x_input, z1_input, z2_input):
        """Score a joint sample.

        :param x_input: image batch
        :param z1_input: intermediate latent batch (channel-concatenated with Dx(x))
        :param z2_input: top latent batch (channel-concatenated with Dz1 output)
        :return: discriminator logits shifted by a small eps
        """
        eps = 1e-5
        # BUGFIX: noise was previously created with Variable(...).cuda(),
        # which crashed on CPU-only machines and used the deprecated Variable
        # API. randn_like inherits device/dtype and 0.1 * 0.01 reproduces the
        # original std of 0.001.
        noise = torch.randn_like(x_input) * (0.1 * 0.01)
        dx_out = self.Dx(x_input + noise)
        noise = torch.randn_like(dx_out) * (0.1 * 0.01)
        Dz1_out = self.Dz1(torch.cat((dx_out, z1_input + noise), dim=1))
        d_out = self.Dxz(torch.cat((Dz1_out, z2_input), dim=1)) + eps
        return d_out
class DiscriminatorX(nn.Module):
    # Image branch of the HALI discriminator: downsamples the image twice
    # and produces a zd1-channel feature map to be joined with z1.
    def __init__(self, zd=128, ch=1, zd1=64):
        super().__init__()
        self.lr = nn.LeakyReLU(0.02)
        self.c1 = SpectralNorm(nn.Conv2d(ch, zd1 // 2, 3, 1))
        self.c2 = SpectralNorm(nn.Conv2d(zd1 // 2, zd1, 3, 2, 1))
        self.c3 = SpectralNorm(nn.Conv2d(zd1, zd1, 3, 2, 1))
        self.c4 = SpectralNorm(nn.Conv2d(zd1, zd1, 3, 1, 1))
        self.c5 = SpectralNorm(nn.Conv2d(zd1, zd1, 3, 1, 1))
        # NOTE(review): self.lr is assigned twice (also above) — harmless
        # duplicate, kept for byte-compatibility.
        self.lr = nn.LeakyReLU(0.02)
        self.d = nn.Dropout2d(0.2)

    def forward(self, x):
        x = self.lr(self.c1(x))
        x = self.d(self.lr(self.c2(x)))
        x = self.d(self.lr(self.c3(x)))
        x = self.lr(self.c4(x))
        x = self.c5(x)
        return x
class DiscriminatorZ1(nn.Module):
    # Joint (Dx(x), z1) branch: consumes the channel-concatenated pair
    # (2*zd1 channels in) and reduces it to a zd-channel feature map.
    def __init__(self, zd=128, ch=1, zd1=64):
        super().__init__()
        self.c1 = SpectralNorm(nn.Conv2d(zd1 * 2, zd1 * 2, 3, 2))
        self.lr = nn.LeakyReLU(0.02)
        self.c2 = SpectralNorm(nn.Conv2d(zd1 * 2, zd, 3, 2))
        self.c3 = SpectralNorm(nn.Conv2d(zd, zd, 3, 1, 1))
        self.c4 = SpectralNorm(nn.Conv2d(zd, zd, 3, 1, 1))
        self.c5 = SpectralNorm(nn.Conv2d(zd, zd, 3, 1, 1))
        self.d = nn.Dropout2d(0.5)

    def forward(self, x):
        x = self.lr(self.c1(x))
        x = self.d(self.lr(self.c2(x)))
        x = self.lr(self.c3(x))
        x = self.d(self.lr(self.c4(x)))
        x = self.c5(x)
        return x
class DiscriminatorXZ(nn.Module):
    # Final scoring head: 1x1 convolutions over the channel-concatenated
    # (Dz1 output, z2) pair, producing a single-channel logit map.
    def __init__(self, zd=128):
        super().__init__()
        self.net = nn.Sequential(
            SpectralNorm(nn.Conv2d(zd * 2, zd * 4, 1, 1)),
            nn.LeakyReLU(0.02),
            nn.Dropout2d(0.2),
            SpectralNorm(nn.Conv2d(zd * 4, zd, 1, 1)),
            nn.LeakyReLU(0.02),
            nn.Dropout2d(0.2),
            SpectralNorm(nn.Conv2d(zd, 1, 1, 1)),
        )

    def forward(self, x):
        return self.net(x)
def l2normalize(v, eps=1e-12):
    """Return ``v`` scaled to unit L2 norm; ``eps`` keeps the division
    finite for (near-)zero vectors."""
    norm = v.norm()
    return v / (norm + eps)
class SpectralNorm(nn.Module):
    # Spectral normalization wrapper (Miyato et al. style): divides the
    # wrapped module's weight by an estimate of its largest singular value,
    # refreshed by power iteration on every forward pass.
    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module                    # the wrapped layer (e.g. Conv2d)
        self.name = name                        # name of the parameter to normalize
        self.power_iterations = power_iterations
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        # One (or more) power-iteration steps to refresh the left/right
        # singular-vector estimates u and v, then rescale the weight.
        u = getattr(self.module, self.name + "_u")
        v = getattr(self.module, self.name + "_v")
        w = getattr(self.module, self.name + "_bar")

        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(
                torch.mv(torch.t(w.view(height, -1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))

        # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
        sigma = u.dot(w.view(height, -1).mv(v))  # spectral-norm estimate
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        # True if u/v/w_bar were already registered on the wrapped module.
        try:
            u = getattr(self.module, self.name + "_u")
            v = getattr(self.module, self.name + "_v")
            w = getattr(self.module, self.name + "_bar")
            return True
        except AttributeError:
            return False

    def _make_params(self):
        # Replace the original weight parameter with w_bar and register the
        # (non-trainable) power-iteration vectors u and v.
        w = getattr(self.module, self.name)

        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]

        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)

        del self.module._parameters[self.name]

        self.module.register_parameter(self.name + "_u", u)
        self.module.register_parameter(self.name + "_v", v)
        self.module.register_parameter(self.name + "_bar", w_bar)

    def forward(self, *args):
        # Refresh the normalized weight, then delegate to the wrapped module.
        self._update_u_v()
        return self.module.forward(*args)
<file_sep>import sys
import os
import numpy as np
import math
import torch
from torchvision.transforms import functional
import models.HALI as hali
import models.ALI as ali
from torch.optim.optimizer import Optimizer, required
from utils.model_utils import get_ae_dataloaders
from torch.autograd import Variable
from torch import nn
from torch import Tensor
from torch.nn import Parameter
from utils.custom_optimizers import *
from itertools import chain
from torchvision.utils import save_image
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import re
from models.clustering import *
from utils.utils import *
import copy
from random import randint
import numpy as np
from os import listdir
from torch.nn import init
from os.path import isfile, isdir, join
def initialize_ali(configs, data):
    """
    initialize_ali is a function for initializing all models and loaders needed in training
    :param configs: a dictionary with all params necessary for training
    :param data: Data used to create the loader.
    :return Gx: Decoder/Generator
    :return Gz: Encoder
    :return Disc: Discriminator
    :return optim_g: Optimizer for Generator params
    :return optim_d: Optimizer for Discriminator params
    :return train_loader: the dataloader we will use for training
    :return cuda: whether we are running on cuda
    :return configs: updated with correct model and image path
    """
    # Per-experiment output directories (created if missing).
    IMAGE_PATH = '../experiments/' + configs['experiment'] + '/images'
    MODEL_PATH = '../experiments/' + configs['experiment'] + '/models'
    configs['IMAGE_PATH'] = IMAGE_PATH
    configs['MODEL_PATH'] = MODEL_PATH
    if not os.path.exists(IMAGE_PATH):
        print('mkdir ', IMAGE_PATH)
        os.mkdir(IMAGE_PATH)
    if not os.path.exists(MODEL_PATH):
        print('mkdir ', MODEL_PATH)
        os.mkdir(MODEL_PATH)
    Zdim = configs['Zdim']
    BS = configs['batch_size']
    Gx = ali.GeneratorX(zd=Zdim, ch=3)
    Gz = ali.GeneratorZ(zd=Zdim, ch=3)
    Disc = ali.Discriminator(ch=3, zd=Zdim)
    # Optionally resume from a checkpoint; continue_from == -1 means
    # "latest epoch on disk".
    # NOTE(review): the discriminator checkpoint is loaded from 'Dict-*.pth'
    # here, while the HALI code uses 'Disc-*.pth' — verify against
    # save_models_ali's naming.
    if 'continue_from' in configs:
        if configs['continue_from'] == -1:
            start_epoch = get_max_epoch(configs) - 1
            Gx.load_state_dict(torch.load(
                MODEL_PATH + '/Gx-' + str(start_epoch) + '.pth'))
            Gz.load_state_dict(torch.load(
                MODEL_PATH + '/Gz-' + str(start_epoch) + '.pth'))
            Disc.load_state_dict(torch.load(
                MODEL_PATH + '/Dict-' + str(start_epoch) + '.pth'))
        else:
            Gx.load_state_dict(torch.load(
                MODEL_PATH + '/Gx-' + str(configs['continue_from']) + '.pth'))
            Gz.load_state_dict(torch.load(
                MODEL_PATH + '/Gz-' + str(configs['continue_from']) + '.pth'))
            Disc.load_state_dict(torch.load(
                MODEL_PATH + '/Dict-' + str(configs['continue_from']) + '.pth'))
    cuda = True if torch.cuda.is_available() else False
    if cuda:
        Gx.cuda()
        Gz.cuda()
        Disc.cuda()
    # Generator optimizer covers both Gx and Gz parameters.
    gen = chain(Gx.parameters(), Gz.parameters())
    # Optimizer hyperparameter defaults, overridable via configs.
    decay = 0
    beta1 = 0.5
    beta2 = 0.999
    amsgrad = False
    if 'decay' in configs:
        decay = configs['decay']
    if 'amsgrad' in configs:
        amsgrad = configs['amsgrad']
    if ('beta1' in configs) and ('beta2' in configs):
        beta1 = configs['beta1']
        beta2 = configs['beta2']
    # Optimizer choice: plain Adam, Optimistic Adam, or Optimistic Mirror
    # Descent Adam (the latter two from utils.custom_optimizers).
    if configs['optim'] == 'Adam':
        optim_d = torch.optim.Adam(Disc.parameters(), lr=configs[
                                   'lr_d'], betas=(beta1, beta2), weight_decay=decay)
        optim_g = torch.optim.Adam(gen, configs['lr_g'], betas=(
            beta1, beta2), weight_decay=decay)
    elif configs['optim'] == 'OAdam':
        optim_d = OAdam(Disc.parameters(), lr=configs['lr_d'], betas=(
            beta1, beta2), weight_decay=decay, amsgrad=amsgrad)
        optim_g = OAdam(gen, configs['lr_g'], betas=(
            beta1, beta2), weight_decay=decay, amsgrad=amsgrad)
    elif configs['optim'] == 'OMD':
        optim_d = OptMirrorAdam(Disc.parameters(), lr=configs['lr_d'], betas=(
            beta1, beta2), weight_decay=decay, amsgrad=amsgrad)
        optim_g = OptMirrorAdam(gen, configs['lr_g'], betas=(
            beta1, beta2), weight_decay=decay, amsgrad=amsgrad)
    train_loader = DataLoader(data, batch_size=BS, shuffle=True)
    return Gx, Gz, Disc, optim_g, optim_d, train_loader, cuda, configs
def train_epoch_ali(Gz, Gx, Disc, optim_d, optim_g, loader, epoch, cuda, configs):
    """
    Runs one ALI training epoch: the discriminator is updated on every batch,
    the generator only every `n_critic` batches; returns per-epoch averages.

    :param Gz: Encoder
    :param Gx: Decoder/Generator
    :param Disc: Discriminator
    :param optim_d: Optimizer for Discriminator params
    :param optim_g: Optimizer for Generator params
    :param loader: data loader
    :param epoch: number of current epoch
    :param cuda: whether we are running on cuda
    :param configs: a dictionary with all params necessary for training
    :return g_loss: Generator loss
    :return d_loss: Decoder loss
    :return d_true: Prediction for x
    :return d_false: Prediction for x_hat
    """
    ncritic = configs['n_critic']
    # Running sums and counters: cnt counts all batches (D steps),
    # gcnt counts generator steps only.
    cnt = 0
    gcnt = 0
    df = 0
    dt = 0
    dl = 0
    gl = 0
    for i, (imgs) in enumerate(loader):
        loss_d = runloop_d_ali(imgs, Gx, Gz, Disc, optim_d, cuda, configs)
        # Generator step every ncritic-th batch (including the first, i == 0).
        if i % ncritic == 0:
            loss_g, d_true, d_fake = runloop_g_ali(
                imgs, Gx, Gz, Disc, optim_g, cuda, configs)
            gl = gl + loss_g
            df = df + d_fake.item()
            dt = dt + d_true.data.mean().item()
            gcnt = gcnt + 1
        cnt = cnt + 1
        dl = dl + loss_d
    g_loss = gl / gcnt
    d_loss = dl / cnt
    d_true = dt / gcnt
    d_false = df / gcnt
    return g_loss, d_loss, d_true, d_false
def training_loop_ali(Gz, Gx, Disc, optim_d, optim_g, train_loader, configs, experiment, cuda):
    """
    Full ALI training loop: optional cyclic-cosine LR scheduling, per-epoch
    training, checkpointing of models/optimizers/schedulers, and logging of
    metrics to comet_ml.

    :param Gz: Encoder
    :param Gx: Decoder/Generator
    :param Disc: Discriminator
    :param optim_d: Optimizer for Discriminator params
    :param optim_g: Optimizer for Generator params
    :param train_loader: data loader
    :param configs: a dictionary with all params necessary for training
    :param experiment: comet_ml experiment variable to store results
    :param cuda: whether we are running on cuda
    """
    # Determine the epoch window; continue_from == -1 resumes from the
    # latest checkpoint on disk.
    if 'continue_from' in configs:
        if configs['continue_from'] == -1:
            start_epoch = get_max_epoch(configs) - 1
        else:
            start_epoch = configs['continue_from']
        end_epoch = start_epoch + configs['n_epochs']
    else:
        start_epoch = 0
        end_epoch = configs['n_epochs']

    # Optional cyclic cosine-annealing schedulers. scheduler_mode == 'both'
    # also schedules the generator optimizer.
    # BUGFIX: previously scheduler1.step() was called whenever 'lr_scheduler'
    # was present, even if it wasn't 'cyclic' and no scheduler had been built
    # (NameError); the use_scheduler flag guards every scheduler access.
    use_scheduler = configs.get('lr_scheduler') == 'cyclic'
    scheduler1 = None
    scheduler2 = None
    if use_scheduler:
        milestones = [int(np.floor((2**x) * 1.2)) for x in range(7)]
        scheduler1 = CyclicCosAnnealingLR(
            optim_d, milestones=milestones, eta_min=1e-5)
        if configs['scheduler_mode'] == 'both':
            scheduler2 = CyclicCosAnnealingLR(
                optim_g, milestones=milestones, eta_min=1e-5)

    # Restore optimizer/scheduler state when auto-resuming.
    if 'continue_from' in configs and configs['continue_from'] == -1:
        # BUGFIX: torch.load takes a single path argument; the original passed
        # the directory and the file name as two positional args, making the
        # file name the map_location.
        optim_d.load_state_dict(torch.load(
            os.path.join(configs['MODEL_PATH'], 'optim_d.pth')))
        optim_g.load_state_dict(torch.load(
            os.path.join(configs['MODEL_PATH'], 'optim_g.pth')))
        if use_scheduler:
            scheduler1.load_state_dict(torch.load(
                os.path.join(configs['MODEL_PATH'], 'scheduler1.pth')))
            if scheduler2 is not None:
                # BUGFIX: scheduler2's saved state was previously loaded into
                # scheduler1, clobbering it.
                scheduler2.load_state_dict(torch.load(
                    os.path.join(configs['MODEL_PATH'], 'scheduler2.pth')))

    for epoch in range(start_epoch, end_epoch):
        if use_scheduler:
            scheduler1.step()
            if scheduler2 is not None:
                scheduler2.step()

        g_loss, d_loss, d_true, d_false = train_epoch_ali(
            Gz, Gx, Disc, optim_d, optim_g, train_loader, epoch, cuda, configs
        )

        # Checkpoint schedulers, optimizers, and models after every epoch.
        if use_scheduler:
            torch.save(scheduler1.state_dict(), os.path.join(
                configs['MODEL_PATH'], 'scheduler1.pth'))
            if scheduler2 is not None:
                # BUGFIX: scheduler1's state was previously written to
                # scheduler2.pth, so scheduler2 was never actually saved.
                torch.save(scheduler2.state_dict(), os.path.join(
                    configs['MODEL_PATH'], 'scheduler2.pth'))
        torch.save(optim_d.state_dict(), os.path.join(
            configs['MODEL_PATH'], 'optim_d.pth'))
        torch.save(optim_g.state_dict(), os.path.join(
            configs['MODEL_PATH'], 'optim_g.pth'))
        save_models_ali(Gz, Gx, Disc, configs['MODEL_PATH'], epoch)

        sys.stdout.write("\r[%5d / %5d]: G: %.4f D: %.4f D(x,Gz(x)): %.4f D(Gx(z),z): %.4f" %
                         (epoch, configs['n_epochs'], g_loss, d_loss, d_true, d_false))
        experiment.log_metric('g_loss', g_loss)
        experiment.log_metric('d_loss', d_loss)
        experiment.log_metric('d_true', d_true)
        experiment.log_metric('d_fake', d_false)
    print()
def runloop_g_ali(imgs, Gx, Gz, Disc, optim_g, cuda, configs):
    """
    One generator/encoder update step for ALI using the saturating-free
    softplus formulation: minimize softplus(D(x, Gz(x))) + softplus(-D(Gx(z), z)).

    :param imgs: data for generator loop
    :param Gz: Encoder
    :param Gx: Decoder/Generator
    :param Disc: Discriminator
    :param optim_g: Optimizer for Generator params
    :param cuda: whether we are running on cuda
    :param configs: a dictionary with all params necessary for training
    :return g_loss: Generator loss
    :return d_true: Prediction for x
    :return d_false: Prediction for x_hat
    """
    softplus = nn.Softplus()
    Zdim = configs['Zdim']
    batch_size = imgs.size(0)
    if cuda:
        imgs = imgs.cuda()
    imgs = Variable(imgs)
    batch_size = imgs.size(0)
    # zv: sample from the prior N(0, I); z: stochastic encoding of the real
    # images (the name z is reused — first for the raw prior tensor, then
    # overwritten with the reparameterized encoder output).
    z = torch.FloatTensor(batch_size, Zdim, 1, 1).normal_(0, 1)
    zv = Variable(z).cuda()
    encoded1 = Gz(imgs)
    z = reparameterize(encoded1)
    imgs_fake = Gx(zv)

    def g_closure():
        # Closure re-evaluated by the (possibly optimistic) optimizer.
        Gx.zero_grad()
        Gz.zero_grad()
        d_true = Disc(imgs, z)
        d_fake = Disc(imgs_fake, zv)
        loss_g = torch.mean(softplus(d_true) + softplus(-d_fake))
        loss_g.backward(retain_graph=True)
        return loss_g.data.item(), d_fake.data.mean(), d_true.data.mean()
    # NOTE(review): relies on the custom optimizers' step() returning the
    # closure's return value — confirm in utils.custom_optimizers.
    loss_g, d_fake, d_true = optim_g.step(g_closure)
    return loss_g, d_true, d_fake
def runloop_d_ali(imgs, Gx, Gz, Disc, optim_d, cuda, configs):
    """
    One discriminator update step for ALI: minimize
    softplus(-D(x, Gz(x))) + softplus(D(Gx(z), z)), optionally with a
    gradient penalty (configs['gp'] / configs['gp_lambda']).

    :param imgs: data for generator loop
    :param Gz: Encoder
    :param Gx: Decoder/Generator
    :param Disc: Discriminator
    :param optim_d: Optimizer for Discriminator params
    :param cuda: whether we are running on cuda
    :param configs: a dictionary with all params necessary for training
    :return d_loss: Discriminator loss
    """
    softplus = nn.Softplus()
    Zdim = configs['Zdim']
    batch_size = imgs.size(0)
    if cuda:
        imgs = imgs.cuda()
    imgs = Variable(imgs)
    batch_size = imgs.size(0)
    # zv: prior sample; z: reparameterized encoding of the real batch.
    z = torch.FloatTensor(batch_size, Zdim, 1, 1).normal_(0, 1)
    zv = Variable(z).cuda()
    encoded1 = Gz(imgs)
    z = reparameterize(encoded1)
    imgs_fake = Gx(zv)

    def d_closure():
        # Closure re-evaluated by the (possibly optimistic) optimizer.
        Disc.zero_grad()
        batch_size = imgs.size(0)
        d_true = Disc(imgs, z)
        d_fake = Disc(imgs_fake, zv)
        if configs['gp']:
            gp = calc_gradient_penalty_ali(
                Disc, imgs, imgs_fake, zv, z, configs['gp_lambda'])
            loss_d = torch.mean(softplus(-d_true) + softplus(d_fake)) + gp
        else:
            loss_d = torch.mean(softplus(-d_true) + softplus(d_fake))
        loss_d.backward(retain_graph=True)
        return loss_d.data.item()
    loss_d = optim_d.step(d_closure)
    return loss_d
def initialize_hali(configs, data):
    """
    initialize_hali is a function for initializing all models and loaders needed in training HALI
    :param configs: a dictionary with all params necessary for training
    :param data: Data used to create the loader.
    :return Gx1: Level 1 of Decoder/Generator
    :return Gx2: Level 2 of Decoder/Generator
    :return Gz1: Level 1 of Encoder
    :return Gz2: Level 2 of Encoder
    :return Disc: Discriminator
    :return optim_g: Optimizer for Generator params
    :return optim_d: Optimizer for Discriminator params
    :return train_loader: the dataloader we will use for training
    :return cuda: whether we are running on cuda
    :return configs: updated with correct model and image path
    """
    # Per-experiment output directories (created if missing).
    IMAGE_PATH = '../experiments/' + configs['experiment'] + '/images'
    MODEL_PATH = '../experiments/' + configs['experiment'] + '/models'
    configs['IMAGE_PATH'] = IMAGE_PATH
    configs['MODEL_PATH'] = MODEL_PATH
    if not os.path.exists(IMAGE_PATH):
        print('mkdir ', IMAGE_PATH)
        os.mkdir(IMAGE_PATH)
    if not os.path.exists(MODEL_PATH):
        print('mkdir ', MODEL_PATH)
        os.mkdir(MODEL_PATH)
    Zdim = configs['Zdim']
    zd1 = configs['z1dim']
    BS = configs['batch_size']
    Gz1 = hali.GeneratorZ1(zd=Zdim, ch=3, zd1=zd1)
    Gz2 = hali.GeneratorZ2(zd=Zdim, zd1=zd1)
    Disc = hali.Discriminator(ch=3, zd=Zdim, zd1=zd1)
    # Decoder flavor: interpolation-based upsampling vs. transposed-conv
    # ("convolve") vs. the default GeneratorX1/X2 pair.
    if 'genx' in configs:
        if configs['genx'] == 'interpolate':
            print('interpolate')
            Gx1 = hali.GeneratorX1_interpolate(zd=Zdim, ch=3, zd1=zd1)
            Gx2 = hali.GeneratorX2_interpolate(zd=Zdim, ch=3, zd1=zd1)
        else:
            print('convolve')
            Gx1 = hali.GeneratorX1_convolve(zd=Zdim, ch=3, zd1=zd1)
            Gx2 = hali.GeneratorX2_convolve(zd=Zdim, ch=3, zd1=zd1)
    else:
        Gx1 = hali.GeneratorX1(zd=Zdim, ch=3, zd1=zd1)
        Gx2 = hali.GeneratorX2(zd=Zdim, ch=3, zd1=zd1)
    # Optionally resume from a checkpoint; continue_from == -1 means
    # "latest epoch on disk".
    if 'continue_from' in configs:
        if configs['continue_from'] == -1:
            start_epoch = get_max_epoch(configs) - 1
            Gx1.load_state_dict(torch.load(
                MODEL_PATH + '/Gx1-' + str(start_epoch) + '.pth'))
            Gx2.load_state_dict(torch.load(
                MODEL_PATH + '/Gx2-' + str(start_epoch) + '.pth'))
            Gz1.load_state_dict(torch.load(
                MODEL_PATH + '/Gz1-' + str(start_epoch) + '.pth'))
            Gz2.load_state_dict(torch.load(
                MODEL_PATH + '/Gz2-' + str(start_epoch) + '.pth'))
            Disc.load_state_dict(torch.load(
                MODEL_PATH + '/Disc-' + str(start_epoch) + '.pth'))
        else:
            Gx1.load_state_dict(torch.load(
                MODEL_PATH + '/Gx1-' + str(configs['continue_from']) + '.pth'))
            Gx2.load_state_dict(torch.load(
                MODEL_PATH + '/Gx2-' + str(configs['continue_from']) + '.pth'))
            Gz1.load_state_dict(torch.load(
                MODEL_PATH + '/Gz1-' + str(configs['continue_from']) + '.pth'))
            Gz2.load_state_dict(torch.load(
                MODEL_PATH + '/Gz2-' + str(configs['continue_from']) + '.pth'))
            Disc.load_state_dict(torch.load(
                MODEL_PATH + '/Disc-' + str(configs['continue_from']) + '.pth'))
    cuda = True if torch.cuda.is_available() else False
    if cuda:
        Gx1.cuda()
        Gz1.cuda()
        Gx2.cuda()
        Gz2.cuda()
        Disc.cuda()
    # One optimizer for all generator/encoder parameters.
    gen = chain(Gx1.parameters(), Gx2.parameters(),
                Gz1.parameters(), Gz2.parameters())
    # Optimizer hyperparameter defaults, overridable via configs.
    # NOTE(review): beta2 defaults to 0.9999 here but 0.999 in initialize_ali —
    # confirm whether the asymmetry is intentional.
    decay = 0
    beta1 = 0.5
    beta2 = 0.9999
    amsgrad = False
    if 'decay' in configs:
        decay = configs['decay']
    if 'amsgrad' in configs:
        amsgrad = configs['amsgrad']
    if ('beta1' in configs) and ('beta2' in configs):
        beta1 = configs['beta1']
        beta2 = configs['beta2']
    # Optimizer choice: plain Adam, Optimistic Adam, or Optimistic Mirror
    # Descent Adam (the latter two from utils.custom_optimizers).
    if configs['optim'] == 'Adam':
        optim_d = torch.optim.Adam(Disc.parameters(), lr=configs[
                                   'lr_d'], betas=(beta1, beta2), weight_decay=decay)
        optim_g = torch.optim.Adam(gen, configs['lr_g'], betas=(
            beta1, beta2), weight_decay=decay)
    elif configs['optim'] == 'OAdam':
        optim_d = OAdam(Disc.parameters(), lr=configs['lr_d'], betas=(
            beta1, beta2), weight_decay=decay, amsgrad=amsgrad)
        optim_g = OAdam(gen, configs['lr_g'], betas=(
            beta1, beta2), weight_decay=decay, amsgrad=amsgrad)
    elif configs['optim'] == 'OMD':
        optim_d = OptMirrorAdam(Disc.parameters(), lr=configs['lr_d'], betas=(
            beta1, beta2), weight_decay=decay, amsgrad=amsgrad)
        optim_g = OptMirrorAdam(gen, configs['lr_g'], betas=(
            beta1, beta2), weight_decay=decay, amsgrad=amsgrad)
    train_loader = DataLoader(data, batch_size=BS, shuffle=True)
    return Gx1, Gx2, Gz1, Gz2, Disc, optim_g, optim_d, train_loader, cuda, configs
def train_epoch_hali(Gz1, Gz2, Gx1, Gx2, Disc, optim_d, optim_g, loader, epoch, cuda, configs):
    """
    Run one full epoch of alternating HALI training: the discriminator is
    updated on every batch, the generator/encoder stack only every
    ``configs['n_critic']`` batches (standard critic/generator schedule).

    :param Gz1: Level 1 of Encoder
    :param Gz2: Level 2 of Encoder
    :param Gx1: Level 1 of Decoder/Generator
    :param Gx2: Level 2 of Decoder/Generator
    :param Disc: Discriminator
    :param optim_d: Optimizer for Discriminator params
    :param optim_g: Optimizer for Generator params
    :param loader: data loader
    :param epoch: number of current epoch (not read here; kept for API symmetry)
    :param cuda: whether we are running on cuda
    :param configs: a dictionary with all params necessary for training
    :return g_loss: Generator loss averaged over generator steps
    :return d_loss: Discriminator loss averaged over all batches
    :return d_true: mean prediction for real pairs, averaged over generator steps
    :return d_false: mean prediction for fake pairs, averaged over generator steps
    """
    ncritic = configs['n_critic']
    # cnt counts discriminator steps, gcnt counts generator steps; the other
    # accumulators hold running sums of losses / predictions.
    cnt = 0
    gcnt = 0
    df = 0
    dt = 0
    dl = 0
    gl = 0
    for i, (imgs) in enumerate(loader):
        # Discriminator step on every batch.
        loss_d = runloop_d_hali(imgs, Gx1, Gx2, Gz1, Gz2,
                                Disc, optim_d, cuda, configs)
        if i % ncritic == 0:
            # Generator step every `ncritic` batches; optionally with
            # unrolled discriminator look-ahead for extra stability.
            if 'unrolled_steps' in configs and configs['unrolled_steps'] > 1:
                loss_g, d_true, d_fake, Disc = runloop_g_hali_unrolled(
                    imgs, Gx1, Gx2, Gz1, Gz2, Disc, optim_g, optim_d, cuda, configs, loader)
            else:
                loss_g, d_true, d_fake = runloop_g_hali(
                    imgs, Gx1, Gx2, Gz1, Gz2, Disc, optim_g, cuda, configs)
            gl = gl + loss_g
            df = df + d_fake.item()
            dt = dt + d_true.data.mean().item()
            gcnt = gcnt + 1
        cnt = cnt + 1
        dl = dl + loss_d
    # NOTE(review): an empty loader leaves cnt/gcnt at 0 and these divisions
    # raise ZeroDivisionError; callers appear to guarantee non-empty data.
    g_loss = gl / gcnt
    d_loss = dl / cnt
    d_true = dt / gcnt
    d_false = df / gcnt
    return g_loss, d_loss, d_true, d_false
def training_loop_hali(Gz1, Gz2, Gx1, Gx2, Disc, optim_d, optim_g, train_loader, configs, experiment, cuda):
    """
    Full HALI training loop: runs epochs, steps the optional LR schedulers,
    checkpoints models/optimizers/schedulers after every epoch and logs
    metrics to comet_ml.

    :param Gz1: Level 1 of Encoder
    :param Gz2: Level 2 of Encoder
    :param Gx1: Level 1 of Decoder/Generator
    :param Gx2: Level 2 of Decoder/Generator
    :param Disc: Discriminator
    :param optim_d: Optimizer for Discriminator params
    :param optim_g: Optimizer for Generator params
    :param train_loader: data loader
    :param configs: a dictionary with all params necessary for training
    :param experiment: comet_ml experiment variable to store results
    :param cuda: whether we are running on cuda
    """
    model_path = configs['MODEL_PATH']
    # Work out the epoch range; continue_from == -1 means "resume from the
    # latest checkpoint found on disk".
    if 'continue_from' in configs:
        if configs['continue_from'] == -1:
            start_epoch = get_max_epoch(configs) - 1
        else:
            start_epoch = configs['continue_from']
        end_epoch = start_epoch + configs['n_epochs']
    else:
        start_epoch = 0
        end_epoch = configs['n_epochs']
    scheduler1 = None
    scheduler2 = None
    # BUG FIX: schedulers are only created for the 'cyclic' choice; previously
    # any other 'lr_scheduler' value crashed with NameError at scheduler1.step().
    if configs.get('lr_scheduler') == 'cyclic':
        # Cyclic cosine annealing with geometrically spaced restart milestones.
        milestones = [int(np.floor((2**x) * 1.2)) for x in range(7)]
        scheduler1 = CyclicCosAnnealingLR(
            optim_d, milestones=milestones, eta_min=1e-5)
        if configs.get('scheduler_mode') == 'both':
            scheduler2 = CyclicCosAnnealingLR(
                optim_g, milestones=milestones, eta_min=1e-5)
        if configs.get('continue_from') == -1:
            # BUG FIX: the directory and file name must be joined; previously
            # the file name was passed to torch.load() as its map_location
            # argument, so the load could never succeed.
            optim_d.load_state_dict(torch.load(
                os.path.join(model_path, 'optim_d.pth')))
            optim_g.load_state_dict(torch.load(
                os.path.join(model_path, 'optim_g.pth')))
            scheduler1.load_state_dict(torch.load(
                os.path.join(model_path, 'scheduler1.pth')))
            if scheduler2 is not None:
                # BUG FIX: scheduler2.pth was previously loaded into scheduler1.
                scheduler2.load_state_dict(torch.load(
                    os.path.join(model_path, 'scheduler2.pth')))
    for epoch in range(start_epoch, end_epoch):
        if scheduler1 is not None:
            scheduler1.step()
        if scheduler2 is not None:
            scheduler2.step()
        g_loss, d_loss, d_true, d_false = train_epoch_hali(
            Gz1, Gz2, Gx1, Gx2, Disc, optim_d, optim_g, train_loader, epoch, cuda, configs
        )
        # Checkpoint schedulers, optimizers and all sub-models every epoch so
        # a run can resume with continue_from == -1.
        if scheduler1 is not None:
            torch.save(scheduler1.state_dict(), os.path.join(
                model_path, 'scheduler1.pth'))
        if scheduler2 is not None:
            # BUG FIX: scheduler1's state was previously written to scheduler2.pth.
            torch.save(scheduler2.state_dict(), os.path.join(
                model_path, 'scheduler2.pth'))
        torch.save(optim_d.state_dict(), os.path.join(
            model_path, 'optim_d.pth'))
        torch.save(optim_g.state_dict(), os.path.join(
            model_path, 'optim_g.pth'))
        save_models_hali(Gz1, Gz2, Gx1, Gx2, Disc,
                         model_path, epoch)
        sys.stdout.write("\r[%5d / %5d]: G: %.4f D: %.4f D(x,Gz(x)): %.4f D(Gx(z),z): %.4f" %
                         (epoch, configs['n_epochs'], g_loss, d_loss, d_true, d_false))
        experiment.log_metric('g_loss', g_loss)
        experiment.log_metric('d_loss', d_loss)
        experiment.log_metric('d_true', d_true)
        experiment.log_metric('d_fake', d_false)
    print()
def runloop_g_hali(imgs, Gx1, Gx2, Gz1, Gz2, Disc, optim_g, cuda, configs):
    """
    Single generator/encoder update step for HALI.

    :param imgs: batch of real images
    :param Gx1: Level 1 of Decoder/Generator
    :param Gx2: Level 2 of Decoder/Generator
    :param Gz1: Level 1 of Encoder
    :param Gz2: Level 2 of Encoder
    :param Disc: Discriminator
    :param optim_g: Optimizer for Generator params
    :param cuda: whether we are running on cuda
    :param configs: a dictionary with all params necessary for training
    :return loss_g: Generator loss (Python float)
    :return d_true: mean prediction for the real pair (x, Gz(x))
    :return d_fake: mean prediction for the generated pair (Gx(z), z)
    """
    softplus = nn.Softplus()
    Zdim = configs['Zdim']
    if cuda:
        imgs = imgs.cuda()
    imgs = Variable(imgs)
    batch_size = imgs.size(0)
    # Prior sample z ~ N(0, I) for the generative path.
    z = torch.FloatTensor(batch_size, Zdim, 1, 1).normal_(0, 1)
    zv = Variable(z)
    if cuda:
        # BUG FIX: .cuda() was previously called unconditionally, which
        # crashed on CPU-only machines.
        zv = zv.cuda()
    # Inference path: x -> z1 -> z2.
    encoded1 = Gz1(imgs)
    z1 = reparameterize(encoded1)
    encoded2 = Gz2(z1)
    z2 = reparameterize(encoded2)
    # Generative path: z -> z1_hat -> x_hat.
    zv_enc = Gx1(zv)
    zv1 = reparameterize(zv_enc)
    imgs_fake = Gx2(zv1)

    def g_closure():
        # BUG FIX: Gx1 was zeroed twice and Gx2 never zeroed, so stale Gx2
        # gradients accumulated across generator steps.
        Gx1.zero_grad()
        Gx2.zero_grad()
        Gz1.zero_grad()
        Gz2.zero_grad()
        d_true = Disc(imgs, z1, z2)
        d_fake = Disc(imgs_fake, zv1, zv)
        # Non-saturating loss: push D(real pair) down and D(fake pair) up.
        loss_g = torch.mean(softplus(d_true) + softplus(-d_fake))
        loss_g.backward(retain_graph=True)
        return loss_g.data.item(), d_fake.data.mean(), d_true.data.mean()

    loss_g, d_fake, d_true = optim_g.step(g_closure)
    return loss_g, d_true, d_fake
def runloop_d_hali(imgs, Gx1, Gx2, Gz1, Gz2, Disc, optim_d, cuda, configs):
    """
    Single discriminator update step for HALI.

    :param imgs: batch of real images
    :param Gx1: Level 1 of Decoder/Generator
    :param Gx2: Level 2 of Decoder/Generator
    :param Gz1: Level 1 of Encoder
    :param Gz2: Level 2 of Encoder
    :param Disc: Discriminator
    :param optim_d: Optimizer for Discriminator params
    :param cuda: whether we are running on cuda
    :param configs: a dictionary with all params necessary for training
    :return loss_d: Discriminator loss (Python float)
    """
    softplus = nn.Softplus()
    Zdim = configs['Zdim']
    if cuda:
        imgs = imgs.cuda()
    imgs = Variable(imgs)
    batch_size = imgs.size(0)
    # Prior sample z ~ N(0, I) for the generative path.
    z = torch.FloatTensor(batch_size, Zdim, 1, 1).normal_(0, 1)
    if cuda:
        # BUG FIX: .cuda() was previously called unconditionally, which
        # crashed on CPU-only machines.
        z = z.cuda()
    zv = Variable(z)
    # Inference path: x -> z1 -> z2.
    encoded1 = Gz1(imgs)
    z1 = reparameterize(encoded1)
    encoded2 = Gz2(z1)
    z2 = reparameterize(encoded2)
    # Generative path: z -> z1_hat -> x_hat.
    zv_enc = Gx1(zv)
    zv1 = reparameterize(zv_enc)
    imgs_fake = Gx2(zv1)

    def d_closure():
        Disc.zero_grad()
        d_true = Disc(imgs, z1, z2)
        d_fake = Disc(imgs_fake, zv1, zv)
        if configs['gp']:
            # Optional gradient penalty on interpolates (WGAN-GP style).
            gp = calc_gradient_penalty_hali(
                Disc, imgs, imgs_fake, zv1, z1, zv, z2, configs['gp_lambda'])
            loss_d = torch.mean(softplus(-d_true) + softplus(d_fake)) + gp
        else:
            loss_d = torch.mean(softplus(-d_true) + softplus(d_fake))
        loss_d.backward(retain_graph=True)
        return loss_d.data.item()

    loss_d = optim_d.step(d_closure)
    return loss_d
def runloop_g_hali_unrolled(imgs, Gx1, Gx2, Gz1, Gz2, Disc, optim_g, optim_d, cuda, configs, loader):
    """
    function runloop_g_hali_unrolled is a function that manages unrolled alternative training.
    Training in this way is significantly slower, but more stable.

    The discriminator is trained for ``configs['unrolled_steps']`` extra steps
    on fresh batches, the generator is updated against that look-ahead
    discriminator, and the discriminator weights are then rolled back.

    :param imgs: data for generator loop
    :param Gx1: Level 1 of Decoder/Generator
    :param Gx2: Level 2 of Decoder/Generator
    :param Gz1: Level 1 of Encoder
    :param Gz2: Level 2 of Encoder
    :param Disc: Discriminator
    :param optim_g: Optimizer for Generator params
    :param optim_d: Optimizer for Discriminator params (used for the unrolled steps)
    :param cuda: whether we are running on cuda
    :param configs: a dictionary with all params necessary for training
    :param loader: data loader used to draw fresh batches for the unrolled steps
    :return loss_g: Generator loss
    :return d_true: Prediction for x
    :return d_fake: Prediction for x_hat
    :return Disc: the discriminator with its pre-unroll weights restored
    """
    softplus = nn.Softplus()  # NOTE(review): unused here; kept for parity with the other runloops
    Zdim = configs['Zdim']  # NOTE(review): unused here
    # Snapshot of the discriminator to restore after the look-ahead steps.
    backup_disc = copy.deepcopy(Disc.state_dict())
    for i in range(configs['unrolled_steps']):
        # NOTE(review): next(iter(loader)) rebuilds the iterator each step, so
        # with shuffle=True this draws an arbitrary batch every time.
        im = next(iter(loader))
        loss_d = runloop_d_hali(im, Gx1, Gx2, Gz1, Gz2,
                                Disc, optim_d, cuda, configs)
    # Generator step against the unrolled (look-ahead) discriminator.
    loss_g, d_true, d_fake = runloop_g_hali(
        imgs, Gx1, Gx2, Gz1, Gz2, Disc, optim_g, cuda, configs)
    # Roll the discriminator back to its state before unrolling.
    Disc.load_state_dict(backup_disc)
    del backup_disc
    return loss_g, d_true, d_fake, Disc
def get_max_epoch(configs):
    """
    get_max_epoch returns the highest epoch trained so far, inferred from the
    checkpoint file names in the saved-models directory.
    :param configs: a dictionary with all params necessary for training
    :returns highest epoch in saved models directory
    """
    model_dir = configs['MODEL_PATH']
    epochs = []
    for fname in listdir(model_dir):
        if not isfile(join(model_dir, fname)):
            continue
        # Skip optimizer / scheduler checkpoints; only model files carry epochs.
        if 'scheduler' in fname or 'optim' in fname:
            continue
        digits = re.findall(r'\d+', fname)
        # The epoch number is the last digit group in the file name.
        epochs.append(int(digits[-1]))
    return max(epochs)
def get_experiments():
    """
    get_experiments returns the names of all experiments in the 'experiments'
    folder, used during evaluation (especially in a grid-search scenario).
    :returns list of experiment directory names
    """
    # Every sub-directory of ./experiments is one experiment.
    return [entry for entry in listdir('experiments')
            if isdir(join('experiments', entry))]
def save_res_figure(configs, accuracies, f1_list):
    """
    save_res_figure would save a plot of evaluation results to the image
    directory. The plotting body is disabled because importing matplotlib
    triggers a "stack smashing detected" crash on the cluster; the code is
    kept below for local/convenience use only.
    :param configs: a dictionary with all params necessary for training
    :param accuracies: a list of accuracy scores for all epochs
    :param f1_list: a list of f1 scores for all epochs
    """
    print()
    # Disabled plotting code (local use only):
    # import matplotlib
    # import matplotlib.pyplot as plt
    # fig = plt.figure()
    # ax = plt.axes()
    # ax.plot(f1_list, label='F1 Score')
    # ax.plot(accuracies, label='Accuracy')
    # ax.legend(loc='best')
    # plt.title(configs['experiment'])
    # formatter = matplotlib.ticker.StrMethodFormatter("{x:.0f}")
    # plt.gca().xaxis.set_major_formatter(formatter)
    # plt.savefig(configs['IMAGE_PATH'] + '/clustering_results.png')
def get_results_ali(configs, experiment, train, labeled, valid_data):
    """
    get_results_ali evaluates classification performance: for every training
    epoch it reloads the checkpointed encoder, embeds the labeled and
    validation sets, fits a linear SVM and scores it.
    NOTE(review): unlike get_results_hali, this fits a single seeded SVM per
    epoch (not the best of 5) — confirm that asymmetry is intended.
    :param configs: a dictionary with all params necessary for training
    :param experiment: comet_ml experiment variable
    :param train: unlabeled training data
    :param labeled: labeled data
    :param valid_data: validation data
    :returns best_f1 : f1 score at the best-accuracy epoch
    :returns best_accuracy: highest accuracy
    :returns best_model : epoch number of the best model
    """
    # Initial call builds the models / loader; per-epoch calls below reload
    # the checkpoint for that epoch via configs['continue_from'].
    Gx, Gz, Disc, optim_g, optim_d, train_loader, cuda, configs = initialize_ali(
        configs, train)
    max_ep = get_max_epoch(configs)
    best_accuracy = 0
    best_model = 0
    best_f1 = 0
    accuracies = []
    f1_scores = []
    for i in range(1, max_ep + 1):
        configs['continue_from'] = i
        Gx, Gz, Disc, optim_g, optim_d, train_loader, cuda, configs = initialize_ali(
            configs, train)
        train_labeled_enc, train_labels = get_ali_embeddings(Gz, labeled)
        valid_enc, val_labels = get_ali_embeddings(Gz, valid_data)
        svm = SVMClustering(configs['seed'])
        svm.train(train_labeled_enc, train_labels)
        y_pred = svm.predict_cluster(valid_enc)
        y_true = val_labels
        accuracy, f1 = compute_metrics(y_true, y_pred)
        accuracies.append(accuracy)
        f1_scores.append(f1)
        # Track the epoch with the highest validation accuracy.
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_f1 = f1
            best_model = i
        experiment.log_metric('accuracy', accuracy)
        experiment.log_metric('f1_score', f1)
    save_res_figure(configs, accuracies, f1_scores)
    return(best_f1, best_accuracy, best_model)
def get_results_hali(configs, experiment, train, labeled, valid_data):
    """
    get_results_hali evaluates classification performance by selecting the
    best of 5 linear SVMs at each training epoch.
    :param configs: a dictionary with all params necessary for training
    :param experiment: comet_ml experiment variable
    :param train: unlabeled training data
    :param labeled: labeled data
    :param valid_data: validation data
    :returns best_f1 : f1 score at the best-accuracy epoch
    :returns best_accuracy: highest accuracy
    :returns best_model : epoch number of the best model
    """
    # Initial call builds the models / loader; per-epoch calls below reload
    # the checkpoint for that epoch via configs['continue_from'].
    Gx1, Gx2, Gz1, Gz2, Disc, optim_g, optim_d, train_loader, cuda, configs = initialize_hali(
        configs, train)
    max_ep = get_max_epoch(configs)
    best_accuracy = 0
    best_model = 0
    best_f1 = 0
    accuracies = []
    f1_list = []
    for epoch_idx in range(1, max_ep + 1):
        configs['continue_from'] = epoch_idx
        Gx1, Gx2, Gz1, Gz2, Disc, optim_g, optim_d, train_loader, cuda, configs = initialize_hali(
            configs, train)
        train_labeled_enc, train_labels = get_hali_embeddings(
            Gz1, Gz2, labeled, 'z1')
        valid_enc, val_labels = get_hali_embeddings(Gz1, Gz2, valid_data, 'z1')
        save_recon_hali(Gx1, Gx2, Gz1, Gz2, epoch_idx, True,
                        configs['IMAGE_PATH'], labeled)
        save_recon_hali(Gx1, Gx2, Gz1, Gz2, epoch_idx, False,
                        configs['IMAGE_PATH'], labeled)
        acc_tmp = []
        f1_tmp = []
        # BUG FIX: the inner loop previously reused `i` as its index,
        # clobbering the epoch counter — best_model ended up recording an
        # SVM-run index (0-4) instead of the epoch number.
        for _ in range(5):
            svm = SVMClustering(randint(0, 5000))
            svm.train(train_labeled_enc, train_labels)
            y_pred = svm.predict_cluster(valid_enc)
            y_true = val_labels
            accuracy, f1 = compute_metrics(y_true, y_pred)
            acc_tmp.append(accuracy)
            f1_tmp.append(f1)
        # BUG FIX: score the epoch by the best of its 5 SVM runs; previously
        # whichever run happened to execute last was compared/logged.
        best_run = max(range(5), key=lambda k: acc_tmp[k])
        accuracy = acc_tmp[best_run]
        f1 = f1_tmp[best_run]
        accuracies.append(max(acc_tmp))
        f1_list.append(max(f1_tmp))
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_f1 = f1
            best_model = epoch_idx
        experiment.log_metric('accuracy', accuracy)
        experiment.log_metric('f1_score', f1)
    save_res_figure(configs, accuracies, f1_list)
    return(best_f1, best_accuracy, best_model)
def get_hali_embeddings(Gz1, Gz2, data, mode):
    """
    get_hali_embeddings returns embeddings from the prescribed level of HALI's latent space.
    :param Gz1: level-1 encoder
    :param Gz2: level-2 encoder
    :param data: the dataset to embed
    :param mode: which level of the latent space to use ('z1', 'z2', or 'cat'
        for the concatenation of both; any other value falls back to z1)
    :returns all_embeddings : list of flattened per-sample embedding vectors
    :returns all_targets: labels corresponding to the embeddings (empty when
        the dataset is treated as unlabeled)
    """
    all_embeddings = []
    all_targets = []
    loader = DataLoader(data, batch_size=32, shuffle=True)
    cuda = True if torch.cuda.is_available() else False
    # Heuristic: datasets with <= 500 samples are assumed to be the labeled
    # split and to yield (image, target) pairs.
    # NOTE(review): this relies on dataset size only — confirm against callers.
    labeled = True
    if loader.dataset.data.shape[0] > 500:
        labeled = False
    for imgs in loader:
        if labeled:
            (imgs, target) = imgs
        # NOTE(review): the `data` parameter is rebound to the current batch
        # here; the original dataset object is no longer reachable below.
        if cuda:
            data = Variable(imgs).cuda()
        else:
            data = Variable(imgs)
        # Inference path through both encoder levels: x -> z1 -> z2.
        encoded = Gz1(data)
        z1 = reparameterize(encoded)
        v1 = [z1.view(data.size()[0], -1).cpu().data.numpy()]
        enc_2 = Gz2(z1)
        z2 = reparameterize(enc_2)
        v2 = [z2.view(data.size()[0], -1).cpu().data.numpy()]
        # Concatenate z1 and z2 feature vectors along the feature axis.
        vcat = np.concatenate([v1, v2], axis=2)
        if mode == 'cat':
            vec = vcat
        elif mode == 'z2':
            vec = v2
        else:
            vec = v1
        for l in range(np.shape(data)[0]):
            all_embeddings.append(vec[0][l, :])
            if labeled:
                all_targets.append(target[l].numpy()[0])
    return all_embeddings, all_targets
def get_ali_embeddings(Gz, data):
    """
    get_ali_embeddings returns embeddings from ALI's latent space.
    :param Gz: Encoder
    :param data: the dataset to embed
    :returns all_embeddings : list of flattened per-sample embedding vectors
    :returns all_targets: labels corresponding to the embeddings (empty when
        the dataset is treated as unlabeled)
    """
    all_embeddings = []
    all_targets = []
    loader = DataLoader(data, batch_size=32, shuffle=True)
    cuda = True if torch.cuda.is_available() else False
    # Heuristic: datasets with <= 500 samples are assumed to be the labeled
    # split and to yield (image, target) pairs.
    # NOTE(review): relies on dataset size only — confirm against callers.
    labeled = True
    if loader.dataset.data.shape[0] > 500:
        labeled = False
    for imgs in loader:
        if labeled:
            (imgs, target) = imgs
        # NOTE(review): the `data` parameter is rebound to the current batch here.
        if cuda:
            data = Variable(imgs).cuda()
        else:
            data = Variable(imgs)
        encoded = Gz(data)
        z = reparameterize(encoded)
        v1 = [z.view(data.size()[0], -1).cpu().data.numpy()]
        # NOTE(review): iterates over v1's batch axis, unlike the HALI variant
        # which iterates over the batch itself — equivalent when shapes agree.
        for l in range(np.shape(v1)[1]):
            all_embeddings.append(v1[0][l, :])
            if labeled:
                all_targets.append(target[l].numpy()[0])
    return all_embeddings, all_targets
def saveimages_hali(Gx1, Gx2, noise1, noise2, IMAGE_PATH, epoch=0):
    """
    saveimages_hali saves generated images from both levels of the
    decoder/generator to the image path.
    :param Gx1: Level 1 of Generator
    :param Gx2: Level 2 of Generator
    :param noise1: noise tensor of the size expected by Gx1 (z2-sized)
    :param noise2: noise tensor of the size expected by Gx2 (z1-sized)
    :param IMAGE_PATH: Image path
    :param epoch: current epoch, used in the output file names.
        BUG FIX: `epoch` was previously an undefined free name here, raising
        NameError unless a global of that name happened to exist.
    """
    # Samples straight from the level-2 generator.
    save_image(Gx2(noise2).cpu().data,
               os.path.join(IMAGE_PATH, '%d_1.png' % (epoch + 1)),
               nrow=9, padding=1,
               normalize=False)
    # Samples pushed through the full z2 -> z1 -> x generative chain.
    e1 = Gx2(reparameterize(Gx1(noise1)))
    save_image(e1.data,
               os.path.join(IMAGE_PATH, '%d_2.png' % (epoch + 1)),
               nrow=9, padding=1,
               normalize=False)
def save_recon_hali(Gx1, Gx2, Gz1, Gz2, epoch, from_z1, IMAGE_PATH, data):
    """
    save_recon_hali saves HALI reconstructions side by side with the originals.
    :param Gx1: Level 1 of Generator
    :param Gx2: Level 2 of Generator
    :param Gz1: Level 1 of Encoder
    :param Gz2: Level 2 of Encoder
    :param epoch: Number epoch (used in the output file name)
    :param from_z1: if True reconstruct from z1 only; otherwise go through z2
    :param IMAGE_PATH: Image path
    :param data: dataset to reconstruct from
    """
    dataloader = DataLoader(data, batch_size=32, shuffle=True)
    batch = next(iter(dataloader))
    # NOTE(review): assumes the loader yields (images, labels) pairs; [0]
    # selects the image tensor.
    batch = batch[0]
    # BUG FIX: previously hard-coded .to('cuda'), which crashed on
    # CPU-only machines.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    batch = batch.to(device)
    latent = Gz1(batch)  # z1_hat
    z1 = reparameterize(latent)
    if not from_z1:
        # Deep reconstruction through the full hierarchy:
        # x -> z1 -> z2 -> z1' -> x'.
        z_enc = Gz2(z1)
        recon = Gx2(reparameterize(Gx1(reparameterize(z_enc))))
        tag = 'z2'
    else:
        # Shallow reconstruction: x -> z1 -> x'.
        recon = Gx2(z1)
        tag = 'z1'
    n = min(batch.size(0), 8)
    ss = np.shape(batch)
    # Originals on top, reconstructions underneath.
    comparison = torch.cat([batch[:n],
                            recon.view(ss[0], ss[1], ss[2], ss[3])[:n]])
    save_image(comparison.cpu(),
               IMAGE_PATH + '/reconstruction_' + tag + '_' + str(epoch) + '.png', nrow=n)
def save_models_hali(Gz1, Gz2, Gx1, Gx2, Disc, MODEL_PATH, epoch):
    """
    save_models_hali checkpoints all five HALI sub-models for the given epoch.
    Files are written to MODEL_PATH as '<Name>-<epoch+1>.pth'.
    :param Gz1: Level 1 of Encoder
    :param Gz2: Level 2 of Encoder
    :param Gx1: Level 1 of Generator
    :param Gx2: Level 2 of Generator
    :param Disc: Discriminator
    :param MODEL_PATH: Models path
    :param epoch: Number epoch
    """
    named_models = (('Gx1', Gx1), ('Gx2', Gx2), ('Gz1', Gz1),
                    ('Gz2', Gz2), ('Disc', Disc))
    for name, model in named_models:
        target = os.path.join(MODEL_PATH, '%s-%d.pth' % (name, epoch + 1))
        torch.save(model.state_dict(), target)
def save_models_ali(Gz, Gx, Disc, MODEL_PATH, epoch):
    """
    save_models_ali checkpoints the ALI models for the given epoch.
    :param Gz: Encoder
    :param Gx: Generator
    :param Disc: Discriminator
    :param MODEL_PATH: Models path
    :param epoch: Number epoch
    """
    torch.save(Gx.state_dict(),
               os.path.join(MODEL_PATH, 'Gx-%d.pth' % (epoch + 1)))
    torch.save(Gz.state_dict(),
               os.path.join(MODEL_PATH, 'Gz-%d.pth' % (epoch + 1)))
    # BUG FIX: the discriminator was saved as 'Dict-...' while the
    # checkpoint-loading code reads 'Disc-...' files (see initialize_* above),
    # so discriminator checkpoints could never be restored.
    torch.save(Disc.state_dict(),
               os.path.join(MODEL_PATH, 'Disc-%d.pth' % (epoch + 1)))
def calc_gradient_penalty_hali(discriminator, real_data, fake_data, z1, z_enc1, z2, z_enc2, gp_lambda):
    """Calculate Gradient Penalty, HALI Variant 1.
    Computes interpolates of the images and of both latent levels before
    passing them to the discriminator, to account for gradients of the encoder.
    :param discriminator: Discriminator
    :param real_data: real data
    :param fake_data: fake data
    :param z1: empirical z1
    :param z_enc1: encoded (fake) z1
    :param z2: empirical z2
    :param z_enc2: encoded (fake) z2
    :param gp_lambda: chosen lambda for gradient penalty
    :return gradient_penalty : the penalty
    """
    assert real_data.size(0) == fake_data.size(0)
    # Per-sample interpolation coefficients, broadcast over the remaining dims.
    alpha = torch.rand(real_data.size(0), 1, 1, 1)
    alpha = alpha.expand(real_data.size())
    alpha = alpha.cuda()  # NOTE(review): hard-coded CUDA; fails on CPU-only runs
    alpha_z1 = torch.rand(z1.size(0), 1, 1, 1)
    alpha_z1 = alpha_z1.expand(z1.size())
    alpha_z1 = alpha_z1.cuda()
    alpha_z2 = torch.rand(z2.size(0), 1, 1, 1)
    alpha_z2 = alpha_z2.expand(z2.size())
    alpha_z2 = alpha_z2.cuda()
    interpolates = Variable(alpha * real_data + ((1 - alpha) * fake_data),
                            requires_grad=True)
    interpolate_z1 = Variable(alpha_z1 * z_enc1 + ((1 - alpha_z1) * z1),
                              requires_grad=True)
    interpolate_z2 = Variable(alpha_z2 * z_enc2 + ((1 - alpha_z2) * z2),
                              requires_grad=True)
    disc_interpolates = discriminator(
        interpolates, interpolate_z1, interpolate_z2)
    # NOTE(review): gradients are taken w.r.t. the image interpolates only —
    # the z interpolates are not listed in `inputs`, so the penalty does not
    # constrain gradients w.r.t. the latents. Confirm this is intended.
    gradients = torch.autograd.grad(
        outputs=disc_interpolates, inputs=interpolates,
        grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
        create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = gradients.contiguous().view(gradients.size(0), -1)
    # WGAN-GP form: penalise deviation of the per-sample gradient norm from 1.
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * gp_lambda
    return gradient_penalty
def calc_gradient_penalty_ali(discriminator, real_data, fake_data, z, z_enc,
                              gp_lambda):
    """Calculate Gradient Penalty for ALI (single latent level).
    Computes interpolates of the images and the latent code before passing
    them to the discriminator, to account for gradients of the encoder.
    :param discriminator: Discriminator
    :param real_data: real data
    :param fake_data: fake data
    :param z: empirical z
    :param z_enc: encoded (fake) z
    :param gp_lambda: chosen lambda for gradient penalty
    :return gradient_penalty : the penalty
    """
    assert real_data.size(0) == fake_data.size(0)
    # Per-sample interpolation coefficients, broadcast over the remaining dims.
    alpha = torch.rand(real_data.size(0), 1, 1, 1)
    alpha = alpha.expand(real_data.size())
    alpha = alpha.cuda()  # NOTE(review): hard-coded CUDA; fails on CPU-only runs
    alpha_z = torch.rand(z.size(0), 1, 1, 1)
    alpha_z = alpha_z.expand(z.size())
    alpha_z = alpha_z.cuda()
    interpolates = Variable(alpha * real_data + ((1 - alpha) * fake_data),
                            requires_grad=True)
    interpolate_z = Variable(alpha_z * z_enc + ((1 - alpha_z) * z),
                             requires_grad=True)
    disc_interpolates = discriminator(interpolates, interpolate_z)
    # NOTE(review): gradients are taken w.r.t. the image interpolates only;
    # interpolate_z is not in `inputs`. Confirm this is intended.
    gradients = torch.autograd.grad(
        outputs=disc_interpolates, inputs=interpolates,
        grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
        create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = gradients.contiguous().view(gradients.size(0), -1)
    # WGAN-GP form: penalise deviation of the per-sample gradient norm from 1.
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * gp_lambda
    return gradient_penalty
def calc_gradient_penalty2_hali(discriminator, real_data, fake_data, encoder1, encoder2, gp_lambda):
    """Calculate Gradient Penalty HALI Variant 2.
    Passes the image interpolates through both encoders (with the resulting
    latents detached) before handing everything to the discriminator.
    :param discriminator: Discriminator
    :param real_data: real data
    :param fake_data: fake data
    :param encoder1: encoder for z1
    :param encoder2: encoder for z2
    :param gp_lambda: chosen lambda for gradient penalty
    :return gradient_penalty : the penalty
    """
    assert real_data.size(0) == fake_data.size(0)
    # Per-sample interpolation coefficients, broadcast over the remaining dims.
    alpha = torch.rand(real_data.size(0), 1, 1, 1)
    alpha = alpha.expand(real_data.size())
    alpha = alpha.cuda()  # NOTE(review): hard-coded CUDA; fails on CPU-only runs
    interpolates = Variable(alpha * real_data + ((1 - alpha) * fake_data),
                            requires_grad=True)
    # Encode the interpolated images through both levels; detaching below
    # keeps the penalty gradient flowing only through the image inputs.
    enc1 = encoder1(interpolates)
    interpolate_z1 = reparameterize(enc1)
    enc2 = encoder2(interpolate_z1)
    interpolate_z2 = reparameterize(enc2)
    disc_interpolates = discriminator(
        interpolates, interpolate_z1.detach(), interpolate_z2.detach())
    gradients = torch.autograd.grad(
        outputs=disc_interpolates, inputs=interpolates,
        grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
        create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = gradients.contiguous().view(gradients.size(0), -1)
    # WGAN-GP form: penalise deviation of the per-sample gradient norm from 1.
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * gp_lambda
    return gradient_penalty
def calc_gradient_penalty2_ali(discriminator, real_data, fake_data, encoder, gp_lambda):
    """Calculate Gradient Penalty for ALI, Variant 2: feeds the image
    interpolates through the encoder (detached) to the discriminator.
    :param discriminator: Discriminator
    :param real_data: real data
    :param fake_data: fake data
    :param encoder: encoder
    :param gp_lambda: chosen lambda for gradient penalty
    :return gradient_penalty : the penalty
    """
    assert real_data.size(0) == fake_data.size(0)
    # Per-sample interpolation coefficients, broadcast over the remaining dims.
    alpha = torch.rand(real_data.size(0), 1, 1, 1)
    alpha = alpha.expand(real_data.size())
    alpha = alpha.cuda()  # NOTE(review): hard-coded CUDA; fails on CPU-only runs
    interpolates = Variable(alpha * real_data + ((1 - alpha) * fake_data),
                            requires_grad=True)
    # Encode the interpolated images; detaching keeps the penalty gradient
    # flowing only through the image inputs.
    z_enc = encoder(interpolates)
    interpolate_z = reparameterize(z_enc)
    disc_interpolates = discriminator(interpolates, interpolate_z.detach())
    gradients = torch.autograd.grad(
        outputs=disc_interpolates, inputs=interpolates,
        grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
        create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = gradients.contiguous().view(gradients.size(0), -1)
    # WGAN-GP form: penalise deviation of the per-sample gradient norm from 1.
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * gp_lambda
    return gradient_penalty
def reparameterize(encoded):
    """Reparameterization trick of:
    Kingma & Welling, Auto-Encoding Variational Bayes. ICLR, 2014
    https://arxiv.org/abs/1312.6114

    `encoded` packs the mean and log-variance along dim 1; a sample is drawn
    as mu + sigma * eps with eps ~ N(0, I).

    :param encoded: tensor whose dim-1 channels are [mu, logvar]
    :return: a reparameterized sample with half the channels of `encoded`
    """
    half = encoded.size(1) // 2
    mu = encoded[:, :half]
    logvar = encoded[:, half:]
    sigma = torch.exp(0.5 * logvar)
    noise = torch.randn_like(sigma)
    return mu + sigma * noise
<file_sep>from models.encoders import CVAE
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
import torch
import torch.nn as nn
class DAMICClustering(nn.Module):
    """
    Clustering network for DAMIC.
    Each cluster is represented by an autoencoder; a convolutional network
    maps an input to a distribution over the autoencoders.
    See 'Deep clustering based on a mixture of autoencoders' by Chazan, Gannot and Goldberger
    """
    def __init__(self, n_clusters):
        """
        :param n_clusters: number of clusters / per-cluster autoencoders.
            GENERALIZATION: the network previously hard-coded 17 autoencoders
            and a 17-way output regardless of this argument; it now honours
            n_clusters and is identical to the old behaviour for
            n_clusters == 17 (the attribute names ae1..aeN and state_dict
            keys are preserved).
        """
        super().__init__()
        self.n_clusters = n_clusters
        # One CVAE per cluster, kept as attributes ae1..aeN so existing
        # callers and checkpoints (for n_clusters == 17) keep working.
        for k in range(1, n_clusters + 1):
            setattr(self, 'ae%d' % k, CVAE(latent_dim=10).apply(self.init_weights))
        self.clustering_network = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),   # b, 64, 32, 32
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=4, stride=2, padding=1),  # b, 64, 16, 16
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1),  # b, 32, 16, 16
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=4, stride=2, padding=1),  # b, 32, 8, 8
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 16, kernel_size=3, stride=1, padding=1),  # b, 16, 8, 8
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(16, 16, kernel_size=4, stride=2, padding=1),  # b, 16, 4, 4
            nn.BatchNorm2d(16),
            nn.ReLU()
        ).apply(self.init_weights)
        self.output_layer_conv_net = nn.Linear(16 * 4 * 4, n_clusters)
        torch.nn.init.kaiming_uniform_(self.output_layer_conv_net.weight)
        self.softmax_layer = nn.Softmax(dim=1)

    def init_weights(self, m):
        # Kaiming-uniform initialisation for all conv / linear layers.
        if type(m) == nn.Conv2d or type(m) == nn.Linear or type(m) == nn.ConvTranspose2d:
            torch.nn.init.kaiming_uniform_(m.weight)
            m.bias.data.fill_(0.01)

    def forward(self, inputs):
        """Return the softmax cluster-assignment distribution for `inputs`."""
        output = self.clustering_network(inputs)
        output = output.view(-1, 16 * 4 * 4)
        output = self.output_layer_conv_net(output)
        return self.softmax_layer(output)

    def train_damic(self, inputs, batch_size):
        """
        Run the clustering network and every per-cluster autoencoder on `inputs`.
        :param inputs: batch of images of shape (batch_size, 3, 32, 32)
        :param batch_size: number of images in the batch
        :return: (cluster-assignment probabilities,
                  tensor of shape (n_clusters, batch_size, 3, 32, 32) holding
                  each autoencoder's reconstruction of the batch)
        """
        output_cluster_network = self(inputs)
        # NOTE(review): allocated on CPU; if `inputs` lives on GPU the
        # per-slot assignments below imply device->host copies — confirm intended.
        input_reconstruction_of_each_encoders = torch.FloatTensor(
            self.n_clusters, batch_size, 3, 32, 32).zero_()
        # Each CVAE returns (reconstruction, mu, logvar); only the
        # reconstruction is collected here.
        for k in range(self.n_clusters):
            ae = getattr(self, 'ae%d' % (k + 1))
            input_reconstruction_of_each_encoders[k], _, _ = ae(inputs)
        return output_cluster_network, input_reconstruction_of_each_encoders
class KMeansClustering:
    """Thin wrapper around scikit-learn K-means clustering."""

    def __init__(self, n_clusters, seed):
        """
        :param n_clusters: number of clusters
        :param seed: random seed for reproducibility
        """
        self.kmeans = KMeans(init="k-means++", n_clusters=n_clusters, n_init=5, max_iter=1000, random_state=seed,
                             n_jobs=-1)
        self.n_clusters = n_clusters

    def train(self, data):
        """Fit K-means on `data` (torch.Tensor or array-like); returns the fitted estimator."""
        features = data.detach().cpu().numpy() if type(data) is torch.Tensor else data
        self.kmeans.fit(features)
        return self.kmeans

    def predict_cluster(self, data):
        """Return the nearest cluster index for each row of `data`."""
        features = data.detach().cpu().numpy() if type(data) is torch.Tensor else data
        return self.kmeans.predict(features)
class GMMClustering:
    """Thin wrapper around scikit-learn Gaussian-mixture clustering."""

    def __init__(self, n_clusters, seed):
        """
        :param n_clusters: number of mixture components
        :param seed: random seed for reproducibility
        """
        self.gmm = GaussianMixture(n_components=n_clusters, random_state=seed)
        self.n_clusters = n_clusters

    def train(self, data):
        """Fit the mixture on `data` (torch.Tensor or array-like); returns the fitted estimator."""
        features = data.detach().cpu().numpy() if type(data) is torch.Tensor else data
        self.gmm.fit(features)
        return self.gmm

    def predict_cluster(self, data):
        """Return the most likely mixture component for each row of `data`."""
        features = data.detach().cpu().numpy() if type(data) is torch.Tensor else data
        return self.gmm.predict(features)
class RBFClustering:
    """Non-linear (RBF-kernel) SVM classifier used to score embeddings."""

    def __init__(self, seed):
        """:param seed: random seed for the SVC"""
        self.svm = SVC(C=4e-4, random_state=seed)
        self.best_c = 0

    def train(self, data, labels):
        """Fit the SVM on `data`/`labels`; returns the fitted estimator."""
        features = data.detach().cpu().numpy() if type(data) is torch.Tensor else data
        self.svm.fit(features, labels)
        return self.svm

    def predict_cluster(self, data):
        """Return the predicted label for each row of `data`."""
        features = data.detach().cpu().numpy() if type(data) is torch.Tensor else data
        return self.svm.predict(features)
class SVMClustering:
    """Linear-SVM classifier used to score clustering embeddings."""

    def __init__(self, seed):
        """:param seed: random seed for LinearSVC"""
        self.svm = LinearSVC(C=4e-4, random_state=seed)
        self.best_c = 0

    def train(self, data, labels):
        """Fit the SVM on `data`/`labels`; returns the fitted estimator."""
        if type(data) is torch.Tensor:
            data = data.detach().cpu().numpy()
        self.svm.fit(data, labels)
        return self.svm

    def predict_cluster(self, data):
        """Return the predicted label for each row of `data`."""
        if type(data) is torch.Tensor:
            data = data.detach().cpu().numpy()
        return self.svm.predict(data)

    def get_best_c(self, data, targets):
        """
        Grid-search the SVM regularisation constant C on a simple holdout split.
        BUG FIX: this method previously referenced several undefined names
        (random_batch, validation_embeddings, best_error_rate, np, ...) and
        raised NameError on any call; it now searches using the supplied
        data/targets, holding out the last 20% for validation.
        :param data: feature matrix
        :param targets: labels
        :return: the C value with the lowest holdout error rate
        """
        import numpy as np  # local import: this module has no top-level numpy
        data = np.asarray(data)
        targets = np.asarray(targets).ravel()
        # Hold out the last 20% of samples for validation.
        split = max(1, int(0.8 * len(data)))
        train_x, val_x = data[:split], data[split:]
        train_y, val_y = targets[:split], targets[split:]
        best_error_rate = float('inf')
        best_C = None
        for log_C in np.linspace(-20, 20, 50):
            # Only a sub-window of the grid is actually searched.
            if log_C < -10 or log_C > 0:
                continue
            C = np.exp(log_C)
            svm = LinearSVC(C=C)
            svm.fit(train_x, train_y)
            error_rate = 1 - svm.score(val_x, val_y)
            if error_rate < best_error_rate:
                best_error_rate = error_rate
                best_C = C
            print('C = {}, validation error rate = {} '.format(C, error_rate) +
                  '(best is {}, {})'.format(best_C, best_error_rate))
        return best_C
<file_sep>import torch
from torch.optim.optimizer import Optimizer, required
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
    """Return `v` scaled to unit L2 norm; `eps` guards against division by zero."""
    norm = v.norm() + eps
    return v / norm
class SpectralNorm(nn.Module):
    """
    Spectral-normalisation wrapper: before every forward pass the wrapped
    module's weight is divided by an estimate of its largest singular value,
    obtained via power iteration on cached vectors u and v.
    """
    def __init__(self, module, name='weight', power_iterations=1):
        """
        :param module: module whose parameter is normalised
        :param name: name of the parameter to normalise (default 'weight')
        :param power_iterations: power-iteration steps per forward pass
        """
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        # Refresh the singular-vector estimates u, v with power iteration,
        # then rebind `name` on the wrapped module to the normalised weight.
        u = getattr(self.module, self.name + "_u")
        v = getattr(self.module, self.name + "_v")
        w = getattr(self.module, self.name + "_bar")
        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(
                torch.mv(torch.t(w.view(height, -1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
        # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
        sigma = u.dot(w.view(height, -1).mv(v))  # estimated spectral norm
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        # True once _make_params() has registered u, v and the raw weight.
        try:
            u = getattr(self.module, self.name + "_u")
            v = getattr(self.module, self.name + "_v")
            w = getattr(self.module, self.name + "_bar")
            return True
        except AttributeError:
            return False

    def _make_params(self):
        # Replace the original parameter with `<name>_bar` (the raw weight)
        # plus non-trainable random singular-vector estimates u and v.
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + "_u", u)
        self.module.register_parameter(self.name + "_v", v)
        self.module.register_parameter(self.name + "_bar", w_bar)

    def forward(self, *args):
        self._update_u_v()
        return self.module.forward(*args)
class Discriminator(nn.Module):
    """Joint (x, z) discriminator: an image branch ``Dx`` followed by a
    fused head ``Dxz`` applied to the concatenated features and code.

    NOTE: forward assumes CUDA is available (``.cuda()`` on the noise).
    """

    def __init__(self, ch=3, zd=256):
        super(Discriminator, self).__init__()
        self.Dx = DiscriminatorX(zd=zd, ch=ch)
        self.Dxz = DiscriminatorXZ(zd=zd)

    def forward(self, x_input, z_input):
        eps = 1e-12
        std = 0.1 * 0.01
        # Small Gaussian input noise regularizes discriminator training.
        x_noise = Variable(torch.Tensor(x_input.size()).normal_(0, std)).cuda()
        dx_out = self.Dx(x_input + x_noise)
        # Noise of the same scale on the latent code (sized like dx_out,
        # matching the original implementation).
        z_noise = Variable(torch.Tensor(dx_out.size()).normal_(0, std)).cuda()
        joint = torch.cat((dx_out, z_input + z_noise), dim=1)
        return self.Dxz(joint) + eps
class DiscriminatorX(nn.Module):
    """Convolutional feature extractor for the image branch of the
    discriminator; maps ``ch``-channel images to ``zd``-channel features."""

    def __init__(self, zd=128, ch=1):
        super().__init__()
        self.c1 = SpectralNorm(nn.Conv2d(ch, zd // 4, 3, 2))
        self.lr = nn.LeakyReLU(0.02)
        self.c2 = SpectralNorm(nn.Conv2d(zd // 4, zd // 2, 3, 2))
        self.c3 = SpectralNorm(nn.Conv2d(zd // 2, zd, 3, 1))
        self.c4 = SpectralNorm(nn.Conv2d(zd, zd, 3, 1))
        self.c5 = SpectralNorm(nn.Conv2d(zd, zd, 3, 1))
        self.d = nn.Dropout2d(0.2)

    def forward(self, x):
        # Each stage is conv -> LeakyReLU, with dropout after every stage
        # except the third (matching the original layer ordering).
        stages = ((self.c1, True), (self.c2, True), (self.c3, False),
                  (self.c4, True), (self.c5, True))
        for conv, use_dropout in stages:
            x = self.lr(conv(x))
            if use_dropout:
                x = self.d(x)
        return x
class DiscriminatorXZ(nn.Module):
    """1x1-convolution head scoring concatenated (feature, code) maps;
    outputs a single-channel map of discriminator logits."""

    def __init__(self, zd=128):
        super().__init__()
        layers = [
            SpectralNorm(nn.Conv2d(zd * 2, zd * 2, 1, 1)),
            nn.LeakyReLU(0.02),
            nn.Dropout2d(0.2),
            SpectralNorm(nn.Conv2d(zd * 2, zd, 1, 1)),
            nn.LeakyReLU(0.02),
            nn.Dropout2d(0.2),
            SpectralNorm(nn.Conv2d(zd, 1, 1, 1)),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
class GeneratorZ(nn.Module):
    """Encoder network mapping an image ``x`` to a latent feature map.

    Fix: the original ``__init__`` defined ``c5``/``bn5``/``c6`` twice --
    spectral-normalized versions that were immediately shadowed by plain
    ``nn.Conv2d`` assignments.  The dead (shadowed) definitions are removed;
    the surviving plain layers are kept, so runtime behavior and the final
    registered modules are unchanged.
    """

    def __init__(self, zd=128, ch=1):
        super().__init__()
        self.c1 = SpectralNorm(nn.Conv2d(ch, zd // 8, 3, 2))
        self.bn1 = nn.BatchNorm2d(zd // 8)
        self.lr = nn.LeakyReLU(0.02)
        self.c2 = SpectralNorm(nn.Conv2d(zd // 8, zd // 4, 3, 2))
        self.bn2 = nn.BatchNorm2d(zd // 4)
        self.c3 = SpectralNorm(nn.Conv2d(zd // 4, zd // 2, 3, 1))
        self.bn3 = nn.BatchNorm2d(zd // 2)
        self.c4 = SpectralNorm(nn.Conv2d(zd // 2, zd, 3, 1))
        self.bn4 = nn.BatchNorm2d(zd)
        # Final layers are plain convolutions (the surviving definitions).
        # ``bn5`` and ``c6`` are registered but never used by ``forward``;
        # kept for state-dict compatibility.
        self.c5 = nn.Conv2d(zd, zd * 2, 3, 1)
        self.bn5 = nn.BatchNorm2d(zd * 2)
        self.c6 = nn.Conv2d(zd * 2, zd * 2, 3, 1)

    def forward(self, x):
        """Encode ``x``; returns the raw output of ``c5`` (no activation)."""
        x = self.lr(self.bn1(self.c1(x)))
        x = self.lr(self.bn2(self.c2(x)))
        x = self.lr(self.bn3(self.c3(x)))
        x = self.lr(self.bn4(self.c4(x)))
        return self.c5(x)
class GeneratorX(nn.Module):
    """Decoder mapping a latent code back to image space via transposed
    convolutions, ending in ``tanh`` so outputs lie in [-1, 1]."""

    def __init__(self, zd=128, ch=1):
        super().__init__()
        self.conv1 = SpectralNorm(nn.ConvTranspose2d(zd, zd, 3, 1))
        self.bn1 = nn.BatchNorm2d(zd)
        self.rl = nn.LeakyReLU(0.02)
        self.conv2 = SpectralNorm(nn.ConvTranspose2d(zd, zd // 2, 3, 2))
        self.bn2 = nn.BatchNorm2d(zd // 2)
        self.conv3 = SpectralNorm(nn.ConvTranspose2d(zd // 2, zd // 4, 3, 2))
        self.bn3 = nn.BatchNorm2d(zd // 4)
        self.conv4 = SpectralNorm(nn.ConvTranspose2d(zd // 4, zd // 4, 3, 2))
        self.bn4 = nn.BatchNorm2d(zd // 4)
        self.conv5 = SpectralNorm(nn.ConvTranspose2d(zd // 4, ch, 2, 1))
        self.tanh = nn.Tanh()

    def forward(self, x):
        # Four upsampling stages of conv -> batchnorm -> LeakyReLU.
        stages = ((self.conv1, self.bn1), (self.conv2, self.bn2),
                  (self.conv3, self.bn3), (self.conv4, self.bn4))
        for conv, bn in stages:
            x = self.rl(bn(conv(x)))
        # Final projection to ``ch`` channels, squashed to [-1, 1].
        return self.tanh(self.conv5(x))
<file_sep>import numpy as np
from sklearn.svm import SVC
class SVMClassifier:
    """Multiclass SVM classifier built on scikit-learn's ``SVC``.

    Class weights are balanced to compensate for label imbalance.
    """

    def __init__(self, C=1, kernel='rbf', gamma=.5):
        super(SVMClassifier, self).__init__()
        self.clf = SVC(C=C, kernel=kernel, gamma=gamma, class_weight='balanced')

    def train_classifier(self, train_X, train_y):
        """Fit the SVM on encoded features and return train predictions.

        train_X: torch tensor of shape (n_samples, dim) -- low-dimensional
            image representations (moved to CPU / NumPy internally).
        train_y: target classes, shape (n_samples,).
        """
        features = train_X.cpu().numpy()
        self.clf.fit(features, train_y)
        return self.clf.predict(features)

    def validate_classifier(self, valid_X):
        """Return predictions for encoded validation features.

        valid_X: torch tensor of shape (n_samples, dim).
        """
        return self.clf.predict(valid_X.cpu().numpy())
<file_sep>from comet_ml import OfflineExperiment
import os
import sys
import json
import torch
import argparse
import numpy as np
from time import time
from torch.utils import data
sys.path.append("../")
from models.encoders import *
from models.svm_classifier import SVMClassifier
from utils.model_utils import encode_dataset
from utils.utils import compute_metrics, __compute_metrics
from utils.constants import Constants
from utils.dataset import HoromaDataset
def main(datapath, encoding_model, batch_size, n_epochs, lr, device, train_split, valid_split, train_labeled_split,
         experiment, path_to_model=None):
    """
    Pre-train an encoder on (mostly unlabeled) data, then train and score an
    SVM classifier on the encoded labeled splits, logging metrics to comet.

    :param datapath: path to the directory containing the samples
    :param encoding_model: which encoding model to use, convolutional, variational or simple autoencoders.
    :param batch_size: batch size
    :param n_epochs: number of epochs
    :param lr: learning rate for unsupervised part
    :param train_split: dataset used for unsupervised part
    :param valid_split: valid split for SVM
    :param train_labeled_split: train split for SVM
    :param device: use CUDA device if available else CPU .
    :param experiment: track experiment
    :param path_to_model: path to the directory containing saved models.
    """
    # NOTE(review): ``flattened`` is read from module scope (set in the
    # __main__ block below) rather than passed as a parameter -- calling
    # main() from another module without defining it raises NameError.
    full_dataset = HoromaDataset(datapath, split=train_split, flattened=flattened)
    train_labeled = HoromaDataset(
        datapath, split=train_labeled_split, flattened=flattened)
    # Validation data(labeled) for the supervised task(Classification)
    valid_data = HoromaDataset(
        datapath, split=valid_split, flattened=flattened)
    # split the full_dataset(labeled and unlabeled train data) into train and valid for autoencoder pre-training
    n_train = int(0.90 * len(full_dataset))
    n_valid = len(full_dataset) - n_train
    train_dataset, valid_dataset = data.random_split(full_dataset, [n_train, n_valid])
    # Train and apply encoding model
    train_enc, encoding_model = encoding_model.fit(train_dataset, valid_dataset, batch_size=batch_size, n_epochs=n_epochs,
                                                   lr=lr, device=device, experiment=experiment)
    # extract latent representation of train_labeled data
    train_labeled_enc = encode_dataset(
        encoding_model, train_labeled, batch_size, device, is_unlabeled=False)
    print("Train labeled data encoding complete.\n")
    # extract latent representation of validation data
    valid_enc = encode_dataset(
        encoding_model, valid_data, batch_size, device, is_unlabeled=False)
    print("validation data encoding complete.\n")
    start_time = time()
    # Train SVM classifier on the encoded labeled training data.
    svm_classifier = SVMClassifier()
    print("Traing SVM classifier...\n")
    pred_train_y = svm_classifier.train_classifier(
        train_labeled_enc, train_labeled.targets)
    print("Computing metrics for train data\n")
    train_accuracy, train_f1, __train_f1 = __compute_metrics(
        train_labeled.targets, pred_train_y)
    print("Prediction for validation data. \n")
    pred_valid_y = svm_classifier.validate_classifier(
        valid_enc)
    print("Computing metrics for validation data\n")
    valid_accuracy, valid_f1, __valid_f1 = __compute_metrics(
        valid_data.targets, pred_valid_y)
    print("Done in {:.2f} sec.".format(time() - start_time))
    # Report aggregate and per-class metrics for both splits.
    print(
        "Train : Accuracy: {:.2f} | F1: {:.2f}".format(train_accuracy * 100, train_f1 * 100))
    print(
        "Train : F1 score for each class: {}".format(__train_f1 * 100))
    print(
        "Validation : Accuracy: {:.2f} | F1: {:.2f}".format(valid_accuracy * 100, valid_f1 * 100))
    print(
        "Validation : F1 score for each class: {}".format(__valid_f1 * 100))
    # Log the same metrics to the comet experiment.
    experiment.log_metric('Train accuracy', train_accuracy)
    experiment.log_metric('Train f1-score', train_f1)
    experiment.log_metric('Validation accuracy', valid_accuracy)
    experiment.log_metric('Validation f1-score', valid_f1)
if __name__ == '__main__':
    # Entry point: parse CLI arguments, load the selected configuration,
    # seed all RNGs, set up comet tracking, build the encoder, and launch
    # the SVM training pipeline above.
    parser = argparse.ArgumentParser()
    parser.add_argument("--datapath", type=str, default=Constants.DATAPATH,
                        help="Path to dataset folder")
    parser.add_argument("--encoder_path", type=str, default=None)
    parser.add_argument("--config", type=str, default="CAE_SVM",
                        help="To select configuration from config.json")
    args = parser.parse_args()
    config_key = args.config
    datapath = args.datapath
    path_to_model = args.encoder_path
    with open(Constants.CONFIG_PATH, 'r') as f:
        configuration = json.load(f)[config_key]
    # Parse configuration file
    encoding_model = configuration['enc_model']
    batch_size = configuration['batch_size']
    seed = configuration['seed']
    n_epochs = configuration['n_epochs']
    lr = configuration['lr']
    train_split = configuration['train_split']
    valid_split = configuration['valid_split']
    train_labeled_split = configuration['train_labeled_split']
    latent_dim = configuration['latent_dim']
    flattened = False  # Default; overridden per encoder below and read as a global by main().
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Set all seeds for full reproducibility
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Set up Comet Experiment tracking
    experiment = OfflineExperiment(
        "z15Um8oxWZwiXQXZxZKGh48cl", workspace='swechhachoudhary', offline_directory="../swechhas_experiments")
    experiment.set_name(
        name=args.config + "_dim={}_overlapped={}".format(latent_dim, train_split))
    experiment.log_parameters(configuration)
    # Select the encoder; ``flattened`` tells HoromaDataset whether samples
    # must be flattened to vectors (dense AEs / PCA) or kept as images
    # (convolutional models).
    if encoding_model == 'pca':
        encoding_model = PCAEncoder(seed)
        flattened = True
    elif encoding_model == 'vae':
        encoding_model = VAE(latent_dim=latent_dim).to(device)
        flattened = True
    elif encoding_model == "ae":
        encoding_model = AE(latent_dim=latent_dim).to(device)
        flattened = True
    elif encoding_model == "cae":
        encoding_model = CAE(latent_dim=latent_dim).to(device)
        flattened = False
    elif encoding_model == "cvae":
        encoding_model = CVAE(latent_dim=latent_dim).to(device)
        flattened = False
    elif encoding_model == "convae":
        encoding_model = ConvAE(latent_dim=latent_dim).to(device)
        flattened = False
    else:
        print('No encoding model specified. Using PCA.')
        encoding_model = PCAEncoder(seed)
    # Initiate experiment
    main(datapath, encoding_model, batch_size, n_epochs, lr, device, train_split, valid_split, train_labeled_split,
         experiment, path_to_model=path_to_model)
<file_sep>from comet_ml import OfflineExperiment
import json
import argparse
import torch
import os
import sys
import numpy as np
from torch.utils.data import DataLoader
sys.path.append("../")
from models.encoders import *
from models.mlp_classifier import MLPClassifier
from utils.constants import Constants
from utils.dataset import HoromaDataset
from utils.model_utils import train_semi_supervised_network
def main(datapath, encoding_model, classifier_model, batch_size, n_epochs, lr_unsup, lr_sup, device,
         train_unlabeled_split, valid_split, train_labeled_split, patience,
         experiment, path_to_model=None):
    """Run semi-supervised training: encoder pre-training on unlabeled data
    interleaved with supervised classifier training on labeled data.

    :param datapath: directory containing the Horoma samples
    :param encoding_model: encoder (convolutional/variational/simple AE)
    :param classifier_model: classifier trained on top of the encoder
    :param batch_size: mini-batch size
    :param n_epochs: number of training epochs
    :param lr_unsup: learning rate for the unsupervised phase
    :param lr_sup: learning rate for the supervised phase
    :param device: CUDA device if available, else CPU
    :param train_unlabeled_split: unlabeled split for the unsupervised phase
    :param valid_split: validation split for the classifier
    :param train_labeled_split: labeled train split for the classifier
    :param patience: early-stopping patience
    :param experiment: comet.ml experiment used for tracking
    :param path_to_model: optional directory containing saved models
    """
    unlabeled_set = HoromaDataset(datapath, split=train_unlabeled_split)
    labeled_set = HoromaDataset(datapath, split=train_labeled_split)
    validation_set = HoromaDataset(datapath, split=valid_split)
    validation_loader = DataLoader(validation_set, batch_size=batch_size)

    # Draw the same number of batches from the labeled and unlabeled pools.
    batches_labeled = len(labeled_set) // batch_size
    batches_unlabeled = batches_labeled

    # Semisupervised Training
    train_semi_supervised_network(encoding_model, classifier_model, unlabeled_set, labeled_set,
                                  validation_loader, n_epochs, batch_size, lr_unsup, lr_sup,
                                  device, batches_labeled, batches_unlabeled, patience, experiment)
if __name__ == '__main__':
    # Entry point: parse CLI arguments, load the selected configuration,
    # seed all RNGs, set up comet tracking, build the encoder + classifier,
    # and launch semi-supervised training.
    parser = argparse.ArgumentParser()
    parser.add_argument("--datapath", type=str, default=Constants.DATAPATH,
                        help="Path to dataset folder")
    parser.add_argument("--encoder_path", type=str, default='results/best_model1.pth')
    parser.add_argument("--config", type=str, default="CAE_MLP",
                        help="To select configuration from config.json")
    args = parser.parse_args()
    config_key = args.config
    datapath = args.datapath
    path_to_model = args.encoder_path
    with open(Constants.CONFIG_PATH, 'r') as f:
        configuration = json.load(f)[config_key]
    # Unpack the hyperparameters for this configuration.
    encoding_model = configuration['enc_model']
    classifier_model = configuration["classifier_model"]
    batch_size = configuration['batch_size']
    seed = configuration['seed']
    n_epochs = configuration['n_epochs']
    lr_unsup = configuration['lr_unsup']
    lr_sup = configuration['lr_sup']
    patience = configuration['patience']
    train_unlabeled_split = configuration['train_unlabeled_split']
    valid_split = configuration['valid_split']
    train_labeled_split = configuration['train_labeled_split']
    latent_dim = configuration['latent_dim']
    hidden_size = configuration['hidden_size']
    n_layers = configuration['n_layers']
    # NOTE(review): ``flattened`` is set per encoder below but never passed
    # to main() in this script -- confirm whether it is still needed.
    flattened = False
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Set all seeds for full reproducibility
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Set up Comet Experiment tracking # Replace this with appropriate comet
    # workspaces
    experiment = OfflineExperiment(
        "z15Um8oxWZwiXQXZxZKGh48cl", workspace='swechhachoudhary', offline_directory="../swechhas_experiments")
    experiment.set_name(
        name=args.config + "_dim={}_split={}".format(latent_dim, train_unlabeled_split))
    experiment.log_parameters(configuration)
    # Select the encoder architecture named in the configuration.
    if encoding_model == 'pca':
        encoding_model = PCAEncoder(seed)
        flattened = True
    elif encoding_model == 'vae':
        encoding_model = VAE(latent_dim=latent_dim).to(device)
        flattened = True
    elif encoding_model == "ae":
        encoding_model = AE(latent_dim=latent_dim).to(device)
        flattened = True
    elif encoding_model == "cae":
        encoding_model = CAE(latent_dim=latent_dim).to(device)
        flattened = False
    elif encoding_model == "cvae":
        encoding_model = CVAE(latent_dim=latent_dim).to(device)
        flattened = False
    elif encoding_model == "convae":
        encoding_model = ConvAE(latent_dim=latent_dim).to(device)
        flattened = False
    else:
        print('No encoding model specified. Using PCA.')
        encoding_model = PCAEncoder(seed)
    # NOTE(review): if classifier_model is not "MLPClassifier" it stays a
    # string and main() will fail downstream -- confirm intended configs.
    if classifier_model == "MLPClassifier":
        classifier_model = MLPClassifier(latent_dim=latent_dim, hidden_size=hidden_size, n_layers=n_layers).to(device)
    # Initiate experiment
    main(datapath, encoding_model, classifier_model, batch_size, n_epochs, lr_unsup, lr_sup, device,
         train_unlabeled_split, valid_split, train_labeled_split, patience,
         experiment, path_to_model=path_to_model)
<file_sep>class Constants:
DATAPATH = "/rap/jvb-000-aa/COURS2019/etudiants/data/horoma"
_DATAPATH = "../data/horoma/"
REGION_ID_PATH = DATAPATH + "valid_regions_id.txt"
REGION_ID_PATH_OVERLAPPED = DATAPATH + "valid_overlapped_regions_id.txt"
CONFIG_PATH = "../configs/config.json"
PATH_TO_MODEL = "final_models/"
<file_sep>from comet_ml import OfflineExperiment
import json
import argparse
import torch
import os
import sys
sys.path.append("../")
from models import *
from models.clustering import *
from utils.ali_utils import *
from utils.utils import *
from utils.utils import load_datasets
from utils.constants import Constants
from utils.dataset import HoromaDataset
def main(datapath, configs, experiment):
    """Train and/or evaluate an ALI/HALI model on Horoma.

    :param datapath: directory containing the samples
    :param configs: dictionary of training hyperparameters
    :param experiment: comet.ml experiment object for logging results
    """
    unlabeled = HoromaDataset(datapath, split=configs['train_split'],
                              subset=None, flattened=False)
    labeled = HoromaDataset(datapath, split=configs['train_labeled_split'],
                            subset=None, flattened=False)
    validation = HoromaDataset(datapath, split=configs['valid_split'],
                               subset=None, flattened=False)

    print("Shape of training set: ", unlabeled.data.shape)
    print("Shape of validation set: ", validation.data.shape)

    use_hali = configs['enc_model'] == "hali"

    if configs['encode']:
        # Unsupervised adversarial training phase.
        if use_hali:
            Gx1, Gx2, Gz1, Gz2, Disc, optim_g, optim_d, train_loader, cuda, configs = initialize_hali(
                configs, unlabeled)
            training_loop_hali(Gz1, Gz2, Gx1, Gx2, Disc, optim_d,
                               optim_g, train_loader, configs, experiment, cuda)
        else:
            Gx, Gz, Disc, optim_g, optim_d, train_loader, cuda, configs = initialize_ali(
                configs, unlabeled)
            training_loop_ali(Gz, Gx, Disc, optim_d, optim_g,
                              train_loader, configs, experiment, cuda)

    if configs['cluster']:
        # Evaluation phase: cluster the learned representations and log
        # the best checkpoint's scores.
        if use_hali:
            best_f1, best_acc, best_model = get_results_hali(
                configs, experiment, unlabeled, labeled, validation)
        else:
            best_f1, best_acc, best_model = get_results_ali(
                configs, experiment, unlabeled, labeled, validation)

        experiment.log_metric('best_accuracy', best_acc)
        experiment.log_metric('best_f1-score', best_f1)
        experiment.log_metric('best_model_epoch', best_model)
if __name__ == '__main__':
    # Entry point: parse CLI arguments, load configuration, seed RNGs,
    # create the comet experiment, and launch ALI/HALI training/evaluation.
    parser = argparse.ArgumentParser()
    parser.add_argument("--datapath", type=str, default=Constants.DATAPATH,
                        help="Path to dataset folder")
    parser.add_argument("--encoder_path", type=str, default=None)
    parser.add_argument("--config", type=str, default="HALI",
                        help="To select configuration from config.json")
    args = parser.parse_args()
    config_key = args.config
    datapath = args.datapath
    path_to_model = args.encoder_path
    with open(Constants.CONFIG_PATH, 'r') as f:
        configuration = json.load(f)[config_key]
    # Parse configuration file
    batch_size = configuration['batch_size']
    seed = configuration['seed']
    n_epochs = configuration['n_epochs']
    # Set all seeds for full reproducibility
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    latent_dim = configuration['Zdim']
    # NOTE(review): the directory created here is 'experiments' but the
    # comet offline directories below point to '../experiments' -- confirm
    # the intended location.
    if not os.path.exists('experiments'):
        print('mkdir ', 'experiments')
        os.mkdir('experiments')
    # Fix: previously a configuration with neither 'encode' nor 'cluster'
    # set left ``experiment`` undefined, crashing below with NameError.
    if configuration['encode']:
        experiment = OfflineExperiment(project_name="ali", workspace='timothynest',
                                       offline_directory=str('../experiments/' + configuration['experiment']))
    elif configuration['cluster']:
        experiment = OfflineExperiment(project_name="ali", workspace='timothynest', offline_directory=str(
            '../experiments/' + configuration['experiment'] + '/cluster'))
    else:
        raise ValueError(
            "Configuration must enable at least one of 'encode' or 'cluster'.")
    experiment.set_name(name=configuration['experiment'])
    experiment.log_parameters(configuration)
    experiment.add_tag(configuration['experiment'])
    # Initiate experiment
    main(datapath, configuration, experiment)
<file_sep>import numpy as np
from sklearn.metrics import recall_score, f1_score
# Note : this code comes from OM Signal block 2 baseline
# Some changes were made to be able to use this for Horoma
def scorePerformance(treeClass_pred, treeClass_true):
    """
    Computes the combined performance score for the tree-class task.
    Accuracy is measured as chance-adjusted macro-averaged recall, clipped
    at 0.0 so that zero indicates chance-level (or worse) performance and
    1.0 indicates perfect performance.

    :param treeClass_pred: 1D int32 numpy array containing the predicted tree class labels
    :param treeClass_true: 1D int32 numpy array containing the true tree class labels
    :return: (combinedPerformanceScore, treeClassAccuracy, treeClassF1).
        Always a 3-tuple -- the original code returned a 5-tuple on the
        empty-input path, which was inconsistent with the normal return.
    """
    numElmts = None

    # Input checking
    if treeClass_true is not None:
        assert isinstance(treeClass_pred, np.ndarray)
        assert len(treeClass_pred.shape) == 1
        assert treeClass_pred.dtype == np.int32

        assert isinstance(treeClass_true, np.ndarray)
        assert len(treeClass_true.shape) == 1
        assert treeClass_true.dtype == np.int32

        assert (len(treeClass_pred) == len(treeClass_true))

        if numElmts is not None:
            assert (len(treeClass_pred) == numElmts) and (
                len(treeClass_true) == numElmts)
        else:
            numElmts = len(treeClass_pred)

    if numElmts is None:
        # Nothing to score; keep the same tuple shape as the normal path.
        return 0.0, 0.0, 0.0

    numVal = 0.0

    # Accuracy is computed with macro averaged recall so that accuracy is
    # computed as though the classes were balanced, even if they are not.
    # Any accuracy value worse than random chance is clipped at zero.
    if treeClass_true is not None:
        numVal += 1.0
        treeClassAccuracy = recall_score(treeClass_true, treeClass_pred, average='macro')
        # Rescale so chance-level recall maps to 0 and perfect recall to 1.
        adjustementTerm = 1.0 / len(np.unique(treeClass_true))
        treeClassAccuracy = (treeClassAccuracy - adjustementTerm) / \
            (1 - adjustementTerm)
        if treeClassAccuracy < 0 or np.isnan(treeClassAccuracy):
            treeClassAccuracy = 0.0
        treeClassAccuracyRep = treeClassAccuracy
        treeClassF1 = f1_score(treeClass_true, treeClass_pred, average="weighted")
    else:
        treeClassAccuracy = 1.0
        treeClassAccuracyRep = 0.0
        # Fix: previously ``treeClassF1`` was left undefined on this branch,
        # raising NameError at the return statement below.
        treeClassF1 = 0.0

    # Combined score is the geometric mean of the individual task scores;
    # with a single task this reduces to the task score itself.
    combinedPerformanceScore = np.power(
        treeClassAccuracy,
        1.0 / max(1.0, numVal)
    )

    return (
        combinedPerformanceScore,
        treeClassAccuracyRep,
        treeClassF1
    )
<file_sep># IFT6759 Winter 2019
## Horoma Project Block 3
Authors:
<NAME>,
<NAME>,
<NAME>
Project Organization
------------
├── configs <- contains config files for the project
    ├── data               <- Datasets used across the project
├── evaluation <- contains eval.py
├── models <- all the models architecture
├── trainers <- training scripts for all the models
    ├── utils              <- Utilities used in the project
├── run.pbs <- script for training on cluster
└── README.md <- The top-level README for developers using this project.
--------
## To run scripts
Add the following command in run.pbs for training
* TransformerNet model
```
s_exec python train_transformer_net.py
```
* CAE-SVM model
```
s_exec python svm_trainer.py
```
* CAE-MLP model
```
s_exec python semisupervised_trainer.py
```
* HALI model
```
s_exec python ali_train.py --config HALI
```
* ALI model
```
s_exec python ali_train.py --config ALI
```
* DAMIC model
```
s_exec python train_damic.py
```
* CVAE model
```
s_exec python train.py --config CVAE_BASE --encoder_path experiment_models/cvae_base.pth
```
* CONV_AE model
```
s_exec python train.py --config CONV_AE --encoder_path experiment_models/conv_ae.pth
```
* Neural Rendering Model
```
s_exec python train_nrm.py
```
--------
DAMIC Trainer Usage
------------
```
usage: trainers/train_damic.py [-h] [--datapath] [--config]
Start a training for a DAMIC model
optional arguments:
-h, --help show this help message and exit
  --datapath            path to the dataset folder
default: Constants._DATAPATH
--config which config to load within config.json (CONV_AE, DAMIC or CVAE_BASE)
default: DAMIC
-d DEVICE, --device DEVICE
indices of GPUs to enable (default: all)
```
Transformer Net Trainer Usage
------------
```
usage: trainers/train_transformer_net.py [-h] [--batch-size] [--eval-batch-size] [--iters] [--lr] [--momentum]
[--alpha] [--ema_decay] [--xi] [--eps] [--cr_type] [--ip] [--workers] [--seed] [--targets] [--data_dir]
[--checkpoint_dir] [--log-interval] [--chkpt-freq] [--no-entropy] [--reg-vat-var]
Start a training for a Transformer Net model
optional arguments:
-h, --help show this help message and exit
--batch-size input batch size for training
default: 16
--eval-batch-size input batch size for evaluation
default: 8
--iters number of iterations to train
default: 10 000
--lr learning rate
default: 0.001
--momentum SGD momentum
default: 0.9
--alpha regularization coefficient
default: 0.01
--ema_decay decay for exponential moving average
default: 0.999
--xi hyperparameter of VAT
default: 5.0
--eps hyperparameter of VAT
default: 1.0
--workers number of CPU
default: 8
--seed random seed
default: 1
--targets list of targets to use for training
default: ['tree_class']
--data_dir directory where to find the data
default:"/rap/jvb-000-aa/COURS2019/etudiants/data/horoma"
--checkpoint_dir directory where to checkpoints the models
default:"./transformer_net_models/"
--log-interval how many batches to wait before logging training status
default: 10
--chkpt-freq how many batches to wait before performing checkpointing
default: 100
--no-entropy enables Entropy based regularization
default: False
--reg-vat-var Assumed variance of the predicted Gaussian for regression tasks
default: 0.1
```
ALI/HALI Usage
------------
```
usage: trainers/ali_train.py [-h] [--datapath] [--config]
To start training an ALI-based model, be sure to specify encode=true and cluster=false.
This will save models for each epoch of unlabeled training in the experiment folder.
To evaluate, specify encode=false and cluster=true.
optional arguments:
-h, --help show this help message and exit
--config which config to load within config.json (HALI, ALI)
                        default: HALI
```
## Github conventions
* Each feature must have his own branch for development
* git checkout -b nameOfTheNewBranch
* When changes are made, push is only made to the feature's branch
* git add .
* git commit -m "Relevant message regarding the changes"
* git checkout master
* git pull --rebase
* git checkout nameOfTheNewBranch
* git merge master
* Fix conflicts if there is any
* git push origin nameOfTheNewBranch
* Once the changes in a personal branch are ready, do a pull request for the master branch
* go to the github page of the project https://github.com/swechhachoudhary/ift6759-horoma
* select your branch using the drop down button
* click on create pull request
* put master branch as the head
* confirm the creation of the pull request
<file_sep>import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data import sampler
# Note : this code comes from OM Signal block 2 baseline
class InfiniteSampler(sampler.Sampler):
    """Sampler that yields dataset indices forever, reshuffling after each
    full pass -- used for iteration-based (rather than epoch-based) loops.
    """

    def __init__(self, num_samples):
        # num_samples: size of the dataset to sample indices from.
        self.num_samples = num_samples

    def __iter__(self):
        # Loop forever: every pass yields a fresh random permutation of
        # [0, num_samples).
        while True:
            order = np.random.permutation(self.num_samples)
            for i in range(self.num_samples):
                yield order[i]

    def __len__(self):
        # NOTE(review): __len__ is supposed to return an int; returning
        # None makes ``len(sampler)`` raise TypeError.  Callers here appear
        # never to take the length, but this is fragile -- confirm before
        # relying on it.
        return None
def get_iters(
        train_labeled_dataset, train_unlabeled_dataset, valid_dataset,
        test_dataset=None, l_batch_size=32, ul_batch_size=128,
        val_batch_size=256, workers=8):
    """Build the data iterators used during training.

    Labeled/unlabeled training sets are wrapped in infinite iterators so
    batches can be drawn indefinitely; validation/test sets get plain,
    non-shuffling loaders.  Any dataset passed as ``None`` maps to ``None``.
    """
    def infinite_iter(dataset, batch_size):
        # Endless iterator over ``dataset`` backed by InfiniteSampler.
        if dataset is None:
            return None
        return iter(DataLoader(
            dataset,
            batch_size=batch_size, num_workers=workers,
            sampler=InfiniteSampler(len(dataset)),
        ))

    def eval_loader(dataset):
        # Ordinary single-pass loader for evaluation splits.
        if dataset is None:
            return None
        return DataLoader(
            dataset, batch_size=val_batch_size,
            num_workers=workers, shuffle=False
        )

    return {
        'labeled': infinite_iter(train_labeled_dataset, l_batch_size),
        'unlabeled': infinite_iter(train_unlabeled_dataset, ul_batch_size),
        'val': eval_loader(valid_dataset),
        'test': eval_loader(test_dataset),
    }
<file_sep>import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import utils.scoring_function as scoreF
import utils.evaluator as evaluator
import utils.checkpoint_utils as checkpoint_utils
# Note : this code comes from OM Signal block 2 baseline
# Some changes were made to be able to use this for Horoma
class Trainer():
    """Iteration-based semi-supervised trainer (adapted from the OM Signal
    block 2 baseline).

    Trains a model with a weighted supervised loss on labeled batches plus
    an optional entropy-based regularization term computed on both labeled
    and unlabeled batches, evaluates periodically on a validation iterator,
    and checkpoints both the best-scoring and periodic model snapshots.
    """

    def __init__(
            self, args, device, criterion_dict, target_labels, weight=None,
            target_vat_dict=None, target_entropy_dict=None, init_iter=0):
        # args: namespace providing iters, log_interval, chkpt_freq, alpha
        #   and checkpoint_dir (see the training script's argparse setup).
        # criterion_dict / target_entropy_dict / target_vat_dict: map each
        #   target label to its loss / entropy regularizer / VAT type.
        # weight: optional per-target loss weights (defaults to all 1.0).
        # init_iter: iteration index to resume from.
        self.args = args
        self.init_iter = init_iter
        self.device = device
        self.target_labels = target_labels
        self.criterion_dict = criterion_dict
        self.target_entropy_dict = target_entropy_dict
        self.target_vat_dict = target_vat_dict
        # Per-target lists aligned with target_labels.
        self.criterion = [
            self.criterion_dict[a] for a in self.target_labels
        ]
        self.entropy = [
            None if self.target_entropy_dict is None else self.target_entropy_dict[a]
            for a in self.target_labels
        ]
        self.vat_type = [
            None if self.target_vat_dict is None else self.target_vat_dict[a]
            for a in self.target_labels
        ]
        # Entropy regularization is enabled iff any target has a regularizer.
        self.is_entropy_based = False
        for a in self.entropy:
            if a is not None:
                self.is_entropy_based = True
                break
        self.weight = weight
        if self.weight is None:
            self.weight = [1.0] * len(self.criterion)
        assert len(self.weight) == len(self.criterion)

    def train(self, model, data_iterators, optimizer, tb_prefix='exp/', prefix='neural_network', experiment=None):
        """Run the main training loop for ``self.args.iters`` iterations.

        :param model: network to train.
        :param data_iterators: dict with infinite 'labeled'/'unlabeled'
            iterators and a 'val' loader.
        :param optimizer: torch optimizer for ``model``.
        :param tb_prefix: unused here; kept for interface compatibility.
        :param prefix: filename prefix for checkpoints.
        :param experiment: comet.ml experiment used for metric logging.
        """
        # Meter index 0 tracks the total supervised loss; indices 1..N the
        # per-target losses.  perfs tracks (combined score, accuracy, F1).
        sup_losses = [checkpoint_utils.AverageMeter() for _ in range(len(self.criterion) + 1)]
        vat_losses = checkpoint_utils.AverageMeter()
        perfs = [checkpoint_utils.AverageMeter() for _ in range(3)]
        model.train()
        criterion = self.criterion
        score_param_index = evaluator.get_scoring_func_param_index(
            self.target_labels)
        weight = self.weight
        if weight is None:
            weight = [1.0] * len(criterion)
        assert len(weight) == len(criterion)
        tbIndex = 0
        best_val_metric = 0.0
        best_val_data = None
        for k in tqdm(range(self.init_iter, self.args.iters)):
            # reset
            # Every log_interval iterations: evaluate on validation data,
            # checkpoint if best so far, log metrics, and reset the meters.
            if k > 0 and k % self.args.log_interval == 0:
                tbIndex += 1
                val_mean_loss, val_metrics, _ = self.eval(
                    model,
                    data_iterators,
                    key='val'
                )
                if val_metrics[0] > best_val_metric:
                    best_val_metric = val_metrics[0]
                    best_val_data = val_metrics
                    filename = self.args.checkpoint_dir + prefix + '_{}.pt'.format('BestModel')
                    checkpoint_utils.set_path(filename)
                    checkpoint_utils.save_checkpoint(model, k, filename, optimizer)
                experiment.log_metric('Train/Loss', sup_losses[0].avg, tbIndex)
                experiment.log_metric('Valid/Loss', val_mean_loss, tbIndex)
                experiment.log_metric('Train/treeClassAcc', perfs[1].avg, tbIndex)
                experiment.log_metric('Valid/treeClassAcc', val_metrics[1], tbIndex)
                experiment.log_metric('Train/treeClassF1', perfs[2].avg, tbIndex)
                experiment.log_metric('Valid/treeClassF1', val_metrics[2], tbIndex)
                train_metrics_avg = [p.avg for p in perfs]
                train_metrics_val = [p.val for p in perfs]
                print(
                    'Iteration: {}\t Loss {:.4f} ({:.4f})\t'.format(
                        k, sup_losses[0].val, sup_losses[0].avg
                    ),
                    'Train_Metrics: {}\t Train_Metrics_AVG {}\t'.format(
                        train_metrics_val, train_metrics_avg
                    ),
                    'Valid_Metrics: {}\t'.format(val_metrics),
                    'Best Perf: {} - {}\t'.format(best_val_metric, best_val_data)
                )
                for a in sup_losses:
                    a.reset()
                for a in perfs:
                    a.reset()
                vat_losses.reset()
                # re-activate train mode
                model.train()
            # Draw the next labeled and unlabeled batches and move them to
            # the training device.
            x_l, y_l = next(data_iterators['labeled'])
            if not isinstance(y_l, (list, tuple)):
                y_l = [y_l]
            x_ul = next(data_iterators['unlabeled'])
            x_l, y_l = x_l.to(self.device), [t.to(self.device) for t in y_l]
            if not isinstance(x_ul, (list, tuple)):
                x_ul = x_ul.to(self.device)
            else:
                x_ul = [t.to(self.device) for t in x_ul]
            optimizer.zero_grad()
            if isinstance(x_ul, (list, tuple)):
                x_ul = x_ul[0]
            # NOTE(review): inputs are flattened to (batch, 1, 3072),
            # i.e. 32x32x3 images -- confirm against the dataset shape.
            x_l = x_l.reshape([x_l.shape[0], 1, 3072])
            outputs = model(x_l)
            if not isinstance(outputs, (list, tuple)):
                outputs = [outputs]
            reg_loss = 0.0
            if self.is_entropy_based:
                # Entropy regularization on both labeled and unlabeled
                # outputs, combined with mixing coefficient args.alpha.
                # NOTE(review): the reshape hard-codes batch size 16 for
                # unlabeled data -- confirm it matches the loader config.
                x_ul = x_ul.reshape([16, 1, 3072])
                outputs_ul = model(x_ul)
                if not isinstance(outputs_ul, (list, tuple)):
                    outputs_ul = [outputs_ul]
                supervised_reg_losses = [
                    w * (0.0 if c is None else c(o))
                    for c, o, w in zip(self.entropy, outputs, self.weight)
                ]
                unsupervised_reg_losses = [
                    w * (0.0 if c is None else c(o))
                    for c, o, w in zip(self.entropy, outputs_ul, self.weight)
                ]
                reg_losses = [
                    ((a / (x_l.size(0))) + self.args.alpha * (b / (x_ul.size(0)))) / (1.0 + self.args.alpha)
                    for a, b in zip(supervised_reg_losses, unsupervised_reg_losses)
                ]
                reg_loss = sum(reg_losses)
            y_l[0] = y_l[0].long()
            # Weighted supervised loss per target; regression targets
            # (single output column) keep the extra dim, classification
            # targets are squeezed.
            supervised_losses = [
                w * (c(o, gt) if o.size(1) == 1 else c(o, gt.squeeze(1)))
                for c, o, gt, w in zip(criterion, outputs, y_l, weight)
            ]
            supervised_loss = sum(supervised_losses)
            # Collect tree-class predictions/targets for the scoring step.
            treeClass_pred, treeClass_true = None, None
            if score_param_index[0] is not None:
                i = score_param_index[0]
                _, pred_classes = torch.max(outputs[i], dim=1)
                treeClass_true = y_l[i].view(-1).tolist()
                treeClass_pred = pred_classes.view(-1).tolist()
                treeClass_pred = np.array(treeClass_pred, dtype=np.int32)
                treeClass_true = np.array(treeClass_true, dtype=np.int32)
            # NOTE(review): adding the constant self.args.alpha to the loss
            # does not affect gradients but inflates reported loss values --
            # confirm this is intentional.
            loss = supervised_loss + reg_loss + self.args.alpha
            loss.backward()
            optimizer.step()
            metrics = scoreF.scorePerformance(
                treeClass_pred, treeClass_true
            )
            # Update running meters with this batch's losses and metrics.
            for i in range(len(supervised_losses)):
                sup_losses[i + 1].update(
                    supervised_losses[i].item(),
                    x_l.shape[0]
                )
            sup_losses[0].update(
                supervised_loss.item(),
                x_l.shape[0]
            )
            for i in range(len(metrics)):
                perfs[i].update(
                    metrics[i],
                    x_l.shape[0]
                )
            # Periodic checkpoint independent of validation performance.
            if k > 0 and k % self.args.chkpt_freq == 0:
                filename = self.args.checkpoint_dir + \
                    prefix + '_{}.pt'.format(k)
                checkpoint_utils.set_path(filename)
                checkpoint_utils.save_checkpoint(model, k, filename, optimizer)
        # Final checkpoint at the end of training.
        filename = self.args.checkpoint_dir + \
            prefix + '_{}.pt'.format(self.args.iters)
        checkpoint_utils.set_path(filename)
        checkpoint_utils.save_checkpoint(model, self.args.iters, filename, optimizer)

    def eval(self, model, data_iterators, key='val'):
        # Delegate evaluation on the requested split ('val' or 'test') to
        # the shared evaluator; returns (mean loss, metrics, extras).
        assert key in ('val', 'test')
        assert not (data_iterators[key] is None)
        criterion = self.criterion
        weight = self.weight
        device = self.device
        return evaluator.evaluate(model, device, data_iterators[key], self.target_labels, criterion, weight)
<file_sep>#!/bin/bash
# PROJECT_PATH will be changed to the master branch of your repo. Make sure it contains `evaluation/eval.py`
PROJECT_PATH='/rap/jvb-000-aa/COURS2019/etudiants/submissions/b3phot5/code'
# BUG FIX: in bash '#' only starts a comment at the beginning of a word, so the
# original RESULTS_DIR='.'#'/rap/...' assigned the literal ".#/rap/..." instead of ".".
RESULTS_DIR='.'  # '/rap/jvb-000-aa/COURS2019/etudiants/ift6759/projects/horoma/evaluation'
DATA_DIR='/rap/jvb-000-aa/COURS2019/etudiants/data/horoma/'
cd $PROJECT_PATH/evaluation
s_exec python eval.py --dataset_dir=$DATA_DIR --results_dir=$RESULTS_DIR<file_sep>from time import time
from utils.dataset import HoromaDataset, OriginalHoromaDataset
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from utils.constants import Constants
from collections import Counter
from sklearn.utils.multiclass import unique_labels
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import sys
import os
def get_acc(output, label):
    """Return the fraction of rows of `output` whose argmax equals `label`."""
    predictions = torch.argmax(output, dim=1, keepdim=False)
    matches = (predictions == label).type(torch.FloatTensor)
    return torch.mean(matches)
def get_split_indices(targets, overlapped=False, split=0.7):
    """
    Compute splits by separating regions in labeled training and validation sets while also
    to the extent possible preserving class balance. Priority is given to separating regions.
    :param targets: Numpy array of labels.
    :param overlapped: Choice of validation set.
    :param split: Approximate percentage of data in labeled training set.
    :return: training indices, validation indices
    """
    if overlapped:
        regions = np.loadtxt(Constants.REGION_ID_PATH_OVERLAPPED)
    else:
        regions = np.loadtxt(Constants.REGION_ID_PATH)
    idx_map = _map_to_idx(regions)
    region_map = _map_to_regions(regions, targets)
    counts_map = _map_to_counts(regions, targets, idx_map)
    train_indices = []
    unique_targets, counts = np.unique(targets, return_counts=True)
    # counts // (1 / split) ~= floor(counts * split): samples still needed per class.
    # NOTE(review): indexing n_train_remain by target value assumes labels are 0..K-1 — confirm.
    n_train_remain = counts // (1 / split)
    seen_regions = set([])
    for target in unique_targets:
        n_train = n_train_remain[target]
        while n_train > 0:
            region = _next_region_for_target(region_map, target, seen_regions)
            seen_regions.add(region)
            target_counts = counts_map[region]
            # A whole region is added at once, so it reduces the remaining
            # quota of every class present in that region.
            n_train_remain -= target_counts
            train_indices.extend(idx_map[region])
            n_train = n_train_remain[target]
    # PERF FIX: membership test against a set instead of the original
    # `i not in train_indices` list scan, which made this loop O(n^2).
    train_index_set = set(train_indices)
    valid_indices = [i for i in range(len(targets)) if i not in train_index_set]
    return train_indices, valid_indices
def _next_region_for_target(region_map, target, seen_regions):
"""
Find an unexplored region which contains a sample for the given target.
:param region_map: Pre-computed dictionary of target --> associated regions.
:param target: Label in question.
:param seen_regions: Set of previously explored regions. We want to avoid returning these twice.
:return: Region id.
"""
while True:
region = region_map[target].pop()
if region not in seen_regions:
break
return region
def _map_to_counts(regions, targets, idx_map):
"""
Compute a mapping associating each region to an array of counts for each class.
:param regions: array of region ids.
:param targets: array of targets.
:param idx_map: mapping for region --> indices
:return: Dictionary associating each region to an array of counts for each class.
"""
num_targets = len(np.unique(targets))
mapping = {region: np.zeros(num_targets) for region in np.unique(regions)}
for region in mapping:
region_targets = targets[idx_map[region]]
for t in region_targets:
mapping[region][t] += 1
return mapping
def _map_to_regions(regions, targets):
"""
Helper function computing a mapping from labels to association regions.
:param regions: Array of region ids.
:param targets: Array of targets
:return: dict for mapping target --> regions.
"""
pairs = zip(targets, regions)
mapping = {target: [] for target in targets}
for key, val in pairs:
if val not in mapping[key]:
mapping[key].append(val)
return mapping
def _map_to_idx(regions):
"""
Helper function associating regions with their indices (samples)in the file.
:param regions: Array of region ids.
:return: Indices corresponding to this region.
"""
idx = {elem: [] for elem in np.unique(regions)}
for i, id in enumerate(regions):
idx[id].append(i)
return idx
def load_datasets(datapath, train_subset, flattened=False, split="train_all"):
    """
    Load Horoma datasets from specified data directory.
    :type datapath: str
    :type flattened: bool
    :type train_subset: str
    """
    print("Loading datasets from ({}) ...".format(datapath), end=' ')
    start = time()
    dataset = HoromaDataset(
        datapath,
        split=split,
        subset=train_subset,
        flattened=flattened,
    )
    print("Done in {:.2f} sec".format(time() - start))
    return dataset
def load_original_horoma_datasets(datapath, train_subset, flattened=False, overlapped=True):
    """
    Load Original Horoma datasets from specified data directory.
    Return unlabeled, labeled and validation sets
    :type datapath: str
    :type flattened: bool
    :type train_subset: str
    :type overlapped: bool
    """
    print("Loading datasets from ({}) ...".format(datapath), end=' ')
    start_time = time()
    # Pick split names once instead of duplicating the construction code.
    if overlapped:
        unlabeled_split = "train_overlapped"
        labeled_split = "train_labeled_overlapped"
        valid_split = "valid_overlapped"
    else:
        unlabeled_split = "train"
        labeled_split = "train_labeled"
        valid_split = "valid"
    unlabeled_trainset = OriginalHoromaDataset(
        datapath, split=unlabeled_split, subset=train_subset, flattened=flattened)
    labeled_trainset = OriginalHoromaDataset(
        datapath, split=labeled_split, flattened=flattened)
    labeled_validset = OriginalHoromaDataset(
        datapath, split=valid_split, flattened=flattened)
    # Combined labeled set: always starts from the plain "train_labeled" split
    # and is extended with the chosen validation data.
    labeled_train_valid_set = OriginalHoromaDataset(
        datapath, split="train_labeled", flattened=flattened)
    labeled_train_valid_set.data = np.concatenate(
        [labeled_train_valid_set.data, labeled_validset.data])
    labeled_train_valid_set.targets = np.concatenate(
        [labeled_train_valid_set.targets, labeled_validset.targets])
    print("Done in {:.2f} sec".format(time() - start_time))
    return unlabeled_trainset, labeled_trainset, labeled_validset, labeled_train_valid_set
def return_images(data):
    """Flatten every sample of `data` to a 3 * 32 * 32 vector.

    :param data: dataset; datasets with more than 500 samples are treated as
        unlabeled (their __getitem__ yields images only, not (image, target)).
    :return: (list of flattened image arrays, list of integer targets — empty
        for unlabeled datasets).
    """
    # BUG FIX: DataLoader was never imported at module level; a local import
    # keeps the fix self-contained.
    from torch.utils.data import DataLoader

    all_embeddings = []
    all_targets = []
    loader = DataLoader(data, batch_size=32, shuffle=True)
    cuda = torch.cuda.is_available()
    # Heuristic: large datasets are assumed to be the unlabeled split.
    labeled = loader.dataset.data.shape[0] <= 500
    for imgs in loader:
        if labeled:
            (imgs, target) = imgs
        # No longer clobbers the `data` parameter as the original did.
        batch = Variable(imgs).cuda() if cuda else Variable(imgs)
        batch = batch.view(-1, 3 * 32 * 32).cpu().data.numpy()
        for l in range(np.shape(batch)[0]):
            all_embeddings.append(batch[l])
            if labeled:
                all_targets.append(target[l].numpy()[0])
    return all_embeddings, all_targets
def assign_labels_to_clusters(model, data, labels_true):
    """
    Assign class label to each model cluster using labeled data.
    The class label is based on the class of majority samples within a cluster.
    Unassigned clusters are labeled as -1.
    """
    print("Assigning labels to clusters ...", end=' ')
    start_time = time()
    labels_pred = model.predict_cluster(data)
    labelled_clusters = []
    for cluster in range(model.n_clusters):
        members = np.where(labels_pred == cluster)[0]
        if len(members) == 0:
            # Empty cluster: no label can be inferred.
            labelled_clusters.append(-1)
        else:
            labelled_clusters.append(np.argmax(np.bincount(labels_true[members])))
    print("Done in {:.2f} sec".format(time() - start_time))
    return np.asarray(labelled_clusters)
def compute_metrics(y_true, y_pred):
    """ From the true and predicted labels, return the accuracy and the f1 score"""
    return (
        accuracy_score(y_true, y_pred),
        f1_score(y_true, y_pred, average="weighted"),
    )
def __compute_metrics(y_true, y_pred):
    """Computes accuracy, weighted F1 and per-class F1 scores."""
    accuracy = accuracy_score(y_true, y_pred)
    weighted_f1 = f1_score(y_true, y_pred, average="weighted")
    per_class_f1 = f1_score(y_true, y_pred, average=None)
    return accuracy, weighted_f1, per_class_f1
def eval_model_predictions(model, x, y_true, cluster_labels):
    """
    Predict labels and compare to true labels to compute the accuracy and f1 score
    """
    print("Evaluating model ...", end=' ')
    start_time = time()
    # Map each sample's cluster id to that cluster's assigned class label.
    y_pred = cluster_labels[model.predict_cluster(x)]
    accuracy, f1 = compute_metrics(y_true, y_pred)
    elapsed = time() - start_time
    print(
        "Done in {:.2f} sec | Accuracy: {:.2f} - F1: {:.2f}".format(elapsed, accuracy * 100, f1 * 100))
    return y_pred, accuracy, f1
def weights_init(m):
    """Initialize a module in place based on its class name.

    Conv* layers get Xavier-uniform weights; BatchNorm* layers get N(1, 0.02)
    weights and zero bias; Bias* layers get zero bias. Intended for use with
    `net.apply(weights_init)`.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # FIX: xavier_uniform is a deprecated alias of the in-place
        # xavier_uniform_ — identical behavior, supported spelling.
        torch.nn.init.xavier_uniform_(m.weight)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif classname.find('Bias') != -1:
        m.bias.data.fill_(0)
def train_nrm(net, train_loader, labeled_loader, eval_loader, num_epochs, configs, n_iterations, experiment):
    """
    train_nrm is a function for training NRM
    :param net: the NRM network
    :param train_loader: dataloader for NRM unlabeled.
    :param labeled_loader: dataloader for NRM labeled.
    :param eval_loader: dataloader for NRM validation.
    :param num_epochs: number of epochs to train for
    :param configs: dictionary containing hyperparameters
    :param n_iterations: number of iterations per batch of labeled data
    :param experiment: comet_ml experiment object for logging
    :return best_f1: highest f1 score obtained
    :return best_acc: accuracy score corresponding to epoch with highest f1
    :return best_model: best epoch model
    """
    best_f1 = 0
    best_acc = 0
    # BUG FIX: was `best_mode = 0` (typo) — `best_model` would be unbound at the
    # final return whenever no epoch improved on the initial best_f1.
    best_model = 0
    valid_accuracies = []
    f1_scores = []
    device = 'cuda'  # NOTE(review): hard-coded; assumes a CUDA device is available
    NO_LABEL = -1
    criterion = nn.CrossEntropyLoss(
        size_average=False, ignore_index=NO_LABEL).cuda()
    L2_loss = nn.MSELoss(size_average=False, reduce=False,
                         reduction='mean').cuda()
    trainer = torch.optim.Adam(net.parameters(), configs['lr'][
        0], weight_decay=configs['decay'])
    best_valid_acc = 0
    iter_indx = 0
    net.to(device)
    for epoch in range(1, num_epochs):
        train_loss = 0
        train_loss_xentropy = 0
        train_loss_reconst = 0
        train_loss_pn = 0
        train_loss_kl = 0
        train_loss_bnmm = 0
        correct = 0
        num_batch_train = 0
        # switch to train mode
        net.train()
        for i in range(int(n_iterations)):
            unsup_batch = next(iter(train_loader))
            sup_batch, target = next(iter(labeled_loader))
            # set up unlabeled input and labeled input with the corresponding
            # labels
            input_unsup_var = torch.autograd.Variable(unsup_batch[0:(
                configs['batch_size'] - configs['labeled_batch_size'])]).to(device)
            input_sup_var = torch.autograd.Variable(sup_batch).to(device)
            target_sup_var = torch.autograd.Variable(
                target.data.long()).to(device)
            minibatch_unsup_size = configs[
                'batch_size'] - configs['labeled_batch_size']
            minibatch_sup_size = configs['labeled_batch_size']
            # compute losses for unlabeled input
            [output_unsup, xhat_unsup, loss_pn_unsup,
             loss_bnmm_unsup] = net(input_unsup_var)
            loss_reconst_unsup = L2_loss(xhat_unsup, input_unsup_var).mean()
            softmax_unsup = F.softmax(output_unsup)
            loss_kl_unsup = - \
                torch.sum(torch.log(10.0 * softmax_unsup + 1e-8) * softmax_unsup) / minibatch_unsup_size
            loss_unsup = configs['alpha_reconst'] * loss_reconst_unsup + configs['alpha_kl'] * \
                loss_kl_unsup + \
                configs['alpha_bnmm'] * loss_bnmm_unsup + \
                configs['alpha_pn'] * loss_pn_unsup
            # compute losses for labeled input
            [output_sup, xhat_sup, loss_pn_sup, loss_bnmm_sup] = net(
                input_sup_var, target_sup_var.squeeze_())
            loss_xentropy_sup = criterion(
                output_sup, target_sup_var) / minibatch_sup_size
            loss_reconst_sup = L2_loss(xhat_sup, input_sup_var).mean()
            softmax_sup = F.softmax(output_sup)
            loss_kl_sup = - \
                torch.sum(torch.log(10.0 * softmax_sup + 1e-8) * softmax_sup) / minibatch_sup_size
            loss_sup = loss_xentropy_sup + configs['alpha_reconst'] * loss_reconst_sup + configs[
                'alpha_kl'] * loss_kl_sup + configs['alpha_bnmm'] * loss_bnmm_sup + configs['alpha_pn'] * loss_pn_sup
            loss = torch.mean(loss_unsup + loss_sup)
            trainer.zero_grad()
            loss.backward()
            trainer.step()
            # accumulate all the losses for logging
            loss_reconst = loss_reconst_unsup + loss_reconst_sup
            loss_pn = loss_pn_unsup + loss_pn_sup
            loss_xentropy = loss_xentropy_sup
            loss_kl = loss_kl_unsup + loss_kl_sup
            loss_bnmm = loss_bnmm_unsup + loss_bnmm_sup
            train_loss_xentropy += torch.mean(
                loss_xentropy).cpu().detach().numpy()
            train_loss_reconst += torch.mean(
                loss_reconst).cpu().detach().numpy()
            train_loss_pn += torch.mean(loss_pn).cpu().detach().numpy()
            train_loss_kl += torch.mean(loss_kl).cpu().detach().numpy()
            train_loss_bnmm += torch.mean(loss_bnmm).cpu().detach().numpy()
            train_loss += torch.mean(loss).cpu().detach().numpy()
            correct += get_acc(output_sup,
                               target_sup_var).cpu().detach().numpy()
            num_batch_train += 1
            iter_indx += 1
        # Validation
        valid_loss = 0
        valid_loss_xentropy = 0
        valid_loss_reconst = 0
        valid_loss_pn = 0
        valid_loss_kl = 0
        valid_loss_bnmm = 0
        valid_correct = 0
        num_batch_valid = 0
        valid_accuracy = 0
        valid_f1 = 0
        net.eval()
        for i, (batch, target) in enumerate(eval_loader):
            with torch.no_grad():
                input_var = torch.autograd.Variable(batch).to(device)
                target_var = torch.autograd.Variable(
                    target.data.long()).to(device)
                minibatch_size = len(target_var)
                [output, xhat, loss_pn, loss_bnmm] = net(input_var, target_var)
                loss_xentropy = criterion(
                    output, target_var.squeeze_()) / minibatch_size
                loss_reconst = L2_loss(xhat, input_var).mean()
                softmax_val = F.softmax(output)
                loss_kl = - \
                    torch.sum(torch.log(10.0 * softmax_val + 1e-8) * softmax_val) / minibatch_size
                loss = loss_xentropy + configs['alpha_reconst'] * loss_reconst + configs[
                    'alpha_kl'] * loss_kl + configs['alpha_bnmm'] * loss_bnmm + configs['alpha_pn'] * loss_pn
                valid_loss_xentropy += torch.mean(
                    loss_xentropy).cpu().detach().numpy()
                valid_loss_reconst += torch.mean(
                    loss_reconst).cpu().detach().numpy()
                valid_loss_pn += torch.mean(loss_pn).cpu().detach().numpy()
                valid_loss_kl += torch.mean(loss_kl).cpu().detach().numpy()
                valid_loss_bnmm += torch.mean(loss_bnmm).cpu().detach().numpy()
                valid_loss += torch.mean(loss).cpu().detach().numpy()
                valid_correct += get_acc(output,
                                         target_var).cpu().detach().numpy()
                accuracy, f1 = compute_metrics(
                    target_var.cpu(), torch.argmax(output, dim=1, keepdim=False).cpu())
                valid_accuracy += accuracy
                valid_f1 += f1
                num_batch_valid += 1
        valid_accuracies.append(valid_accuracy / num_batch_valid)
        f1_scores.append(valid_f1 / num_batch_valid)
        valid_acc = valid_correct / num_batch_valid
        f1_s = valid_f1 / num_batch_valid
        # Checkpoint whenever the mean validation F1 improves.
        if f1_s > best_f1:
            best_f1 = f1_s
            best_acc = valid_accuracy / num_batch_valid
            best_model = epoch
            torch.save(net.state_dict(), os.path.join(
                configs['MODEL_PATH'], 'best_model.pth'))
        experiment.log_metric('train_loss', train_loss / num_batch_train)
        experiment.log_metric(
            'train_xent', train_loss_xentropy / num_batch_train)
        experiment.log_metric(
            'train_recon', train_loss_reconst / num_batch_train)
        experiment.log_metric('train_pn', train_loss_pn / num_batch_train)
        experiment.log_metric('train_kl', train_loss_kl / num_batch_train)
        experiment.log_metric('train_bnmm', train_loss_bnmm / num_batch_train)
        experiment.log_metric('valid_loss', valid_loss / num_batch_valid)
        experiment.log_metric(
            'valid_xent', valid_loss_xentropy / num_batch_valid)
        experiment.log_metric(
            'valid_recon', valid_loss_reconst / num_batch_valid)
        experiment.log_metric('valid_pn', valid_loss_pn / num_batch_valid)
        experiment.log_metric('valid_kl', valid_loss_kl / num_batch_valid)
        experiment.log_metric('valid_bnmm', valid_loss_bnmm / num_batch_valid)
        experiment.log_metric('valid_acc', valid_accuracy / num_batch_valid)
        experiment.log_metric('valid_f1', valid_f1 / num_batch_valid)
        epoch_str = ("Epoch %d. Train Loss: %f, Train Xent: %f, Train Reconst: %f, Train Pn: %f, Train acc %f, Valid Loss: %f, Valid acc %f, Best f1 acc %f,f1 %f, acc %f "
                     % (epoch, train_loss / num_batch_train, train_loss_xentropy / num_batch_train, train_loss_reconst / num_batch_train, train_loss_pn / num_batch_train,
                        correct / num_batch_train, valid_loss / num_batch_valid, valid_acc, best_f1, valid_f1 / num_batch_valid, valid_accuracy / num_batch_valid))
        print(epoch_str)
    return best_f1, best_acc, best_model
<file_sep>from comet_ml import OfflineExperiment
import argparse
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
sys.path.append("../")
import utils.data_utils as data_utils
from utils.transformer_net_utils import Trainer
from utils.dataset import OriginalHoromaDataset
import models.transformer_net as transformer_net
# Note : this code comes from OM Signal block 2 baseline
# Some changes were made to be able to use this for Horoma
def entropy_classification(x):
    """Negative-entropy regularizer: sum over p * log(p) of softmax(x, dim=1)."""
    log_probs = F.log_softmax(x, dim=1)
    probs = F.softmax(x, dim=1)
    return (log_probs * probs).sum()
def margin_rank_loss(pred, target):
    """Mean margin ranking loss over every unordered pair of predictions,
    ordered by the corresponding pair of targets."""
    rows, cols = np.triu_indices(pred.size(0), 1)
    rows = torch.from_numpy(rows).to(pred.device)
    cols = torch.from_numpy(cols).to(pred.device)
    first = pred[rows].view(-1)
    second = pred[cols].view(-1)
    t_first = target[rows].view(-1)
    t_second = target[cols].view(-1)
    # +1 when the first target ranks at least as high, -1 otherwise.
    sign = (t_first >= t_second).long().float() - (t_first < t_second).long().float()
    return F.margin_ranking_loss(first, second, sign, margin=0.0, reduction='mean')
# Number of output units for each training target.
target_out_size_dict = {
    'tree_class': 17
}
# Criterion loss to use for each output
target_criterion_dict = {
    'tree_class': nn.CrossEntropyLoss()
}
# Entropy regularization for the different outputs - None means N/A (it does not apply)
target_entropy_dict = {
    'tree_class': entropy_classification
}
# 0 for regression (MSE), 1 for classification (Kl_Div), None for Nothing
target_vat_dict = {
    'tree_class': 1,  # 1, # None, #1
}
# Per-target loss weights; None means all targets are weighted equally.
loss_weight = None
def main():
    """Parse CLI arguments, build the Horoma datasets/iterators and a
    TransformerNet, then run semi-supervised training with comet.ml logging."""
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch ECG Example')
    parser.add_argument('--batch-size', type=int, default=16, metavar='N',
                        help='input batch size for training (default: 16)')
    parser.add_argument('--eval-batch-size', type=int, default=8, metavar='N',
                        help='input batch size for evaluation (default: 8)')
    parser.add_argument('--iters', type=int, default=10000, metavar='N',
                        help='number of iterations to train (default: 10000)')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',  # 0.001, # 0.0001
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--alpha', type=float, default=0.005, metavar='ALPHA',  # 0.001 (actuellement),
                        help='regularization coefficient (default: 0.01)')
    parser.add_argument('--ema_decay', type=float, default=0.999, metavar='EMA',  # 0.001 (actuellement),
                        help='decay for exponential moving average (default: 0.999)')
    parser.add_argument('--xi', type=float, default=5.0, metavar='XI',
                        help='hyperparameter of VAT (default: 5.0)')
    parser.add_argument('--eps', type=float, default=1.0, metavar='EPS',
                        help='hyperparameter of VAT (default: 1.0)')
    parser.add_argument('--cr_type', type=int, default=3, metavar='CR',
                        help='Consistency Regularization (1:Stochastic Pertubation, 2:VAT, 3:MeanTeacher - default: 3)')
    parser.add_argument('--ip', type=int, default=1, metavar='IP',
                        help='hyperparameter of VAT (default: 1)')
    parser.add_argument('--workers', type=int, default=8, metavar='W',
                        help='number of CPU')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument("--targets", type=str, nargs="+", default=['tree_class'],
                        help='list of targets to use for training')
    parser.add_argument("--data_dir", type=str, default="/rap/jvb-000-aa/COURS2019/etudiants/data/horoma",
                        help='directory where to find the data')
    parser.add_argument("--checkpoint_dir", type=str, default='../transformer_net_models/',
                        help='directory where to checkpoints the models')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--chkpt-freq', type=int, default=100, metavar='N',
                        help='how many batches to wait before performing checkpointing')
    parser.add_argument('--no-entropy', action='store_true', default=False,
                        help='enables Entropy based regularization')
    parser.add_argument('--reg-vat-var', type=float, default=0.1,
                        help='Assumed variance of the predicted Gaussian for regression tasks (default: 0.1)')
    args = parser.parse_args()
    # Seed every RNG used for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    entropy_flag = not args.no_entropy
    use_gpu = torch.cuda.is_available()
    device = torch.device('cuda:0' if use_gpu else 'cpu')
    # BUG FIX: both branches of this conditional previously printed 'GPU'.
    print('device: {}'.format('GPU' if use_gpu else 'CPU'))
    print('Entropy Regularization: ', entropy_flag)
    print('Targets: ', args.targets)
    targets = args.targets
    targets = ','.join(targets)
    train_labeled_dataset = OriginalHoromaDataset(args.data_dir, split="train_labeled")
    train_unlabeled_dataset = OriginalHoromaDataset(args.data_dir, split="train")
    valid_dataset = OriginalHoromaDataset(args.data_dir, split="valid")
    data_iterators = data_utils.get_iters(
        train_labeled_dataset,
        train_unlabeled_dataset,
        valid_dataset,
        None,
        l_batch_size=args.batch_size,
        ul_batch_size=args.batch_size,
        val_batch_size=args.eval_batch_size,
        workers=args.workers
    )
    target_labels = targets.split(",")
    target_labels = [s.lower().strip() for s in target_labels]
    out_size = 17
    # Architecture hyper-parameters.
    n_layers = 0  # hyper-parameter
    hidden_size = 256  # hyper-parameter # 128
    kernel_size = 8  # for CNN1D only
    pool_size = 4  # for CNN1D only
    dropout = 0.2
    n_heads = 8  # 4
    key_dim = 128
    val_dim = 128
    inner_dim = 128
    model = transformer_net.TransformerNet(
        1, out_size, hidden_size, n_layers, kernel_size=kernel_size, pool_size=pool_size,
        n_heads=n_heads, key_dim=key_dim, val_dim=val_dim, inner_dim=inner_dim, dropout=dropout
    ).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    init_iter = 0
    trainer = Trainer(
        args, device, target_criterion_dict,
        args.targets, loss_weight,
        target_vat_dict,
        target_entropy_dict if entropy_flag else None,
        init_iter,
    )
    experiment = _get_comet_experiment()
    trainer.train(
        model, data_iterators, optimizer,
        prefix='neural_network',
        experiment=experiment
    )
def _get_comet_experiment():
    """Create and name the offline comet.ml experiment used for logging."""
    experiment = OfflineExperiment(
        project_name='general',
        workspace='benjaminbenoit',
        offline_directory="../transformer_net_comet_experiences",
    )
    experiment.set_name("TransformerNet")
    return experiment
# Script entry point: run training when invoked directly.
if __name__ == '__main__':
    main()
<file_sep>import torch
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
# Note : this code comes from OM Signal block 2 baseline
# Several adjustments (mainly in TransformerNet) were made to make it work for Horoma
class Conv1DLinear(nn.Module):
    """Two conv + max-pool stages followed by one (or several) linear heads.

    `output_size` may be an int (single head) or a list/tuple of ints, in
    which case one linear head is built per task.
    """

    def __init__(self,
                 input_size,
                 output_size,
                 hidden_size,
                 kernel_size=2,
                 pool_size=2
                 ):
        super(Conv1DLinear, self).__init__()
        self.preprocess = Preprocessor()
        self.conv1 = nn.Conv1d(input_size, hidden_size, kernel_size)
        # Track the temporal length (valid padding), starting from the
        # fixed input length of 3750 samples.
        lout = 3750 - kernel_size + 1
        self.pool1 = nn.MaxPool1d(pool_size)
        lout = math.floor(lout / pool_size)
        self.conv2 = nn.Conv1d(hidden_size, hidden_size, kernel_size)
        lout = lout - kernel_size + 1
        self.pool2 = nn.MaxPool1d(pool_size)
        lout = math.floor(lout / pool_size)
        print('lout: ', lout)
        if isinstance(output_size, (list, tuple)):
            self.out = nn.ModuleList(
                [nn.Linear(hidden_size * lout, o) for o in output_size]
            )
        else:
            self.out = nn.Linear(hidden_size * lout, output_size)
        self.nl = nn.ReLU()

    def forward(self, x, noise=None):
        """Forward pass; `noise` (if given) is added after preprocessing."""
        x = self.preprocess(x)
        if noise is not None:
            x = x + noise
        x = self.nl(self.pool1(self.conv1(x)))
        x = self.nl(self.pool2(self.conv2(x)))
        flat = x.view(-1, x.size(1) * x.size(2))
        if isinstance(self.out, nn.ModuleList):
            return [head(flat) for head in self.out]
        return self.out(flat)
class Conv1DBNLinear(nn.Module):
    """Three (conv-conv-BN-pool) stages with dropout, followed by an MLP head.

    `output_size` may be an int (single head) or a list/tuple of ints, in
    which case one independent MLP head is built per task.
    """

    def __init__(self,
                 input_size,
                 output_size,
                 hidden_size,
                 kernel_size=2,
                 pool_size=2,
                 dropout=0
                 ):
        super(Conv1DBNLinear, self).__init__()
        self.preprocess = Preprocessor()
        self.batch_norm0 = nn.BatchNorm1d(input_size)
        # Track the temporal length through the network; input length is 3750.
        lout = 3750
        self.conv1 = nn.Conv1d(input_size, hidden_size, kernel_size)
        lout = self.l_out_conv1D(lout, kernel_size)
        self.conv2 = nn.Conv1d(hidden_size, hidden_size, kernel_size)
        lout = self.l_out_conv1D(lout, kernel_size)
        self.batch_norm1 = nn.BatchNorm1d(hidden_size)
        self.pool1 = nn.MaxPool1d(pool_size)
        lout = self.l_out_maxpool1D(lout, pool_size)
        self.dropout = nn.Dropout(p=dropout)
        self.dropout5 = nn.Dropout(p=0.5)
        # Channel width halves after the first stage.
        input_size = hidden_size
        hidden_size = hidden_size // 2
        self.conv3 = nn.Conv1d(input_size, hidden_size, kernel_size)
        lout = self.l_out_conv1D(lout, kernel_size)
        self.conv4 = nn.Conv1d(hidden_size, hidden_size, kernel_size)
        lout = self.l_out_conv1D(lout, kernel_size)
        self.batch_norm2 = nn.BatchNorm1d(hidden_size)
        self.pool2 = nn.MaxPool1d(pool_size)
        lout = self.l_out_maxpool1D(lout, pool_size)
        input_size = hidden_size
        self.conv5 = nn.Conv1d(input_size, hidden_size, kernel_size)
        lout = self.l_out_conv1D(lout, kernel_size)
        self.conv6 = nn.Conv1d(hidden_size, hidden_size, kernel_size)
        lout = self.l_out_conv1D(lout, kernel_size)
        self.batch_norm3 = nn.BatchNorm1d(hidden_size)
        self.pool3 = nn.MaxPool1d(pool_size)
        lout = self.l_out_maxpool1D(lout, pool_size)
        if isinstance(output_size, (list, tuple)):
            # One MLP head per task.
            self.out = nn.ModuleList(
                [
                    nn.Sequential(
                        nn.Linear(hidden_size * lout, 200),
                        nn.ReLU(),
                        nn.Dropout(p=0.5),
                        nn.Linear(200, 200),
                        nn.ReLU(),
                        nn.Dropout(p=0.5),
                        nn.Linear(200, o)
                    ) for o in output_size
                ]
            )
        else:
            self.out = nn.Sequential(
                nn.Linear(hidden_size * lout, 200),
                nn.ReLU(),
                nn.Dropout(p=0.5),
                nn.Linear(200, 200),
                nn.ReLU(),
                nn.Dropout(p=0.5),
                nn.Linear(200, output_size)
            )
        self.nl = nn.SELU()

    def l_out_conv1D(self, l_in, kernel_size, stride=1, padding=0, dilation=1):
        # Standard Conv1d output-length formula (see torch.nn.Conv1d docs).
        l_out = (l_in + (2 * padding) - dilation *
                 (kernel_size - 1) - 1) / stride
        l_out = l_out + 1
        return int(l_out)

    def l_out_maxpool1D(self, l_in, kernel_size, stride=None, padding=0, dilation=1):
        # MaxPool1d defaults stride to kernel_size; same length formula as conv.
        if stride is None:
            stride = kernel_size
        l_out = self.l_out_conv1D(
            l_in, kernel_size, stride, padding, dilation
        )
        return l_out

    def forward(self, x, noise=None):
        """Forward pass; optional `noise` is added right after preprocessing."""
        x = self.preprocess(x)
        if noise is not None:
            x = x + noise
        x = self.batch_norm0(x)
        x = self.dropout(
            self.pool1(
                self.batch_norm1(self.nl(self.conv2(self.nl(self.conv1(x)))))
            )
        )
        x = self.dropout(
            self.pool2(
                self.batch_norm2(self.nl(self.conv4(self.nl(self.conv3(x)))))
            )
        )
        x = self.dropout(
            self.pool3(
                self.batch_norm3(self.nl(self.conv6(self.nl(self.conv5(x)))))
            )
        )
        if isinstance(self.out, nn.ModuleList):
            pred = [
                l(x.view(x.size(0), -1)) for i, l in enumerate(self.out)
            ]
        else:
            pred = self.out(x.view(x.size(0), -1))
        return pred
class TransformerNet(nn.Module):
    """Conv/pool front-end followed by transformer encoder layers and a
    task-attention MLP head (one head per output when output_size is a list)."""

    def __init__(self,
                 input_size,
                 output_size,
                 hidden_size,
                 n_layers=2,
                 kernel_size=2,
                 pool_size=2,
                 n_heads=4,
                 key_dim=None,
                 val_dim=None,
                 inner_dim=None,
                 dropout=0.1
                 ):
        super(TransformerNet, self).__init__()
        self.preprocess = Preprocessor()
        self.conv1 = nn.Conv1d(input_size, hidden_size, kernel_size)
        # size of output
        lout = self.l_out_conv1d(3750, kernel_size)
        self.pool1 = nn.MaxPool1d(pool_size)
        lout = self.l_out_maxpool1d(lout, pool_size)
        self.conv2 = nn.Conv1d(hidden_size, hidden_size, kernel_size)
        lout = self.l_out_conv1d(lout, kernel_size)
        self.pool2 = nn.MaxPool1d(pool_size)
        lout = self.l_out_maxpool1d(lout, pool_size)
        self.conv3 = nn.Conv1d(hidden_size, hidden_size, kernel_size)
        lout = self.l_out_conv1d(lout, kernel_size)
        self.pool3 = nn.MaxPool1d(pool_size)
        lout = self.l_out_maxpool1d(lout, pool_size)
        print('lout: ', lout)
        self.nl = nn.ReLU()
        # Attention dimensions default to values derived from hidden_size.
        if key_dim is None:
            key_dim = hidden_size // n_heads
        if val_dim is None:
            val_dim = hidden_size // n_heads
        if inner_dim is None:
            inner_dim = hidden_size // 2
        # NOTE(review): with n_layers == 0 this is a plain empty list rather
        # than a ModuleList; harmless here since there is nothing to register.
        self.layer_stack = [] if n_layers == 0 else nn.ModuleList([
            EncoderLayer(hidden_size, inner_dim, n_heads, key_dim, val_dim, dropout=dropout)
            for _ in range(n_layers)
        ])
        if not isinstance(output_size, (list, tuple)):
            output_size = [output_size]
        # Each head pools the sequence with task attention, then applies an MLP.
        output_modules = [
            nn.Sequential(
                EncoderTaskLayer2(hidden_size, inner_dim, n_heads, key_dim, val_dim, dropout=dropout, attn_flag=False),
                nn.Linear(hidden_size, 200),
                nn.ReLU(),
                nn.Linear(200, 200),
                nn.ReLU(),
                nn.Linear(200, o)
            ) for o in output_size
        ]
        if len(output_modules) == 1:
            self.out = output_modules[0]
        else:
            self.out = nn.ModuleList(output_modules)

    def l_out_conv1d(self, l_in, kernel_size, stride=1, padding=0, dilation=1):
        # Standard Conv1d output-length formula (see torch.nn.Conv1d docs).
        l_out = (l_in + (2 * padding) - dilation *
                 (kernel_size - 1) - 1) / stride
        l_out = l_out + 1
        return int(l_out)

    def l_out_maxpool1d(self, l_in, kernel_size, stride=None, padding=0, dilation=1):
        # MaxPool1d defaults stride to kernel_size; same length formula as conv.
        if stride is None:
            stride = kernel_size
        l_out = self.l_out_conv1d(
            l_in, kernel_size, stride, padding, dilation
        )
        return l_out

    def forward(self, x, noise=None):
        """Forward pass; optional `noise` is added to the raw input.

        Preprocessing is intentionally disabled for Horoma (commented out)."""
        # x = self.preprocess(x)
        if noise is not None:
            x = x + noise
        x = self.nl(self.pool1(self.conv1(x)))
        x = self.nl(self.pool2(self.conv2(x)))
        x = self.nl(self.pool3(self.conv3(x)))
        # Switch to (batch, time, channels) for the attention layers.
        data = x.permute(0, 2, 1)
        for enc_layer in self.layer_stack:
            data, _ = enc_layer(data)
        if isinstance(self.out, nn.ModuleList):
            pred = [
                l(data) for i, l in enumerate(self.out)
            ]
        else:
            pred = self.out(data)
        return pred
class ScaledDotProductAttention(nn.Module):
    ''' Scaled Dot-Product Attention '''

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v, mask=None):
        """Return (attn @ v, attn) where attn = dropout(softmax(q k^T / temperature))."""
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        if mask is not None:
            # Masked positions receive zero attention weight after softmax.
            scores = scores.masked_fill(mask, -np.inf)
        weights = self.dropout(self.softmax(scores))
        return torch.bmm(weights, v), weights
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module '''

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        # One wide linear projection produces all heads at once.
        self.w_qs = nn.Linear(d_model, n_head * d_k)
        self.w_ks = nn.Linear(d_model, n_head * d_k)
        self.w_vs = nn.Linear(d_model, n_head * d_v)
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)
        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        """Return (output, attn); output keeps q's shape via residual + LayerNorm."""
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()
        residual = q
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        # Fold the head dimension into the batch dimension so bmm can be used.
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k)  # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k)  # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v)  # (n*b) x lv x dv
        if mask is not None:
            mask = mask.repeat(n_head, 1, 1)  # (n*b) x .. x ..
        output, attn = self.attention(q, k, v, mask=mask)
        # Un-fold heads and concatenate them along the feature dimension.
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)  # b x lq x (n*dv)
        output = self.dropout(self.fc(output))
        # Post-norm transformer style: residual connection then LayerNorm.
        output = self.layer_norm(output + residual)
        return output, attn
class MultiHeadTaskAttention(nn.Module):
    ''' Multi-Head Attention module '''

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.num_embeddings = 1
        self.d_model = d_model
        # NOTE(review): this embedding is never used by forward() — the query
        # is replaced below by a constant uniform vector. Dead weight?
        self.embedding = nn.Embedding(self.num_embeddings, d_model)
        self.multihead = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout)

    def forward(self, q, k, v, mask=None):
        # `q` is an integer task index; it is validated and then substituted
        # with a fixed uniform query of shape (batch, 1, d_model).
        assert type(q) == int
        assert q < self.num_embeddings
        q = (torch.ones((k.size(0), 1, self.d_model)) / self.d_model).to(k.device)
        return self.multihead(q, k, v, mask)
class PositionwiseFeedForward(nn.Module):
    """Position-wise two-layer feed-forward sub-layer.

    Implemented with 1x1 Conv1d layers over the sequence dim, followed by
    dropout and layer normalization. The residual connection is only applied
    when the output width equals the input width.
    """

    def __init__(self, d_in, d_hid, dropout=0.1, d_out=None):
        super().__init__()
        if d_out is None:
            d_out = d_in
        self.w_1 = nn.Conv1d(d_in, d_hid, 1)  # position-wise expansion
        self.w_2 = nn.Conv1d(d_hid, d_out, 1)  # position-wise projection
        self.layer_norm = nn.LayerNorm(d_out)
        self.dropout = nn.Dropout(dropout)
        self.d_out = d_out
        self.d_in = d_in

    def forward(self, x):
        """x: (batch, len, d_in) -> (batch, len, d_out)."""
        shortcut = x
        # Conv1d wants (batch, channels, len); transpose in and back out.
        hidden = F.relu(self.w_1(x.transpose(1, 2)))
        projected = self.w_2(hidden).transpose(1, 2)
        projected = self.dropout(projected)
        # Residual only makes sense when shapes match.
        if self.d_out == self.d_in:
            projected = projected + shortcut
        return self.layer_norm(projected)
class EncoderLayer(nn.Module):
    ''' Compose with two layers: standard Transformer encoder block of
    self-attention followed by a position-wise feed-forward sub-layer.
    When attn_flag is False only the encoder output is returned. '''

    def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1, d_out=None, attn_flag=True):
        super(EncoderLayer, self).__init__()
        self.slf_attn = MultiHeadAttention(
            n_head, d_model, d_k, d_v, dropout=dropout)
        self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout, d_out=d_out)
        self.attn_flag = attn_flag

    def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None):
        """Return (output, attention weights) or just output per attn_flag."""
        out, attn = self.slf_attn(enc_input, enc_input, enc_input, mask=slf_attn_mask)
        # Zero out padding positions after each sub-layer when a mask is given.
        if non_pad_mask is not None:
            out *= non_pad_mask
        out = self.pos_ffn(out)
        if non_pad_mask is not None:
            out *= non_pad_mask
        return (out, attn) if self.attn_flag else out
class EncoderTaskLayer(nn.Module):
    ''' Compose with two layers: pools a sequence into a single task vector
    using constant-query multi-head attention, then a feed-forward net. '''

    def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1, d_out=None, attn_flag=True):
        super(EncoderTaskLayer, self).__init__()
        self.slf_attn = MultiHeadTaskAttention(
            n_head, d_model, d_k, d_v, dropout=dropout)
        self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout, d_out=d_out)
        self.attn_flag = attn_flag

    def forward(self, enc_input, slf_attn_mask=None):
        """Return (pooled vector, attention) or just the pooled vector."""
        # Task id 0 is the only query; attention output has sequence length 1.
        pooled, attn = self.slf_attn(0, enc_input, enc_input, mask=slf_attn_mask)
        pooled = self.pos_ffn(pooled)
        squeezed = pooled.squeeze(1)
        return (squeezed, attn) if self.attn_flag else squeezed
class EncoderTaskLayer2(nn.Module):
    ''' Compose with two layers: regular self-attention plus feed-forward,
    then sum-pool the sequence dimension down to a single vector. '''

    def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1, d_out=None, attn_flag=True):
        super(EncoderTaskLayer2, self).__init__()
        self.slf_attn = MultiHeadAttention(
            n_head, d_model, d_k, d_v, dropout=dropout)
        self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout, d_out=d_out)
        self.attn_flag = attn_flag

    def forward(self, enc_input, slf_attn_mask=None):
        """Return (pooled vector, attention) or just the pooled vector."""
        out, attn = self.slf_attn(enc_input, enc_input, enc_input, mask=slf_attn_mask)
        out = self.pos_ffn(out)
        # Sum-pool across the sequence dimension, then drop that dimension.
        out = out.sum(dim=1, keepdim=True).squeeze(1)
        return (out, attn) if self.attn_flag else out
class Preprocessor(nn.Module):
    """Gradient-free signal preprocessing for 125 Hz physiological signals.

    Applies, in order: window standardization (zero mean / unit std over the
    time dim), moving-average baseline wander removal, and moving-RMS
    normalization. All work runs under no_grad and the result is detached.
    """

    def __init__(
            self,
            ma_window_size=2,
            mv_window_size=4,
            num_samples_per_second=125):
        # ma_window_size: (in seconds) moving-average window for baseline
        # wander removal; mv_window_size: (in seconds) moving-average window
        # for RMS normalization.
        super(Preprocessor, self).__init__()
        # Convert seconds to samples; +1 keeps kernels odd so padding is
        # symmetric around each sample.
        self.maKernelSize = (ma_window_size * num_samples_per_second) + 1
        self.mvKernelSize = (mv_window_size * num_samples_per_second) + 1

    def forward(self, x):
        """x: (batch, channels, time) -> preprocessed tensor, same shape."""
        with torch.no_grad():
            # Standardize each window (small epsilon avoids division by zero).
            mean = torch.mean(x, dim=2, keepdim=True)
            std = torch.std(x, dim=2, keepdim=True)
            x = (x - mean) / (std + 0.00001)
            # Subtract the moving-average baseline.
            baseline = F.avg_pool1d(
                x, kernel_size=self.maKernelSize,
                stride=1, padding=(self.maKernelSize - 1) // 2)
            x = x - baseline
            # Divide by the moving RMS.
            rms = torch.sqrt(F.avg_pool1d(
                torch.pow(x, 2),
                kernel_size=self.mvKernelSize,
                stride=1, padding=(self.mvKernelSize - 1) // 2))
            x = x / (rms + 0.00001)
            # Don't backpropagate further.
            return x.detach().contiguous()
<file_sep>from comet_ml import OfflineExperiment
import json
import argparse
import numpy as np
import torch
import sys
import os
sys.path.append("../")
from models.encoders import PCAEncoder, VAE, AE, CAE, CVAE, ConvAE
from models.clustering import KMeansClustering, GMMClustering
from utils.utils import assign_labels_to_clusters, eval_model_predictions
from utils.model_utils import encode_dataset
from utils.constants import Constants
from utils.dataset import HoromaDataset
def main(datapath, clustering_model, encoding_model, batch_size, n_epochs, lr, flattened, device, train_split, valid_split, train_labeled_split,
         experiment, encode, cluster, train_subset=None, path_to_model=None):
    """
    Train (or load) an encoding model on unlabeled Horoma data, then fit a
    clustering model on the encoded representations, evaluate it on the
    labeled validation split, and save both models.

    :param datapath: path to the directory containing the samples
    :param clustering_model: which clustering model to use [kmeans, gmm].
    :param encoding_model: which encoding model to use, convolutional, variational or simple autoencoders.
    :param batch_size: batch size
    :param n_epochs: number of epochs
    :param lr: learning rate
    :param flattened: If True return the images in a flatten format.
    :param device: use CUDA device if available else CPU.
    :param train_split: name of the unlabeled training split.
    :param valid_split: name of the validation split.
    :param train_labeled_split: name of the labeled training split.
    :param experiment: track experiment
    :param encode: boolean, if True, train and apply encoding model.
    :param cluster: boolean, if True, train and apply clustering model.
    :param train_subset: How many elements will be used. Default: all.
    :param path_to_model: path to the directory containing saved models.
    """
    train = HoromaDataset(datapath, split=train_split, subset=train_subset,
                          flattened=flattened)
    labeled = HoromaDataset(datapath, split=train_labeled_split, subset=train_subset,
                            flattened=flattened)
    valid_data = HoromaDataset(
        datapath, split=valid_split, subset=train_subset, flattened=flattened)
    # NOTE(review): these are label ids (dataset targets), yet they are used
    # below as *sample indices* into `labeled` — confirm this is intentional.
    train_label_indices = labeled.targets
    valid_indices = valid_data.targets
    print("Shape of training set: ", train.data.shape)
    print("Shape of validation set: ", valid_data.data.shape)
    if encode:
        # Train and apply encoding model
        train_enc, encoding_model = encoding_model.fit(train, valid_data, batch_size=batch_size, n_epochs=n_epochs,
                                                       lr=lr, device=device, experiment=experiment)
    else:
        # Load pre-trained encoder weights and only encode the training set.
        encoding_model.load_state_dict(torch.load(path_to_model)["model"])
        train_enc = encode_dataset(encoding_model, train, batch_size, device)
    if cluster:
        # Encode the labeled/validation samples; note valid_indices are
        # applied to `labeled`, not `valid_data` — verify against callers.
        train_labeled_enc = encoding_model.encode(
            labeled[train_label_indices][0].to(device))
        valid_enc = encoding_model.encode(labeled[valid_indices][0].to(device))
        # Train and apply clustering model
        clustering_model.train(train_enc)
        cluster_labels = assign_labels_to_clusters(clustering_model, train_labeled_enc,
                                                   labeled.targets[train_label_indices])
        _, accuracy, f1 = eval_model_predictions(clustering_model, valid_enc, labeled.targets[valid_indices],
                                                 cluster_labels)
        experiment.log_metric('accuracy', accuracy)
        experiment.log_metric('f1-score', f1)
    # Save models
    # NOTE(review): `cluster_labels` is only defined when `cluster` is True;
    # calling with cluster=False raises NameError here.
    model = {'cluster': clustering_model,
             'embedding': encoding_model, 'cluster_labels': cluster_labels}
    torch.save(model, Constants.PATH_TO_MODEL +
               str(experiment.get_key()) + '.pth')
if __name__ == '__main__':
    # Parse CLI arguments: dataset location, optional pre-trained encoder,
    # and the configuration key to load from config.json.
    parser = argparse.ArgumentParser()
    parser.add_argument("--datapath", type=str, default=Constants.DATAPATH,
                        help="Path to dataset folder")
    parser.add_argument("--encoder_path", type=str, default=None)
    parser.add_argument("--config", type=str, default="CVAE_BASE",
                        help="To select configuration from config.json")
    args = parser.parse_args()
    config_key = args.config
    datapath = args.datapath
    path_to_model = args.encoder_path
    # NOTE(review): CONFIG_PATH is printed twice — likely leftover debugging.
    print(Constants.CONFIG_PATH)
    print(config_key)
    print(Constants.CONFIG_PATH)
    with open(Constants.CONFIG_PATH, "r") as f:
        configuration = json.load(f)[config_key]
    # Parse configuration file
    clustering_model = configuration['cluster_model']
    encoding_model = configuration['enc_model']
    batch_size = configuration['batch_size']
    seed = configuration['seed']
    n_epochs = configuration['n_epochs']
    lr = configuration['lr']
    n_clusters = configuration['n_clusters']
    # train_subset = configuration['train_subset']
    train_split = configuration['train_split']
    valid_split = configuration['valid_split']
    train_labeled_split = configuration['train_labeled_split']
    encode = configuration['encode']
    cluster = configuration['cluster']
    latent_dim = configuration['latent_dim']
    flattened = False  # Default
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Set all seeds for full reproducibility
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Set up Comet Experiment tracking
    experiment = OfflineExperiment(project_name='general',
                                   workspace='timothynest',  # Replace this with appropriate comet workspace
                                   offline_directory="experiments")
    experiment.set_name(
        name=args.config + "_dim={}_overlapped={}".format(latent_dim, train_split))
    experiment.log_parameters(configuration)
    # Initialize necessary objects
    if clustering_model == 'kmeans':
        clustering_model = KMeansClustering(n_clusters, seed)
    elif clustering_model == 'gmm':
        clustering_model = GMMClustering(n_clusters, seed)
    elif clustering_model == 'svm':
        # FIXME(review): SVMClustering is not imported at the top of this
        # file — selecting the 'svm' config crashes with a NameError.
        clustering_model = SVMClustering(seed)
    else:
        print('No clustering model specified. Using Kmeans.')
        clustering_model = KMeansClustering(n_clusters, seed)
    # `flattened` depends on the encoder: linear models need flat vectors,
    # convolutional ones need image tensors.
    if encoding_model == 'pca':
        encoding_model = PCAEncoder(seed)
        flattened = True
    elif encoding_model == 'vae':
        encoding_model = VAE(latent_dim=latent_dim).to(device)
        flattened = True
    elif encoding_model == "ae":
        encoding_model = AE(latent_dim=latent_dim).to(device)
        flattened = True
    elif encoding_model == "cae":
        encoding_model = CAE(latent_dim=latent_dim).to(device)
        flattened = False
    elif encoding_model == "cvae":
        encoding_model = CVAE(latent_dim=latent_dim).to(device)
        flattened = False
    elif encoding_model == "convae":
        encoding_model = ConvAE(latent_dim=latent_dim).to(device)
        flattened = False
    else:
        print('No encoding model specified. Using PCA.')
        encoding_model = PCAEncoder(seed)
    # Initiate experiment
    main(datapath, clustering_model, encoding_model, batch_size, n_epochs, lr, flattened, device, train_split, valid_split, train_labeled_split,
         experiment, encode, cluster, path_to_model=path_to_model)
<file_sep>#!/bin/bash
#PBS -A colosse-users
#PBS -l feature=k80
#PBS -l nodes=1:gpus=1
#PBS -l walltime=01:00:00
cd "${PBS_O_WORKDIR}/trainers"
s_exec python ali_train.py --config HALI
<file_sep>from utils.model_utils import get_ae_dataloaders, train_network, encode_dataset, get_cae_dataloaders
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.decomposition import PCA
class ConvAE(nn.Module):
    """
    Convolutional autoencoder: a small CNN encoder (two conv + max-pool
    stages with SELU activations and dropout) feeding linear layers down to
    a 100-dimensional code, and a fully-connected decoder back to images.

    Note: despite the `latent_dim` argument, the bottleneck width is fixed
    by `code_size` (100); `latent_dim` is stored but not used by the layers.

    :param input_dim: flattened input dimension (3 * 32 * 32 = 3072).
    :param latent_dim: stored for API compatibility; unused by the layers.
    """

    def __init__(self, input_dim=3072, latent_dim=2):
        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.is_variational = True
        self.calculate_own_loss = True
        super().__init__()
        self.code_size = 100
        self.maxpool_kernel = 2
        self.loss_fct = nn.MSELoss()
        self.dropout = nn.Dropout(0.1)
        self.encode_cnn_1 = nn.Conv2d(3, 10, kernel_size=5)
        self.encode_cnn_2 = nn.Conv2d(10, 20, kernel_size=5)
        self.encode_lin_1 = nn.Linear(500, 200)
        self.encode_lin_2 = nn.Linear(200, 100)
        self.decode_lin_1 = nn.Linear(100, 500)
        self.decode_lin_2 = nn.Linear(500, 1024 * 3)

    def forward(self, x):
        """Encode then decode x; return (reconstruction, MSE loss, -1)."""
        reconstruction = self.decode(self.encode(x))
        return reconstruction, self.loss_fct(reconstruction, x), -1

    def encode(self, input):
        """Map a (b, 3, 32, 32) image batch to its 100-dim code."""
        h = F.selu(F.max_pool2d(self.encode_cnn_1(input), self.maxpool_kernel))
        h = self.dropout(h)
        h = F.selu(F.max_pool2d(self.encode_cnn_2(h), self.maxpool_kernel))
        h = self.dropout(h)
        # Flatten the (20, 5, 5) feature maps to 500 features.
        h = h.view([h.size(0), -1])
        return self.encode_lin_2(F.selu(self.encode_lin_1(h)))

    def _calculate_own_loss(self):
        return True

    def decode(self, input):
        """Reconstruct a (b, 3, 32, 32) image batch from a code batch."""
        h = F.selu(self.decode_lin_1(input))
        h = torch.sigmoid(self.decode_lin_2(h))
        return h.view((input.size(0), 3, 32, 32))

    def fit(self, data, batch_size, n_epochs, lr, device, experiment):
        """
        Fit the model with the data and return (encoded dataset, best model).

        :param data: the input data
        :param batch_size: batch size set in config file
        :param n_epochs: number of epochs
        :param lr: learning rate
        :param device: 'cuda' if available else 'cpu'
        :param experiment: for tracking comet experiment
        """
        loaders = get_ae_dataloaders(data, batch_size, split=0.8)
        optimizer = torch.optim.Adam(self.parameters(), lr=lr)
        best_model = train_network(
            self, loaders[0], loaders[1], optimizer, n_epochs, device, experiment)
        return encode_dataset(self, data, batch_size, device), best_model
class PCAEncoder:
    """
    PCA dimensionality-reduction wrapper: keeps enough principal
    components to explain 90% of the variance (sklearn's fractional
    `n_components` semantics).
    """

    def __init__(self, seed):
        self.pca = PCA(n_components=0.9, random_state=seed)

    def fit(self, **kwargs):
        """Fit PCA on kwargs['data'].data and return the projected data."""
        return self.pca.fit_transform(kwargs['data'].data)

    def encode(self, encodingdata):
        """Project new data (numpy array or torch.Tensor) onto the components."""
        if type(encodingdata) is torch.Tensor:
            encodingdata = encodingdata.cpu().numpy()
        return self.pca.transform(encodingdata)
class CVAE(nn.Module):
    """
    Convolutional *variational* autoencoder: a stack of strided 2D
    convolutions (each followed by batch norm and ReLU) encodes the image
    down to 16x4x4 features; two linear heads produce the posterior mean
    and log-variance; a sampled latent code is decoded back to a 32x32
    image through transposed convolutions.

    :param input_dim: input dimension of the model, 3072.
    :param latent_dim: dimension of latent-space representation.
    :param folder_save_model: directory where checkpoints are written.
    :param pth_filename_save_model: checkpoint filename (may be empty).
    """
    def __init__(self, input_dim=3072, latent_dim=2, folder_save_model="experiment_models/", pth_filename_save_model=""):
        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.is_variational = True
        # The generic training loop computes the VAE loss externally.
        self.calculate_own_loss = False
        self.folder_save_model = folder_save_model
        self.pth_filename_save_model = pth_filename_save_model
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1,
                      padding=1),  # input is b, 3, 32, 32
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=4, stride=2,
                      padding=1),  # input is b, 3, 32, 32
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 32, kernel_size=3, stride=1,
                      padding=1),  # b, 32, 8,8
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=4, stride=2,
                      padding=1),  # b, 32, 8,8
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 16, kernel_size=3, stride=1, padding=1),  # b, 16,4,4
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(16, 16, kernel_size=4, stride=2, padding=1),  # b, 16,4,4
            nn.BatchNorm2d(16),
            nn.ReLU()
        )
        # Posterior heads: mean and log-variance of the diagonal Gaussian.
        self.embedding_mu = nn.Linear(16 * 4 * 4, self.latent_dim)
        self.embedding_sigma = nn.Linear(16 * 4 * 4, self.latent_dim)
        # Projects a latent code back up to the 16x4x4 feature volume.
        self.decode_embedding = nn.Linear(self.latent_dim, 16 * 4 * 4)
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(16, 32, kernel_size=4,
                               stride=2, padding=1),  # b, 32, 8, 8
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 32, kernel_size=3,
                               stride=1, padding=1),  # b, 32, 8, 8
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 64, kernel_size=4,
                               stride=2, padding=1),  # b, 64, 16, 16
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 64, kernel_size=3,
                               stride=1, padding=1),  # b, 64, 16, 16
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2,
                               padding=1),  # b, 3, 32, 32
            nn.BatchNorm2d(3),
            nn.Sigmoid()
        )
    def forward(self, x):
        """return reconstruction of the latent variable, the mean mu and log prob"""
        mu, logvar = self._get_dist_output(x)
        embedding = self._reparameterization_trick(mu, logvar)
        rev_embedding = F.relu(self.decode_embedding(
            embedding).view(-1, 16, 4, 4))
        return self.decoder(rev_embedding), mu, logvar
    def encode(self, input):
        """
        parametrizes the approximate posterior of the latent variables
        and outputs parameters to the distribution
        """
        return self._reparameterization_trick(*self._get_dist_output(input))
    def _get_dist_output(self, input):
        """return the two vectors of means and standard deviations"""
        input = self.encoder(input)
        input = input.view(-1, 16 * 4 * 4)
        mu, sigma = self.embedding_mu(input), self.embedding_sigma(input)
        return mu, sigma
    def decode(self, input):
        """reconstruct the input from the latent space representation"""
        rev_embedding = F.relu(self.decode_embedding(input).view(-1, 16, 4, 4))
        return self.decoder(rev_embedding)
    def _reparameterization_trick(self, mu, sigma):
        """
        Reparametrize samples such that the stochasticity is independent
        of the parameters. The reparametrization trick allow us backpropagate.
        """
        # sigma is interpreted as log-variance: std = exp(0.5 * logvar).
        sigma = torch.exp(.5 * sigma)
        samples = torch.randn_like(sigma)
        return samples * sigma + mu
    def fit(self, data, batch_size, n_epochs, lr, device, experiment):
        """
        fit the model with the data.
        :param data: the input data
        :param batch_size: batch size set in config file
        :param n_epochs: number of epochs
        :param lr: learning rate
        :param device: 'cuda' if available else 'cpu'
        :param experiment: for tracking comet experiment
        :return: (encoded dataset, best trained model)
        """
        train_loader, valid_loader = get_ae_dataloaders(data, batch_size, split=0.8)
        optimizer = torch.optim.Adam(self.parameters(), lr=lr)
        best_model = train_network(self, train_loader, valid_loader, optimizer, n_epochs, device, experiment,
                                   folder_save_model=self.folder_save_model, pth_filename_save_model=self.pth_filename_save_model)
        return encode_dataset(self, data, batch_size, device), best_model
    def _calculate_own_loss(self):
        return False
class CAE(nn.Module):
    """
    Convolutional (non-variational) autoencoder: the same strided
    conv / batch-norm / ReLU encoder stack as CVAE reduced to 16x4x4
    features, a single linear bottleneck of `latent_dim`, and a transposed
    convolution decoder back to a 3x32x32 image in [0, 1].

    :param input_dim: input dimension of the model, 3072.
    :param latent_dim: dimension of latent-space representation.
    """
    def __init__(self, input_dim=3072, latent_dim=2):
        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.is_variational = False
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1,
                      padding=1),  # input is b, 3, 32, 32
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=4, stride=2,
                      padding=1),  # input is b, 3, 32, 32
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 32, kernel_size=3, stride=1,
                      padding=1),  # b, 32, 8,8
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=4, stride=2,
                      padding=1),  # b, 32, 8,8
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 16, kernel_size=3, stride=1, padding=1),  # b, 16,4,4
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(16, 16, kernel_size=4, stride=2, padding=1),  # b, 16,4,4
            nn.BatchNorm2d(16),
            nn.ReLU(),
        )
        # Deterministic bottleneck: features -> latent and back.
        self.embedding = nn.Linear(16 * 4 * 4, self.latent_dim)
        self.decode_embedding = nn.Linear(self.latent_dim, 16 * 4 * 4)
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(16, 32, kernel_size=4,
                               stride=2, padding=1),  # b, 32, 8, 8
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 32, kernel_size=3,
                               stride=1, padding=1),  # b, 32, 8, 8
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 64, kernel_size=4,
                               stride=2, padding=1),  # b, 64, 16, 16
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 64, kernel_size=3,
                               stride=1, padding=1),  # b, 64, 16, 16
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2,
                               padding=1),  # b, 3, 32, 32
            nn.BatchNorm2d(3),
            nn.Sigmoid()
        )
    def forward(self, x):
        """return the reconstructed input after encoding/decoding"""
        embedding = self.encode(x)
        rev_embedding = F.relu(self.decode_embedding(
            embedding).view(-1, 16, 4, 4))
        return self.decoder(rev_embedding)
    def encode(self, x):
        # Flatten the 16x4x4 encoder features before the linear bottleneck.
        return self.embedding(self.encoder(x).view(-1, 16 * 4 * 4))
    def fit(self, traindata, valid_data, batch_size, n_epochs, lr, device, experiment):
        """Train with Adam on (train, valid) loaders; return (encoded train set, best model)."""
        train_loader, valid_loader = get_cae_dataloaders(traindata, valid_data, batch_size)
        optimizer = torch.optim.Adam(self.parameters(), lr=lr)
        best_model = train_network(
            self, train_loader, valid_loader, optimizer, n_epochs, device, experiment)
        return encode_dataset(self, traindata, batch_size, device), best_model
class VAE(nn.Module):
    """
    Fully-connected variational autoencoder. The encoder produces the mean
    and log-variance of a diagonal Gaussian posterior; a latent sample is
    drawn with the reparameterization trick and decoded back to the input
    space with a final sigmoid.

    :param input_dim: flattened input dimension (default 3072).
    :param latent_dim: dimension of the latent representation.
    """

    def __init__(self, input_dim=3072, latent_dim=2):
        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.is_variational = True
        # Hidden widths interpolate between the input and latent sizes.
        self.d1 = (self.input_dim + self.latent_dim) // 6
        self.d2 = (self.d1 + self.latent_dim) // 2
        super().__init__()
        # Encoder network architecture
        self.encoder = nn.Sequential(
            nn.Linear(self.input_dim, self.d1),
            nn.ReLU(),
            nn.Linear(self.d1, self.d2),
            nn.ReLU(),
        )
        self.latent_fc1 = nn.Linear(self.d2, self.latent_dim)  # mu head
        self.latent_fc2 = nn.Linear(self.d2, self.latent_dim)  # logvar head
        # Decoder network architecture
        self.decoder = nn.Sequential(
            nn.Linear(self.latent_dim, self.d2),
            nn.ReLU(),
            nn.Linear(self.d2, self.d1),
            nn.ReLU(),
            nn.Linear(self.d1, self.input_dim),
            nn.Sigmoid()
        )

    def encode(self, input):
        """Sample a latent code for `input` via the reparameterization trick."""
        mu, sigma = self._get_dist_output(input)
        return self._reparameterization_trick(mu, sigma)

    def _get_dist_output(self, input):
        """Return (mu, logvar) of the approximate posterior."""
        hidden = self.encoder(input.view(-1, self.input_dim))
        return self.latent_fc1(hidden), self.latent_fc2(hidden)

    def decode(self, input):
        """Map a latent code back to the input space."""
        return self.decoder(input)

    def _reparameterization_trick(self, mu, sigma):
        """z = mu + eps * std with eps ~ N(0, I); keeps sampling differentiable."""
        std = torch.exp(.5 * sigma)
        return torch.randn_like(std) * std + mu

    def forward(self, input):
        """Return (reconstruction, mu, logvar) for a batch."""
        mu, logvar = self._get_dist_output(input)
        z = self._reparameterization_trick(mu, logvar)
        return self.decode(z), mu, logvar

    def fit(self, data, batch_size, n_epochs, lr, device, experiment):
        """Train with Adam; return (encoded dataset, best model)."""
        loaders = get_ae_dataloaders(data, batch_size, split=0.8)
        optimizer = torch.optim.Adam(self.parameters(), lr=lr)
        best_model = train_network(
            self, loaders[0], loaders[1], optimizer, n_epochs, device, experiment)
        return encode_dataset(best_model, data, batch_size, device), best_model
class AE(nn.Module):
    """
    Vanilla fully-connected autoencoder. Images are flattened to
    `input_dim` (3 * 32 * 32 = 3072 by default); the bottleneck embedding
    is what downstream clustering consumes.

    :param input_dim: flattened input dimension.
    :param latent_dim: dimension of the latent-space representation.
    """

    def __init__(self, input_dim=3072, latent_dim=2):
        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.is_variational = False
        # Hidden widths interpolate between the input and latent sizes.
        self.d1 = (self.input_dim + self.latent_dim) // 6
        self.d2 = (self.d1 + self.latent_dim) // 2
        super().__init__()
        # Encoder network architecture
        self.encoder = nn.Sequential(
            nn.Linear(self.input_dim, self.d1),
            nn.ReLU(),
            nn.Linear(self.d1, self.d2),
            nn.ReLU(),
            nn.Linear(self.d2, self.latent_dim)
        )
        # Decoder network architecture
        self.decoder = nn.Sequential(
            nn.Linear(self.latent_dim, self.d2),
            nn.ReLU(),
            nn.Linear(self.d2, self.d1),
            nn.ReLU(),
            nn.Linear(self.d1, self.input_dim),
            nn.Sigmoid()
        )

    def forward(self, x):
        """Encode then decode; the reconstruction lies in [0, 1]."""
        return self.decoder(self.encoder(x))

    def encode(self, x):
        """Return the latent embedding of x."""
        return self.encoder(x)

    def fit(self, data, batch_size, n_epochs, lr, device, experiment):
        """Train with Adam; return (encoded dataset, best model)."""
        loaders = get_ae_dataloaders(data, batch_size, split=0.8)
        optimizer = torch.optim.Adam(self.parameters(), lr=lr)
        best_model = train_network(
            self, loaders[0], loaders[1], optimizer, n_epochs, device, experiment)
        return encode_dataset(best_model, data, batch_size, device), best_model
<file_sep>import torch
import torch.nn as nn
import math
import torch.nn.functional as F
class MLPClassifier(nn.Module):
    """
    MLP for multiclass classification: ReLU hidden layers and a linear
    output layer producing raw logits (no softmax applied).
    """

    def __init__(self, latent_dim=10, hidden_size=60, n_layers=2, n_class=17):
        """
        args:
            latent_dim: input feature size (latent representation size)
            hidden_size: number of hidden units in each hidden layer
            n_layers: number of linear layers in total, including output
            n_class: total number of classes
        """
        super(MLPClassifier, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        widths = [latent_dim] + [hidden_size] * (n_layers - 1) + [n_class]
        self.layers = nn.ModuleList(
            nn.Linear(w_in, w_out) for w_in, w_out in zip(widths, widths[1:]))
        self._weight_init()

    def _weight_init(self,):
        """
        Weight initialization: hidden weights uniform in [-k, k] with
        k = sqrt(1 / hidden_size); output weights uniform in [-0.1, 0.1];
        every bias set to zero.
        """
        k = math.sqrt(1. / self.hidden_size)
        for layer in list(self.layers)[:-1]:
            nn.init.uniform_(layer.weight, -k, k)
            nn.init.constant_(layer.bias, 0.0)
        nn.init.uniform_(self.layers[-1].weight, -0.1, 0.1)
        nn.init.constant_(self.layers[-1].bias, 0.0)

    def forward(self, inputs):
        """Return class logits for a batch of inputs."""
        hidden = inputs
        for layer in list(self.layers)[:-1]:
            hidden = F.relu(layer(hidden))
        return self.layers[-1](hidden)
<file_sep>import os
import sys
import argparse
import torch
import numpy as np
from joblib import load # You can use Pickle or the serialization technique of your choice
sys.path.append("../")
from utils.dataset import OriginalHoromaDataset
import models.transformer_net as transformer_net
def eval_model(model_path, dataset_dir, split):
    """Load the trained TransformerNet checkpoint and predict labels.

    :param model_path: path to the saved checkpoint (dict with 'state_dict').
    :param dataset_dir: directory containing the Horoma dataset files.
    :param split: dataset split name ('train', 'valid' or 'test').
    :return: numpy array of predicted string labels, shape (nb_examples,).
    """
    # Needed for the DataParallel key-renaming path below; was previously
    # missing, causing a NameError on 'module.'-prefixed checkpoints.
    from collections import OrderedDict

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # # SETUP DATASET # #
    # Load requested dataset
    dataset = OriginalHoromaDataset(dataset_dir, split=split)
    data = dataset[:][0]
    data = data.reshape([data.shape[0], 1, 3072])

    # # SETUP MODEL # #
    # Load your best model
    print("\nLoading model from ({}).".format(model_path))
    out_size = 17
    n_layers = 0
    hidden_size = 256
    kernel_size = 8
    pool_size = 4
    dropout = 0.2
    n_heads = 8
    key_dim = 128
    val_dim = 128
    inner_dim = 128
    # Use the computed device everywhere (previously hard-coded to 'cuda',
    # which crashed on CPU-only machines).
    model = transformer_net.TransformerNet(
        1, out_size, hidden_size, n_layers, kernel_size=kernel_size, pool_size=pool_size,
        n_heads=n_heads, key_dim=key_dim, val_dim=val_dim, inner_dim=inner_dim, dropout=dropout
    ).to(device)
    resume = torch.load(model_path, map_location=device)
    if ('module' in list(resume['state_dict'].keys())[0]) \
            and not (isinstance(model, torch.nn.DataParallel)):
        # Checkpoint was saved from a DataParallel wrapper: strip the
        # 'module.' prefix from every parameter name.
        new_state_dict = OrderedDict()
        for k, v in resume['state_dict'].items():
            name = k[7:]  # remove `module.`
            new_state_dict[name] = v
        model.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(resume['state_dict'])

    # # PREDICTIONS # #
    # Switch to inference mode: disables dropout (otherwise predictions are
    # stochastic) and skips gradient tracking.
    model.eval()
    with torch.no_grad():
        y_pred = model(data.to(device))
    y_pred = y_pred.cpu()
    _, y_pred = y_pred.max(1)
    # Map integer class ids back to the original string labels;
    # returns a numpy array of shape (nb_exemple,).
    final_pred = dataset.map_labels[y_pred]
    return final_pred
if __name__ == "__main__":
    # Put your group name here
    group_name = "b3phot5"
    # model_path should be the absolute path on shared disk to your best model.
    # You need to ensure that they are available to evaluators on Helios.
    model_path = "/rap/jvb-000-aa/COURS2019/etudiants/submissions/b3phot5/model/transformer_net.pt"
    #########################
    # DO NOT MODIFY - BEGIN #
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--dataset_dir", type=str, default="/rap/jvb-000-aa/COURS2019/etudiants/data/horoma/",
                        help="Absolute path to the dataset directory.")
    parser.add_argument("-s", "--dataset_split", type=str, choices=['valid', 'test', 'train'],
                        default="valid", help="Which split of the dataset should be loaded from `dataset_dir`.")
    parser.add_argument("-r", "--results_dir", type=str, default="./",
                        help="Absolute path to where the predictions will be saved.")
    args = parser.parse_args()
    # Arguments validation
    # NOTE(review): `is` performs identity comparison on string literals,
    # which is unreliable; this sentinel check should use `==`. Left as-is
    # because it sits inside the evaluator's DO-NOT-MODIFY region.
    if group_name is "b1phutN":
        print("'group_name' is not set.\nExiting ...")
        exit(1)
    if model_path is None or not os.path.exists(model_path):
        print("'model_path' ({}) does not exists or unreachable.\nExiting ...".format(model_path))
        exit(1)
    if args.dataset_dir is None or not os.path.exists(args.dataset_dir):
        print("'dataset_dir' does not exists or unreachable..\nExiting ...")
        exit(1)
    y_pred = eval_model(model_path, args.dataset_dir, args.dataset_split)
    assert type(y_pred) is np.ndarray, "Return a numpy array"
    assert len(y_pred.shape) == 1, "Make sure ndim=1 for y_pred"
    results_fname = os.path.join(args.results_dir, "{}_pred_{}.txt".format(group_name, args.dataset_split))
    print('\nSaving results to ({})'.format(results_fname))
    np.savetxt(results_fname, y_pred, fmt='%s')
    # DO NOT MODIFY - END #
    #######################
<file_sep>import os
import sys
import numpy as np
import torch
from PIL import Image
from tempfile import mkdtemp
from torchvision.transforms import functional
from torch.utils import data
from torch.utils.data import Dataset
class LocalHoromaDataset(Dataset):
    """
    In-memory labeled dataset (no file loading) used for the supervised
    Damic pretraining of the convolutional clustering network.
    """

    def __init__(self, data, targets):
        """
        Args:
            data : numpy array (number_of_sample, 3, 32, 32)
            targets : numpy array (number_of_sample, 1)
        """
        self.data = data
        self.targets = targets

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Scale pixel values from uint8 to [0, 1]; wrap the label in a
        # 1-element tensor to match the training loop's expectations.
        image = torch.Tensor(self.data[index]) / 255
        label = torch.Tensor([self.targets[index]])
        return image, label
class OriginalHoromaDataset(Dataset):
    """Memory-mapped Horoma dataset.

    Reads `<split>_x.dat` (uint8 images, (N, 3, 32, 32)) and, when it
    exists, `<split>_y.txt` (string labels mapped to contiguous int ids).
    """

    def __init__(self, data_dir, split="train", subset=None, skip=0, flattened=False, transform=None):
        """
        Args:
            data_dir: Path to the directory containing the samples.
            split: Which split to use. [train, valid, test, train_labeled, train_overlapped, train_labeled_overlapped, valid_overlapped]
            subset: How many elements will be used. Default: all.
            skip: How many element to skip before taking the subset.
            flattened: If True return the images in a flatten format.
            transform: optional callable applied to each raw image.
        Raises:
            ValueError: if `split` is not a known split name.
        """
        nb_channels = 3
        height = 32
        width = 32
        datatype = "uint8"
        # split name -> (number of examples, whether the split is labeled)
        split_info = {
            "train": (152000, False),
            "train_labeled": (228, True),
            "valid": (252, True),
            "test": (498, True),
            "train_overlapped": (548720, False),
            "train_labeled_overlapped": (635, True),
            "valid_overlapped": (696, True),
        }
        if split not in split_info:
            # Previously `raise ("...")`, which raises a TypeError about
            # non-exception types and loses the message entirely.
            raise ValueError(
                "Dataset: Invalid split '{}'. Must be one of {}.".format(
                    split, sorted(split_info)))
        self.nb_exemples, self.labeled = split_info[split]
        filename_x = os.path.join(data_dir, "{}_x.dat".format(split))
        filename_y = os.path.join(data_dir, "{}_y.txt".format(split))
        self.targets = None
        if os.path.exists(filename_y):
            # Labels are 2-character strings; map them to integer ids.
            pre_targets = np.loadtxt(filename_y, 'U2')
            if subset is None:
                pre_targets = pre_targets[skip: None]
            else:
                pre_targets = pre_targets[skip: skip + subset]
            self.map_labels = np.unique(pre_targets)
            self.targets = np.asarray(
                [np.where(self.map_labels == t)[0][0] for t in pre_targets])
        # Memory-map the raw pixel file so large splits are not loaded whole.
        self.data = np.memmap(filename_x, dtype=datatype, mode="r", shape=(
            self.nb_exemples, nb_channels, height, width))
        if subset is None:
            self.data = self.data[skip: None]
        else:
            self.data = self.data[skip: skip + subset]
        if flattened:
            self.data = self.data.reshape(len(self.data), -1)
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        """Return the image scaled to [0, 1] and, when labeled, the target id."""
        sample = self.data[index]
        if self.transform:
            sample = self.transform(sample)
        image = torch.Tensor(sample) / 255
        if self.targets is not None:
            return image, torch.Tensor([self.targets[index]])
        return image
class HoromaDataset(Dataset):
    # Horoma image dataset backed by on-disk numpy memmaps: each split reads
    # "<split>_x.dat" (raw uint8 pixels) and, for labeled splits,
    # "<split>_y.txt" (two-character string labels, one per line).
    def __init__(self, data_dir, split="train", subset=None, skip=0, flattened=False, transform=None):
        """
        Args:
            data_dir: Path to the directory containing the samples.
            split: Which split to use. Either one of the basic splits listed in
                ``self.splits``, a composite "*_all" split (concatenation of two
                basic splits), or "test".
            subset: How many elements will be used. Default: all.
                NOTE(review): ``subset`` is never read in this class — the full
                split is always loaded. Confirm whether subsetting was intended.
            skip: How many element to skip before taking the subset.
                NOTE(review): ``skip`` is likewise unused here.
            flattened: If True return the images in a flatten format.
            transform: Optional callable applied to each raw sample in
                ``__getitem__`` before normalization.
        """
        self.nb_channels = 3
        self.height = 32
        self.width = 32
        self.datatype = "uint8"  # matches the on-disk memmap encoding
        self.data_dir = data_dir
        self.str_labels = []  # unique label strings (set by get_targets)
        self.str_to_id = []   # label string -> integer id (dict after get_targets)
        self.id_to_str = []   # integer id -> label string (dict after get_targets)
        self.train_labeled_split = ""
        # Basic on-disk splits; the composite splits below concatenate pairs of them.
        self.splits = ["train", "train_labeled", "valid", "train_overlapped",
                       "train_labeled_overlapped", "valid_overlapped"]
        self.splits_with_all = {"train_all": ["train", "train_labeled"],
                                "train_overlapped_all": ["train_overlapped", "train_labeled_overlapped"],
                                "valid_all": ["valid", "valid_overlapped"],
                                "train_unlabeled_all": ["train", "train_overlapped"],
                                "train_labeled_all": ["train_labeled", "train_labeled_overlapped"]}
        # Hard-coded sample counts used to size the memmaps (memmaps need an
        # explicit shape since the raw files carry no header).
        self.nb_examples = {"train": 152000,
                            "train_labeled": 228,
                            "valid": 252,
                            "train_overlapped": 548720,
                            "train_labeled_overlapped": 635,
                            "valid_overlapped": 696,
                            "train_all": 152228,
                            "train_overlapped_all": 549355,
                            "valid_all": 948,
                            "train_unlabeled_all": 700720,
                            "train_labeled_all": 863}
        filename_x = os.path.join(data_dir, "{}_x.dat".format(split))
        if split in self.splits:
            # Memory-map the raw file lazily; layout on disk is (N, H, W, C).
            self.data = np.memmap(
                filename_x,
                dtype=self.datatype,
                mode="r",
                shape=(self.nb_examples[split], self.height,
                       self.width, self.nb_channels)
            )
        elif split in self.splits_with_all:
            # Composite split: concatenate the two underlying splits.
            self.data = self.merge_memmap(self.splits_with_all[split][
                0], self.splits_with_all[split][1], split)
        elif split == "test":
            # NOTE(review): this branch overwrites the nb_examples dict with a
            # plain int and reshapes self.data without ever assigning it, so
            # split="test" raises AttributeError as written — confirm intent.
            self.nb_examples = 498
            self.data = self.data.reshape(
                len(self.data), self.nb_channels, self.height, self.width)
        self.targets = None
        # Only these splits ship a label file; everything else stays unlabeled.
        if split in ["train_labeled", "train_labeled_overlapped", "valid", "valid_overlapped"]:
            filename_y = os.path.join(
                data_dir, "{}_y.txt".format(split))
            self.targets = self.get_targets([filename_y])
        elif split in ["valid_all", "train_labeled_all"]:
            filename_y = [os.path.join(data_dir, "{}_y.txt".format(
                split_i)) for split_i in self.splits_with_all[split]]
            self.targets = self.get_targets(filename_y)
        if flattened:
            # Collapse each image to a single (H*W*C,) vector.
            self.data = self.data.reshape(len(self.data), -1)
        self.transform = transform
    def __len__(self):
        # Number of samples in the selected split.
        return len(self.data)
    def __getitem__(self, index):
        # Returns a float tensor scaled to [0, 1]; labeled splits additionally
        # return a (1,)-shaped float label tensor.
        if self.transform:
            img = torch.Tensor(self.transform(self.data[index])) / 255
        else:
            img = torch.Tensor(self.data[index]) / 255
        if self.targets is None:
            return img
        else:
            label = torch.Tensor([self.targets[index]])
            return img, label
    def merge_memmap(self, split_1, split_2, new_split):
        # Concatenate two read-only on-disk splits into one writable memmap.
        n_1 = self.nb_examples[split_1]
        n_2 = self.nb_examples[split_2]
        filename_1 = os.path.join(self.data_dir, "{}_x.dat".format(split_1))
        data1 = np.memmap(
            filename_1,
            dtype=self.datatype,
            mode="r",
            shape=(n_1, self.height, self.width, self.nb_channels)
        )
        filename_2 = os.path.join(self.data_dir, "{}_x.dat".format(split_2))
        data2 = np.memmap(
            filename_2,
            dtype=self.datatype,
            mode="r",
            shape=(n_2, self.height, self.width, self.nb_channels)
        )
        # NOTE(review): mkdtemp() returns an absolute path, so os.path.join
        # discards self.data_dir here and the merged file lands in a fresh
        # temp directory — confirm this is the intended location.
        filename = os.path.join(self.data_dir, mkdtemp(), new_split + '.dat')
        data = np.memmap(
            filename,
            dtype=self.datatype,
            mode='w+',
            shape=(n_1 + n_2, self.height, self.width, self.nb_channels), order='C')
        data[:n_1] = data1
        data[n_1:] = data2
        return data
    def get_targets(self, list_of_filename):
        # Build the string<->id label maps from the FIRST file's labels, then
        # convert every file's labels to integer ids.
        targets = []
        if os.path.exists(list_of_filename[0]):
            pre_targets = np.loadtxt(list_of_filename[0], 'U2')
            self.str_labels = np.unique(pre_targets)
            self.str_to_id = dict(
                zip(self.str_labels, range(len(self.str_labels))))
            self.id_to_str = dict((v, k)
                                  for k, v in self.str_to_id.items())
            for filename_y in list_of_filename:
                pre_targets = np.loadtxt(filename_y, 'U2')
                # NOTE(review): labels absent from the first file's vocabulary
                # are silently dropped here, which would desynchronize targets
                # from self.data — confirm all files share one label set.
                targets += [self.str_to_id[_str]
                            for _str in pre_targets if _str in self.str_to_id]
        return np.asarray(targets)
if __name__ == "__main__":
    # Smoke test: make sure both labeled splits load from disk.
    valid_set = HoromaDataset(
        data_dir='./../data/horoma',
        split='valid'
    )
    labeled_set = HoromaDataset(
        data_dir='./../data/horoma',
        split='train_labeled'
    )
    print(len(valid_set))
<file_sep>import torch
import time
import datetime
import numpy as np
from models.encoders import CVAE, ConvAE
from models.clustering import KMeansClustering
from utils.utils import assign_labels_to_clusters, eval_model_predictions, compute_metrics, load_original_horoma_datasets
from utils.model_utils import encode_dataset, train_network
from torch.utils.data import DataLoader
from utils.dataset import LocalHoromaDataset
def get_class_prediction(encoding_model, clustering_model, encoded_unlabeled_train, unlabeled_train, labeled_train,
                         labeled_valid, batch_size, device, experiment):
    """
    Apply a clustering model algorithm on an embedded space provided by the encoding_model
    Return a class prediction for each sample within encoded_unlabeled_train
    :param encoding_model: model used to do the encoding
    :param clustering_model: model use to do the clustering
    :param encoded_unlabeled_train: dataset with the unlabeled samples encoded
    :param unlabeled_train: dataset with the unlabeled samples
    :param labeled_train: dataset with labeled samples for training
    :param labeled_valid: dataset with labeled samples for validation
    :param batch_size: batch size
    :param device: cpu or cuda
    :param experiment: comet experiment to log results
    :return: numpy array of the unlabeled data, numpy array with the predicted class for each sample
    """
    print("Start encoding of labeled dataset...")
    encoded_labeled_train = encode_dataset(encoding_model, labeled_train, batch_size, device)
    print("Done")
    # BUG FIX: this message previously said "labeled" again (copy/paste),
    # making the two encoding phases indistinguishable in the logs.
    print("Start encoding of validation dataset...")
    encoded_labeled_valid = encode_dataset(encoding_model, labeled_valid, batch_size, device)
    print("Done")
    print("Start kmean training on unlabeled...")
    clustering_model.train(encoded_unlabeled_train)
    print("Done")
    # Assign a class label to each cluster from the labeled training points,
    # then score those cluster labels on the validation set.
    cluster_labels = assign_labels_to_clusters(clustering_model, encoded_labeled_train, labeled_train.targets)
    _, accuracy, f1 = eval_model_predictions(
        clustering_model, encoded_labeled_valid, labeled_valid.targets, cluster_labels)
    experiment.log_metric('accuracy', accuracy)
    experiment.log_metric('f1-score', f1)
    unlabeled_target_pred_by_cluster = clustering_model.predict_cluster(encoded_unlabeled_train)
    # np.asarray(..., dtype=np.float32) replaces the previous
    # torch.Tensor(...).cpu().numpy() round-trip: same float32 arrays,
    # without materializing intermediate torch tensors.
    numpy_unla_train = np.asarray(unlabeled_train.data, dtype=np.float32)
    numpy_unla_target_pred_by_cluster = np.asarray(unlabeled_target_pred_by_cluster, dtype=np.float32)
    return numpy_unla_train, numpy_unla_target_pred_by_cluster
def _get_encoding_model(encoding_model_name, latent_dim, device, seed):
    """
    Build the encoding model used during pre-training.
    :param encoding_model_name: name of the encoding model ("cvae" or "convae")
    :param latent_dim: dimension of the encoded samples
    :param device: cpu or cuda
    :param seed: kept for interface compatibility; the encoders constructed
        here do not take a seed argument
    :return: an encoding model of type encoding_model_name, moved to `device`
    """
    if encoding_model_name == "cvae":
        # Timestamped filename so each pre-training run saves its own checkpoint.
        now = datetime.datetime.now()
        pth_filename = "autoencoder_pretrain_" + str(now.month) + "_" + \
            str(now.day) + "_" + str(now.hour) + "_" + str(now.minute)
        encoding_model = CVAE(latent_dim=latent_dim,
                              folder_save_model="../damic_models/",
                              pth_filename_save_model=pth_filename).to(device)
    elif encoding_model_name == "convae":
        encoding_model = ConvAE(latent_dim=latent_dim).to(device)
    else:
        print('No encoding model specified. Using CVAE.')
        # BUG FIX: the fallback used to call CVAE(seed), passing the random
        # seed where the latent dimension is expected, and never moved the
        # model to `device` — now consistent with the branches above.
        encoding_model = CVAE(latent_dim=latent_dim).to(device)
    return encoding_model
def _get_clustering_model(n_clusters, seed):
    """
    Build the clustering model used during DAMIC pre-training.
    :param n_clusters: number of clusters to fit
    :param seed: seed for reproducible clustering
    :return: a KMeansClustering instance
    """
    clustering_model = KMeansClustering(n_clusters, seed)
    return clustering_model
def _initialize_damic_conv_clustering_net_weights(damic_model, conv_net_pretrain_config, numpy_unla_train,
                                                  numpy_unla_target_pred_by_cluster, labeled_train_and_valid, device, experiment):
    """
    Part of the pretraining for Damic is to initialize the weights for the convolutional clustering network.
    The network is trained as a classifier on the cluster-predicted pseudo-labels;
    the real-label loader is also handed to train_network (presumably for
    evaluation — confirm in train_network).
    :param damic_model
    :param conv_net_pretrain_config: dictionary of configuration for the conv net pretraining
    :param numpy_unla_train: numpy array of unlabeled samples
    :param numpy_unla_target_pred_by_cluster: numpy array of targets for the unlabeled samples
    :param labeled_train_and_valid: dataset composed of the labeled trained and validation set
    :param device: cpu or cuda
    :param experiment: comet experiment to log results
    :return: a damic_model with his convolutional clustering network weights initialized
    """
    print("Start pre-training of clustering convolutional model...")
    lr = conv_net_pretrain_config["lr"]
    batch_size = conv_net_pretrain_config["batch_size"]
    n_epoch = conv_net_pretrain_config["n_epochs"]
    # Unlabeled samples paired with the pseudo-labels predicted by clustering.
    pretrain_dataset_with_label = LocalHoromaDataset(numpy_unla_train, numpy_unla_target_pred_by_cluster)
    pretrain_dataset_predicted_label_loader = DataLoader(pretrain_dataset_with_label, batch_size=batch_size)
    # Loader over the samples that carry real (human) labels.
    valid_and_train_real_label_loader = DataLoader(labeled_train_and_valid, batch_size=batch_size)
    optimizer = torch.optim.Adam(damic_model.parameters(), lr=lr)
    print("Done")
    # Used to save the convolutional network model at the end of the pre training
    now = datetime.datetime.now()
    pth_filename = "conv_net_pretrain_" + str(now.month) + "_" + str(now.day) + \
        "_" + str(now.hour) + "_" + str(now.minute)
    return train_network(damic_model, pretrain_dataset_predicted_label_loader, valid_and_train_real_label_loader, optimizer, n_epoch,
                         device, experiment, train_classifier=True, folder_save_model="../damic_models/", pth_filename_save_model=pth_filename)
def _get_class_predictions_for_damic_pretraining(datapath, train_subset, overlapped, ae_pretrain_config, device, experiment, seed):
    """
    For pre-training purposes, Damic needs a first class prediction on the unlabeled samples
    :param datapath: path of the directory holding the Horoma data
    :param train_subset: if we use a subset of the samples
    :param overlapped: if True, we use the overlapped dataset
    :param ae_pretrain_config: dictionary of configuration for this step
    :param device: cpu or cuda
    :param experiment: comet experiment to log results
    :param seed: seed forwarded to the encoding and clustering models
    :return: numpy array of unlabeled samples, numpy array of target for each sample, dataset of labeled train+valid samples
    """
    unlabeled_train, labeled_train, labeled_valid, labeled_train_and_valid = load_original_horoma_datasets(datapath,
                                                                                                           train_subset=train_subset,
                                                                                                           overlapped=overlapped)
    print("Shape of unlabeled training set: ", unlabeled_train.data.shape)
    print("Shape of labeled training set: ", labeled_train.data.shape)
    print("Shape of labeled valid set: ", labeled_valid.data.shape)
    print("Shape of labeled train and valid set: ", labeled_train_and_valid.data.shape)
    latent_dim = ae_pretrain_config["latent_dim"]
    encoding_model = _get_encoding_model(ae_pretrain_config["enc_model"], latent_dim, device, seed)
    n_clusters = ae_pretrain_config["n_clusters"]
    clustering_model = _get_clustering_model(n_clusters, seed)
    batch_size = ae_pretrain_config['batch_size']
    n_epochs = ae_pretrain_config['n_epochs']
    lr = ae_pretrain_config['lr']
    if ae_pretrain_config['train_encoder']:
        # Train the auto-encoder from scratch; fit() also returns the encodings
        # of the unlabeled training data.
        print("Start training of pre-train auto-encoder...")
        encoded_unlabeled_train, encoding_model = encoding_model.fit(data=unlabeled_train, batch_size=batch_size, n_epochs=n_epochs, lr=lr,
                                                                     device=device, experiment=experiment)
        print("Done")
    else:
        # Load encoding model and apply encoding
        print("Load pre-train auto-encoder...")
        path_to_model = ae_pretrain_config["encoder_path"]
        encoding_model.load_state_dict(torch.load(path_to_model)["model"])
        print("Done")
        print("Start encoding of unlabeled dataset...")
        encoded_unlabeled_train = encode_dataset(encoding_model, unlabeled_train, batch_size, device)
        print("Done")
    # Cluster the encodings and turn clusters into per-sample class predictions.
    array_unlabeled_samples, array_targets = \
        get_class_prediction(encoding_model, clustering_model, encoded_unlabeled_train, unlabeled_train,
                             labeled_train, labeled_valid,
                             batch_size, device, experiment)
    return array_unlabeled_samples, array_targets, labeled_train_and_valid
def _initialize_damic_autoencoders_weights(damic_model, damic_autoencoders_pretrain_config, numpy_unla_train,
                                           numpy_unla_target_pred_by_cluster, device, experiment):
    """
    Part of the pretraining for Damic is to initialize the weights for each autoencoders.
    Autoencoder aeK is trained only on the unlabeled samples whose predicted
    cluster is K-1 (17 clusters -> ae1..ae17).
    :param damic_model
    :param damic_autoencoders_pretrain_config: dictionary of configuration for this step
    :param numpy_unla_train: numpy array of unlabeled samples
    :param numpy_unla_target_pred_by_cluster: numpy array of targets for the unlabeled samples
    :param device: cpu or cuda
    :param experiment: comet experiment to log results
    :return: a damic_model with each autoencoders weights initialized
    """
    lr = damic_autoencoders_pretrain_config["lr"]
    n_epochs = damic_autoencoders_pretrain_config["n_epochs"]
    batch_size = damic_autoencoders_pretrain_config["batch_size"]
    # The autoencoders stay as individual attributes (ae1..ae17) rather than a
    # list/array: a previous attempt at wrapping them in an array broke
    # parameter optimization. getattr/setattr preserves that attribute layout
    # while replacing 17 copy-pasted .fit() calls with one loop.
    for cluster_id in range(17):
        attr_name = "ae{}".format(cluster_id + 1)
        autoencoder = getattr(damic_model, attr_name)
        # Only the samples the clustering step assigned to this cluster.
        cluster_samples = numpy_unla_train[np.where(np.isin(numpy_unla_target_pred_by_cluster, [cluster_id]))]
        _, trained_autoencoder = autoencoder.fit(data=cluster_samples, batch_size=batch_size, n_epochs=n_epochs,
                                                 lr=lr, device=device, experiment=experiment)
        setattr(damic_model, attr_name, trained_autoencoder)
    # Checkpoint the pre-trained autoencoders so this (expensive) step can be
    # reloaded later; the timestamp keeps successive runs from clobbering
    # each other.
    now = datetime.datetime.now()
    pth_filename = "../damic_models/autoencoders_pretrain_" + \
        str(now.month) + "_" + str(now.day) + "_" + str(now.hour) + "_" + str(now.minute)
    torch.save({
        "model": damic_model.state_dict(),
    }, pth_filename + ".pth")
    return damic_model
def execute_damic_pre_training(datapath, damic_model, train_subset, overlapped, ae_pretrain_config, conv_net_pretrain_config,
                               damic_autoencoders_pretrain_config, experiment, seed):
    """
    See 'Deep clustering based on a mixture of autoencoders' paper
    DAMIC needs a pre-training to initialize both weights for the autoencoders and the
    convolutional clustering network.
    At first we train one autoencoder (encoding_model) on all the unlabeled data available
    Then, we apply one clustering model (clustering_model) to label those unlabeled data
    After that, we train each autoencoder of DAMIC (1 encoder for each cluster/targets) only
    on the data of the same class. Ex: autoencoder at index 0 will be only trained on data
    labeled by the clustering model as being of class 0.
    Finally we initialize the weights of the convolutional clustering network of DAMIC by using
    CrossEntropyLoss on the labeled data.
    :param datapath: datapath for the data to do the pre training on
    :param damic_model: the model we want to do the pre-training on
    :param train_subset: how many data from the dataset we should do the training on
    :param overlapped: if True, we use the overlapped datasets
    :param ae_pretrain_config: configuration dictionary for the auto-encoder used during pre-training
    :param conv_net_pretrain_config: configuration dictionary for the convolutional clustering network for pre-tranining
    :param damic_autoencoders_pretrain_config: configuration dictionary for all the autoencoders part of DAMIC for pre-training
    :param experiment: comet-ml experiment to save training results
    :param seed: seed for reproducible results
    :return: the pre-trained damic_model, the unlabeled samples, their
        cluster-predicted targets, and the labeled train+valid dataset
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print("== Start DAMIC pre-training ...")
    # Step 1: encode all unlabeled data and cluster it to get pseudo-labels.
    numpy_unla_train, numpy_unla_target_pred_by_cluster, labeled_train_and_valid = \
        _get_class_predictions_for_damic_pretraining(datapath, train_subset, overlapped,
                                                     ae_pretrain_config, device, experiment, seed)
    # Use the k-means clustering to initialize the clustering network parameters
    # If no model path is specified for the pretraining, we train it from scratch, otherwise we load it
    conv_net_pretrain_path = conv_net_pretrain_config["conv_net_pretrain_path"]
    if conv_net_pretrain_path == "":
        damic_model = _initialize_damic_conv_clustering_net_weights(damic_model, conv_net_pretrain_config, numpy_unla_train,
                                                                    numpy_unla_target_pred_by_cluster, labeled_train_and_valid, device,
                                                                    experiment)
    else:
        damic_model.load_state_dict(torch.load(conv_net_pretrain_path)["model"])
    # Train each auto encoder of each cluster on his own data class
    autoencoders_pretrain_path = damic_autoencoders_pretrain_config["autoencoders_pretrain_path"]
    if autoencoders_pretrain_path == "":
        damic_model = _initialize_damic_autoencoders_weights(damic_model, damic_autoencoders_pretrain_config, numpy_unla_train,
                                                             numpy_unla_target_pred_by_cluster, device, experiment)
    else:
        damic_model.load_state_dict(torch.load(autoencoders_pretrain_path)["model"])
    print("== DAMIC Pre-training done!")
    return damic_model, numpy_unla_train, numpy_unla_target_pred_by_cluster, labeled_train_and_valid
def execute_damic_training(damic_model, configuration, numpy_unla_train, numpy_unla_target_pred_by_cluster, labeled_train_and_valid,
                           device, experiment):
    """
    See 'Deep clustering based on a mixture of autoencoders' paper
    Jointly train all of DAMIC's autoencoders together with its convolutional
    clustering network.
    :param damic_model: the model we want to train
    :param configuration: dictionary containing all the DAMIC keys/values from the config file
    :param numpy_unla_train: numpy array of the unlabeled training dataset
    :param numpy_unla_target_pred_by_cluster: numpy array with the targets for each unlabeled sample
    :param labeled_train_and_valid: dataset composed of the labeled train and validation sets
    :param device: cuda (training is done on gpu) or cpu
    :param experiment: comet-ml experiment to save training results
    :return: trained damic_model
    """
    print("== Start DAMIC training ...!")
    train_cfg = configuration['damic_train']
    # Optionally resume from a previously saved DAMIC checkpoint.
    checkpoint_path = train_cfg["damic_train_path"]
    if checkpoint_path != "":
        damic_model.load_state_dict(torch.load(checkpoint_path)["model"])
    batch_size = train_cfg["batch_size"]
    # Unlabeled data with pseudo-labels drives training; the real-label loader
    # is also handed to train_network.
    pseudo_label_dataset = LocalHoromaDataset(numpy_unla_train, numpy_unla_target_pred_by_cluster)
    pseudo_label_loader = DataLoader(pseudo_label_dataset, batch_size=batch_size)
    real_label_loader = DataLoader(labeled_train_and_valid, batch_size=batch_size)
    optimizer = torch.optim.Adam(damic_model.parameters(), lr=train_cfg["lr"])
    # Timestamped checkpoint name so successive runs never overwrite each other.
    now = datetime.datetime.now()
    checkpoint_name = "damic_train_{}_{}_{}_{}".format(now.month, now.day, now.hour, now.minute)
    damic_model = train_network(damic_model,
                                pseudo_label_loader,
                                real_label_loader,
                                optimizer,
                                train_cfg["n_epochs"],
                                device,
                                experiment,
                                train_classifier=False,
                                train_damic=True, folder_save_model="../damic_models/",
                                pth_filename_save_model=checkpoint_name)
    print("== DAMIC training done!")
    return damic_model
def get_accuracy_f1_scores_from_damic_model(damic_model, labeled_train_and_valid, device):
    """
    Predict labels and compare to true labels to compute the accuracy and F1 score.
    :param damic_model: the trained DAMIC model to evaluate
    :param labeled_train_and_valid: dataset composed of the labeled trained and validation set
    :param device: cuda (training is done on gpu) or cpu
    :return: predictions made by damic, accuracy and f1 score
    """
    print("Evaluating DAMIC model ...")
    # BUG FIX: this module does `import time` (the module), so the original
    # `time()` call raised TypeError; the function is time.time().
    start_time = time.time()
    # A single batch holding the entire labeled set.
    valid_and_train_real_label_loader = DataLoader(labeled_train_and_valid, batch_size=len(labeled_train_and_valid))
    with torch.no_grad():
        for inputs, labels in valid_and_train_real_label_loader:
            inputs = inputs.to(device)
            labels = labels.long()
            labels = labels.squeeze()
            print("Accuracy predictions")
            damic_predictions = damic_model(inputs)
            # Keep the index of the most likely cluster for each sample.
            _, damic_predictions = damic_predictions.max(1)
            print("DAMIC predictions results")
            print(damic_predictions)
            print("Expected results")
            print(labels)
    accuracy, f1 = compute_metrics(labels, damic_predictions.cpu())
    print(
        "Done in {:.2f} sec | Accuracy: {:.2f} - F1: {:.2f}".format(time.time() - start_time, accuracy * 100, f1 * 100))
    return damic_predictions, accuracy, f1
<file_sep>import os
import numpy as np
import math
import torch
from torch.optim import Optimizer
from torch.utils.data import Dataset, Subset
from torchvision.transforms import functional
import math
from bisect import bisect_right, bisect_left
import torch
import numpy as np
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
class OAdam(Optimizer):
    """Implements optimistic Adam algorithm.
    It has been proposed in `Training GANs with Optimism`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
    .. _Training GANs with Optimism:
        https://arxiv.org/abs/1711.00141
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        # Fail fast on invalid hyper-parameters.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(OAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        # BUG FIX: this previously called super(Adam, self).__setstate__(state);
        # `Adam` is not defined in this module, so unpickling an OAdam instance
        # raised NameError.
        super(OAdam, self).__setstate__(state)
        for group in self.param_groups:
            # Older checkpoints may predate the amsgrad option.
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                if group['weight_decay'] != 0:
                    # In-place L2 penalty on the gradient (modern `alpha=`
                    # keyword replaces the deprecated positional-scalar form).
                    grad.add_(p.data, alpha=group['weight_decay'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * \
                    math.sqrt(bias_correction2) / bias_correction1
                # Optimistic update: step *forward* along the STALE moment
                # estimates before they are refreshed; combined with the -2x
                # step below this yields the optimistic-gradient update.
                p.data.addcdiv_(exp_avg,
                                exp_avg_sq.sqrt().add(group['eps']),
                                value=step_size)
                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                p.data.addcdiv_(exp_avg, denom, value=-2.0 * step_size)
        return loss
class OptMirrorAdam(Optimizer):
    """Implements Optimistic Adam algorithm. Built on official implementation of Adam by pytorch.
    See "Optimistic Mirror Descent in Saddle-Point Problems: Going the Extra (-Gradient) Mile"
    double blind review, paper: https://openreview.net/pdf?id=Bkg8jjC9KQ
    Standard Adam
    It has been proposed in `Adam: A Method for Stochastic Optimization`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
    .. _Adam: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False, extragradient=True):
        # Fail fast on invalid hyper-parameters.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad, extragradient=extragradient)
        super(OptMirrorAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        # BUG FIX: this previously called super(Adam, self).__setstate__(state);
        # `Adam` is not defined in this module, so unpickling raised NameError.
        super(OptMirrorAdam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step (two Adam half-steps).
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss. Needed for the extra-gradient re-evaluation.
        """
        loss = None
        if closure is not None:
            loss = closure()
        # NOTE(review): list.copy() is shallow — the "copy" shares the same
        # group dicts and Parameter objects as self.param_groups, so p_copy
        # below aliases p and no real snapshot of the pre-step weights exists.
        # Preserved as-is to keep behavior identical; confirm against the paper.
        param_groups_copy = self.param_groups.copy()
        # ############### First update of gradients ###########################
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                extragradient = group['extragradient']
                state = self.state[p]
                # State initialization: phase 1 and phase 2 keep separate moments.
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving averages of gradient values
                    state['exp_avg_1'] = torch.zeros_like(p.data)
                    state['exp_avg_2'] = torch.zeros_like(p.data)
                    # Exponential moving averages of squared gradient values
                    state['exp_avg_sq_1'] = torch.zeros_like(p.data)
                    state['exp_avg_sq_2'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq_1'] = torch.zeros_like(p.data)
                        state['max_exp_avg_sq_2'] = torch.zeros_like(p.data)
                exp_avg1, exp_avg_sq1 = state['exp_avg_1'], state['exp_avg_sq_1']
                if amsgrad:
                    max_exp_avg_sq1 = state['max_exp_avg_sq_1']
                beta1, beta2 = group['betas']
                # Step counter is advanced only once per step(), in this loop.
                state['step'] += 1
                if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])
                # Decay the first and second moment running average coefficient
                exp_avg1.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq1.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # Rebind (not in-place) so the stored state keeps the
                # uncorrected running means.
                exp_avg1 = torch.div(exp_avg1, bias_correction1)
                exp_avg_sq1 = torch.div(exp_avg_sq1, bias_correction2)
                if amsgrad:
                    torch.max(max_exp_avg_sq1, exp_avg_sq1,
                              out=max_exp_avg_sq1)
                    denom1 = max_exp_avg_sq1.sqrt().add_(group['eps'])
                else:
                    denom1 = exp_avg_sq1.sqrt().add_(group['eps'])
                step_size1 = group['lr'] * \
                    math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg1, denom1, value=-step_size1)
        # Re-evaluate the stochastic gradient at the intermediate point
        # (extra-gradient step) when a closure is available.
        if extragradient:
            if closure is not None:
                loss = closure()
        # ############### Second evaluation of gradient step ##################
        for (group, group_copy) in zip(self.param_groups, param_groups_copy):
            for (p, p_copy) in zip(group['params'], group_copy['params']):
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                exp_avg2, exp_avg_sq2 = state['exp_avg_2'], state['exp_avg_sq_2']
                if amsgrad:
                    max_exp_avg_sq2 = state['max_exp_avg_sq_2']
                beta1, beta2 = group['betas']
                if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])
                # Decay the first and second moment running average coefficient
                exp_avg2.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq2.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # NOTE(review): bias_correction1/2 are reused from the last
                # iteration of the first loop (loop-variable leakage) — kept
                # as-is. They are undefined if the first loop never ran, but in
                # that case every p.grad is None and this loop skips too.
                exp_avg2 = torch.div(exp_avg2, bias_correction1)
                exp_avg_sq2 = torch.div(exp_avg_sq2, bias_correction2)
                if amsgrad:
                    torch.max(max_exp_avg_sq2, exp_avg_sq2,
                              out=max_exp_avg_sq2)
                    denom2 = max_exp_avg_sq2.sqrt().add_(group['eps'])
                else:
                    denom2 = exp_avg_sq2.sqrt().add_(group['eps'])
                step_size2 = group['lr'] * \
                    math.sqrt(bias_correction2) / bias_correction1
                p_copy.data.addcdiv_(exp_avg2, denom2, value=-step_size2)
                p = p_copy  # intended to restore parameters; a no-op rebinding
        return loss
class CyclicCosAnnealingLR(_LRScheduler):
    r"""
    Implements reset on milestones inspired from CosineAnnealingLR pytorch

    Set the learning rate of each parameter group using a cosine annealing
    schedule, where :math:`\eta_{max}` is set to the initial lr and
    :math:`T_{cur}` is the number of epochs since the last restart in SGDR:

    .. math::
        \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
        \cos(\frac{T_{cur}}{T_{max}}\pi))

    When last_epoch > last set milestone, lr is automatically set to \eta_{min}

    It has been proposed in
    `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
    implements the cosine annealing part of SGDR, and not the restarts.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        milestones (list of ints): List of epoch indices. Must be increasing.
        eta_min (float): Minimum learning rate. Default: 0.
        last_epoch (int): The index of last epoch. Default: -1.

    .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
        https://arxiv.org/abs/1608.03983
    """

    def __init__(self, optimizer, milestones, eta_min=0, last_epoch=-1):
        if not list(milestones) == sorted(milestones):
            # Use str.format so the offending value actually appears in the
            # message (previously the value was passed as a second, ignored
            # ValueError argument and never interpolated into '{}').
            raise ValueError('Milestones should be a list of'
                             ' increasing integers. Got {}'.format(milestones))
        self.eta_min = eta_min
        # Must be set before the base-class __init__, which calls
        # step() -> get_lr() and therefore reads self.milestones.
        self.milestones = milestones
        super(CyclicCosAnnealingLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the lr of each param group for the current cycle position."""
        # Past the final milestone: stay at the floor learning rate.
        if self.last_epoch >= self.milestones[-1]:
            return [self.eta_min for base_lr in self.base_lrs]
        # Locate the cycle [left_barrier, right_barrier) containing last_epoch.
        idx = bisect_right(self.milestones, self.last_epoch)
        left_barrier = 0 if idx == 0 else self.milestones[idx - 1]
        right_barrier = self.milestones[idx]
        width = right_barrier - left_barrier
        curr_pos = self.last_epoch - left_barrier
        # Cosine annealing within the current cycle.
        return [self.eta_min + (base_lr - self.eta_min) *
                (1 + math.cos(math.pi * curr_pos / width)) / 2
                for base_lr in self.base_lrs]
<file_sep>import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from collections import OrderedDict
cfg = {
'AllConv13': [128, 128, 128, 'M', 256, 256, 256, 'M', 512, 256, 128, 'A'],
}
# Some utils class
class Reshape(nn.Module):
    """
    Reshape a flat batch back into a 4D feature map.

    Input shape: (N, C * W * H)
    Output shape: (N, C, W, H), where (C, W, H) == ``shape``.
    """

    def __init__(self, shape, **kwargs):
        super(Reshape, self).__init__(**kwargs)
        self._shape = shape

    def forward(self, x):
        target = self._shape
        return x.reshape(x.size(0), target[0], target[1], target[2])
class BiasAdder(nn.Module):
    """
    Add a learnable per-channel bias to a 4D input, broadcast over the
    batch and spatial dimensions.
    """

    def __init__(self, channels, **kwargs):
        super(BiasAdder, self).__init__(**kwargs)
        # One bias value per channel, initialised uniformly in [-0.1, 0.1].
        bias = th.Tensor(1, channels, 1, 1)
        bias.uniform_(-0.1, 0.1)
        self.bias = nn.Parameter(bias)

    def forward(self, x):
        return x + self.bias
class Flatten(nn.Module):
    """
    Collapse every non-batch dimension: (N, C, H, W) -> (N, C*H*W).
    """

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class Upaverage(nn.Module):
    """
    Reverse an average-pooling layer: nearest-neighbour upsampling scaled
    by 1 / scale_factor**2 so the total activation mass is preserved.
    """

    def __init__(self, scale_factor, **kwargs):
        super(Upaverage, self).__init__(**kwargs)
        self.scale_factor = scale_factor
        self.upsample_layer = nn.Upsample(
            scale_factor=scale_factor, mode='nearest')

    def forward(self, x):
        weight = (1. / self.scale_factor) ** 2
        return self.upsample_layer(x) * weight
def make_one_hot(labels, C=2):
    """
    Converts an integer label torch tensor to a one-hot tensor.

    :param labels: integer tensor of class indices in [0, C)
    :param C: number of classes (default 2)
    :return: float one-hot tensor with a trailing dimension of size C,
             placed on the same device as ``labels``
    """
    target = th.eye(C)[labels.data]
    # Use .device instead of .get_device(): get_device() returns -1 for CPU
    # tensors, which is not a valid argument to .to(), so the original only
    # worked for CUDA tensors.
    target = target.to(labels.device)
    return target
# Main NRM class
class NRM(nn.Module):
    """
    Neural Rendering Model: a bottom-up CNN classifier optionally paired
    with a top-down reconstruction path (the "nrm") that mirrors the CNN
    layer by layer and shares its convolution weights.

    :param net_name: key into the module-level ``cfg`` dict describing the
        convolutional architecture
    :param batch_size: training batch size (sizes the per-sample loss buffers)
    :param num_class: number of output classes
    :param use_bias: add explicit bias terms in the forward CNN
    :param use_bn: use batch normalization in the forward CNN
    :param do_topdown: build and run the top-down reconstruction path
    :param do_pn: compute the path-normalization loss (requires do_topdown)
    :param do_bnmm: compute the batch-norm moment-matching loss
    """

    def __init__(self, net_name, batch_size, num_class, use_bias=False, use_bn=False, do_topdown=False, do_pn=False, do_bnmm=False):
        super(NRM, self).__init__()
        self.num_class = num_class
        self.do_topdown = do_topdown
        self.do_pn = do_pn
        self.do_bnmm = do_bnmm
        self.use_bn = use_bn
        self.use_bias = use_bias
        self.batch_size = batch_size
        # create:
        # feature extractor in the forward cnn step: self.features
        # corresponding layer in the top-down reconstruction nrm step: layers_nrm
        # instance norm used in the top-down reconstruction nrm step: insnorms_nrm
        # instance norm used in the forward cnn step: insnorms_cnn
        self.features, layers_nrm, insnorms_nrm, insnorms_cnn = self._make_layers(
            cfg[net_name], use_bias, use_bn, self.do_topdown)
        # create the classifier in the forward cnn step
        conv_layer = nn.Conv2d(in_channels=cfg[
            net_name][-2], out_channels=self.num_class, kernel_size=(1, 1), bias=True)
        flatten_layer = Flatten()
        self.classifier = nn.Sequential(OrderedDict(
            [('conv', conv_layer), ('flatten', flatten_layer)]))
        # create the nrm
        if self.do_topdown:
            # add layers corresponding to the classifier in the forward step;
            # the transposed conv shares the classifier's weights
            convtd_layer = nn.ConvTranspose2d(out_channels=cfg[
                net_name][-2], in_channels=self.num_class, kernel_size=(1, 1), stride=(1, 1), bias=False)
            convtd_layer.weight.data = conv_layer.weight.data
            layers_nrm += [('convtd', convtd_layer),
                           ('reshape', Reshape(shape=(self.num_class, 1, 1)))]
            # nrm layers run in the reverse order of the cnn layers
            self.nrm = nn.Sequential(OrderedDict(layers_nrm[::-1]))
            # if use path normalization, then also use instance normalization
            if self.do_pn:
                self.insnorms_nrm = nn.Sequential(
                    OrderedDict(insnorms_nrm[::-1]))
                self.insnorms_cnn = nn.Sequential(OrderedDict(insnorms_cnn))

    def forward(self, x, y=None):
        """
        Bottom-up classification pass; optionally also the top-down pass.

        :param x: input image batch
        :param y: optional integer labels; when given, the top-down
            reconstruction is conditioned on them, otherwise on the argmax
            of the logits
        :return: [logits, reconstruction or None, path-norm loss or None,
                  batch-norm moment-matching loss or None]
        """
        ahat = []      # relu gates per layer
        that = []      # maxpool gates per layer
        bcnn = []      # bias terms per layer
        apn = []       # relu gates of the bias-only path (path normalization)
        meancnn = []   # per-layer activation means (for moment matching)
        varcnn = []    # per-layer activation variances (for moment matching)
        # Bias-only input used by the path-normalization side computation.
        # .device (not .get_device()) so this also works on CPU tensors.
        xbias = th.zeros([1, x.shape[1], x.shape[2], x.shape[3]],
                         device=x.device) if self.do_pn else []
        insnormcnn_indx = 0
        # if do top-down reconstruction, we need to keep track of relu state, maxpool state,
        # mean and var of the activations, and the bias terms in the forward
        # cnn step
        if self.do_topdown:
            for name, layer in self.features.named_children():
                # keep track of the maxpool state
                if name.find('pool') != -1 and not name.find('average') != -1:
                    # (the discarded duplicate F.interpolate call that used
                    # to precede this append was dead code and was removed)
                    that.append(
                        th.gt(x - F.interpolate(layer(x), scale_factor=2, mode='nearest'), 0))
                    x = layer(x)
                    if self.do_pn:
                        xbias = layer(xbias)
                else:
                    x = layer(x)
                    if self.do_pn:  # get the forward results to compute the path normalization later
                        if name.find('batchnorm') != -1:
                            xbias = self.insnorms_cnn[insnormcnn_indx](xbias)
                            insnormcnn_indx += 1
                        else:
                            xbias = layer(xbias)
                if name.find('relu') != -1:  # keep track of the relu state
                    # leaky-relu gate: 1 where positive, 0.1 where negative
                    ahat.append(th.gt(x, 0) + th.le(x, 0) * 0.1)
                    if self.do_pn:
                        apn.append(th.gt(xbias, 0) + th.le(xbias, 0) * 0.1)
                if self.use_bn:
                    # keep track of the mean and var of the activations
                    if name.find('conv') != -1:
                        meancnn.append(
                            th.mean(x, dim=(0, 2, 3), keepdim=True))
                        varcnn.append(th.mean(
                            (x - th.mean(x, dim=(0, 2, 3), keepdim=True))**2, dim=(0, 2, 3), keepdim=True))
                    if self.use_bias:  # keep track of the bias terms when adding bias
                        if name.find('bias') != -1:
                            bcnn.append(layer.bias)
                    else:  # otherwise, keep track of the bias terms inside the batch norm
                        if name.find('batchnorm') != -1:
                            bcnn.append(layer.bias)
                else:
                    if self.use_bias:
                        if name.find('conv') != -1:
                            bcnn.append(layer.bias)
            # reverse the order of the parameters/variables that we keep track
            # to use in the top-down reconstruction nrm step since nrm is the
            # reverse of cnn
            ahat = ahat[::-1]
            that = that[::-1]
            bcnn = bcnn[::-1]
            apn = apn[::-1]
            meancnn = meancnn[::-1]
            varcnn = varcnn[::-1]
        else:
            x = self.features(x)
        # send the features into the classifier
        z = self.classifier(x)
        # do reconstruction via nrm
        # xhat: the reconstruction image
        # loss_pn: path normalization loss
        # loss_bnmm: batch norm moment matching loss
        if self.do_topdown:
            xhat, _, loss_pn, loss_bnmm = self.topdown(self.nrm, make_one_hot(y, self.num_class), ahat, that, bcnn, th.ones([1, z.size()[1]], device=z.device), apn, meancnn, varcnn) if y is not None else self.topdown(
                self.nrm, make_one_hot(th.argmax(z.detach(), dim=1), self.num_class), ahat, that, bcnn, th.ones([1, z.size()[1]], device=z.device), apn, meancnn, varcnn)
        else:
            xhat = None
            loss_pn = None
            loss_bnmm = None
        return [z, xhat, loss_pn, loss_bnmm]

    def _make_layers(self, cfg, use_bias, use_bn, do_topdown):
        """
        Build the forward CNN and, optionally, the mirrored top-down layers.

        :param cfg: architecture spec; ints are conv output channels,
            'M' is max pooling, 'A' is average pooling
        :return: (cnn Sequential, nrm layer list, nrm instance-norm list,
                  cnn instance-norm list)
        """
        layers = []
        layers_nrm = []
        insnorms_nrm = []
        insnorms_cnn = []
        in_channels = 3
        for i, x in enumerate(cfg):
            # if max pooling layer, then add max pooling and dropout into the
            # cnn. Add upsample layers, dropout, batchnorm, and instance norm -
            # for path normaliztion - into the nrm.
            if x == 'M':
                layers += [('pool%i' % i, nn.MaxPool2d(2, stride=2)),
                           ('dropout%i' % i, nn.Dropout(0.5))]
                if do_topdown:
                    if use_bn:
                        layers_nrm += [('upsample%i' % i, nn.Upsample(scale_factor=2, mode='nearest')),
                                       ('dropout%i' % i, nn.Dropout(0.5)), ('batchnorm%i' % i, nn.BatchNorm2d(cfg[i - 1]))]
                        insnorms_nrm += [('instancenormtd%i' %
                                          i, nn.InstanceNorm2d(cfg[i - 1], affine=True))]
                    else:
                        layers_nrm += [('upsample%i' % i, nn.Upsample(scale_factor=2,
                                                                      mode='nearest')), ('dropout%i' % i, nn.Dropout(0.5))]
            # if avg pooling layer, then add average pooling layer into the
            # cnn. Add up average layers, batchnorm and instance norm - for
            # path normaliztion - into the nrm.
            elif x == 'A':
                layers += [('average%i' % i, nn.AvgPool2d(6, stride=1))]
                if do_topdown:
                    if use_bn:
                        layers_nrm += [('upaverage%i' % i, Upaverage(scale_factor=6)),
                                       ('batchnorm%i' % i, nn.BatchNorm2d(cfg[i - 1]))]
                        insnorms_nrm += [('instancenormtd%i' %
                                          i, nn.InstanceNorm2d(cfg[i - 1], affine=True))]
                    else:
                        layers_nrm += [('upaverage%i' %
                                        i, Upaverage(scale_factor=6))]
            else:  # add other layers into the cnn and the nrm
                padding_cnn = (0, 0) if x == 512 else (1, 1)
                padding_nrm = (0, 0) if x == 512 else (1, 1)
                if use_bn:
                    conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=x, kernel_size=(
                        3, 3), padding=padding_cnn, bias=False)
                    if use_bias:
                        layers += [('conv%i' % i, conv_layer),
                                   ('batchnorm%i' % i, nn.BatchNorm2d(x)),
                                   ('bias%i' % i, BiasAdder(channels=x)),
                                   ('relu%i' % i, nn.LeakyReLU(0.1))]
                    else:
                        layers += [('conv%i' % i, conv_layer),
                                   ('batchnorm%i' % i, nn.BatchNorm2d(x)),
                                   ('relu%i' % i, nn.LeakyReLU(0.1))]
                    insnorms_cnn += [('instancenormcnn%i' %
                                      i, nn.InstanceNorm2d(x, affine=True))]
                    if do_topdown:
                        if (cfg[i - 1] == 'M' or cfg[i - 1] == 'A') and not i == 0:
                            layers_nrm += [('convtd%i' % i, nn.ConvTranspose2d(out_channels=in_channels, in_channels=x, kernel_size=3, stride=(1, 1),
                                                                               padding=padding_nrm, bias=False))]
                            # tie top-down weights to the forward conv weights
                            layers_nrm[-1][-1].weight.data = conv_layer.weight.data
                        else:
                            layers_nrm += [('batchnormtd%i' % i, nn.BatchNorm2d(in_channels)), ('convtd%i' % i, nn.ConvTranspose2d(
                                out_channels=in_channels, in_channels=x, kernel_size=3, stride=(1, 1), padding=padding_nrm, bias=False))]
                            layers_nrm[-1][-1].weight.data = conv_layer.weight.data
                        insnorms_nrm += [('instancenormtd%i' %
                                          i, nn.InstanceNorm2d(in_channels, affine=True))]
                elif use_bias:
                    # FIX: was ``use_bias=True`` — nn.Conv2d has no such
                    # keyword; the original raised TypeError whenever
                    # use_bn=False and use_bias=True.
                    conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=x, kernel_size=(
                        3, 3), padding=padding_cnn, bias=True)
                    layers += [('conv%i' % i, conv_layer),
                               ('relu%i' % i, nn.LeakyReLU(0.1))]
                    if do_topdown:
                        layers_nrm += [('convtd%i' % i, nn.ConvTranspose2d(out_channels=in_channels, in_channels=x, kernel_size=3, stride=(1, 1),
                                                                           padding=padding_nrm, bias=False))]
                        layers_nrm[-1][-1].weight.data = conv_layer.weight.data
                else:
                    conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=x, kernel_size=(
                        3, 3), padding=padding_cnn, bias=False)
                    layers += [('conv%i' % i, conv_layer),
                               ('relu%i' % i, nn.LeakyReLU(0.1))]
                    if do_topdown:
                        layers_nrm += [('convtd%i' % i, nn.ConvTranspose2d(out_channels=in_channels, in_channels=x, kernel_size=3, stride=(1, 1),
                                                                           padding=padding_nrm, bias=False))]
                        layers_nrm[-1][-1].weight.data = conv_layer.weight.data
            in_channels = x
        model = nn.Sequential(OrderedDict(layers))
        return model, layers_nrm, insnorms_nrm, insnorms_cnn

    def topdown(self, net, xhat, ahat, that, bcnn, xpn, apn, meancnn, varcnn):
        """
        Run the top-down reconstruction through ``net`` (the nrm).

        :param net: the top-down Sequential (reverse order of the CNN)
        :param xhat: one-hot class vector that seeds the reconstruction
        :param ahat/that: relu / maxpool gates recorded by forward()
        :param bcnn: per-layer bias terms recorded by forward()
        :param xpn/apn: seed and relu gates of the bias-only path
        :param meancnn/varcnn: per-layer activation stats recorded by forward()
        :return: (reconstruction, path-norm intermediate, loss_pn, loss_bnmm)
        """
        mu = xhat
        mupn = xpn
        # .device (not .get_device()) so these buffers work on CPU too
        loss_pn = th.zeros([self.batch_size, ], device=mu.device)
        loss_bnmm = th.zeros([self.batch_size, ], device=mu.device)
        ahat_indx = 0
        that_indx = 0
        meanvar_indx = 0
        insnormtd_indx = 0
        prev_name = ''
        for i, (name, layer) in enumerate(net.named_children()):
            if name.find('conv') != -1 and i > 1:
                # mask the intermediate rendered images by the relu states in
                # the forward step
                mu = mu * \
                    ahat[ahat_indx].type(th.FloatTensor).to(mu.device)
                if self.do_pn:  # compute the path normalization loss
                    mupn = mupn * \
                        apn[ahat_indx].type(th.FloatTensor).to(mu.device)
                    mu_b = bcnn[ahat_indx].data.reshape((1, -1, 1, 1)) * mu
                    mupn_b = bcnn[ahat_indx].data.reshape((1, -1, 1, 1)) * mupn
                    loss_pn_layer = th.mean(
                        th.abs(mu_b - mupn_b), dim=(1, 2, 3))
                    loss_pn = loss_pn + loss_pn_layer
                ahat_indx += 1
            # FIXME(review): nrm child names are 'upsample%i'/'upaverage%i',
            # so the substring 'upsamplelayer' never matches and this maxpool
            # unmasking branch appears to be dead — confirm the intended
            # substring (likely 'upsample') before changing behavior.
            if prev_name.find('upsamplelayer') != -1 and not prev_name.find('avg') != -1:
                # mask the intermediate rendered images by the maxpool states
                # in the forward step
                mu = mu * \
                    that[that_indx].type(th.FloatTensor).to(mu.device)
                if self.do_pn:
                    mupn = mupn * \
                        that[that_indx].type(
                            th.FloatTensor).to(mu.device)
                that_indx += 1
            # compute the next intermediate rendered images
            mu = layer(mu)
            # compute the next intermediate rendered results for computing the
            # path normalization loss in the next layer
            if (name.find('batchnorm') != -1) and (i < len(net) - 1):
                if self.do_pn:
                    mupn = self.insnorms_nrm[insnormtd_indx](mupn)
                    insnormtd_indx += 1
            else:
                if self.do_pn:
                    mupn = layer(mupn)
            if (name.find('conv') != -1) and (i != (len(net) - 2)):
                if self.do_bnmm and self.use_bn:
                    # compute the KL distance between two Gaussians - the
                    # intermediate rendered images and the mean/var from the
                    # forward step
                    loss_bnmm = loss_bnmm + 0.5 * th.mean(((th.mean(mu, dim=(0, 2, 3)) - meancnn[meanvar_indx])**2) / varcnn[meanvar_indx]) + 0.5 * th.mean(th.mean((mu - th.mean(mu, dim=(0, 2, 3), keepdim=True))**2, dim=(
                        0, 2, 3)) / varcnn[meanvar_indx]) - 0.5 * th.mean(th.log(th.mean((mu - th.mean(mu, dim=(0, 2, 3), keepdim=True))**2, dim=(0, 2, 3)) + 1e-8) - th.log(varcnn[meanvar_indx])) - 0.5
                    meanvar_indx += 1
            prev_name = name
        return mu, mupn, loss_pn, loss_bnmm
<file_sep>import numpy as np
from torch.utils.data import DataLoader, SubsetRandomSampler
from copy import deepcopy
import torch.nn.functional as F
import torch.nn as nn
import torch
from utils.utils import __compute_metrics
def get_ae_dataloaders(traindata, batch_size, split):
    """Split one dataset into shuffled train/valid DataLoaders.

    :param traindata: dataset to split
    :param batch_size: batch size used by both loaders
    :param split: fraction of the samples assigned to the train loader
    :return: (train_loader, valid_loader)
    """
    indices = list(range(len(traindata)))
    np.random.shuffle(indices)
    cutoff = int(split * len(indices))
    train_sampler = SubsetRandomSampler(indices[:cutoff])
    valid_sampler = SubsetRandomSampler(indices[cutoff:])
    return (DataLoader(traindata, batch_size=batch_size, sampler=train_sampler),
            DataLoader(traindata, batch_size=batch_size, sampler=valid_sampler))
def get_cae_dataloaders(traindata, valid_data, batch_size):
    """Build a shuffled train loader and a sequential valid loader.

    :param traindata: training dataset (shuffled every epoch)
    :param valid_data: validation dataset (kept in order)
    :param batch_size: batch size used by both loaders
    :return: (train_loader, valid_loader)
    """
    return (DataLoader(traindata, batch_size=batch_size, shuffle=True),
            DataLoader(valid_data, batch_size=batch_size))
def loss_function(recon_x, x, mu, logvar):
    """VAE objective: summed reconstruction MSE plus KL divergence.

    see Appendix B from VAE paper:
    Kingma & Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    https://arxiv.org/abs/1312.6114
    """
    flat_dim = 32 * 32 * 3
    reconstruction = F.mse_loss(recon_x.view(-1, flat_dim),
                                x.view(-1, flat_dim), reduction='sum')
    # KL(q || N(0, I)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    divergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return reconstruction + divergence
def encode_dataset(model, data, batch_size, device, is_unlabeled=True):
    """Encode a whole dataset using model (must provide an ``encode`` method).

    :param model: object exposing ``encode(batch) -> tensor``
    :param data: dataset to encode
    :param batch_size: batch size for the pass
    :param device: device batches are moved to before encoding
    :param is_unlabeled: when False, batches are (inputs, labels) pairs
    :return: all encodings concatenated along dim 0
    """
    full_loader = DataLoader(data, batch_size=batch_size)
    encodings = []
    with torch.no_grad():
        for batch in full_loader:
            if is_unlabeled:
                inputs = batch
                # some datasets still yield (inputs,) tuples
                if isinstance(inputs, (tuple, list)):
                    inputs = inputs[0]
            else:
                inputs, _ = batch
            encodings.append(model.encode(inputs.to(device)))
    return torch.cat(encodings, dim=0)
def _train_one_epoch(model, train_loader, optimizer, epoch, device, experiment):
"""
Train one epoch for the model
:param model: model on which we do the training
:param train_loader: dataloader
:param optimizer: optimizer
:param epoch: number of epoch
:param device: cpu or cuda
:param experiment: comet experiment to log results
:return: loss for this epoch
"""
model.train()
running_loss = 0.0
for batch_idx, inputs in enumerate(train_loader):
if isinstance(inputs, (tuple, list)):
inputs = inputs[0]
inputs = inputs.to(device)
optimizer.zero_grad()
if model.is_variational:
pred, mu, logvar = model(inputs)
if model.calculate_own_loss:
loss = mu
else:
loss = loss_function(pred, inputs, mu, logvar)
else:
outputs = model(inputs)
criterion = nn.MSELoss(reduction='sum')
loss = criterion(outputs, inputs)
loss.backward()
optimizer.step()
running_loss += loss.item()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * len(inputs),
len(train_loader) *
len(inputs),
100. * batch_idx /
len(train_loader),
loss.item() / len(inputs)))
train_loss = running_loss / len(train_loader.dataset)
experiment.log_metric("Train loss", train_loss, step=epoch)
return train_loss
def _train_one_epoch_classifier(model, train_loader, optimizer, epoch, device, experiment):
"""
Train one epoch for a model of type classifier
:param model: model on which we do the training
:param train_loader: dataloader
:param optimizer: optimizer
:param epoch: number of epoch
:param device: cpu or cuda
:param experiment: comet experiment to log results
:return: loss for this epoch
"""
model.train()
running_loss = 0.0
for batch_idx, data in enumerate(train_loader):
inputs, labels = data
labels = labels.long()
labels = labels.squeeze()
inputs = inputs.to(device)
labels = labels.to(device)
labels[labels < 0] = 0
labels[labels > 16] = 16
optimizer.zero_grad()
outputs = model(inputs)
criterion = nn.CrossEntropyLoss()
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * len(inputs),
len(train_loader) * len(inputs),
100. * batch_idx / len(train_loader),
loss.item() / len(inputs)))
model.status = "Conv net pretrain done"
train_loss = running_loss / len(train_loader)
experiment.log_metric("Conv classifier pretrain loss", train_loss, step=epoch)
return train_loss
def _train_one_epoch_damic(model, train_loader, optimizer, epoch, device, experiment):
"""
Train one epoch for a Damic model
:param model: model on which we do the training
:param train_loader: dataloader
:param optimizer: optimizer
:param epoch: number of epoch
:param device: cpu or cuda
:param experiment: comet experiment to log results
:return: loss for this epoch
"""
model.train()
running_loss = 0.0
print("====== TRAINING DAMIC")
for batch_idx, data in enumerate(train_loader):
inputs, _ = data
current_batch_size = inputs.shape[0]
inputs = inputs.to(device)
optimizer.zero_grad()
conv_net_class_predictions, ae_reconstruction = model.train_damic(inputs, current_batch_size)
criterion_ae = nn.MSELoss()
loss_autoencoders = torch.FloatTensor(17, len(inputs)).zero_().to(device)
for i in range(17):
loss_autoencoder = criterion_ae(inputs, ae_reconstruction[i].to(device))
loss_autoencoders[i] = -(loss_autoencoder / 2.0)
loss_autoencoders = loss_autoencoders.transpose(0, 1)
# Calculate loss given the formula (3) p2 from 'Deep clustering based on a mixture of autoencoders paper'
exp_loss_autoencoders = loss_autoencoders.exp()
total_loss_per_class = conv_net_class_predictions * exp_loss_autoencoders
total_loss = total_loss_per_class.sum(dim=1)
total_loss_log = total_loss.log()
total_loss_log_sum = total_loss_log.sum()
# Simultaneously train all the autoencoders and the convolutional network
total_loss_log_sum.backward()
optimizer.step()
running_loss += total_loss_log_sum.item()
train_loss = running_loss / len(train_loader)
experiment.log_metric("DAMIC train loss", train_loss, step=epoch)
return train_loss
def _test(model, test_loader, epoch, device, experiment):
""" Compute reconstruction loss of model over given dataset. Model is an autoencoder"""
test_loss = 0
test_size = 0
with torch.no_grad():
for inputs in test_loader:
if isinstance(inputs, (tuple, list)):
inputs = inputs[0]
inputs = inputs.to(device)
# ConvAe is variational
if model.is_variational:
output, mu, logvar = model(inputs)
if model.calculate_own_loss:
test_loss += mu
else:
test_loss += loss_function(output,
inputs, mu, logvar).item()
test_size += len(inputs)
else:
output = model(inputs)
criterion = nn.MSELoss(reduction='sum')
test_loss += criterion(output, inputs).item()
test_size += len(inputs)
test_loss /= test_size
experiment.log_metric("Validation loss", test_loss, step=epoch)
return test_loss
def _test_classifier(model, test_loader, epoch, device, experiment):
""" Compute cross entropy loss over given test_loader """
test_loss = 0
test_size = 0
with torch.no_grad():
for inputs, labels in test_loader:
inputs = inputs.to(device)
labels = labels.long()
labels = labels.squeeze()
labels = labels.to(device)
outputs = model(inputs)
criterion = nn.CrossEntropyLoss()
test_loss += criterion(outputs, labels).item()
test_size += len(inputs)
test_loss /= test_size
experiment.log_metric("Conv classifier pretrain validation loss", test_loss, step=epoch)
return test_loss
def train_network(model, train_loader, test_loader, optimizer, n_epochs, device, experiment, train_classifier=False, train_damic=False,
                  folder_save_model="../experiment_models/", pth_filename_save_model=""):
    """
    Train a network, checkpointing whenever validation loss improves.

    :param model: model on which we do the training
    :param train_loader: dataloader with training samples
    :param test_loader: dataloader with test samples
    :param optimizer: optimizer
    :param n_epochs: number of epochs
    :param device: cpu or cuda
    :param experiment: comet experiment to log results
    :param train_classifier: if True we train a classifier
    :param train_damic: if True we train a Damic model
    :param folder_save_model: folder where to save the best model
    :param pth_filename_save_model: name of the pth file for the best model
        (defaults to the comet experiment key)
    :return: best model after the training
    """
    best_loss = np.inf
    key = experiment.get_key()
    best_model = None
    for epoch in range(n_epochs):
        # pick the epoch routines matching the kind of model being trained
        if train_damic:
            train_loss = _train_one_epoch_damic(model, train_loader, optimizer, epoch, device, experiment)
            valid_loss = _test_classifier(model, test_loader, epoch, device, experiment)
        elif train_classifier:
            train_loss = _train_one_epoch_classifier(model, train_loader, optimizer, epoch, device, experiment)
            valid_loss = _test_classifier(model, test_loader, epoch, device, experiment)
        else:
            train_loss = _train_one_epoch(model, train_loader, optimizer, epoch, device, experiment)
            valid_loss = _test(model, test_loader, epoch, device, experiment)
        try:
            if valid_loss < best_loss:
                if pth_filename_save_model == "":
                    target_path = folder_save_model + str(key) + '.pth'
                else:
                    target_path = folder_save_model + pth_filename_save_model + '.pth'
                checkpoint = {
                    "epoch": epoch,
                    "optimizer": optimizer.state_dict(),
                    "model": model.state_dict(),
                    "loss": valid_loss,
                }
                torch.save(checkpoint, target_path)
                best_loss = valid_loss
                best_model = deepcopy(model)  # Keep best model thus far
        except FileNotFoundError as e:
            print(
                "Directory for logging experiments does not exist. Launch script from repository root.")
            raise e
        print("Training loss after {} epochs: {:.6f}".format(epoch, train_loss))
        print("Validation loss after {} epochs: {:.6f}".format(epoch, valid_loss))
    # Return best model
    return best_model
def loop_over_unlabeled_data(data, batch_size):
    """Infinite dataloader for unlabeled data: yields shuffled batches forever."""
    loader = DataLoader(data, batch_size=batch_size, shuffle=True)
    while True:
        yield from loader
def loop_over_labeled_data(data, batch_size):
    """Infinite dataloader for labeled data: yields shuffled batches forever."""
    loader = DataLoader(data, batch_size=batch_size, shuffle=True)
    while True:
        for batch in loader:
            yield batch
def _train_one_epoch_unlabeled(model, train_data, optimizer, batch_size, n_unlabeled_batch, epoch, device, experiment):
    """Train one epoch of unlabeled data (reconstruction objective).

    :param model: CAE model
    :param train_data: train dataset
    :param optimizer: unsupervised optimizer
    :param batch_size: batch size
    :param n_unlabeled_batch: number of unlabeled batches to consume
    :param epoch: epoch number
    :param device: device
    :param experiment: cometml experiment
    :return: the trained model
    """
    model.train()
    criterion = nn.MSELoss(reduction='sum')
    running_loss = 0.0
    n_total = 0.0
    for batch_idx, inputs in enumerate(loop_over_unlabeled_data(train_data, batch_size)):
        # the underlying iterator is infinite; stop after the requested count
        if batch_idx >= n_unlabeled_batch:
            break
        inputs = inputs.to(device)
        optimizer.zero_grad()
        loss = criterion(model(inputs), inputs)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        n_total += len(inputs)
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx, n_unlabeled_batch,
                100. * batch_idx / n_unlabeled_batch, loss.item() / len(inputs)))
    train_loss = running_loss / n_total
    experiment.log_metric("Autoencoder train loss", train_loss, step=epoch)
    print("Autoencoder Training loss after {} epochs: {:.6f}".format(epoch, train_loss))
    return model
def _train_one_epoch_labeled(encoding_model, classifier_model, train_data, optimizer, batch_size, n_labeled_batch, epoch, device, experiment):
    """Train one epoch of labeled data (classification on latent codes).

    :param encoding_model: CAE model
    :param classifier_model: MLP model
    :param train_data: train dataset
    :param optimizer: supervised optimizer
    :param batch_size: batch size
    :param n_labeled_batch: number of labeled batches to consume
    :param epoch: epoch number
    :param device: device
    :param experiment: cometml experiment
    :return: (encoding_model, classifier_model, train_accuracy, train_f1)
    """
    encoding_model.train()
    classifier_model.train()
    criterion = nn.CrossEntropyLoss(reduction="sum")
    running_loss = 0.0
    n_total = 0.0
    pred_labels = []
    true_labels = []
    for batch_idx, (inputs, targets) in enumerate(loop_over_labeled_data(train_data, batch_size)):
        # the underlying iterator is infinite; stop after the requested count
        if batch_idx >= n_labeled_batch:
            break
        inputs = inputs.to(device)
        targets = targets.squeeze().to(device).long()
        optimizer.zero_grad()
        # encode the inputs, then classify the latent representation
        latent = encoding_model.encode(inputs)
        outputs = classifier_model(latent)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        true_labels.append(targets)
        pred_labels.append(torch.argmax(outputs, dim=1))
        running_loss += loss.item()
        n_total += len(inputs)
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx, n_labeled_batch,
                100. * batch_idx / n_labeled_batch, loss.item() / len(inputs)))
    train_loss = running_loss / n_total
    true_labels = torch.cat(true_labels, dim=0).cpu().detach().numpy()
    pred_labels = torch.cat(pred_labels, dim=0).cpu().detach().numpy()
    train_accuracy, train_f1, _ = __compute_metrics(
        true_labels, pred_labels)
    experiment.log_metric("Classifier train loss", train_loss, step=epoch)
    experiment.log_metric('Train accuracy', train_accuracy, step=epoch)
    experiment.log_metric('Train f1-score', train_f1, step=epoch)
    print("Classifier Train loss after {} epochs: {:.6f}".format(epoch, train_loss))
    print("Epoch {}: Supervised Train accuracy {:.3f}| f1-score {:.3f}".format(epoch, train_accuracy, train_f1))
    return encoding_model, classifier_model, train_accuracy, train_f1
def _test_semisupervised(encoding_model, classifier_model, test_loader, epoch, device, experiment):
    """ Validation of semisupervised task: reconstruction + classification.

    :param encoding_model: CAE model
    :param classifier_model: MLP model
    :param test_loader: test data loader
    :param epoch: epoch number
    :param device: device
    :param experiment: cometml experiment
    :return: (true labels, predicted labels, accuracy, f1-score)
    """
    encoding_model.eval()
    classifier_model.eval()
    unsup_criterion = nn.MSELoss(reduction='sum')
    classification_criterion = nn.CrossEntropyLoss(reduction="sum")
    test_unsup_loss = 0.0
    test_sup_loss = 0.0
    pred_labels = []
    true_labels = []
    with torch.no_grad():
        for inputs, targets in test_loader:
            inputs = inputs.to(device)
            targets = targets.squeeze().to(device).long()
            # full autoencoder pass for the reconstruction loss
            reconstruction = encoding_model(inputs)
            # encoder output (latent representation) feeds the classifier
            latent = encoding_model.encode(inputs)
            logits = classifier_model(latent)
            test_unsup_loss += unsup_criterion(reconstruction, inputs).item()
            test_sup_loss += classification_criterion(logits, targets).item()
            true_labels.append(targets)
            pred_labels.append(torch.argmax(logits, dim=1))
    test_unsup_loss /= len(test_loader.dataset)
    test_sup_loss /= len(test_loader.dataset)
    true_labels = torch.cat(true_labels, dim=0).cpu().detach().numpy()
    pred_labels = torch.cat(pred_labels, dim=0).cpu().detach().numpy()
    valid_accuracy, valid_f1, _ = __compute_metrics(
        true_labels, pred_labels)
    experiment.log_metric("Autoencoder Validation loss", test_unsup_loss, step=epoch)
    experiment.log_metric("Supervised Validation loss", test_sup_loss, step=epoch)
    experiment.log_metric('Validation accuracy', valid_accuracy, step=epoch)
    experiment.log_metric('Validation f1-score', valid_f1, step=epoch)
    print("Supervised Validation loss after {} epochs: {:.6f}".format(epoch, test_sup_loss))
    print("Epoch {}: Supervised Validation accuracy {:.3f}| f1-score {:.3f}".format(epoch, valid_accuracy, valid_f1))
    return true_labels, pred_labels, valid_accuracy, valid_f1
def train_semi_supervised_network(encoding_model, classifier_model, train_unlab_data, train_lab_data, valid_loader,
                                  n_epochs, batch_size, lr_unsup, lr_sup, device, n_labeled_batch, n_unlabeled_batch, patience, experiment):
    """Training of semisupervised task.

    Alternates one unsupervised (autoencoder) pass and one supervised
    (classifier) pass per epoch, validates, checkpoints on best validation
    f1-score and early-stops after ``patience`` epochs without improvement.

    :param encoding_model: CAE model
    :param classifier_model: MLP model
    :param train_unlab_data: unlabeled train dataset
    :param train_lab_data: labeled train dataset
    :param valid_loader: validation set loader
    :param n_epochs: no. of epochs
    :param batch_size: batch size
    :param lr_unsup: unsupervised learning rate
    :param lr_sup: supervised learning rate
    :param device: device
    :param n_labeled_batch: number of labeled batches per epoch
    :param n_unlabeled_batch: number of unlabeled batches per epoch
    :param patience: patience for early stopping
    :param experiment: cometml experiment
    """
    best_acc = 0.0
    best_f1 = 0.0
    k = 0  # epochs since last f1 improvement
    key = experiment.get_key()
    # The encoder/embedding are shared by both objectives; the decoder side
    # is only updated by the unsupervised objective.
    param_unsup = [
        {'params': encoding_model.encoder.parameters(), 'lr': lr_unsup},
        {'params': encoding_model.embedding.parameters(), 'lr': lr_unsup},
        {'params': encoding_model.decode_embedding.parameters(), 'lr': lr_unsup},
        {'params': encoding_model.decoder.parameters(), 'lr': lr_unsup}
    ]
    param_sup = [
        {'params': encoding_model.encoder.parameters(), 'lr': lr_sup},
        {'params': encoding_model.embedding.parameters(), 'lr': lr_sup},
        {'params': classifier_model.parameters(), 'lr': lr_sup}
    ]
    optimizer_unsupervised = torch.optim.Adam(param_unsup, lr=lr_unsup)
    optimizer_supervised = torch.optim.Adam(param_sup)
    unsup_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_unsupervised, step_size=10, gamma=0.5)
    sup_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_supervised, step_size=10, gamma=0.5)
    for epoch in range(n_epochs):
        encoding_model = _train_one_epoch_unlabeled(encoding_model, train_unlab_data, optimizer_unsupervised,
                                                    batch_size, n_unlabeled_batch, epoch, device, experiment)
        encoding_model, classifier_model, train_accuracy, train_f1 = _train_one_epoch_labeled(encoding_model, classifier_model, train_lab_data,
                                                                                              optimizer_supervised, batch_size, n_labeled_batch,
                                                                                              epoch, device, experiment)
        _, _, valid_accuracy, valid_f1 = _test_semisupervised(encoding_model, classifier_model,
                                                              valid_loader, epoch, device, experiment)
        # Step the lr schedulers AFTER this epoch's optimizer steps (PyTorch
        # >= 1.1 convention; stepping at the top of the loop skipped the
        # first lr value and triggered a UserWarning).
        unsup_scheduler.step()
        sup_scheduler.step()
        try:
            if valid_f1 > best_f1:
                best_acc = valid_accuracy
                best_f1 = valid_f1
                k = 0
                print("Saving best model....")
                torch.save({
                    "epoch": epoch,
                    "unsup_optimizer": optimizer_unsupervised.state_dict(),
                    "sup_optimizer": optimizer_supervised.state_dict(),
                    "encode_model": encoding_model.state_dict(),
                    "sup_model": classifier_model.state_dict(),
                    "best_acc": valid_accuracy,
                    "best_f1": valid_f1,
                    "train_acc": train_accuracy,
                    "train_f1": train_f1,
                }, "../experiment_models/" + str(key) + '.pth')
            elif k < patience:
                k += 1
            else:
                print("Early stopping......")
                break
        except FileNotFoundError as e:
            print(
                "Directory for logging experiments does not exist. Launch script from repository root.")
            raise e
<file_sep>from comet_ml import OfflineExperiment
import json
import os
import sys
import argparse
import numpy as np
import torch
sys.path.append("../")
from models.clustering import DAMICClustering
from utils.damic_utils import execute_damic_pre_training, execute_damic_training, get_accuracy_f1_scores_from_damic_model
from utils.constants import Constants
def main(datapath, configuration, config_key):
    """Run the full DAMIC pipeline: pre-training, training and evaluation.

    :param datapath: path to the directory containing the samples
    :param configuration: dictionary with all keys/values of the
        ``config_key`` entry of the config file (e.g. DAMIC)
    :param config_key: key of the configuration loaded from the
        configuration file (e.g. DAMIC)
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Reproducibility and offline experiment logging.
    _set_torch_seed(configuration['seed'])
    experiment = _set_comet_experiment(configuration, config_key)

    # 17 clusters — one per target class.
    model = DAMICClustering(17).to(device)

    # Pre-train the autoencoder and the convolutional clustering network.
    model, numpy_unla_train, numpy_unla_target_pred_by_cluster, labeled_train_and_valid = \
        execute_damic_pre_training(datapath, model,
                                   configuration['train_subset'],
                                   configuration['overlapped'],
                                   configuration['autoencoder_pretrain'],
                                   configuration['damic_conv_net_pretrain'],
                                   configuration['damic_autoencoders_pretrain'],
                                   experiment, configuration['seed'])

    # Full DAMIC training on the unlabeled data.
    model = execute_damic_training(model, configuration, numpy_unla_train,
                                   numpy_unla_target_pred_by_cluster,
                                   labeled_train_and_valid, device,
                                   experiment)

    # Final scoring on the labeled train+valid split.
    _, accuracy, f1 = get_accuracy_f1_scores_from_damic_model(
        model, labeled_train_and_valid, device)
    experiment.log_metric('accuracy', accuracy)
    experiment.log_metric('f1-score', f1)
def _set_torch_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def _set_comet_experiment(configuration, config_key):
    """Build an offline Comet experiment named after the config key.

    :param configuration: hyperparameter dictionary logged to the experiment.
    :param config_key: name given to the experiment (e.g. DAMIC).
    :return: the configured OfflineExperiment instance.
    """
    exp = OfflineExperiment(
        project_name='general',
        workspace='benjaminbenoit',
        offline_directory="../damic_comet_experiences")
    exp.set_name(config_key)
    exp.log_parameters(configuration)
    return exp
if __name__ == '__main__':
    # Command-line entry point: pick a dataset folder and a config entry.
    parser = argparse.ArgumentParser()
    parser.add_argument("--datapath", type=str, default=Constants.DATAPATH,
                        help="Path to dataset folder")
    parser.add_argument("--config", type=str, default="DAMIC",
                        help="To select configuration from config.json")
    args = parser.parse_args()

    # Load the requested configuration block from the JSON config file.
    with open(Constants.CONFIG_PATH, 'r') as f:
        configuration = json.load(f)[args.config]

    # Initiate experiment
    main(args.datapath, configuration, args.config)
<file_sep>from comet_ml import OfflineExperiment
import json
import argparse
import torch
import sys
import os
sys.path.append("../")
from models import *
from models.clustering import *
from utils.ali_utils import *
from utils.utils import *
from utils.utils import load_datasets
from utils.constants import Constants
from utils.dataset import HoromaDataset
from models.nrm import NRM
def main(datapath, configs, experiment):
    """Train an NRM model on the Horoma dataset and log the best scores.

    :param datapath: path to the directory containing the samples
    :param configs: dictionary containing hyperparameters for training.
    :param experiment: comet ml experiment object for logging results
    """
    train_split = configs['train_split']
    valid_split = configs['valid_split']
    train_labeled_split = configs['train_labeled_split']

    # Unlabeled training data, labeled training data, labeled validation data.
    train = HoromaDataset(datapath, split=train_split, subset=None,
                          flattened=False)
    labeled = HoromaDataset(datapath, split=train_labeled_split, subset=None,
                            flattened=False)
    valid_data = HoromaDataset(
        datapath, split=valid_split, subset=None, flattened=False)

    train_loader = DataLoader(train, batch_size=configs[
        'batch_size'], shuffle=True)
    labeled_loader = DataLoader(labeled, batch_size=configs[
        'labeled_batch_size'], shuffle=True)
    eval_loader = DataLoader(valid_data, batch_size=configs[
        'labeled_batch_size'], shuffle=True)

    print("Shape of training set: ", train.data.shape)
    print("Shape of validation set: ", valid_data.data.shape)

    # Number of labeled batches per epoch (a float, because of np.floor —
    # TODO confirm train_nrm expects a float here rather than an int).
    n_iterations = np.floor(
        labeled.data.shape[0] / configs['labeled_batch_size'])

    # BUGFIX: was hard-coded to 'cuda', which crashes on CPU-only machines;
    # fall back to CPU like the other training scripts in this project do.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    net = NRM('AllConv13', batch_size=configs['labeled_batch_size'], num_class=17, use_bias=configs['use_bias'], use_bn=configs[
        'use_bn'], do_topdown=configs['do_topdown'], do_pn=configs['do_pn'], do_bnmm=configs['do_bnmm']).to(device)
    net.apply(weights_init)

    best_f1, best_acc, best_model = train_nrm(net, train_loader, labeled_loader, eval_loader, configs[
        'n_epochs'], configs, n_iterations, experiment)

    # Record the best validation metrics and the epoch that produced them.
    experiment.log_metric('best_accuracy', best_acc)
    experiment.log_metric('best_f1-score', best_f1)
    experiment.log_metric('best_model_epoch', best_model)
if __name__ == '__main__':
    # Command-line entry point for NRM training.
    parser = argparse.ArgumentParser()
    parser.add_argument("--datapath", type=str, default=Constants.DATAPATH,
                        help="Path to dataset folder")
    parser.add_argument("--encoder_path", type=str, default=None)
    parser.add_argument("--config", type=str, default="NRM",
                        help="To select configuration from config.json")
    args = parser.parse_args()
    config_key = args.config
    datapath = args.datapath
    path_to_model = args.encoder_path  # NOTE(review): parsed but never used below
    with open(Constants.CONFIG_PATH, 'r') as f:
        configuration = json.load(f)[config_key]

    # Parse configuration file
    seed = configuration['seed']

    # Set all seeds for full reproducibility
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    # NOTE(review): this creates './experiments', but every path below uses
    # '../experiments' — these only coincide if the script is launched from a
    # subdirectory of the repo root. Confirm the intended working directory.
    if not os.path.exists('experiments'):
        print('mkdir ', 'experiments')
        os.mkdir('experiments')

    # Offline Comet experiment; results are written under the experiment name.
    experiment = OfflineExperiment(project_name="ali", workspace='timothynest',  # Replace this with appropriate comet workspace
                                   offline_directory=str('../experiments/' + configuration['experiment']))
    experiment.set_name(
        name=configuration['experiment'])
    experiment.log_parameters(configuration)
    experiment.add_tag(configuration['experiment'])

    # Directory where model checkpoints for this experiment are saved;
    # exposed to main() through the configuration dict.
    MODEL_PATH = '../experiments/' + configuration['experiment'] + '/models'
    if not os.path.exists(MODEL_PATH):
        print('mkdir ', MODEL_PATH)
        os.mkdir(MODEL_PATH)
    configuration['MODEL_PATH'] = MODEL_PATH

    # Initiate experiment
    main(datapath, configuration, experiment)
<file_sep>from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from utils.dataset import HoromaDataset
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    :param y_true: iterable of ground-truth labels.
    :param y_pred: iterable of predicted labels.
    :param classes: array of class names, indexable by integer label.
    :param normalize: if True, each row is divided by its sum.
    :param title: plot title; also used as the output file name '<title>.png'.
    :param cmap: matplotlib colormap for the matrix cells.
    """
    # BUGFIX: this function used `np` and `unique_labels` without importing
    # them (NameError at runtime); both are now imported at module level.
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'

    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        # NOTE(review): a class present only in y_pred yields a zero row and
        # hence NaNs here — confirm that cannot happen for our label sets.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")

    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    # Save and close the figure we just created explicitly, instead of
    # relying on matplotlib's "current figure" global state.
    fig.savefig(title + '.png')
    plt.close(fig)
def plot_historgrams(data, label, str_labels):
    """Plot and save a bar chart of class-label frequencies.

    :param data: iterable of class ids to count.
    :param label: dataset name; used in the title and output file name.
    :param str_labels: human-readable tick labels, one per class id.
    """
    counter = Counter(data)
    print(counter)
    # Re-order the counts by ascending class id (keys are unique, so sorting
    # the (key, value) pairs is equivalent to sorting by key).
    counter = dict(sorted(counter.items()))
    print(counter)
    frequencies = counter.values()
    names = counter.keys()
    print("{} frequencies: {},\n names: {}".format(label, frequencies, names))

    positions = np.arange(len(counter))
    plt.figure()
    plt.bar(positions, frequencies, align='center')
    plt.xticks(positions, str_labels)
    plt.title("Histogram of class labels for " + label + " labeled data")
    plt.xlabel("Class Ids")
    plt.ylabel("Frequency")
    plt.savefig(label + "_hist.png")
    plt.close()
if __name__ == "__main__":
    # NOTE(review): `valid` and `train_labeled` are not defined anywhere in
    # this module — running it as-is raises NameError. They were presumably
    # HoromaDataset instances; construct them before using this block.
    # plot bar graph of frequencies of classes
    plot_historgrams(valid.targets, "Validation", valid.str_labels)

    # Show a random labeled training sample alongside sample 0.
    i = np.random.randint(0, len(train_labeled))
    print(i)
    print(train_labeled[i][0].size(), train_labeled[i][1])
    print(train_labeled[0][0].size(), train_labeled[0][1])
    img = Image.fromarray(
        (255 * train_labeled[i][0]).numpy().astype(np.uint8), 'RGB')
    img.show()
| 27a775292a84a8be1d5f53a04590b59b432ee9b3 | [
"Markdown",
"Python",
"Shell"
] | 30 | Python | swechhasingh/ift6759-horoma | cabf292aafc3d3056c9178fbea5b75ba6a71c4e1 | a90a8ac877a0e8b7a570fd46dfcd922e8b6f71b9 |
refs/heads/master | <repo_name>AdalegGIT/Dec10<file_sep>/reeading.rb
# Tiny interactive calculator sketch (kept for reference):
#   puts " Enter the first number"
#   a = gets.chomp.to_i
#   puts "Enter the second number"
#   b = gets.chomp.to_i
#   puts "What do you want to do?"
#   ans = gets.chomp
#   if ans == "add"
#     puts "Lets do addition \n ----------------------------------"
#     puts "Sum of the numbers is : #{a+b}..!"
#   elsif ans == "subtract"
#     puts "Lets do subtraction \n ----------------------------------"
#     puts "Difference of the numbers is : #{a-b}..!"
#   else
#     puts "Idont know to do what u ask for :("
#   end

# `unless cond` is just `if !cond`; the negated `if` form below prints the
# same message for the same values of a and b.
a = 1
b = 2
if a != b
  puts "The value of a is not equal to the value of b"
end
# Flow-control practice.
#
# if statements:
#   if (some condition is true) then do something awesome,
#   (optionally) otherwise do something else.
#
# Example (day-of-week greeter):
#   puts "enter the day of week"
#   day_of_week = gets.chomp
#   if day_of_week == "Tuesday"
#     puts "Happy Tuesday"
#   elsif day_of_week == "Wednesday"
#     puts "Happy Wednesday"
#   else
#     puts "Have a good day!"
#   end
#
# Looping constructs: for, while, until, .times.
#
# Countdown with while:
#   count = 10
#   while count >= 0
#     puts count
#     count = count - 1
#   end

# Same output as the original `100.times` loop with a hand-maintained
# counter: the message is printed 100 times with count running 1..100.
1.upto(100) do |count|
  puts "hello ...... i m gonna repeat #{count} times "
end

# Iteration
<file_sep>/hello_world.rb
# Practice script: terminal I/O (gets/puts), string interpolation, and
# string <-> integer conversion. The large commented region below is a
# scratchpad of earlier experiments, kept as-is for reference.
#printing a word
puts "Enter you name "
h = gets.chomp
puts "Hi I read your name as #{h} ...."
puts "when are u graduating?"
my_year = gets.chomp.to_i
sum = my_year + 4 # NOTE(review): computed but never used
puts "I graduate in #{my_year + 4}"
puts "Hello world"
puts "Isn't this great?"
task = "10"
taskNum = task.to_i # method to convert string to integer
t = taskNum.to_s # method to convert integer to string
task1 = "Devloper2"
puts t + task1
# apples = 3
# myNewName = "Developer"
# myoldName = "software"
# puts myNewName+myoldName
# task == apples
# a = task + apples
# b = myNewName * 3
# puts b
# puts apples
# puts task
# puts myNewName
# apples = apphello_world.rb:15:inles * 2
# puts apples
# apples = apples + 7
# puts apples
# apples = apples + 2
# puts apples
# apples += 2
# puts apples
# apples = "5 apples"
# apples *= 5
# # apples = apples * 5
# puts apples
# stuff_i_have = "computer"
# puts stuff_i_have
# #stuff_i_have = nil
# puts stuff_i_have
# aNew = 2
# bNew = 4
# c = aNew < bNew
# puts "this is#{myNewName} #{c}my output #{stuff_i_have}"
# weather_is_cold = true
# puts weather_is_cold
# puts '-------'
# puts user_input * 5
# apples_per_stand = gets.chomp.to_i
# # puts apples_per_stand.inspect
# num_apple_stands = 10
# total_apples = apples_per_stand * num_apple_stands
# puts "I have #{total_apples} apples total."
# puts "Don't you wish you had #{total_apples} apples?"
# puts "\n\n----------\n\n"
# temperature = 0
# while temperature < 100
# puts "What's the current temperature? (type 'exit' to stop)"
# user_input = gets.chomp
# if user_input == 'exit'
# break
# end
# temperature = user_input.to_i
# if temperature < 10
# puts "Wear a coat!"
# elsif temperature < 20
# puts "Maybe wear a sweater?"
# else
# puts "You can wear shorts and tshirt."
# end
# end
# weather_is_cold = true
# if weather_is_cold
# puts "The weather is cold."
# end
# while true
# puts "This is the song that never ends."
# end
# if "Hello"
# puts "Hey everyone!"
# end
<file_sep>/README.md
# README
This README would normally document whatever steps are necessary to get the
application up and running.
#Topics to discuss
What is programming?
What is Ruby?
How to run Ruby programs
Basic Data Types
Variables and assignment
Control Structures
#What is Programming?
Programming is the process of solving problems through code.
Different types of programming languages
#Low-level programming languages - C, C++
Generally interact directly with the hardware layer
Requires the developer to manage the resources available on the physical computer through their code
Useful when you have very limited resources or are custom embedded systems
#High-level programming languages - Ruby, Python, Java
Details of the hardware are taken care of by the language so that developers can solve more general problems
The language is more expressive, meaning it allows solutions to be written in terms of "what to accomplish" rather than describing each step of "how to accomplish it"
Useful for creating solutions that work across multiple platforms and when the hardware is not the focus
#What is Ruby?
Created by Yukihiro Matsumoto (Matz) in 1993 (Japan)
First stable version was released in 1995
Syntactically Simple
Truly Object-Oriented
Iterators and Closures
Exception Handling
Garbage Collection
Portable
Dynamic
Ruby interpreter
#Executing Ruby Program
From the command line:
$ ruby my_filename.rb
Irb
You can also run Ruby using the irb command. It stands for "interactive Ruby".Though this won't let you save your inputs, it's a great way to play around with your code.
#Basic Data Types
Number
Integers: whole numbers, e.g. 10
Floats: decimal numbers, e.g 7.2 or 0.2
Arithmetic: +, -, *, /, %
Comparisons: >, <, >=, <=, ==, !=
String
Boolean
"truthiness"
"falsiness"
Nil
#Converting between data types
to_s: to string
to_i: to integer
to_f: to float
!! ahead of a value converts to boolean
#Variables
Meaningful names to variables
Constants
#Output and Input
puts allows us to display information in the terminal to the program's user.
gets.chomp allows us to receive information from the program's user.
#Control Structures
if / elsif / else
While
Comments in ruby #
| 02a9007f34fa660a7a18fe897a14ce2a09f219d6 | [
"Markdown",
"Ruby"
] | 4 | Ruby | AdalegGIT/Dec10 | 5c75ddad0d3f71a9cc73860c624fad405d716336 | e0c07c7c58d4725ab5d3d0c9ffe6d08e8c727cb1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.