file_name stringlengths 6 86 | file_path stringlengths 45 249 | content stringlengths 47 6.26M | file_size int64 47 6.26M | language stringclasses 1 value | extension stringclasses 1 value | repo_name stringclasses 767 values | repo_stars int64 8 14.4k | repo_forks int64 0 1.17k | repo_open_issues int64 0 788 | repo_created_at stringclasses 767 values | repo_pushed_at stringclasses 767 values |
|---|---|---|---|---|---|---|---|---|---|---|---|
BDecoder.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/org/gudy/azureus2/core3/util/BDecoder.java | /*
* BeDecoder.java
*
* Created on May 30, 2003, 2:44 PM
* Copyright (C) 2003, 2004, 2005, 2006 Aelitis, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* AELITIS, SAS au capital de 46,603.30 euros
* 8 Allee Lenotre, La Grille Royale, 78600 Le Mesnil le Roi, France.
*/
package org.gudy.azureus2.core3.util;
import java.util.*;
import java.io.*;
import java.nio.*;
/**
* A set of utility methods to decode a bencoded array of bytes into a Map.
* Integers are represented as Long, Strings as byte[], dictionaries as Map, and lists as List.
*
* @author TdC_VgA
*
*/
public class BDecoder
{
private boolean recovery_mode;
/**
 * Decodes a complete bencoded message held in a byte array.
 * @param data bencoded bytes
 * @return the decoded top-level dictionary
 * @throws IOException if the data is not a valid bencoded map
 */
public static Map decode(byte[] data) throws IOException {
	BDecoder decoder = new BDecoder();
	return decoder.decodeByteArray(data);
}
/**
 * Decodes a bencoded message held in a sub-range of a byte array.
 * @param data backing array
 * @param offset start of the message within data
 * @param length number of bytes the message may occupy
 * @return the decoded top-level dictionary
 * @throws IOException if the range does not hold a valid bencoded map
 */
public static Map decode(byte[] data, int offset, int length) throws IOException {
	BDecoder decoder = new BDecoder();
	return decoder.decodeByteArray(data, offset, length);
}
/**
 * Decodes a bencoded message from a buffered stream using a throwaway
 * decoder instance.
 * @param is stream positioned at the start of the message
 * @return the decoded top-level dictionary
 * @throws IOException if the stream does not hold a valid bencoded map
 */
public static Map decode(BufferedInputStream is) throws IOException {
	BDecoder decoder = new BDecoder();
	return decoder.decodeStream(is);
}
/**
 * Creates a decoder with recovery mode disabled (malformed input raises
 * IOException rather than returning partial structures).
 */
public
BDecoder()
{
}
/**
 * Decodes the full contents of the given array.
 * @param data bencoded bytes
 * @return the decoded top-level dictionary
 * @throws IOException if the data is not a valid bencoded map
 */
public Map decodeByteArray(byte[] data) throws IOException {
	BDecoderInputStreamArray source = new BDecoderInputStreamArray(data);
	return decode(source);
}
/**
 * Decodes a bencoded message from a sub-range of the given array.
 * @param data backing array
 * @param offset start of the message
 * @param length maximum number of bytes to consume
 * @return the decoded top-level dictionary
 * @throws IOException if the range does not hold a valid bencoded map
 */
public Map decodeByteArray(byte[] data, int offset, int length) throws IOException {
	BDecoderInputStreamArray source = new BDecoderInputStreamArray(data, offset, length);
	return decode(source);
}
/**
 * Decodes a bencoded message from a stream.
 * @param data buffered stream positioned at the start of the message
 * @return the decoded top-level dictionary
 * @throws BEncodingException if the stream is empty or the top-level value
 *         is not a dictionary
 * @throws IOException on underlying read failures
 */
public Map decodeStream(BufferedInputStream data) throws IOException {
	// delegate to the common decode path instead of duplicating the
	// null / not-a-Map validation that private decode() already performs
	return decode(new BDecoderInputStreamStream(data));
}
/**
 * Shared decode entry point: parses the source and validates that the
 * top-level value is a non-empty dictionary.
 * @param data wrapped input source
 * @return the decoded top-level dictionary
 * @throws BEncodingException if the input is empty or not a map
 */
private Map decode(BDecoderInputStream data) throws IOException {
	final Object root = decodeInputStream(data, 0);
	if (root == null) {
		throw new BEncodingException("BDecoder: zero length file");
	}
	if (!(root instanceof Map)) {
		throw new BEncodingException("BDecoder: top level isn't a Map");
	}
	return (Map) root;
}
/**
 * Recursively decodes the next bencoded value from the source.
 *
 * Returns a Map for 'd'ictionaries, a List for 'l'ists, a Long for
 * 'i'ntegers, a byte[] for strings, and null when an 'e' terminator or
 * end-of-stream is hit (null is how the dictionary/list loops detect
 * their end).
 *
 * @param dbis input source; must support mark/reset (checked at nesting 0)
 * @param nesting current recursion depth; 0 only for the top-level call
 * @return decoded value, or null on terminator/EOF
 * @throws IOException on malformed input (unless recovery_mode is set, in
 *         which case partially decoded dictionaries/lists are returned)
 */
private Object
decodeInputStream(
BDecoderInputStream dbis,
int nesting )
throws IOException
{
if (nesting == 0 && !dbis.markSupported()) {
throw new IOException("InputStream must support the mark() method");
}
//set a mark
dbis.mark(Integer.MAX_VALUE);
//read a byte
int tempByte = dbis.read();
//decide what to do
switch (tempByte) {
case 'd' :
//create a new dictionary object
Map tempMap = new HashMap();
try{
//get the key
// keys are bencoded strings; a null return means we hit the 'e' that
// terminates this dictionary (or end-of-stream)
byte[] tempByteArray = null;
while ((tempByteArray = (byte[]) decodeInputStream(dbis, nesting+1)) != null) {
//decode some more
Object value = decodeInputStream(dbis,nesting+1);
// value interning is too CPU-intensive, let's skip that for now
//if(value instanceof byte[] && ((byte[])value).length < 17)
//value = StringInterner.internBytes((byte[])value);
// keys often repeat a lot - intern to save space
String key = null;//StringInterner.intern( tempByteArray );
// if ( key == null ){
//
// CharBuffer cb = Constants.BYTE_CHARSET.decode(ByteBuffer.wrap(tempByteArray));
//
// key = new String(cb.array(),0,cb.limit());
//
// key = StringInterner.intern( key );
// }
// NOTE(review): keys are decoded as UTF-8 here; the commented-out code
// above used a byte-preserving charset -- confirm non-UTF-8 keys survive
key = new String(tempByteArray, "UTF-8");
tempMap.put( key, value);
}
/*
if ( tempMap.size() < 8 ){
tempMap = new CompactMap( tempMap );
}*/
// peek one byte (mark/read/reset) purely to detect truncation: EOF here
// while nested means the closing 'e' was missing
dbis.mark(Integer.MAX_VALUE);
tempByte = dbis.read();
dbis.reset();
if ( nesting > 0 && tempByte == -1 ){
throw( new BEncodingException( "BDecoder: invalid input data, 'e' missing from end of dictionary"));
}
}catch( Throwable e ){
// in recovery mode errors are swallowed and the partial map is returned
if ( !recovery_mode ){
if ( e instanceof IOException ){
throw((IOException)e);
}
throw( new IOException( e.toString() ));
}
}
// if (tempMap instanceof HashMap)
// ((HashMap) tempMap).compactify(0.9f);
//return the map
return tempMap;
case 'l' :
//create the list
ArrayList tempList = new ArrayList();
try{
//create the key
// elements accumulate until the terminating 'e' (signalled by null)
Object tempElement = null;
while ((tempElement = decodeInputStream(dbis, nesting+1)) != null) {
//add the element
tempList.add(tempElement);
}
tempList.trimToSize();
// same truncation probe as for dictionaries
dbis.mark(Integer.MAX_VALUE);
tempByte = dbis.read();
dbis.reset();
if ( nesting > 0 && tempByte == -1 ){
throw( new BEncodingException( "BDecoder: invalid input data, 'e' missing from end of list"));
}
}catch( Throwable e ){
// recovery mode: return whatever elements decoded successfully
if ( !recovery_mode ){
if ( e instanceof IOException ){
throw((IOException)e);
}
throw new IOException(e.toString());
}
}
//return the list
return tempList;
case 'e' :
case -1 :
// terminator or end-of-stream: callers use null to end d/l loops
return null;
case 'i' :
// integer: digits up to the 'e' terminator
return new Long(getNumberFromStream(dbis, 'e'));
case '0' :
case '1' :
case '2' :
case '3' :
case '4' :
case '5' :
case '6' :
case '7' :
case '8' :
case '9' :
//move back one
// a digit starts a "<length>:<bytes>" string; rewind so the length
// parser sees the first digit again
dbis.reset();
//get the string
return getByteArrayFromStream(dbis);
default :{
// unknown type marker: grab up to 256 bytes of the remainder purely
// for the diagnostic message
int rem_len = dbis.available();
if ( rem_len > 256 ){
rem_len = 256;
}
byte[] rem_data = new byte[rem_len];
// NOTE(review): read() return value ignored -- rem_data may be only
// partially filled; also the message lacks a closing quote after the
// command byte
dbis.read( rem_data );
throw( new BEncodingException(
"BDecoder: unknown command '" + tempByte + ", remainder = " + new String( rem_data )));
}
}
}
/*
private long getNumberFromStream(InputStream dbis, char parseChar) throws IOException {
StringBuffer sb = new StringBuffer(3);
int tempByte = dbis.read();
while ((tempByte != parseChar) && (tempByte >= 0)) {
sb.append((char)tempByte);
tempByte = dbis.read();
}
//are we at the end of the stream?
if (tempByte < 0) {
return -1;
}
String str = sb.toString();
// support some borked impls that sometimes don't bother encoding anything
if ( str.length() == 0 ){
return( 0 );
}
return Long.parseLong(str);
}
*/
/**
 * Reads base-10 characters up to (and consuming) the given terminator.
 * Returns -1 if the stream ends first, and 0 for an empty number (some
 * broken encoders emit nothing between 'i' and 'e').
 * @param dbis input source
 * @param parseChar terminator character, e.g. 'e' or ':'
 * @return parsed value; -1 on end-of-stream; 0 on empty number
 * @throws IOException from the underlying read
 * @throws NumberFormatException if 32 or more characters accumulate
 */
private long getNumberFromStream(BDecoderInputStream dbis, char parseChar) throws IOException {
	final char[] digits = new char[32];
	int len = 0;
	int b = dbis.read();
	while (b != parseChar && b >= 0) {
		digits[len++] = (char) b;
		if (len == digits.length) {
			throw new NumberFormatException("Number too large: " + new String(digits, 0, len) + "...");
		}
		b = dbis.read();
	}
	if (b < 0) {
		// hit end-of-stream before the terminator
		return -1;
	}
	if (len == 0) {
		// tolerate borked encoders that emit no digits at all
		return 0;
	}
	return parseLong(digits, 0, len);
}
/**
 * Parses a (possibly negative) base-10 long from a char range, mirroring
 * Long.parseLong without creating an intermediate String.
 *
 * The magnitude is accumulated as a NEGATIVE number and flipped at the
 * end: Long.MIN_VALUE has no positive counterpart, so accumulating
 * positively would overflow on it.
 *
 * @param chars source characters
 * @param start index of the first character of the number
 * @param length number of characters to parse
 * @return the parsed value
 * @throws NumberFormatException on empty input, a lone "-", a non-digit
 *         character, or overflow past Long.MIN_VALUE / Long.MAX_VALUE
 */
public static long
parseLong(
char[] chars,
int start,
int length )
{
long result = 0;
boolean negative = false;
int i = start;
int max = start + length;
long limit;
if ( length > 0 ){
if ( chars[i] == '-' ){
negative = true;
// negative numbers may reach all the way down to MIN_VALUE
limit = Long.MIN_VALUE;
i++;
}else{
// positive numbers are bounded by -MAX_VALUE in negated form
limit = -Long.MAX_VALUE;
}
if ( i < max ){
int digit = chars[i++] - '0';
if ( digit < 0 || digit > 9 ){
throw new NumberFormatException(new String(chars,start,length));
}else{
result = -digit;
}
}
// largest (most negative) value that can still be multiplied by 10
long multmin = limit / 10;
while ( i < max ){
// Accumulating negatively avoids surprises near MAX_VALUE
int digit = chars[i++] - '0';
if ( digit < 0 || digit > 9 ){
throw new NumberFormatException(new String(chars,start,length));
}
if ( result < multmin ){
// result * 10 would overflow
throw new NumberFormatException(new String(chars,start,length));
}
result *= 10;
if ( result < limit + digit ){
// subtracting the digit would overflow
throw new NumberFormatException(new String(chars,start,length));
}
result -= digit;
}
}else{
throw new NumberFormatException(new String(chars,start,length));
}
if ( negative ){
if ( i > start+1 ){
return result;
}else{ /* Only got "-" */
throw new NumberFormatException(new String(chars,start,length));
}
}else{
return -result;
}
}
// This one causes lots of "Query Information" calls to the filesystem
/*
private long getNumberFromStreamOld(InputStream dbis, char parseChar) throws IOException {
int length = 0;
//place a mark
dbis.mark(Integer.MAX_VALUE);
int tempByte = dbis.read();
while ((tempByte != parseChar) && (tempByte >= 0)) {
tempByte = dbis.read();
length++;
}
//are we at the end of the stream?
if (tempByte < 0) {
return -1;
}
//reset the mark
dbis.reset();
//get the length
byte[] tempArray = new byte[length];
int count = 0;
int len = 0;
//get the string
while (count != length && (len = dbis.read(tempArray, count, length - count)) > 0) {
count += len;
}
//jump ahead in the stream to compensate for the :
dbis.skip(1);
//return the value
CharBuffer cb = Constants.DEFAULT_CHARSET.decode(ByteBuffer.wrap(tempArray));
String str_value = new String(cb.array(),0,cb.limit());
return Long.parseLong(str_value);
}
*/
/**
 * Reads a bencoded string ("&lt;length&gt;:&lt;bytes&gt;") and returns its
 * raw bytes.
 * @param dbis input source positioned at the first length digit
 * @return the string's bytes, or null if the stream ended before the length
 * @throws IOException if the declared length exceeds 8MB or the data is
 *         truncated
 */
private byte[] getByteArrayFromStream(BDecoderInputStream dbis) throws IOException {
	final int length = (int) getNumberFromStream(dbis, ':');
	if (length < 0) {
		// end-of-stream while reading the length
		return null;
	}
	// note that torrent hashes can be big (consider a 55GB file with 2MB
	// pieces: that generates a pieces hash of half a meg) -- but cap the
	// allocation to defend against corrupt input
	if (length > 8 * 1024 * 1024) {
		throw new IOException("Byte array length too large (" + length + ")");
	}
	final byte[] result = new byte[length];
	int offset = 0;
	while (offset < length) {
		final int read = dbis.read(result, offset, length - offset);
		if (read <= 0) {
			break;
		}
		offset += read;
	}
	if (offset != result.length) {
		throw new IOException("BDecoder::getByteArrayFromStream: truncated");
	}
	return result;
}
/**
 * Enables or disables recovery mode. When enabled, errors hit while
 * decoding a dictionary or list are swallowed and the partially decoded
 * structure is returned instead of an IOException being thrown.
 * @param r true to enable recovery mode
 */
public void
setRecoveryMode(
boolean r )
{
recovery_mode = r;
}
/**
 * Writes a human-readable rendering of a decoded bencode structure.
 * @param writer destination
 * @param obj decoded value (Long, byte[], String, List or Map)
 */
public static void print(PrintWriter writer, Object obj) {
	print(writer, obj, "", false);
}
/**
 * Recursive pretty-printer for decoded bencode values.
 * @param writer destination
 * @param obj value: Long, byte[], String, List, or (fallback) Map
 * @param indent current indentation prefix
 * @param skip_indent true when the caller already wrote a prefix on this line
 */
private static void print(PrintWriter writer, Object obj, String indent, boolean skip_indent) {
	String use_indent = skip_indent ? "" : indent;
	if (obj instanceof Long) {
		writer.println(use_indent + obj);
	} else if (obj instanceof byte[]) {
		byte[] b = (byte[]) obj;
		if (b.length == 20) {
			// 20 bytes is almost certainly a SHA-1 hash -- hex-dump it
			writer.println(use_indent + " { " + ByteFormatter.nicePrint(b) + " }");
		} else if (b.length < 64) {
			// fixed: indentation prefix was dropped on this branch
			writer.println(use_indent + new String(b));
		} else {
			// fixed: the closing ']' was missing from this summary line
			writer.println(use_indent + "[byte array length " + b.length + "]");
		}
	} else if (obj instanceof String) {
		writer.println(use_indent + obj);
	} else if (obj instanceof List) {
		List l = (List) obj;
		writer.println(use_indent + "[");
		for (int i = 0; i < l.size(); i++) {
			writer.print(indent + " (" + i + ") ");
			print(writer, l.get(i), indent + " ", true);
		}
		writer.println(indent + "]");
	} else {
		Map m = (Map) obj;
		// iterate entries directly instead of keySet + get-per-key
		Iterator it = m.entrySet().iterator();
		while (it.hasNext()) {
			Map.Entry entry = (Map.Entry) it.next();
			String key = (String) entry.getKey();
			if (key.length() > 256) {
				// truncate absurdly long keys in the dump
				writer.print(indent + key.substring(0, 256) + "... = ");
			} else {
				writer.print(indent + key + " = ");
			}
			print(writer, entry.getValue(), indent + " ", true);
		}
	}
}
/**
 * Recursively converts every byte[] value in the map (and in any nested
 * maps/lists) into a UTF-8 String, in place.
 * @param map map to convert; may be null
 * @return the same map instance, or null if null was passed
 */
public static Map decodeStrings(Map map) {
	if (map == null) {
		return null;
	}
	for (Iterator it = map.entrySet().iterator(); it.hasNext();) {
		final Map.Entry entry = (Map.Entry) it.next();
		final Object value = entry.getValue();
		if (value instanceof byte[]) {
			try {
				entry.setValue(new String((byte[]) value, "UTF-8"));
			} catch (Throwable e) {
				e.printStackTrace();
			}
		} else if (value instanceof Map) {
			decodeStrings((Map) value);
		} else if (value instanceof List) {
			decodeStrings((List) value);
		}
	}
	return map;
}
/**
 * Recursively converts every byte[] element of the list (and of nested
 * maps/lists) into a UTF-8 String, in place.
 * @param list list to convert; may be null
 * @return the same list instance, or null if null was passed
 */
public static List decodeStrings(List list) {
	if (list == null) {
		return null;
	}
	for (int i = 0; i < list.size(); i++) {
		final Object value = list.get(i);
		if (value instanceof byte[]) {
			try {
				list.set(i, new String((byte[]) value, "UTF-8"));
			} catch (Throwable e) {
				e.printStackTrace();
			}
		} else if (value instanceof Map) {
			decodeStrings((Map) value);
		} else if (value instanceof List) {
			decodeStrings((List) value);
		}
	}
	return list;
}
/**
 * Decodes a bencoded file and writes a human-readable rendering of it to
 * the output file. Errors are printed, not thrown (debug utility).
 * @param f bencoded input file
 * @param output text file to (over)write
 */
private static void print(File f, File output) {
	try {
		BDecoder decoder = new BDecoder();
		decoder.setRecoveryMode(false);
		BufferedInputStream bis = new BufferedInputStream(new FileInputStream(f));
		try {
			PrintWriter pw = new PrintWriter(new FileWriter(output));
			try {
				print(pw, decoder.decodeStream(bis));
				pw.flush();
			} finally {
				// previously neither handle was closed -- leaked descriptors
				pw.close();
			}
		} finally {
			bis.close();
		}
	} catch (Throwable e) {
		e.printStackTrace();
	}
}
/**
 * Minimal InputStream-like abstraction so the decoder can run over either
 * a real stream or an in-memory byte array. Mirrors the subset of
 * java.io.InputStream the decoder actually uses.
 */
private interface
BDecoderInputStream
{
// reads one byte, or returns -1 at end of input
public int
read()
throws IOException;
// bulk read into buffer; returns bytes read, or -1 at end of input
public int
read(
byte[] buffer )
throws IOException;
// bulk read into a sub-range; returns bytes read, or -1 at end of input
public int
read(
byte[] buffer,
int offset,
int length )
throws IOException;
// bytes remaining that can be read without blocking
public int
available()
throws IOException;
// whether mark()/reset() are usable on this source
public boolean
markSupported();
// remembers the current position; reset() rewinds to it
public void
mark(
int limit );
// rewinds to the position remembered by the last mark()
public void
reset()
throws IOException;
}
private class
BDecoderInputStreamStream
implements BDecoderInputStream
{
final private BufferedInputStream is;
private
BDecoderInputStreamStream(
BufferedInputStream _is )
{
is = _is;
}
public int
read()
throws IOException
{
return( is.read());
}
public int
read(
byte[] buffer )
throws IOException
{
return( is.read( buffer ));
}
public int
read(
byte[] buffer,
int offset,
int length )
throws IOException
{
return( is.read( buffer, offset, length ));
}
public int
available()
throws IOException
{
return( is.available());
}
public boolean
markSupported()
{
return( is.markSupported());
}
public void
mark(
int limit )
{
is.mark( limit );
}
public void
reset()
throws IOException
{
is.reset();
}
}
private class
BDecoderInputStreamArray
implements BDecoderInputStream
{
final private byte[] buffer;
final private int count;
private int pos;
private int mark;
private
BDecoderInputStreamArray(
byte[] _buffer )
{
buffer = _buffer;
count = buffer.length;
}
private
BDecoderInputStreamArray(
byte[] _buffer,
int _offset,
int _length )
{
buffer = _buffer;
pos = _offset;
count = Math.min( _offset + _length, _buffer.length );
mark = _offset;
}
public int
read()
throws IOException
{
return (pos < count) ? (buffer[pos++] & 0xff) : -1;
}
public int
read(
byte[] buffer )
throws IOException
{
return( read( buffer, 0, buffer.length ));
}
public int
read(
byte[] b,
int offset,
int length )
throws IOException
{
if ( pos >= count ){
return( -1 );
}
if ( pos + length > count ){
length = count - pos;
}
if (length <= 0){
return( 0 );
}
System.arraycopy(buffer, pos, b, offset, length);
pos += length;
return( length );
}
public int
available()
throws IOException
{
return( count - pos );
}
public boolean
markSupported()
{
return( true );
}
public void
mark(
int limit )
{
mark = pos;
}
public void
reset()
throws IOException
{
pos = mark;
}
}
/**
 * Ad-hoc manual test: decodes a torrent file and dumps a readable
 * rendering of it. NOTE(review): paths are hard-coded Windows temp
 * locations and will fail on any other machine.
 */
public static void
main(
String[] args )
{
print( new File( "C:\\Temp\\xxx.torrent" ),
new File( "C:\\Temp\\xxx.txt" ));
}
}
| 16,591 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
SHA1Hasher.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/org/gudy/azureus2/core3/util/SHA1Hasher.java | /*
* Created on Apr 13, 2004
* Created by Alon Rohter
* Copyright (C) 2004, 2005, 2006 Aelitis, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* AELITIS, SAS au capital de 46,603.30 euros
* 8 Allee Lenotre, La Grille Royale, 78600 Le Mesnil le Roi, France.
*
*/
package org.gudy.azureus2.core3.util;
import java.nio.ByteBuffer;
/**
 * Thin convenience front-end for the {@link SHA1} digest implementation,
 * supporting one-shot hashing, incremental update/digest, and
 * checkpointing of the intermediate hash state via save/restore.
 */
public final class SHA1Hasher {
	private final SHA1 sha1;

	/** Creates a hasher with a fresh SHA-1 state. */
	public SHA1Hasher() {
		sha1 = new SHA1();
	}

	/**
	 * One-shot hash of an entire byte array.
	 * @param bytes data to hash
	 * @return 20-byte SHA-1 digest
	 */
	public byte[] calculateHash(byte[] bytes) {
		return calculateHash(ByteBuffer.wrap(bytes));
	}

	/**
	 * One-shot hash of a buffer's remaining bytes; resets any pending
	 * incremental state first.
	 * @param buffer data to hash
	 * @return 20-byte SHA-1 digest
	 */
	public byte[] calculateHash(ByteBuffer buffer) {
		sha1.reset();
		return sha1.digest(buffer);
	}

	/**
	 * Feeds a whole array into the running hash.
	 * @param data input bytes
	 */
	public void update(byte[] data) {
		update(ByteBuffer.wrap(data));
	}

	/**
	 * Feeds a sub-range of an array into the running hash.
	 * @param data input bytes
	 * @param pos first index to include
	 * @param len number of bytes to include
	 */
	public void update(byte[] data, int pos, int len) {
		update(ByteBuffer.wrap(data, pos, len));
	}

	/**
	 * Feeds a buffer's remaining bytes into the running hash.
	 * @param buffer input data
	 */
	public void update(ByteBuffer buffer) {
		sha1.update(buffer);
	}

	/**
	 * Completes the incremental calculation.
	 * @return 20-byte SHA-1 digest
	 */
	public byte[] getDigest() {
		return sha1.digest();
	}

	/** Resets the running hash to its initial state. */
	public void reset() {
		sha1.reset();
	}

	/** Checkpoints the current hash state for a later restore. */
	public void saveHashState() {
		sha1.saveState();
	}

	/** Restores the hash state captured by the last save. */
	public void restoreHashState() {
		sha1.restoreState();
	}
}
| 2,918 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
BEncodingException.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/org/gudy/azureus2/core3/util/BEncodingException.java | /**
* Copyright (C) 2007 Aelitis, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* AELITIS, SAS au capital de 63.529,40 euros
* 8 Allee Lenotre, La Grille Royale, 78600 Le Mesnil le Roi, France.
*
*/
package org.gudy.azureus2.core3.util;
import java.io.IOException;
/**
* @author Aaron Grunthal
* @create 09.08.2007
*
* Thrown when the BEncoder/BDecoder stumbles upon any internal error, such as malformed input.
* A plain IOException is used if the encountered problem is of an external nature (mostly IOExceptions themselves).
*/
/**
 * Thrown when the BEncoder/BDecoder encounters malformed bencoded data or
 * another internal decoding error; plain IOException is reserved for
 * genuinely external I/O failures.
 */
public class BEncodingException extends IOException {

	// IOException is Serializable; without an explicit id the serial
	// version is compiler-dependent
	private static final long serialVersionUID = 1L;

	/** Creates an exception with no detail message. */
	public BEncodingException() {
	}

	/**
	 * Creates an exception with a detail message.
	 * @param message description of the decoding problem
	 */
	public BEncodingException(String message) {
		super(message);
	}
}
| 1,385 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
CryptoUtils.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/test/CryptoUtils.java | package edu.washington.cs.oneswarm.community2.test;
import java.security.Key;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.Signature;
import com.sun.org.apache.xerces.internal.impl.dv.util.Base64;
/**
 * Small JCA helper for the test drivers: generates 1024-bit RSA key pairs
 * and base64-encodes keys for transport.
 */
public class CryptoUtils {

	private KeyPairGenerator generator;
	// private Signature signer = null;
	// private MessageDigest digest = null;

	/** RSA modulus size, in bits, for generated pairs. */
	public static final int KEY_SIZE_BITS = 1024;

	/**
	 * Initializes the RSA key pair generator.
	 * @throws IllegalStateException if the JRE has no RSA provider (every
	 *         conforming JRE must ship one); previously this case was
	 *         swallowed and fell through to an NPE on initialize()
	 */
	public CryptoUtils() {
		try {
			generator = KeyPairGenerator.getInstance("RSA");
		} catch (NoSuchAlgorithmException e1) {
			throw new IllegalStateException("RSA KeyPairGenerator unavailable", e1);
		}
		generator.initialize(KEY_SIZE_BITS);
	}

	/**
	 * Generates a fresh RSA key pair.
	 * @return new 1024-bit key pair
	 */
	public KeyPair getPair() {
		return generator.generateKeyPair();
	}

	/**
	 * Encodes a key's encoded form as single-line base64.
	 *
	 * Uses java.util.Base64 (fully qualified to avoid clashing with the
	 * legacy com.sun xerces Base64 imported elsewhere in this file, which
	 * is inaccessible under JDK 9+ encapsulation); the basic encoder emits
	 * no line breaks, matching the old encode(...).replaceAll("\n","").
	 *
	 * @param inKey key to encode
	 * @return single-line base64 of inKey.getEncoded()
	 */
	public static String getBase64FromKey(Key inKey) {
		return java.util.Base64.getEncoder().encodeToString(inKey.getEncoded());
	}
}
| 1,013 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
TestDB.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/test/TestDB.java | package edu.washington.cs.oneswarm.community2.test;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.logging.LogManager;
import com.sun.org.apache.xerces.internal.impl.dv.util.Base64;
import edu.washington.cs.oneswarm.community2.server.CommunityDAO;
import edu.washington.cs.oneswarm.community2.shared.KeyRegistrationRecord;
/**
 * Manual/scratch test driver for CommunityDAO: rebuilds the schema,
 * registers a user, and provides helpers for bulk random inserts and
 * deletes. Most of run() is commented-out benchmarking scaffolding kept
 * for reference.
 */
public class TestDB {
// DAO under test; obtained from the CommunityDAO singleton in run()
private CommunityDAO dao;
/**
 * Drops and recreates all tables, then registers a single admin user.
 * @throws Exception propagated from DAO/database operations
 */
public void run() throws Exception {
dao = CommunityDAO.get();
dao.drop_tables();
dao.create_tables();
// \u00a9 (copyright sign) exercises non-ASCII nicknames
dao.registerUser("abc123", "\u00a9stwtf?", "1.2.3.4", "admin");
////
// Runnable r = new Runnable() {
// public void run() {
// try {
// random_inserts(dao, 2000);
// } catch( Exception e ) {
// e.printStackTrace();
// }
// }
// };
// (new Thread(r)).start();
// (new Thread(r)).start();
// FriendRecord [] peers = dao.getRegisteredKeys();
// Random r = new Random();
// long start = System.currentTimeMillis();
// CryptoUtils c = new CryptoUtils();
// CommunityDAO db = CommunityDAO.get();
//
// List<String> keys = new LinkedList<String>();
// for( int q=0; q<100; q++ ) {
// if( (q%100) == 0 ) {
// System.out.println(q + " keys");
// }
//
// keys.add( Base64.encode(c.getPair().getPublic().getEncoded()).replaceAll("\n","") );
// }
//
// start = System.currentTimeMillis();
// Random rand = new Random();
// while( keys.size() > 0 ) {
//
// if( rand.nextDouble() < 0.75 ) {
// String k = keys.remove(0);
// String nick = "r-" + k.hashCode();
// db.registerUser(k, nick, "127.0.0.1", "admin");
// dao.getPeers(k);
// } else {
// FriendRecord [] registered = dao.getRegisteredKeys();
// if( registered.length > 0 ) {
// dao.deregisterKey(registered[rand.nextInt(registered.length)].getBase64PublicKey());
// }
// }
//
// }
//
// System.out.println("total db ops: " + (System.currentTimeMillis()-start));
// PrintStream fout = new PrintStream(new FileOutputStream("/tmp/topotest"));
// CommunityDAO.get().dumpTable("topology", fout, "\n");
}
/**
 * Generates howmany RSA key pairs and registers each one as a user with a
 * synthetic nickname, logging progress along the way.
 * @param db DAO to insert into
 * @param howmany number of keys to generate and register
 * @return number of users actually registered
 * @throws Exception propagated from key generation or DB inserts
 */
public static int random_inserts( CommunityDAO db, int howmany ) throws Exception {
CryptoUtils c = new CryptoUtils();
List<String> keys = new ArrayList<String>();
long start = System.currentTimeMillis();
for( int i=0; i<howmany; i++ ) {
// strip the line breaks the xerces encoder inserts every 76 chars
keys.add(Base64.encode(c.getPair().getPublic().getEncoded()).replaceAll("\n",""));
if( (i % 10) == 0 ) {
System.out.println("key generation: " + i + " / " +howmany);
}
}
System.out.println("key generation took: " + (System.currentTimeMillis() - start)/1000);
start = System.currentTimeMillis();
int registered = 0;
for( int kItr=0; kItr<keys.size(); kItr++ ) {
String k = keys.get(kItr);
String nick = "rand-" + k.hashCode();
db.registerUser(k, nick, "127.0.0.1", "admin");
registered++;
if( (kItr % 100) == 0 ) {
System.out.println("inserts: " + kItr + " / " + keys.size());
}
}
System.out.println("db inserts: " + (System.currentTimeMillis()-start)/1000);
return registered;
}
/**
 * Entry point: loads ./logging.properties if present (falling back to
 * default logging) and runs the schema-rebuild test.
 */
public static final void main( String [] args ) throws Exception {
try {
LogManager.getLogManager().readConfiguration(new FileInputStream("./logging.properties"));
System.out.println("read log configuration");
} catch( Exception e ) {
System.err.println("error reading log config: " + e.toString());
}
(new TestDB()).run();
}
/**
 * Deregisters up to howmany randomly chosen registered keys.
 * @param dao DAO to delete from
 * @param howmany maximum number of keys to remove
 * @return number of keys successfully deregistered
 */
public static int random_deletes(CommunityDAO dao, int howmany) {
KeyRegistrationRecord [] recs = dao.getRegisteredKeys();
// shuffling the asList view shuffles the backing array in place
Collections.shuffle(Arrays.asList(recs));
int removed = 0;
for( int i=0; i<howmany && i<recs.length; i++ ) {
try {
dao.deregisterKey(recs[i].getBase64PublicKey());
removed++;
}catch( Exception e ) {
e.printStackTrace();
}
}
return removed;
}
}
| 4,041 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
TestRegistration.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/test/TestRegistration.java | package edu.washington.cs.oneswarm.community2.test;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStreamReader;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStreamWriter;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.security.KeyPair;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import com.sun.org.apache.xerces.internal.impl.dv.util.Base64;
import edu.washington.cs.oneswarm.community2.shared.CommunityConstants;
/**
 * Manual end-to-end test: POSTs a key-registration request to a locally
 * running community server and prints the response.
 * NOTE(review): relies on the JDK-internal xerces Base64 and a hard-coded
 * localhost URL -- test scaffolding only.
 */
public class TestRegistration {
// key pair whose public half gets registered
private KeyPair pair;
public TestRegistration( KeyPair pair ) {
this.pair = pair;
}
/**
 * Sends the registration POST (form-encoded public key + nickname) and
 * echoes every response line plus the final status code to stdout.
 * Failures are printed, not thrown.
 */
public void run() {
try {
Map<String, String> requestHeaders = new HashMap<String, String>();
Map<String, String> formParams = new HashMap<String, String>();
formParams.put(CommunityConstants.BASE64_PUBLIC_KEY, Base64.encode(pair.getPublic().getEncoded()));
formParams.put(CommunityConstants.NICKNAME, "test user");
URL url = new URL("http://127.0.0.1:8080/community");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("POST");
conn.setDoInput(true);
conn.setDoOutput(true);
for (String head : requestHeaders.keySet()) {
conn.setRequestProperty(head, requestHeaders.get(head));
}
// add url form parameters
OutputStreamWriter out = new OutputStreamWriter(conn.getOutputStream());
Iterator<String> params = formParams.keySet().iterator();
while (params.hasNext()) {
String name = params.next();
String value = formParams.get(name);
out.append(URLEncoder.encode(name, "UTF-8") + "=" + URLEncoder.encode(value, "UTF-8"));
if (params.hasNext()) {
out.append("&");
}
}
// NOTE(review): out is flushed but never closed; relies on connection
// teardown to release the stream
out.flush();
BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
String line = null;
while ((line = in.readLine()) != null) {
System.out.println("resp line: " + line);
}
in.close();
System.out.println("final status code: " + conn.getResponseCode() + " / " + conn.getResponseMessage());
} catch (Exception e) {
e.printStackTrace();
}
}
/**
 * Loads a cached key pair from /tmp/keys.scratch, generating and caching
 * a fresh one on any failure, then runs the registration test.
 */
public static void main(String[] args) throws Exception {
KeyPair pair = null;
try {
// NOTE(review): native deserialization of a world-writable /tmp file
// -- acceptable for a local test, never for untrusted input
pair = (KeyPair) (new ObjectInputStream(new FileInputStream("/tmp/keys.scratch"))).readObject();
System.out.println("loaded saved key pair");
} catch( Exception e ) {
CryptoUtils c = new CryptoUtils();
pair = c.getPair();
System.out.println(Base64.encode(pair.getPublic().getEncoded()));
ObjectOutputStream saved = new ObjectOutputStream(new FileOutputStream("/tmp/keys.scratch"));
saved.writeObject(pair);
System.out.println("generated/saved key pair");
}
System.out.println("pub");
System.out.println(Base64.encode(pair.getPublic().getEncoded()));
(new TestRegistration(pair)).run();
}
}
| 3,003 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
CDF.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/test/CDF.java | package edu.washington.cs.oneswarm.community2.test;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Accumulates numeric samples and writes their empirical CDF to
 * /tmp/cdf-&lt;name&gt; as "fraction value" lines for plotting.
 */
public class CDF {

	// label used in the output filename (/tmp/cdf-<name>)
	private final String name;

	// collected samples; guarded by this instance's monitor
	List<Comparable> vals = new ArrayList<Comparable>();

	public CDF(String name) {
		this.name = name;
	}

	/**
	 * Records one sample; its toString() must parse as a double.
	 * @param n sample value
	 */
	public synchronized void addValue(Comparable n) {
		vals.add(n);
	}

	/**
	 * Sorts the samples and writes the CDF file. Now synchronized so it
	 * cannot race with concurrent addValue() calls (addValue was already
	 * synchronized, draw was not), and the stream is closed on all paths
	 * (it was previously only flushed, leaking the handle).
	 */
	public synchronized void draw() {
		Collections.sort(vals);
		PrintStream out = null;
		try {
			out = new PrintStream(new FileOutputStream("/tmp/cdf-" + name));
			for (int i = 0; i < vals.size(); i++) {
				out.format("%3.16f %3.16f\n", (double) i / (double) vals.size(), Double.parseDouble(vals.get(i).toString()));
			}
			out.flush();
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			if (out != null) {
				out.close();
			}
		}
	}
}
| 866 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
TestEmbeddedServer.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/test/TestEmbeddedServer.java | package edu.washington.cs.oneswarm.community2.test;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStreamWriter;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.security.KeyManagementException;
import java.security.KeyPair;
import java.security.NoSuchAlgorithmException;
import java.security.Signature;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
import com.sun.org.apache.xerces.internal.impl.dv.util.Base64;
import edu.washington.cs.oneswarm.community2.shared.CommunityConstants;
import edu.washington.cs.oneswarm.community2.utils.ByteManip;
/**
 * Trivial millisecond stopwatch: lap() returns the time since the last
 * start()/lap() and restarts the clock.
 */
class StopWatch {

	// millisecond timestamp of the most recent start()/lap()
	long start;

	public StopWatch() {
		start();
	}

	/** Restarts the clock at the current time. */
	public void start() {
		start = System.currentTimeMillis();
	}

	/**
	 * Returns elapsed milliseconds since the last start()/lap(), then
	 * restarts the clock.
	 * @param task label (used only by the commented-out debug print)
	 * @return elapsed milliseconds
	 */
	public long lap(String task) {
		final long elapsed = System.currentTimeMillis() - start;
		// System.out.println(task + ":: " + elapsed);
		start();
		return elapsed;
	}
}
/**
 * X509TrustManager that accepts ALL certificates without any validation.
 *
 * SECURITY: this disables TLS peer authentication entirely, leaving
 * connections open to man-in-the-middle attacks. Only acceptable here
 * because it is test scaffolding talking to a local server with a
 * self-signed certificate -- never use in production code.
 */
class LiberalTrustManager implements X509TrustManager
{
public LiberalTrustManager() {}
// trust every client certificate unconditionally
public void checkClientTrusted(X509Certificate[] certs, String authType) {}
// trust every server certificate unconditionally
public void checkServerTrusted(X509Certificate[] certs, String authType) {}
// no trusted issuers are advertised
public X509Certificate[] getAcceptedIssuers() {
return new java.security.cert.X509Certificate[0];
}
}
/**
 * Load/benchmark driver for the community server: registers a batch of
 * generated RSA key pairs, then benchmarks challenge/response "refresh"
 * requests, recording connection and I/O latencies into CDFs.
 */
public class TestEmbeddedServer {
	private int port;
	private String host;
	private String community_url;
	List<KeyPair> generatedKeys = new LinkedList<KeyPair>();
	private ExecutorService threadPool;
	private SSLContext sslcontext;

	/** File used to cache the generated key pairs between runs. */
	public static final String SCRATCH_PATH = "scratch_keys";

	public TestEmbeddedServer( String host, int port ) {
		this.host = host;
		this.port = port;
		this.community_url = host + ":" + port + "/community";
		getKeys();
		// Trust-everything manager: the test server uses a self-signed cert.
		TrustManager[] osTrustManager = new TrustManager[] {
			new LiberalTrustManager()
		};
		try {
			sslcontext = SSLContext.getInstance("SSL");
			sslcontext.init(null, osTrustManager, null);
		} catch (NoSuchAlgorithmException e) {
			System.err.println(e);
			e.printStackTrace();
		} catch (KeyManagementException e) {
			System.err.println(e);
			e.printStackTrace();
		}
		// 300 worker threads so many registrations/refreshes run concurrently.
		threadPool = Executors.newFixedThreadPool(300, new ThreadFactory(){
			public Thread newThread(Runnable r) {
				Thread t = new Thread(r, "Request thread pool thread");
				// t.setDaemon(true);
				return t;
			}});
	}

	/**
	 * Loads cached key pairs from SCRATCH_PATH; on any failure, generates
	 * 200 fresh pairs and attempts to cache them for the next run.
	 */
	@SuppressWarnings("unchecked")
	private void getKeys() {
		ObjectInputStream in = null;
		try {
			in = new ObjectInputStream(new FileInputStream(SCRATCH_PATH));
			generatedKeys = (List<KeyPair>)in.readObject();
		} catch( Exception e ) {
			System.err.println("couldn't get scratch keys: " + e.toString());
			System.out.println("generating keys...");
			CryptoUtils c = new CryptoUtils();
			generatedKeys = new LinkedList<KeyPair>();
			for( int i=0; i<200; i++ ) {
				if( (i%100) == 0 ) {
					System.out.println("done " + i);
				}
				generatedKeys.add(c.getPair());
			}
			System.out.println("done, writing...");
			ObjectOutputStream out = null;
			try {
				out = new ObjectOutputStream(new FileOutputStream(SCRATCH_PATH));
				out.writeObject(generatedKeys);
			} catch (Exception e2 ) {
				// BUG FIX: previously printed the outer 'e', masking the
				// actual write failure.
				e2.printStackTrace();
			} finally {
				if( out != null ) {
					try { out.close(); } catch( IOException ioe ) {}
				}
			}
			System.out.println("done");
		} finally {
			if( in != null ) {
				try { in.close(); } catch( IOException ioe ) {}
			}
		}
	}

	// Latency distributions for registration and refresh phases.
	CDF reg_connections = new CDF("reg_connections");
	CDF reg_io = new CDF("reg_io");
	CDF ref_connections = new CDF("ref_connections");
	CDF ref_io = new CDF("ref_io");

	/** Registers every generated key and reports timing distributions. */
	public void bench_key_registrations() {
		long start = System.currentTimeMillis();
		register_all();
		System.out.println("register all took: " + (System.currentTimeMillis() - start));
		reg_connections.draw();
		reg_io.draw();
	}

	/**
	 * Issues a challenge/response "refresh" for every key in parallel,
	 * blocks until all complete, then reports timing distributions.
	 */
	public void bench_refreshes() {
		refreshed.clear();
		refreshing_error.clear();
		long start = System.currentTimeMillis();
		for( KeyPair p : generatedKeys ) {
			threadPool.submit(new PeerRequest(p));
		}
		// Poll until every PeerRequest finished; each adds itself to
		// 'refreshed' in its finally block, whether it succeeded or not.
		while( refreshed.size() < generatedKeys.size() ) {
			System.out.println("done refreshing: " + refreshed.size());
			try {
				Thread.sleep(1000);
			} catch( Exception e ) {}
		}
		System.out.println("alldone -- errors: " + refreshing_error.size());
		ref_io.draw();
		ref_connections.draw();
	}

	/** Submits a RegistrationRequest per key and waits for all to finish. */
	private void register_all() {
		registered.clear();
		errors.clear();
		for( KeyPair p : generatedKeys ) {
			threadPool.submit(new RegistrationRequest(p));
		}
		while( registered.size() + errors.size() < generatedKeys.size() ) {
			try {
				Thread.sleep(5*1000);
			} catch( Exception e ) {}
			System.out.println("registered: " + registered.size() + " error: " + errors.size());
		}
	}

	// Shared, thread-safe bookkeeping updated by the worker runnables below.
	final List<KeyPair> registered = Collections.synchronizedList(new ArrayList<KeyPair>());
	final List<KeyPair> errors = Collections.synchronizedList(new ArrayList<KeyPair>());
	final Set<String> recently_challenged = Collections.synchronizedSet(new HashSet<String>());
	final List<KeyPair> refreshed = Collections.synchronizedList(new ArrayList<KeyPair>());
	final List<KeyPair> refreshing_error = Collections.synchronizedList(new ArrayList<KeyPair>());

	// assumes this key is registered.
	class PeerRequest implements Runnable {
		private KeyPair keys;
		private String base64Key;
		private long start;

		public PeerRequest( KeyPair keys ) {
			this.keys = keys;
			base64Key = CryptoUtils.getBase64FromKey(keys.getPublic());
		}

		/** Requests a challenge for this key, then answers it. */
		public void run() {
			HttpURLConnection conn = null;
			try {
				start = System.currentTimeMillis();
				String theURLString = community_url + "?" + CommunityConstants.BASE64_PUBLIC_KEY + "=" + URLEncoder.encode(base64Key, "UTF-8");
				URL url = new URL(theURLString);
				conn = (HttpURLConnection) url.openConnection();
				if( conn instanceof HttpsURLConnection ) {
					try {
						((HttpsURLConnection) conn).setSSLSocketFactory(sslcontext.getSocketFactory());
					} catch( Exception e ) {
						e.printStackTrace();
						throw new IOException(e.getMessage());
					}
				}
				conn.setConnectTimeout(10*1000); // 10 second timeouts
				conn.setReadTimeout(60*1000);
				conn.setRequestMethod("GET");
				StopWatch watch = new StopWatch();
				BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
				ref_connections.addValue(watch.lap("Connect1"));
				String l = in.readLine();
				if( l == null ) {
					throw new IOException("null challenge line");
				}
				// Expected first line: "<CHALLENGE> <number>".
				if (l.startsWith(CommunityConstants.CHALLENGE)) {
					String[] toks = l.split("\\s+");
					if (toks.length != 2) {
						throw new IOException("Received a malformed challenge");
					}
					long challenge = Long.parseLong(toks[1]);
					reissueWithResponse(challenge, watch);
				} else {
					System.err.println("Didn't get challenge, got: " + l);
				}
			} catch( Exception e ) {
				refreshing_error.add(keys);
				e.printStackTrace();
			} finally {
				recently_challenged.remove(CryptoUtils.getBase64FromKey(keys.getPublic()));
				// Always count this request as done so bench_refreshes() terminates.
				refreshed.add(keys);
				if( conn != null ) {
					try {
						// getOutputStream() throws on this GET connection
						// (doOutput is false); the IOException is swallowed
						// and we still reach getInputStream().close().
						conn.getOutputStream().close();
						conn.getInputStream().close();
					} catch( IOException e ) {}
					conn.disconnect();
				}
			}
		}

		/** Signs (challenge + 1) and re-issues the request with the response. */
		public void reissueWithResponse( long challenge, StopWatch watch ) {
			HttpURLConnection conn = null;
			try {
				byte[] encrypted_response = null;
				Signature signer = Signature.getInstance("SHA1withRSA");
				signer.initSign(keys.getPrivate());
				signer.update(ByteManip.ltob(challenge+1));
				encrypted_response = signer.sign();
				String urlStr = community_url + "?" + CommunityConstants.BASE64_PUBLIC_KEY + "=" + URLEncoder.encode(base64Key, "UTF-8") + "&" + CommunityConstants.CHALLENGE_RESPONSE + "=" + URLEncoder.encode(Base64.encode(encrypted_response), "UTF-8");
				// System.out.println("url str: " + urlStr);
				URL url = new URL(urlStr);
				watch.lap("intermediary tasks");
				conn = (HttpURLConnection) url.openConnection();
				if( conn instanceof HttpsURLConnection ) {
					try {
						((HttpsURLConnection) conn).setSSLSocketFactory(sslcontext.getSocketFactory());
					} catch( Exception e ) {
						e.printStackTrace();
						throw new IOException(e.getMessage());
					}
				}
				ByteArrayOutputStream bytes = new ByteArrayOutputStream();
				String line = null;
				BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
				ref_connections.addValue(watch.lap("connect2"));
				while( (line = in.readLine()) != null ) {
					bytes.write(line.getBytes());
				}
				// processAsXML(bytes);
				//System.out.println("read: " + bytes.size() + " in " + (System.currentTimeMillis()-start) + " ms e2e");
				ref_io.addValue(watch.lap("read response to 2"));
			} catch (Exception e) {
				e.printStackTrace();
			} finally {
				if( conn != null ) {
					conn.disconnect();
				}
			}
		}
	}

	/** POSTs a registration (public key + nickname) to the community server. */
	class RegistrationRequest implements Runnable {
		private String nick;
		private String base64Key;
		private KeyPair keys;

		public RegistrationRequest( KeyPair keys ) {
			this(keys, null);
		}

		public RegistrationRequest( KeyPair keys, String inNick ) {
			this.keys = keys;
			this.base64Key = CryptoUtils.getBase64FromKey(keys.getPublic());
			if( inNick != null ) {
				this.nick = inNick;
			} else {
				// Derive a stable nickname from the key when none is given.
				nick = "r-" + base64Key.hashCode();
			}
		}

		public void run() {
			try {
				URL url = new URL(community_url);
				HttpURLConnection conn = (HttpURLConnection) url.openConnection();
				if( conn instanceof HttpsURLConnection ) {
					try {
						((HttpsURLConnection) conn).setSSLSocketFactory(sslcontext.getSocketFactory());
					} catch( Exception e ) {
						e.printStackTrace();
						throw new IOException(e.getMessage());
					}
				}
				conn.setConnectTimeout(10*1000); // 10 second timeouts
				conn.setReadTimeout(10*1000);
				conn.setDoOutput(true);
				conn.setRequestMethod("POST");
				conn.setRequestProperty("Content-type", "application/x-www-form-urlencoded");
				// TODO: add gzip processing here?
				Map<String, String> requestHeaders = new HashMap<String, String>();
				Map<String, String> formParams = new HashMap<String, String>();
				formParams.put("base64key", base64Key);
				formParams.put("nick", nick);
				for (String head : requestHeaders.keySet()) {
					conn.setRequestProperty(head, requestHeaders.get(head));
				}
				// add url form parameters
				StopWatch timer = new StopWatch();
				OutputStreamWriter out = new OutputStreamWriter(conn.getOutputStream());
				reg_connections.addValue(timer.lap("initial connection"));
				Iterator<String> params = formParams.keySet().iterator();
				while (params.hasNext()) {
					String name = params.next();
					String value = formParams.get(name);
					out.append(URLEncoder.encode(name, "UTF-8") + "=" + URLEncoder.encode(value, "UTF-8"));
					if (params.hasNext()) {
						out.append("&");
					}
				}
				long start = System.currentTimeMillis();
				out.flush();
				timer.lap("wrote params");
				// Drain the response; we only care about the status code.
				BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
				String line = null;
				while ((line = in.readLine()) != null) {
					// System.out.println("resp line: " + line);
				}
				timer.lap("read response");
				in.close();
				reg_io.addValue(System.currentTimeMillis()-start);
				System.out.println("final status code: " + conn.getResponseCode() + " / " + conn.getResponseMessage());
				registered.add(keys);
			} catch( Exception e ) {
				e.printStackTrace();
				errors.add(keys);
			}
		}
	};

	/** Submits a single refresh request (handy for debugging). */
	private void single_request() {
		KeyPair p = generatedKeys.get(0);
		// threadPool.submit(new RegistrationRequest(p));
		threadPool.submit(new PeerRequest(p));
	}

	public static final void main( String [] args ) throws Exception {
		TestEmbeddedServer test = null;
		if( args.length == 0 ) {
			test = new TestEmbeddedServer("https://ultramagnetic.dyn.cs.washington.edu", 8081);
			// test = new TestEmbeddedServer("http://127.0.0.1", 8081);
		} else {
			test = (new TestEmbeddedServer(args[0], Integer.parseInt(args[1])));
		}
		// test.bench_key_registrations();
		// while( true ) {
		test.bench_refreshes();
		// }
		// test.single_request();
	}
}
| 12,828 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
TestRequest.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/test/TestRequest.java | package edu.washington.cs.oneswarm.community2.test;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.security.KeyPair;
import java.security.Signature;
import java.util.ArrayList;
import java.util.List;
import java.util.zip.GZIPInputStream;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.Result;
import javax.xml.transform.Source;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMResult;
import javax.xml.transform.stream.StreamSource;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import com.sun.org.apache.xerces.internal.impl.dv.util.Base64;
import edu.washington.cs.oneswarm.community2.shared.CommunityConstants;
import edu.washington.cs.oneswarm.community2.utils.ByteManip;
/**
 * Manual end-to-end test of the community-server challenge/response
 * protocol: request a challenge for our public key, sign (challenge + 1),
 * re-issue the request with the signature attached, and parse the returned
 * XML friend list.
 */
public class TestRequest {

	public static final String BASE_URL = "http://127.0.0.1:8080/community";

	private KeyPair pair;
	// Base64 encoding of our public key, sent as a request parameter.
	private String base64PubKey;

	public TestRequest(KeyPair pair) {
		this.pair = pair;
		base64PubKey = Base64.encode(pair.getPublic().getEncoded());
	}

	public void run() {
		/**
		 * Three step: 1) get challenge, 2) send reponse, 3) parse list
		 */
		try {
			URL url = new URL(BASE_URL + "?" + CommunityConstants.BASE64_PUBLIC_KEY + "=" + URLEncoder.encode(base64PubKey, "UTF-8"));
			HttpURLConnection conn = (HttpURLConnection) url.openConnection();
			BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
			String l = in.readLine();
			if (l != null) {
				System.out.println("got: " + l);
				if (l.startsWith(CommunityConstants.CHALLENGE)) {
					// Expected form: "<CHALLENGE> <number>".
					String[] toks = l.split("\\s+");
					if (toks.length != 2) {
						System.err.println("bad challenge");
						return;
					}
					long challenge = Long.parseLong(toks[1]);
					System.out.println("got challenge: " + challenge);
					reissueWithResponse(challenge);
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	// Signs (challenge + 1) with our private key and re-issues the request
	// with the Base64-encoded signature as the challenge response.
	private void reissueWithResponse(long challenge) {
		try {
			byte[] encrypted_response = null;
			Signature sig = Signature.getInstance("SHA1withRSA");
			sig.initSign(pair.getPrivate());
			sig.update(ByteManip.ltob(challenge + 1));
			encrypted_response = sig.sign();
			String urlStr = BASE_URL + "?" + CommunityConstants.BASE64_PUBLIC_KEY + "=" + URLEncoder.encode(base64PubKey, "UTF-8") + "&" + CommunityConstants.CHALLENGE_RESPONSE + "=" + URLEncoder.encode(Base64.encode(encrypted_response), "UTF-8");
			System.out.println("url str: " + urlStr);
			URL url = new URL(urlStr);
			HttpURLConnection conn = (HttpURLConnection) url.openConnection();
			ByteArrayOutputStream bytes = new ByteArrayOutputStream();
			// Cap the buffered response at 2 MB to bound memory use.
			readLimitedInto(conn, 2 * 1024 * 1024, bytes);
			System.out.println("read: " + bytes.size());
			processAsXML(bytes);
			// BufferedReader in = new BufferedReader(new
			// InputStreamReader(conn.getInputStream()));
			// String l = in.readLine();
			// while( l != null ) {
			// System.out.println("got: " + l);
			// l = in.readLine();
			// }
			System.out.println("done");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	// Parses the buffered response as XML and extracts the refresh interval
	// and friend list. NOTE(review): parser/transformer failures are
	// swallowed silently below — acceptable for a manual test only.
	private void processAsXML(ByteArrayOutputStream bytes) {
		ByteArrayInputStream input = new ByteArrayInputStream(bytes.toByteArray());
		try {
			TransformerFactory factory = TransformerFactory.newInstance();
			Transformer xformer = factory.newTransformer();
			Source source = new StreamSource(input);
			DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
			Document doc = builder.newDocument();
			Result result = new DOMResult(doc);
			// Identity transform: stream -> DOM document.
			xformer.transform(source, result);
			NodeList root = doc.getElementsByTagName(CommunityConstants.RESPONSE_ROOT);
			Node response = root.item(0);
			String refreshInterval;
			NodeList firstLevel = response.getChildNodes();
			for( int i=0; i<firstLevel.getLength(); i++ ) {
				Node kid = firstLevel.item(i);
				if( kid.getLocalName().equals(CommunityConstants.REFRESH_INTERVAL) ) {
					refreshInterval = kid.getTextContent();
					System.out.println("got refresh interval: " + refreshInterval);
				} else if( kid.getLocalName().equals(CommunityConstants.FRIEND_LIST) ) {
					parseFriendList(kid);
				}
			}
		} catch (ParserConfigurationException e) {
			// couldn't even create an empty doc
		} catch (TransformerException e) {
			;
		} catch( NullPointerException e ) {
			// basically means the file had bad structure
			e.printStackTrace();
		}
	}

	// Extracts (key, nick) pairs from the friend-list element's children.
	private List<String[]> parseFriendList(Node kid) {
		List<String[]> out = new ArrayList<String[]>();
		for( int i=0; i<kid.getChildNodes().getLength(); i++ ) {
			Node entry = kid.getChildNodes().item(i);
			String key = entry.getAttributes().getNamedItem(CommunityConstants.KEY_ATTRIB).getTextContent();
			String nick = entry.getAttributes().getNamedItem(CommunityConstants.NICK_ATTRIB).getTextContent();
			System.out.println("parsed " + key + " / " + nick);
			out.add(new String[]{key, nick});
		}
		return out;
	}

	// Returns a stream for the response body, transparently un-gzipping it
	// when the server set "Content-Encoding: gzip".
	private InputStream getConnectionInputStream(HttpURLConnection conn) throws IOException {
		if (conn.getHeaderField("Content-Encoding") != null) {
			if (conn.getHeaderField("Content-Encoding").contains("gzip")) {
				return new GZIPInputStream(conn.getInputStream());
			}
		}
		return conn.getInputStream();
	}

	// Reads response lines into 'read' until EOF or until more than 'limit'
	// bytes have been buffered. Note: readLine() drops line terminators, so
	// the buffer holds the concatenated lines.
	private void readLimitedInto(HttpURLConnection conn, int limit, ByteArrayOutputStream read) throws IOException {
		BufferedReader in = new BufferedReader(new InputStreamReader(getConnectionInputStream(conn)));
		String line = null;
		while ((line = in.readLine()) != null) {
			read.write(line.getBytes());
			if (read.size() > limit) {
				return;
			}
		}
	}

	// Loads (or generates and caches) a key pair, then runs the protocol test.
	public static void main(String[] args) throws Exception {
		KeyPair pair = null;
		try {
			pair = (KeyPair) (new ObjectInputStream(new FileInputStream("/tmp/keys.scratch"))).readObject();
			System.out.println("loaded saved key pair");
		} catch (Exception e) {
			CryptoUtils c = new CryptoUtils();
			pair = c.getPair();
			System.out.println(Base64.encode(pair.getPublic().getEncoded()));
			ObjectOutputStream saved = new ObjectOutputStream(new FileOutputStream("/tmp/keys.scratch"));
			saved.writeObject(pair);
			System.out.println("generated/saved key pair");
		}
		System.out.println("pub");
		System.out.println(Base64.encode(pair.getPublic().getEncoded()));
		(new TestRequest(pair)).run();
	}
}
| 6,947 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
MutableLong.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/utils/MutableLong.java | package edu.washington.cs.oneswarm.community2.utils;
/**
 * Minimal mutable holder for a primitive long — useful, for example, as a
 * counter value stored inside a collection. The field {@link #v} is public
 * and may be read or written directly.
 */
public class MutableLong {
	public long v;

	/** Creates a holder initialized to the given value. */
	public MutableLong(long value) {
		set(value);
	}

	/** Returns the current value. */
	public long get() {
		return v;
	}

	/** Replaces the current value. */
	public void set(long value) {
		this.v = value;
	}
}
| 225 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
ByteManip.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/utils/ByteManip.java | package edu.washington.cs.oneswarm.community2.utils;
import java.net.InetAddress;
import java.net.UnknownHostException;
public class ByteManip {
public static long btol(byte[] b) {
long l = 0;
for(int i =0; i < 8; i++){
l <<= 8;
l ^= (long) b[i] & 0xFF;
}
return l;
}
public static byte[] ltob(long l) {
byte[] b = new byte[8];
for (int i = 0; i < 8; i++) {
b[7-i] = (byte) (l >>> (i * 8));
}
return b;
}
public static String ntoa( int ip )
{
long a = (ip & 0xFF000000) >>> 24;
long b = (ip & 0x00FF0000) >>> 16;
long c = (ip & 0x0000FF00) >>> 8;
long d = (ip & 0x000000FF) >>> 0;
return a + "." + b + "." + c + "." + d;
}
public static int aton( String ip )
{
String [] toks = ip.split("\\.");
int a = Integer.parseInt(toks[0]);
int b = Integer.parseInt(toks[1]);
int c = Integer.parseInt(toks[2]);
int d = Integer.parseInt(toks[3]);
return (int)((a << 24) | (b << 16) | (c << 8) | d);
}
public static int ip_to_l(String ip) throws UnknownHostException {
byte[] bytes = InetAddress.getByName(ip).getAddress();
return (bytes[0]<<24) & 0xff000000 |
(bytes[1] << 16) & 0x00ff0000 |
(bytes[2] << 8) & 0x0000ff00 |
bytes[3] & 0x000000ff;
}
public static byte[] intToByteArray(int value) {
byte[] b = new byte[4];
for (int i = 0; i < 4; i++) {
int offset = (b.length - 1 - i) * 8;
b[i] = (byte) ((value >>> offset) & 0xFF);
}
return b;
}
}
| 1,502 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
URLUTF8Encoder.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/utils/URLUTF8Encoder.java | /**
* Provides a method to encode any string into a URL-safe
* form.
* Non-ASCII characters are first encoded as sequences of
* two or three bytes, using the UTF-8 algorithm, before being
* encoded as %HH escapes.
*
* Created: 17 April 1997
* Author: Bert Bos <bert@w3.org>
*
* URLUTF8Encoder: http://www.w3.org/International/URLUTF8Encoder.java
*
* Copyright © 1997 World Wide Web Consortium, (Massachusetts
* Institute of Technology, European Research Consortium for
* Informatics and Mathematics, Keio University). All Rights Reserved.
* This work is distributed under the W3C® Software License [1] in the
* hope that it will be useful, but WITHOUT ANY WARRANTY; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE.
*
* [1] http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
*/
package edu.washington.cs.oneswarm.community2.utils;
public class URLUTF8Encoder
{
	/**
	 * Table mapping each byte value 0..255 to its "%xx" escape (lowercase
	 * hex). Generated at class-initialization time instead of hand-written,
	 * to avoid transcription errors.
	 */
	final static String[] hex = buildHexTable();

	private static String[] buildHexTable() {
		String[] table = new String[256];
		for (int i = 0; i < 256; i++) {
			table[i] = String.format("%%%02x", i);
		}
		return table;
	}

	/**
	 * Encode a string to the "x-www-form-urlencoded" form, enhanced
	 * with the UTF-8-in-URL proposal. This is what happens:
	 *
	 * <ul>
	 * <li><p>The ASCII characters 'a' through 'z', 'A' through 'Z',
	 *        and '0' through '9' remain the same.
	 *
	 * <li><p>The unreserved characters - _ . ! ~ * ' ( ) remain the same.
	 *
	 * <li><p>The space character ' ' is converted into a plus sign '+'.
	 *
	 * <li><p>All other ASCII characters are converted into the
	 *        3-character string "%xy", where xy is
	 *        the two-digit hexadecimal representation of the character
	 *        code
	 *
	 * <li><p>All non-ASCII characters are encoded in two steps: first
	 *        to a sequence of 2, 3 or 4 bytes, using the UTF-8 algorithm;
	 *        secondly each of these bytes is encoded as "%xx". Surrogate
	 *        pairs are combined into a single supplementary code point and
	 *        emitted as one 4-byte UTF-8 sequence (the previous version
	 *        encoded each surrogate char separately, producing invalid
	 *        CESU-8 instead of UTF-8).
	 * </ul>
	 *
	 * @param s The string to be encoded
	 * @return The encoded string
	 */
	public static String encode(String s)
	{
		StringBuilder sbuf = new StringBuilder();
		int len = s.length();
		for (int i = 0; i < len; i++) {
			char c = s.charAt(i);
			int ch = c;
			if (('A' <= ch && ch <= 'Z')       // 'A'..'Z'
					|| ('a' <= ch && ch <= 'z')    // 'a'..'z'
					|| ('0' <= ch && ch <= '9')) { // '0'..'9'
				sbuf.append(c);
			} else if (ch == ' ') {            // space
				sbuf.append('+');
			} else if (ch == '-' || ch == '_'  // unreserved
					|| ch == '.' || ch == '!'
					|| ch == '~' || ch == '*'
					|| ch == '\'' || ch == '('
					|| ch == ')') {
				sbuf.append(c);
			} else if (ch <= 0x007f) {         // other ASCII
				sbuf.append(hex[ch]);
			} else if (ch <= 0x07FF) {         // non-ASCII <= 0x7FF
				sbuf.append(hex[0xc0 | (ch >> 6)]);
				sbuf.append(hex[0x80 | (ch & 0x3F)]);
			} else if (Character.isHighSurrogate(c) && i + 1 < len
					&& Character.isLowSurrogate(s.charAt(i + 1))) {
				// Supplementary code point (> 0xFFFF): 4-byte UTF-8 sequence.
				int cp = Character.toCodePoint(c, s.charAt(++i));
				sbuf.append(hex[0xf0 | (cp >> 18)]);
				sbuf.append(hex[0x80 | ((cp >> 12) & 0x3F)]);
				sbuf.append(hex[0x80 | ((cp >> 6) & 0x3F)]);
				sbuf.append(hex[0x80 | (cp & 0x3F)]);
			} else {                           // 0x7FF < ch <= 0xFFFF (incl. unpaired surrogates)
				sbuf.append(hex[0xe0 | (ch >> 12)]);
				sbuf.append(hex[0x80 | ((ch >> 6) & 0x3F)]);
				sbuf.append(hex[0x80 | (ch & 0x3F)]);
			}
		}
		return sbuf.toString();
	}
}
| 5,005 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
IPServletFilter.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/utils/IPServletFilter.java | package edu.washington.cs.oneswarm.community2.utils;
import java.io.IOException;
import java.io.PrintStream;
import java.util.List;
import java.util.logging.Logger;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;
/**
 * Servlet filter enforcing IP-based access control. A request is rejected
 * with 401 if its remote address matches any blacklist entry; otherwise, if
 * a non-empty whitelist is configured, the address must match some whitelist
 * entry (localhost always bypasses the whitelist check).
 */
public class IPServletFilter implements Filter {

	private List<IPFilter> whitelist;
	private List<IPFilter> blacklist;

	private static Logger logger = Logger.getLogger(IPServletFilter.class.getName());

	public IPServletFilter( List<IPFilter> whitelist, List<IPFilter> blacklist) {
		this.whitelist = whitelist;
		this.blacklist = blacklist;
	}

	public void destroy() {}

	// Blacklist is checked first, so an address matching both lists is
	// rejected. An empty whitelist means "allow everyone not blacklisted".
	public void doFilter(ServletRequest request,
						 ServletResponse response,
						 FilterChain chain) throws IOException, ServletException {

		for( IPFilter filter : blacklist ) {
			if( filter.contains(request.getRemoteAddr()) ) {
				logger.warning("Dropped blacklisted connection request: " + request.getRemoteAddr());
				if( response instanceof HttpServletResponse ) {
					HttpServletResponse resp = ((HttpServletResponse)response);
					sendNoAuth(resp);
				}
				return;
			}
		}

		// check whitelist if it has any entries (and we're not connecting from localhost)
		if( whitelist.size() > 0 && request.getRemoteAddr().equals("127.0.0.1") == false ) {
			boolean ok = false;
			for( IPFilter filter : whitelist ) {
				if( filter.contains(request.getRemoteAddr()) ) {
					ok = true;
					break;
				}
			}
			if( !ok ) {
				logger.warning("Dropped connection request from user not in whitelist: " + request.getRemoteAddr());
				if( response instanceof HttpServletResponse ) {
					HttpServletResponse resp = ((HttpServletResponse)response);
					sendNoAuth(resp);
				}
				return;
			}
		}

		/**
		 * Pass-through, this request is okay.
		 */
		if( chain != null ) {
			chain.doFilter(request, response);
		}
	}

	// Writes a minimal 401 HTML page and closes the response stream.
	private void sendNoAuth(HttpServletResponse resp) throws IOException {
		resp.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
		PrintStream out = new PrintStream(resp.getOutputStream());
		out.println("<html><body><h1>401/IP not authorized</h1></body></html>\r\n\r\n");
		out.flush();
		out.close();
	}

	public void init(FilterConfig config) throws ServletException {}
} | 2,411 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z
StringTools.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/utils/StringTools.java | package edu.washington.cs.oneswarm.community2.utils;
import java.util.Date;
public class StringTools {

	/**
	 * Parses {@code inRateStr} as a long byte count and formats it
	 * human-readably (see {@link #formatRate(long, String)}).
	 */
	public static String formatRate(String inRateStr) {
		return formatRate(Long.parseLong(inRateStr));
	}

	/** Formats a byte count using the unit "B". */
	public static String formatRate(long inLongBytes) {
		return formatRate(inLongBytes, "B");
	}

	/**
	 * Formats a count using binary (1024-based) K/M/G/T prefixes, e.g.
	 * 1536 -> "1 KB". Values below one M-unit carry no decimals; larger
	 * values carry up to two (truncated, not rounded).
	 */
	public static String formatRate(long inBytes2, String unit) {
		double inBytes = (double) inBytes2;
		if (inBytes < 1024)
			return trim(inBytes, 0) + " " + unit;
		inBytes /= 1024.0;
		if (inBytes < 1024)
			return trim(inBytes, 0) + " K" + unit;
		inBytes /= 1024.0;
		if (inBytes < 1024)
			return trim(inBytes, 2) + " M" + unit;
		inBytes /= 1024.0;
		if (inBytes < 1024)
			return trim(inBytes, 2) + " G" + unit;
		inBytes /= 1024.0;
		return trim(inBytes, 2) + " T" + unit;
	}

	/**
	 * Renders {@code d}, keeping at most {@code places} digits after the
	 * decimal point (truncating, not rounding). With places == 0 the decimal
	 * point is dropped entirely.
	 */
	public static String trim(double d, int places) {
		String out = Double.toString(d);
		int dot = out.indexOf('.');
		if (dot == -1) {
			return out;
		}
		// Fix: the old cutoff (dot + places) kept one decimal digit fewer
		// than requested; keep the dot plus 'places' digits (or neither
		// when places == 0).
		int end = (places == 0) ? dot : dot + 1 + places;
		return out.substring(0, Math.min(end, out.length()));
	}

	/**
	 * Shortens {@code str} to at most {@code max} characters, replacing the
	 * removed portion with "..." at the end (trimBack == true) or the start.
	 * Assumes max >= 4 so there is room for the ellipsis.
	 */
	public static String truncate(String str, int max, boolean trimBack) {
		// Fix: use <= so a string of exactly max chars is returned intact
		// (previously it was truncated even though it already fit).
		if (str.length() <= max) {
			return str;
		}
		if (trimBack) {
			return str.substring(0, max - 3) + "...";
		}
		// trim from front
		return "..." + str.substring(str.length() - (max - 3), str.length());
	}

	/**
	 * Renders a date as a coarse, human-friendly age ("5 minutes ago",
	 * "yesterday", "in 2 hours", ...). Dates in the future are supported.
	 *
	 * @param date   the date to describe; null yields "never"
	 * @param useAgo when true, append "ago" / prepend "in" as appropriate
	 */
	public static String formatDateAppleLike(Date date, boolean useAgo) {
		if (date == null) {
			return "never";
		}
		boolean inTheFuture = false;
		int secAgo = (int) (((new Date()).getTime() - date.getTime()) / 1000);
		if (secAgo < 0) {
			inTheFuture = true;
			secAgo = -secAgo;
		}
		int minAgo = secAgo / 60;
		int hoursAgo = minAgo / 60;
		int daysAgo = hoursAgo / 24;
		int monthsAgo = daysAgo / 31;
		String ret = "";
		if (secAgo < 5) {
			return "now";
		} else if (secAgo < 60) {
			ret = "<1 minute";
		} else if (minAgo == 1) {
			ret = "1 minute";
		} else if (minAgo < 60) {
			ret = minAgo + " minutes";
		} else if (hoursAgo == 1) {
			ret = "1 hour";
		} else if (hoursAgo < 24) {
			ret = hoursAgo + " hours";
		} else if (daysAgo == 1) {
			if (inTheFuture) {
				return "tomorrow";
			} else {
				return "yesterday";
			}
		} else if (daysAgo < 62) {
			ret = daysAgo + " days";
		} else if (monthsAgo < 24) {
			ret = (monthsAgo) + " months";
		} else {
			return "Years";
		}
		if (useAgo) {
			if (inTheFuture) {
				return "in " + ret;
			} else {
				return ret + " ago";
			}
		} else {
			return ret;
		}
	}

	/** Same as {@link #formatDateAppleLike(Date, boolean)} with "ago" text. */
	public static String formatDateAppleLike(Date lastDate) {
		return StringTools.formatDateAppleLike(lastDate, true);
	}
}
| 2,589 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
ConciseLogFormatter.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/utils/ConciseLogFormatter.java | package edu.washington.cs.oneswarm.community2.utils;
import java.text.DateFormat;
import java.util.Date;
import java.util.logging.Formatter;
import java.util.logging.LogRecord;
/**
 * Log formatter producing one compact line per record:
 * "[LEVEL] &lt;time&gt; &lt;SimpleLoggerName&gt;: &lt;message&gt;\n",
 * where the simple name is the last dot-separated component of the
 * record's logger name.
 */
public class ConciseLogFormatter extends Formatter {
	public String format(LogRecord record) {
		// Guard against records logged without a logger name (was an NPE).
		String loggerName = record.getLoggerName();
		if (loggerName == null) {
			loggerName = "";
		}
		String[] nameToks = loggerName.split("\\.");
		// Fix: stamp the record's own event time, not the (possibly later)
		// time at which the record happens to be formatted.
		String time = DateFormat.getInstance().format(new Date(record.getMillis()));
		return "[" + record.getLevel() + "] " + time + " " + nameToks[nameToks.length - 1] + ": " + record.getMessage() + "\n";
	}
}
| 526 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
IPFilter.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/utils/IPFilter.java | package edu.washington.cs.oneswarm.community2.utils;
import java.net.UnknownHostException;
import java.util.BitSet;
/**
 * Matches IPv4 addresses against either a CIDR-style prefix
 * ("a.b.c.d/bits") or a dash-separated address range ("a.b.c.d-e.f.g.h").
 */
public class IPFilter {

	// Prefix bits to match (null when this filter is an address range).
	private BitSet prefix;
	// Number of leading (most-significant) bits of the prefix that must match.
	private int sigBits;
	private int lower = Integer.MIN_VALUE, upper = Integer.MAX_VALUE; // if we're using an IP address range

	/**
	 * Parses either "a.b.c.d-e.f.g.h" (range) or "a.b.c.d/bits" (prefix).
	 */
	public IPFilter( String prefix ) throws UnknownHostException {
		if( prefix.contains("-") ) {
			this.prefix = null;
			String [] toks = prefix.split("-");
			lower = ByteManip.aton(toks[0]);
			upper = ByteManip.aton(toks[1]);
		}
		else {
			String [] toks = prefix.split("/");
			sigBits = Integer.parseInt(toks[1]);
			this.prefix = fromByteArray(ByteManip.intToByteArray(ByteManip.aton(toks[0])));
		}
	}

	public IPFilter( String prefix, int sigBits ) throws UnknownHostException {
		this(ByteManip.ip_to_l(prefix), sigBits);
	}

	public IPFilter( int prefix, int sigBits ) {
		this.prefix = fromByteArray(ByteManip.intToByteArray(prefix));
		this.sigBits = sigBits;
	}

	public boolean contains( String ip ) throws UnknownHostException {
		return contains(ByteManip.aton(ip));
	}

	public boolean contains( int ip ) {
		if( prefix != null ) {
			// Prefix match: compare the top sigBits bits (BitSet index 31 is
			// the most-significant address bit).
			BitSet bits = fromByteArray(ByteManip.ltob(ip));
			for( int i=31; i>=(32-sigBits); i-- ) {
				if( bits.get(i) != prefix.get(i) ) {
					return false;
				}
			}
			return true;
		} else {
			// NOTE(review): range bounds are EXCLUSIVE (the endpoint
			// addresses themselves do not match), and the comparison is on
			// signed ints, so ranges that straddle 128.0.0.0 will misbehave
			// — confirm both are intended.
			return ip < upper && ip > lower;
		}
	}

	private static BitSet fromByteArray(byte[] bytes) {
		// Bit i of the result corresponds to bit i of the big-endian value,
		// counting from the least-significant end.
		BitSet bits = new BitSet();
		for (int i=0; i<bytes.length*8; i++) {
			if ((bytes[bytes.length-i/8-1]&(1<<(i%8))) > 0) {
				bits.set(i);
			}
		}
		return bits;
	}

	// NOTE(review): only valid for prefix filters — 'prefix' is null for
	// range filters, so this throws NullPointerException in that case.
	public String toString() {
		StringBuilder sb = new StringBuilder();
		for( int i=0; i<32; i++ ) {
			sb.append(this.prefix.get(i) == true ? "1" : "0");
		}
		return sb.toString();
	}

	// Ad-hoc smoke test.
	public static final void main( String [] args ) throws Exception {
		IPFilter prefix = new IPFilter("128.208.5.5/24");
		System.out.println("prefix: " + prefix);
		System.out.println(prefix.contains("128.208.4.255"));
		IPFilter filt = new IPFilter("128.208.5.0-128.208.7.10");
		System.out.println(filt.contains("128.208.4.0"));
		System.out.println(filt.contains("128.208.5.1"));
		System.out.println(filt.contains("128.208.6.24"));
		System.out.println(filt.contains("128.208.7.11"));
		System.out.println(filt.contains("129.208.7.11"));
	}
}
| 2,417 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
GangliaStat.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/utils/GangliaStat.java | package edu.washington.cs.oneswarm.community2.utils;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.LinkedList;
import java.util.Timer;
import java.util.TimerTask;
/**
 * Periodically reports application metrics to a Ganglia collector over UDP
 * using the ganglia 2.5 gmetric wire format (XDR encoding).
 *
 * Register metric sources with {@link #addMetric(StatReporter)}; a daemon
 * timer then pushes every registered value once per period.
 */
public class GangliaStat {

	private static final int DEFAULT_INITIAL_DELAY = 60;
	private static final int DEFAULT_UPDATE_PERIOD = 60;
	private static final int DEFAULT_PORT = 8649;

	// Both stored in milliseconds; the constructor converts from seconds.
	private final long delay;
	private final long period;

	private int updateCount = 0;

	// Collector host name and UDP port datagrams are sent to.
	private final String collector;
	private final int port;

	// Guarded by synchronized (reportTasks).
	private LinkedList<StatReporter> reportTasks = new LinkedList<StatReporter>();

	public GangliaStat(String collector) {
		this(collector, DEFAULT_PORT, DEFAULT_UPDATE_PERIOD, DEFAULT_INITIAL_DELAY);
	}

	/**
	 * @param collector host name of the ganglia collector
	 * @param port UDP port the collector listens on
	 * @param period frequency of updates in seconds
	 * @param delay number of seconds to wait before doing the first update
	 */
	public GangliaStat(String collector, int port, int period, int delay) {
		this.period = 1000 * period;
		this.delay = 1000 * delay;
		this.port = port;
		this.collector = collector;
		setMonitor();
	}

	/** Starts the daemon timer that periodically pushes all registered metrics. */
	private void setMonitor() {
		// Daemon timer: a forgotten GangliaStat never keeps the JVM alive.
		Timer timer = new Timer("Ganglia Monitor", true);
		timer.scheduleAtFixedRate(new TimerTask() {
			public void run() {
				updateCount++;
				synchronized (reportTasks) {
					try {
						// Re-resolved each round so collector DNS changes are picked up.
						InetAddress col = InetAddress.getByName(collector);
						for (StatReporter sr : reportTasks) {
							GMetricSender.send(col, port, sr.getName(), sr.getValue(), sr.getUnit(), GMetricSender.SLOPE_BOTH, 60, 300);
						}
					} catch (UnknownHostException e) {
						// Collector name doesn't resolve right now; skip this round.
						e.printStackTrace();
					}
				}
			}
		}, delay, period);
	}

	/** Registers a metric source to be reported every period. */
	public void addMetric(StatReporter stat) {
		synchronized (reportTasks) {
			reportTasks.add(stat);
		}
	}

	/** A named metric source; subclasses supply the current value on demand. */
	public abstract static class StatReporter {
		private final String name;
		private final String unit;

		public StatReporter(String name, String unit) {
			this.name = name;
			this.unit = unit;
		}

		public StatReporter(G25Metric metric) {
			this(metric.name, metric.units);
		}

		public String getName() {
			return name;
		}

		public String getUnit() {
			return unit;
		}

		/** @return the current value of this metric. */
		public abstract double getValue();
	}

	/** Built-in metric definitions matching the ganglia 2.5 wire protocol. */
	private enum G25Metric {
		METRIC_NET_UPLOAD(29, "bytes_out", 300, Type.FLOAT, "bytes/sec", Slope.BOTH, "%.2f"),
		METRIC_NET_DOWNLOAD(30, "bytes_in", 300, Type.FLOAT, "bytes/sec", Slope.BOTH, "%.2f");

		final int key;
		final String name;
		final int tmax;
		final Type type;
		final String units;
		final Slope slope;
		final String fmt;
		final int messageSize;

		G25Metric(int key, String name, int tmax, Type type, String units, Slope slope, String fmt) {
			this.key = key;
			this.name = name;
			this.tmax = tmax;
			this.type = type;
			this.units = units;
			this.slope = slope;
			this.fmt = fmt;
			this.messageSize = type.size;
		}

		enum Type {
			DOUBLE("double", 7, 16), FLOAT("float", 6, 8), INT32("int32", 5, 8), STRING("string", 1, 32), UNINT32("uint32", 4, 8);
			String type;
			int size;
			int code;

			Type(String type, int code, int size) {
				this.type = type;
				this.code = code;
				this.size = size;
			}
		}

		enum Slope {
			BOTH("both", 3), ZERO("zero", 0);
			String slope;
			int key;

			Slope(String s, int key) {
				this.slope = s;
				this.key = key;
			}
		}
	}

	/** Encodes and sends gmetric UDP packets. All sends are best-effort. */
	private static class GMetricSender {
		private final static int METRIC_USER_DEFINED_ID = 0;

		public final static int SLOPE_ZERO = 0;
		public final static int SLOPE_POSITIVE = 1;
		public final static int SLOPE_NEGATIVE = 2;
		public final static int SLOPE_BOTH = 3;
		public final static int SLOPE_UNSPECIFIED = 4;

		public final static String VALUE_STRING = "string";
		public final static String VALUE_UNSIGNED_SHORT = "uint16";
		public final static String VALUE_SHORT = "int16";
		public final static String VALUE_UNSIGNED_INT = "uint32";
		public final static String VALUE_INT = "int32";
		public final static String VALUE_FLOAT = "float";
		public final static String VALUE_DOUBLE = "double";

		public static void send(InetAddress address, int port, String name, String value, String type, String units, int slope, int tmax, int dmax) {
			// BUG FIX: the DatagramSocket was never closed, leaking one file
			// descriptor per call (this method runs once per metric per period).
			DatagramSocket socket = null;
			try {
				socket = new DatagramSocket();
				byte[] buf = write(name, value, type, units, slope, tmax, dmax);
				DatagramPacket p = new DatagramPacket(buf, buf.length, address, port);
				socket.send(p);
			} catch (IOException e) {
				// Metrics are best-effort; a lost datagram is acceptable.
			} finally {
				if (socket != null) {
					socket.close();
				}
			}
		}

		public static void send(InetAddress address, int port, String name, double dvalue, String units, int slope, int tmax, int dmax) {
			// Delegates to the String overload (previously duplicated code).
			send(address, port, name, Double.toString(dvalue), VALUE_DOUBLE, units, slope, tmax, dmax);
		}

		public static void send(InetAddress address, int port, String name, int dvalue, String units, int slope, int tmax, int dmax) {
			// Delegates to the String overload (previously duplicated code).
			send(address, port, name, Integer.toString(dvalue), VALUE_INT, units, slope, tmax, dmax);
		}

		public static void send(InetAddress address, int port, G25Metric metric, String value) {
			DatagramSocket socket = null;
			try {
				ByteArrayOutputStream baos = new ByteArrayOutputStream();
				DataOutputStream dos = new DataOutputStream(baos);
				dos.writeInt(METRIC_USER_DEFINED_ID);
				writeXDRString(dos, metric.type.type);
				writeXDRString(dos, metric.name);
				writeXDRString(dos, value);
				writeXDRString(dos, metric.units);
				dos.writeInt(metric.slope.key);
				dos.writeInt(metric.tmax);
				// NOTE(review): tmax is also written into the dmax slot here
				// (G25Metric carries no dmax field) -- confirm this is intended.
				dos.writeInt(metric.tmax);
				byte[] buf = baos.toByteArray();
				// BUG FIX: close the socket (previously leaked).
				socket = new DatagramSocket();
				DatagramPacket p = new DatagramPacket(buf, buf.length, address, port);
				socket.send(p);
			} catch (IOException e) {
				e.printStackTrace();
			} finally {
				if (socket != null) {
					socket.close();
				}
			}
		}

		/*
		 * EVERYTHING BELOW HERE YOU DON"T NEED TO USE
		 */

		/** XDR-encodes the packet payload for a user-defined metric. */
		private static byte[] write(String name, String value, String type, String units, int slope, int tmax, int dmax) {
			try {
				ByteArrayOutputStream baos = new ByteArrayOutputStream();
				DataOutputStream dos = new DataOutputStream(baos);
				dos.writeInt(METRIC_USER_DEFINED_ID);
				writeXDRString(dos, type);
				writeXDRString(dos, name);
				writeXDRString(dos, value);
				writeXDRString(dos, units);
				dos.writeInt(slope);
				dos.writeInt(tmax);
				dos.writeInt(dmax);
				return baos.toByteArray();
			} catch (IOException e) {
				// ByteArrayOutputStream cannot actually throw; unreachable.
				return null;
			}
		}

		/**
		 * Writes an XDR string: 4-byte length, the bytes, zero-padded to a
		 * 4-byte boundary. Assumes ASCII content -- for non-ASCII strings the
		 * char count and writeBytes() byte count would still agree (writeBytes
		 * truncates each char to one byte), but data would be mangled.
		 */
		private static void writeXDRString(DataOutputStream dos, String s) throws IOException {
			dos.writeInt(s.length());
			dos.writeBytes(s);
			int offset = s.length() % 4;
			if (offset != 0) {
				for (int i = offset; i < 4; ++i) {
					dos.writeByte(0);
				}
			}
		}

		/** Manual smoke test: sends an increasing value to args[0] once a minute. */
		public static void main(String args[]) throws Exception {
			InetAddress remote = InetAddress.getByName(args[0]);
			int port = 8649;
			int iter = 0;
			while (true) {
				send(remote, port, "test47", 9.99 + iter++, "req/sec", SLOPE_BOTH, 100, 100);
				System.out.println("updated " + iter);
				Thread.sleep(60 * 1000);
			}
		}
	}

	/** Manual smoke test against a collector host named "jermaine". */
	public static void main(String[] args) throws InterruptedException {
		GangliaStat stat = new GangliaStat("jermaine", DEFAULT_PORT, 10, 0);
		stat.addMetric(new StatReporter("Test", "req/s") {
			double iter = 47;

			@Override
			public double getValue() {
				iter += 1.5;
				return iter;
			}
		});
		stat.addMetric(new StatReporter(G25Metric.METRIC_NET_DOWNLOAD) {
			double iter = 1000;

			@Override
			public double getValue() {
				iter += 1.5 * 1000;
				return iter;
			}
		});
		stat.addMetric(new StatReporter(G25Metric.METRIC_NET_UPLOAD) {
			double iter = 1000;

			@Override
			public double getValue() {
				iter += 1.5 * 1000;
				return iter;
			}
		});
		Thread.sleep(1000000);
	}
}
| 8,396 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
NoSuchUserException.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/shared/NoSuchUserException.java | package edu.washington.cs.oneswarm.community2.shared;
/**
 * Thrown when a lookup by public key finds no matching community account.
 */
public class NoSuchUserException extends Exception {

	// Serializable subclass: pin the serial form.
	private static final long serialVersionUID = 1L;

	// The base64-encoded public key that failed to resolve; null when the
	// no-arg constructor was used.
	private String key;

	public NoSuchUserException(String base64PublicKey) {
		super("No such user: " + base64PublicKey);
		key = base64PublicKey;
	}

	public NoSuchUserException() {}

	/**
	 * @return the base64 public key that was not found, or null if unknown.
	 *         (Previously the field was stored but never readable.)
	 */
	public String getKey() {
		return key;
	}
}
| 295 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
CommunityAccountLite.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/shared/CommunityAccountLite.java | package edu.washington.cs.oneswarm.community2.shared;
/**
* Just like CommunityAccount in the server package, except doesn't implement Principal (since GWT can't deal with it)
*/
public class CommunityAccountLite {
private String username;
private String pw_hash;
private String [] roles;
private int registrations;
private long uid;
public int getRegistrations() {
return registrations;
}
public void setRegistrations(int registrations) {
this.registrations = registrations;
}
public int getMaxRegistrations() {
return max_registrations;
}
public void setMaxRegistrations(int max_registrations) {
this.max_registrations = max_registrations;
}
private int max_registrations;
public CommunityAccountLite( String username, String pw_hash, String [] roles, int registrations, int max_registrations, long uid ) {
this.username = username;
this.pw_hash = pw_hash;
this.roles = roles;
this.registrations = registrations;
this.max_registrations = max_registrations;
this.uid = uid;
}
public CommunityAccountLite() {} // for serialization support
public long getID() {
return uid;
}
public String getName() {
return username;
}
public String getHash() {
return pw_hash;
}
public String [] getRoles() {
return roles;
}
public int hashCode() {
return username.hashCode();
}
public boolean equals( Object rhs ) {
if( rhs instanceof CommunityAccountLite ) {
return ((CommunityAccountLite)rhs).username.equals(username);
}
return false;
}
public String toString() {
return "Name: " + getName() + " Roles: " + getRoles().length;
}
}
| 1,625 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
DuplicateAccountException.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/shared/DuplicateAccountException.java | package edu.washington.cs.oneswarm.community2.shared;
public class DuplicateAccountException extends Exception {
public String toString() {
return "Duplicate account name";
}
}
| 183 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
KeyRegistrationRecord.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/shared/KeyRegistrationRecord.java | package edu.washington.cs.oneswarm.community2.shared;
import java.util.Date;
/**
 * A single public-key registration: who registered, when, from where, and
 * when the registration was last refreshed.
 */
public class KeyRegistrationRecord implements Comparable<KeyRegistrationRecord> {

	String nickname;
	String base64key;
	String registrationIP;
	long createdByID;
	Date registeredDate;
	Date lastRefreshedDate;
	// Database row id; null for comparison-only / deserialized instances.
	private Long mID;

	/**
	 * This is a convenience constructor that's only used for comparison (e.g., in the binary search of the peer list) when
	 * we only have a public key available and we need to construct a dummy FriendRecord.
	 *
	 * In other circumstances, this should be created from a database request and the resulting ResultSet (or from the full constructor)
	 */
	public KeyRegistrationRecord(String inBase64Key) {
		base64key = inBase64Key;
	}

	public void setLastRefresh(Date inRefreshTime) {
		lastRefreshedDate = inRefreshTime;
	}

	/** No-arg constructor for serializability. */
	public KeyRegistrationRecord() {}

	public KeyRegistrationRecord(String base64Key, String nick, Date registeredDate, Date lastRefreshedDate, String registrationIP, long createdByID, long id) {
		this.base64key = base64Key;
		this.nickname = nick;
		this.mID = id;
		this.registeredDate = registeredDate;
		this.lastRefreshedDate = lastRefreshedDate;
		this.registrationIP = registrationIP;
		this.createdByID = createdByID;
		// Synthesize a stable placeholder nickname when none was supplied.
		// (Previously two separate branches with identical bodies.)
		if (this.nickname == null || this.nickname.length() == 0) {
			nickname = "Unspecified-" + base64key.hashCode();
		}
	}

	public String getNickname() {
		return nickname;
	}

	public String getBase64PublicKey() {
		return base64key;
	}

	public String getRegistrationIP() {
		return registrationIP;
	}

	public long getCreatedByID() {
		return createdByID;
	}

	public Date getRegisteredDate() {
		return registeredDate;
	}

	public Date getLastRefreshedDate() {
		return lastRefreshedDate;
	}

	/**
	 * Orders records by the hash code of their base64 key.
	 * NOTE(review): this is inconsistent with equals() -- two distinct keys
	 * whose hash codes collide compare as 0 yet are not equal, which can
	 * confuse sorted containers / binary search. Confirm callers tolerate this.
	 */
	public int compareTo(KeyRegistrationRecord o) {
		int thisVal = this.base64key.hashCode();
		int anotherVal = o.base64key.hashCode();
		return (thisVal < anotherVal ? -1 : (thisVal == anotherVal ? 0 : 1));
	}

	// Identity is determined solely by the key string.
	public boolean equals(Object rhs) {
		if (rhs instanceof KeyRegistrationRecord) {
			return base64key.equals(((KeyRegistrationRecord) rhs).base64key);
		}
		return false;
	}

	public int hashCode() {
		return base64key.hashCode();
	}

	public String toString() {
		return nickname + " " + base64key.hashCode() + " / " + base64key;
	}

	public Long getID() {
		return mID;
	}
}
| 2,464 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
CommunityConstants.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/shared/CommunityConstants.java | package edu.washington.cs.oneswarm.community2.shared;
/**
 * Wire-protocol and form-field constants shared between the community server
 * and its clients. Pure constant holder -- never instantiated.
 */
public final class CommunityConstants {

	// Constants class: prevent instantiation.
	private CommunityConstants() {}

	public static final String VERSION = "0.7pre";

	/**
	 * Cookies
	 */
	public static final String ADMIN_SESSION_COOKIE = "community_session";

	/**
	 * Form field names
	 */
	public static final String BASE64_PUBLIC_KEY = "base64key";
	public static final String NICKNAME = "nick";
	public static final String CHALLENGE_RESPONSE = "resp";

	/**
	 * Form response bodies
	 */
	public static final String REGISTRATION_SUCCESS = "REGISTRATION_OK";
	public static final String REGISTRATION_DUPLICATE = "REGISTRATION_DUPLICATE";
	public static final String REGISTRATION_RATE_LIMITED = "REGISTRATION_RATE_LIMITED";
	public static final String CHALLENGE = "CHALLENGE";

	/**
	 * XML elements and attributes
	 */
	public static final String RESPONSE_ROOT = "CommunityServerResponse";
	public static final String REFRESH_INTERVAL = "RefreshInterval";
	public static final String FRIEND_LIST = "FriendList";
	public static final String FRIEND = "Friend";
	public static final String KEY_ATTRIB = "Base64Key";
	public static final String NICK_ATTRIB = "nick";

	public static final int MAX_NICK_LENGTH = 128;
}
| 1,202 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
PreviewImageServlet.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/PreviewImageServlet.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.IOException;
import java.util.logging.Logger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
 * Serves the stored preview PNG of a published swarm, selected by the "id"
 * request parameter. Returns 401 when preview display is disabled, 400 on a
 * bad/unknown id, 500 on I/O failure.
 */
public class PreviewImageServlet extends javax.servlet.http.HttpServlet {

	private static final long serialVersionUID = 1L;

	private static Logger logger = Logger.getLogger(PreviewImageServlet.class.getName());

	public PreviewImageServlet() {
		// Touch the DAO singleton so DB initialization happens at load time.
		CommunityDAO.get();
		logger.info("Started PreviewImageServlet");
	}

	public void doGet(HttpServletRequest request, HttpServletResponse response) {
		try {
			// BUG FIX: System.getProperty() returns null when the setting was
			// never installed; the old code called .equals() on that null and
			// threw NPE. Constant-first equals is null-safe. Also moved the
			// check before the database fetch so disabled previews cost nothing.
			if (Boolean.TRUE.toString().equals(
					System.getProperty(EmbeddedServer.Setting.DONT_DISPLAY_PREVIEWS.getKey()))) {
				response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
				return;
			}

			// Throws NumberFormatException (caught below) on missing/bad id.
			long id = Long.parseLong(request.getParameter("id"));
			PublishedSwarmDetails swarmDetails = CommunityDAO.get().getSwarmDetails(id);

			if (swarmDetails == null) {
				logger.warning("Swarm details are null for id: " + id);
				response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
				return;
			}

			response.setContentType("image/png");
			response.getOutputStream().write(swarmDetails.getPreviewPNG());
		} catch (NumberFormatException e) {
			logger.warning("Problem during preview generation: " + e.toString());
			e.printStackTrace();
			response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
		} catch (IOException e) {
			logger.warning("Problem during preview generation: " + e.toString());
			e.printStackTrace();
			response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
		}
	}
}
| 1,654 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
UpdatePreviewServlet.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/UpdatePreviewServlet.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.util.List;
import java.util.logging.Logger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.fileupload.FileItem;
import org.apache.commons.fileupload.FileItemFactory;
import org.apache.commons.fileupload.FileUploadException;
import org.apache.commons.fileupload.disk.DiskFileItemFactory;
import org.apache.commons.fileupload.servlet.ServletFileUpload;
/**
 * Accepts a multipart POST that replaces (or clears) the preview image of a
 * published swarm. Restricted to moderators and admins.
 */
public class UpdatePreviewServlet extends javax.servlet.http.HttpServlet {

	private static final long serialVersionUID = 1L;

	private static Logger logger = Logger.getLogger(UpdatePreviewServlet.class.getName());

	long loadTime = 0;

	public UpdatePreviewServlet() {
		// Touch the DAO singleton so DB initialization happens at load time.
		CommunityDAO.get();
		logger.info("Preview update servlet created.");
	}

	public void doPost(HttpServletRequest request, HttpServletResponse response) {
		logger.finest("got post: " + request.toString());

		// only moderators / admins can update previews.
		if (request.getUserPrincipal() == null) {
			response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
			return;
		}

		CommunityAccount user = CommunityDAO.get().getAccountForName(request.getUserPrincipal().getName());
		boolean canModerate = user.canModerate();
		boolean isAdmin = user.isAdmin();

		if (canModerate == false && isAdmin == false) {
			response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
			return;
		}

		if (ServletFileUpload.isMultipartContent(request) == false) {
			logger.warning("Got a POST to the preview update servlet that is not multi-part, dropping.");
			response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
			return;
		}

		FileItemFactory factory = new DiskFileItemFactory();
		ServletFileUpload upload = new ServletFileUpload(factory);
		upload.setFileSizeMax(5 * 1048576);

		PrintStream out = null;
		try {
			List<FileItem> items = upload.parseRequest(request);
			out = new PrintStream(response.getOutputStream());

			long id = -1L;
			byte[] previewpng = null;

			for (FileItem f : items) {
				logger.info("field name: " + f.getFieldName() + " name: " + f.getName() + " " + f.getSize());
				if (f.getFieldName().equals("id")) {
					id = Long.parseLong(f.getString());
				} else if (f.getFieldName().equals("previewpng")) {
					// BUG FIX: the old code sized a buffer with
					// InputStream.available() and issued a single read(), which
					// can truncate the upload. FileItem.get() returns the
					// complete contents.
					previewpng = f.get();
				} else {
					throw new IOException("Unrecognized field name: " + f.getFieldName());
				}
			}

			if (id < 0) {
				throw new IOException("Missing parameter: id");
			}

			// A missing or empty image clears the stored preview.
			// (BUG FIX: a missing "previewpng" field used to NPE here.)
			if (previewpng != null && previewpng.length > 0) {
				CommunityDAO.get().update_preview(id, previewpng);
			} else {
				CommunityDAO.get().update_preview(id, null);
			}
			response.sendRedirect("/details.jsp?id=" + id);
		} catch (FileUploadException e) {
			logger.warning(e.toString());
			e.printStackTrace();
			response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
			// BUG FIX: parseRequest() can throw before 'out' is assigned; the
			// old catch blocks dereferenced a possibly-null 'out'.
			if (out != null) {
				out.println("Bad request -- " + e.toString());
			}
		} catch (IOException e) {
			logger.warning(e.toString());
			e.printStackTrace();
			response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
			if (out != null) {
				out.println("Bad request -- " + e.toString());
			}
		} finally {
			if (out != null) {
				try {
					out.flush();
					out.close();
				} catch (Exception e) {}
			}
		}
	}
}
| 3,552 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
PublishedSwarmDetails.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/PublishedSwarmDetails.java | package edu.washington.cs.oneswarm.community2.server;
/**
 * Immutable snapshot of a published swarm's metadata: description, download
 * counter, language, vote tallies, and (optionally) a preview image.
 */
public class PublishedSwarmDetails {

	long swarmID;
	String description;
	int downloads;
	String language;
	int upvotes, downvotes;
	byte[] previewPNG;

	public PublishedSwarmDetails(long swarmID, String description,
			int downloads, String language,
			int upvotes, int downvotes, byte[] previewPNG) {
		this.swarmID = swarmID;
		this.description = description;
		this.downloads = downloads;
		this.language = language;
		this.upvotes = upvotes;
		// BUG FIX: was "this.downloads = downvotes;", which clobbered the
		// downloads count and left downvotes permanently zero.
		this.downvotes = downvotes;
		this.previewPNG = previewPNG;
	}

	public long getSwarmID() {
		return swarmID;
	}

	public String getDescription() {
		return description;
	}

	public int getDownloads() {
		return downloads;
	}

	public String getLanguage() {
		return language;
	}

	public int getUpvotes() {
		return upvotes;
	}

	public int getDownvotes() {
		return downvotes;
	}

	/** @return the stored preview image bytes, or null when none was published. */
	public byte[] getPreviewPNG() {
		return previewPNG;
	}
}
| 968 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
Comment.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/Comment.java | package edu.washington.cs.oneswarm.community2.server;
/**
 * Immutable record of a single user comment on a swarm, including its vote
 * tallies and the IP it was posted from.
 */
public class Comment {

	private final long swarmID;
	private final long commentID;
	private final String accountName;
	private final long timestamp;
	private final long replyTo;
	private final int upvote;
	private final int downvote;
	private final String ip;
	private final String comment;

	public Comment(long swarmID, long commentID, String accountName, long timestamp,
			long replyTo, int upvote, int downvote, String ip, String comment) {
		this.swarmID = swarmID;
		this.commentID = commentID;
		this.accountName = accountName;
		this.timestamp = timestamp;
		this.replyTo = replyTo;
		this.upvote = upvote;
		this.downvote = downvote;
		this.ip = ip;
		this.comment = comment;
	}

	public long getSwarmID() {
		return swarmID;
	}

	public long getCommentID() {
		return commentID;
	}

	public String getAccountName() {
		return accountName;
	}

	public long getTimestamp() {
		return timestamp;
	}

	public long getReplyTo() {
		return replyTo;
	}

	public int getUpvote() {
		return upvote;
	}

	public int getDownvote() {
		return downvote;
	}

	public String getIp() {
		return ip;
	}

	public String getComment() {
		return comment;
	}
}
| 1,092 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
DuplicateRegistrationException.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/DuplicateRegistrationException.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.IOException;
/**
 * Thrown when a key registration is attempted for a key that is already
 * registered.
 */
public class DuplicateRegistrationException extends IOException {

	// Serializable subclass: pin the serial form.
	private static final long serialVersionUID = 1L;

	/**
	 * @param dup the already-registered identifier (included in the message)
	 */
	public DuplicateRegistrationException(String dup) {
		super("Duplicate registration: " + dup);
	}
}
| 251 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
SwarmPublishServlet.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/SwarmPublishServlet.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.logging.Logger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.fileupload.FileItem;
import org.apache.commons.fileupload.FileItemFactory;
import org.apache.commons.fileupload.FileUploadException;
import org.apache.commons.fileupload.disk.DiskFileItemFactory;
import org.apache.commons.fileupload.servlet.ServletFileUpload;
import com.mysql.jdbc.exceptions.MySQLIntegrityConstraintViolationException;
/**
 * Accepts a multipart POST that publishes a new swarm: torrent bytes, an
 * optional preview PNG, a description, and an optional (validated) category.
 */
public class SwarmPublishServlet extends javax.servlet.http.HttpServlet {

	private static final long serialVersionUID = 1L;

	private static Logger logger = Logger.getLogger(SwarmPublishServlet.class.getName());

	long loadTime = 0;

	public SwarmPublishServlet() {
		// Touch the DAO singleton so DB initialization happens at load time.
		CommunityDAO.get();
		logger.info("Swarm publishing servlet created.");
	}

	public void doPost(HttpServletRequest request, HttpServletResponse response) {
		logger.finest("got post: " + request.toString());

		// BUG FIX: System.getProperty() returns null when a setting was never
		// installed; the old code called .equals() on that null and threw NPE.
		// Constant-first equals preserves the non-null behavior exactly.
		if (request.getUserPrincipal() == null &&
				Boolean.TRUE.toString().equals(
						System.getProperty(EmbeddedServer.StartupSetting.REQUIRE_AUTH_FOR_PUBLISH.getKey()))) {
			response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
			return;
		}

		// When user publishing is explicitly disabled, only moderators/admins
		// may publish. (Same null-safe comparison fix as above.)
		if (Boolean.FALSE.toString().equals(
				System.getProperty(EmbeddedServer.Setting.ALLOW_USER_PUBLISHING.getKey()))) {

			boolean canModerate = false, isAdmin = false;
			if (request.getUserPrincipal() != null) {
				CommunityAccount user = CommunityDAO.get().getAccountForName(request.getUserPrincipal().getName());
				canModerate = user.canModerate();
				isAdmin = user.isAdmin();
			}

			if (canModerate == false && isAdmin == false) {
				response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
				return;
			}
		}

		if (ServletFileUpload.isMultipartContent(request) == false) {
			logger.warning("Got a POST to the publish servlet that is not multi-part, dropping.");
			response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
			return;
		}

		FileItemFactory factory = new DiskFileItemFactory();
		ServletFileUpload upload = new ServletFileUpload(factory);
		upload.setFileSizeMax(5 * 1048576);

		try {
			List<FileItem> items = upload.parseRequest(request);

			String description = null;
			byte[] torrentbin = null;
			byte[] previewpng = null;
			String category = null;

			for (FileItem f : items) {
				logger.info("field name: " + f.getFieldName() + " name: " + f.getName() + " " + f.getSize());
				if (f.getFieldName().equals("commentstr")) {
					description = f.getString();
				} else if (f.getFieldName().equals("torrentbin")) {
					// BUG FIX: the old code sized a buffer with
					// InputStream.available() and issued a single read(), which
					// can truncate the upload. FileItem.get() returns the
					// complete contents.
					torrentbin = f.get();
				} else if (f.getFieldName().equals("previewpng")) {
					previewpng = f.get();
				} else if (f.getFieldName().equals("categorystr")) {
					category = f.getString();
					// need to validate this against our list of keywords. client may not respect.
					if (CommunityDAO.get().getCategories().contains(category) == false) {
						logger.warning("Client offered bad category keyword: " + category + " / " + request.getRemoteAddr());
						category = null;
					}
				} else {
					throw new IOException("Unrecognized field name: " + f.getFieldName());
				}
			}

			// Attribute the publish to the logged-in account unless the server
			// is configured not to retain account info.
			boolean shouldAttribute = true;
			try {
				shouldAttribute = Boolean.parseBoolean(System.getProperty(EmbeddedServer.Setting.RETAIN_ACCOUNT_INFO.getKey()));
			} catch (Exception e) {
				e.printStackTrace();
			}

			CommunityDAO.get().publish_swarm(torrentbin, previewpng, description, category,
					shouldAttribute ? (CommunityAccount) request.getUserPrincipal() : null,
					request.getRemoteAddr());
		} catch (DuplicateSwarmRegistrationException e) {
			logger.warning(e.toString());
			response.setStatus(HttpServletResponse.SC_CONFLICT);
		} catch (FileUploadException e) {
			logger.warning(e.toString());
			e.printStackTrace();
			response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
		} catch (IOException e) {
			logger.warning(e.toString());
			e.printStackTrace();
			response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
		}
	}
}
| 4,542 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
TooManyRegistrationsException.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/TooManyRegistrationsException.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.IOException;
/**
 * Thrown when an account exceeds its allowed number of key registrations.
 */
public class TooManyRegistrationsException extends IOException {

	// Serializable subclass: pin the serial form.
	private static final long serialVersionUID = 1L;

	// Registration count that triggered the failure; 0 when unknown
	// (the no-arg constructor was used).
	private int howmany;

	public TooManyRegistrationsException(int howmany) {
		super("Too many registrations: " + howmany);
		this.howmany = howmany;
	}

	public TooManyRegistrationsException() {
		super("Too many registrations");
	}

	/**
	 * @return the offending registration count, or 0 if it was not recorded.
	 *         (Previously the field was stored but never readable.)
	 */
	public int getHowMany() {
		return howmany;
	}
}
| 387 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
KeyRegistrationServlet.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/KeyRegistrationServlet.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.BufferedOutputStream;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.StringWriter;
import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import java.security.InvalidKeyException;
import java.security.KeyFactory;
import java.security.NoSuchAlgorithmException;
import java.security.PublicKey;
import java.security.Signature;
import java.security.SignatureException;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.X509EncodedKeySpec;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.zip.GZIPOutputStream;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import com.sun.org.apache.xerces.internal.impl.dv.util.Base64;
import edu.washington.cs.oneswarm.community2.shared.CommunityConstants;
import edu.washington.cs.oneswarm.community2.shared.KeyRegistrationRecord;
import edu.washington.cs.oneswarm.community2.utils.ByteManip;
public class KeyRegistrationServlet extends javax.servlet.http.HttpServlet {
private static final long serialVersionUID = 1L;
private static Logger logger = Logger.getLogger(KeyRegistrationServlet.class.getName());
/**
* We use this to rate-limit posts to limit DoS. A single IP can only
* register a new user once every MIN_REGISTRATION_INTERVAL_MS (assuming we
* don't overflow the recentPosts table during that time)
*/
private static final long MIN_REGISTRATION_INTERVAL_MS = 10 * 1000;
static Map<String, Long> recentPosts = Collections.synchronizedMap(new LinkedHashMap<String, Long>() {
protected boolean removeEldestEntry(Map.Entry<String, Long> ent) {
return size() > 1000;
}
});
/**
* This stores the challenges issued to recent connections and is soft
* state.
*/
static Map<String, Long> recentChallenges = Collections.synchronizedMap(new LinkedHashMap<String, Long>() {
protected boolean removeEldestEntry(Map.Entry<String, Long> ent) {
return size() > 1000;
}
});
	/**
	 * Touches the DAO singleton so database initialization happens at servlet
	 * load time rather than on the first registration request.
	 */
	public KeyRegistrationServlet() {
		CommunityDAO.get();
		logger.info("Key registration servlet created.");
	}
public void doGet(HttpServletRequest request, HttpServletResponse response) {
logger.finest("get request: " + request.toString());
/**
* Sanity checking -- does this request have a key?
*/
if (request.getParameter(CommunityConstants.BASE64_PUBLIC_KEY) == null) {
logger.warning("GET request with no key from: " + request.getRemoteAddr());
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
return;
}
/**
* First the easy cases -- do we even _know_ about this key?
*/
if (CommunityDAO.get().isRegistered(request.getParameter(CommunityConstants.BASE64_PUBLIC_KEY)) == false) {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
logger.warning("Get request and we don't know about the key " + request.getRemoteAddr());
return;
}
if (request.getParameter(CommunityConstants.CHALLENGE_RESPONSE) != null) {
logger.finer("Get request with challenge response, checking...");
processChallengeResponse(request, response);
} else {
logger.finer("Get request without challenge, sending challenge...");
sendChallenge(request, response);
}
}
private void sendChallenge(HttpServletRequest request, HttpServletResponse response) {
try {
PrintStream out = new PrintStream(new BufferedOutputStream(response.getOutputStream()));
long challenge = (long) ((Long.MAX_VALUE - 1) * Math.random());
recentChallenges.put(request.getParameter(CommunityConstants.BASE64_PUBLIC_KEY), challenge);
out.println(CommunityConstants.CHALLENGE + " " + challenge);
logger.finer("Issued challenge -- " + challenge + " -- to " + request.getRemoteAddr());
out.flush();
out.close();
} catch (IOException e) {
e.printStackTrace();
logger.warning(e.toString());
}
}
	/**
	 * Verifies a client's response to a previously issued challenge. The
	 * client must sign (challenge + 1) with the RSA private key matching its
	 * registered public key (SHA1withRSA). On success the peer list is
	 * generated and sent; every failure path sets an HTTP error status.
	 */
	private void processChallengeResponse(HttpServletRequest request, HttpServletResponse response) {
		try {
			String base64_key = request.getParameter(CommunityConstants.BASE64_PUBLIC_KEY);
			String base64_response = request.getParameter(CommunityConstants.CHALLENGE_RESPONSE);

			/**
			 * First the easy cases -- do we even _know_ about this key? If not,
			 * no need to do crypto.
			 *
			 * This may also happen if we 1) just pruned the database, 2) and this user was in the middle of a refresh
			 * (although this is pretty unlikely)
			 */
			if (CommunityDAO.get().isRegistered(base64_key) == false) {
				response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
				logger.finer("Challenge request with unknown key, dropping (unauthorized) " + request.getRemoteAddr());
				return;
			}

			// A response is only valid while the issued challenge is still in
			// the bounded recentChallenges map (it evicts oldest-first).
			if (recentChallenges.containsKey(base64_key) == false) {
				response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
				logger.finer("Challenge request without recent issue, dropping (unauthorized) " + request.getRemoteAddr());
				return;
			}

			long originalChallenge = recentChallenges.get(base64_key);
			// NOTE(review): the challenge is not removed after use, so a
			// captured signature could be replayed until the entry is evicted
			// -- confirm whether single-use challenges are intended here.

			byte[] key_bytes = Base64.decode(base64_key);
			if (key_bytes == null) {
				logger.warning("Couldn't decode key bytes from " + request.getRemoteAddr() + " / " + base64_key);
				response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
				return;
			}
			byte[] response_bytes = Base64.decode(base64_response);
			if (response_bytes == null) {
				logger.warning("Couldn't decode challenge response from " + request.getRemoteAddr() + " / " + base64_response);
				response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
				return;
			}

			// Rebuild the RSA public key from its X.509 encoding.
			X509EncodedKeySpec pubKeySpec = new X509EncodedKeySpec(key_bytes);
			KeyFactory factory = KeyFactory.getInstance("RSA");
			PublicKey pub = null;
			try {
				pub = factory.generatePublic(pubKeySpec);
			} catch (InvalidKeySpecException e) {
				logger.warning("Couldn't decode valid public key from " + request.getRemoteAddr() + " / " + base64_response);
				response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
				return;
			}

			// The signed plaintext is challenge+1, so echoing the challenge
			// itself is never a valid response.
			Signature sig = Signature.getInstance("SHA1withRSA");
			sig.initVerify(pub);
			sig.update(ByteManip.ltob(originalChallenge + 1));
			if (sig.verify(response_bytes)) {
				logger.fine("Signature verified, generating response " + request.getRemoteAddr());
				generateAndSendKeyList(request, response);
			} else {
				logger.warning("Key failed challenge/response. Expected: " + (originalChallenge + 1) + " received signature didn't verify from " + request.getRemoteAddr());
				response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
				return;
			}
		} catch (NoSuchAlgorithmException e) {
			e.printStackTrace();
			logger.severe(e.toString());
		} catch (InvalidKeyException e) {
			System.err.println(e);
			e.printStackTrace();
		} catch (SignatureException e) {
			System.err.println(e);
			e.printStackTrace();
		}
	}
	/**
	 * Builds and sends the XML peer-list response for an authenticated key
	 * request: the client refresh interval plus the set of "nearest"
	 * registered peers. Output is gzip-compressed when the client advertises
	 * support via Accept-Encoding.
	 */
	private void generateAndSendKeyList(HttpServletRequest request, HttpServletResponse response) {
		List<KeyRegistrationRecord> nearest = CommunityDAO.get().getPeers(request.getParameter(CommunityConstants.BASE64_PUBLIC_KEY));
		if (nearest == null) {
			response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
			logger.warning("Got null set of out keys");
			return;
		}
		logger.fine("Got " + nearest.size() + " nearest peers for request " + request.getRemoteAddr());
		/**
		 * If this is a request for an authenticated server, add the account names
		 * to the returned nicknames. But, don't do this to the FriendRecord since
		 * that is persistent.
		 */
		Map<Long, CommunityAccount> id_to_rec = new HashMap<Long, CommunityAccount>();
		if( request.getUserPrincipal() != null ) {
			for( CommunityAccount rec : CommunityDAO.get().getAccounts() ) {
				id_to_rec.put(rec.getID(), rec);
			}
		}
		// Only gzip when the client's Accept-Encoding advertises it.
		String encoding = request.getHeader("Accept-Encoding");
		boolean supportsGzip = false;
		if (encoding != null) {
			if (encoding.toLowerCase().indexOf("gzip") > -1) {
				supportsGzip = true;
				logger.finer("Client accepts gzip: " + request.getRemoteAddr());
			}
		}
		OutputStream responseOut = null;
		try {
			if (supportsGzip == true) {
				response.setHeader("Content-Encoding", "gzip");
				responseOut = new GZIPOutputStream(response.getOutputStream());
			} else {
				responseOut = response.getOutputStream();
			}
		} catch (IOException e) {
			e.printStackTrace();
			logger.warning(e.toString());
			response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
			return;
		}
		try {
			// Build the response DOM; element/attribute names come from
			// CommunityConstants.
			DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
			DocumentBuilder db = dbf.newDocumentBuilder();
			Document doc = db.newDocument();
			Element root = doc.createElement(CommunityConstants.RESPONSE_ROOT);
			doc.appendChild(root);
			Element refreshTime = doc.createElement(CommunityConstants.REFRESH_INTERVAL);
			// Configured refresh interval is in minutes; clients expect seconds.
			refreshTime.setTextContent(Integer.toString(Integer.parseInt(System.getProperty(EmbeddedServer.Setting.REFRESH_INTERVAL.getKey())) * 60));
			root.appendChild(refreshTime);
			Element friendList = doc.createElement(CommunityConstants.FRIEND_LIST);
			root.appendChild(friendList);
			for (KeyRegistrationRecord rec : nearest) {
				Element friend = doc.createElement(CommunityConstants.FRIEND);
				friend.setAttribute(CommunityConstants.KEY_ATTRIB, rec.getBase64PublicKey());
				/**
				 * Possible that it might not if a user was deleted after we obtain the nearest peers
				 * but before we obtain the account list
				 */
				String acct = "";
				if( id_to_rec.containsKey(rec.getCreatedByID()) &&
					System.getProperty(EmbeddedServer.Setting.INCLUDE_USERNAME_WITH_NICKNAME.getKey()).equals(Boolean.TRUE.toString()) ) {
					acct += " (" + id_to_rec.get(rec.getCreatedByID()).getName() + ")";
				}
				friend.setAttribute(CommunityConstants.NICK_ATTRIB, rec.getNickname() + acct);
				friendList.appendChild(friend);
			}
			// Serialize the DOM to a string, then write it out UTF-8 encoded.
			TransformerFactory transFactory = TransformerFactory.newInstance();
			Transformer trans = transFactory.newTransformer();
			trans.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "no");
			trans.setOutputProperty(OutputKeys.INDENT, "yes");
			StringWriter sw = new StringWriter();
			StreamResult result = new StreamResult(sw);
			DOMSource src = new DOMSource(doc);
			trans.transform(src, result);
			responseOut.write(sw.toString().getBytes("UTF8"));
			responseOut.flush();
			logger.finest(sw.toString());
			logger.finer("XML write done. finished " + request.getRemoteAddr());
		} catch (IOException e) {
			logger.warning(e.toString());
			response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
			return;
		} catch (ParserConfigurationException e) {
			logger.warning(e.toString());
			response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
			return;
		} catch (TransformerException e) {
			logger.warning(e.toString());
			response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
			return;
		} finally {
			// Always close (also finishes the gzip stream when in use).
			if( responseOut != null ) {
				try { responseOut.close(); } catch( IOException e ) {}
			}
		}
	}
	/**
	 * Handles key-registration POSTs. Applies a simple per-IP rate limit
	 * (MIN_REGISTRATION_INTERVAL_MS between posts) unless the request comes
	 * from localhost or flooding is explicitly allowed, then registers the
	 * submitted key/nickname pair.
	 */
	public void doPost(HttpServletRequest request, HttpServletResponse response) {
		if( logger.isLoggable(Level.FINEST) ) {
			logger.finest("Got post: " + request.toString());
		}
		try {
			request.setCharacterEncoding("UTF-8");
		} catch (UnsupportedEncodingException e1) {
			e1.printStackTrace();
			logger.warning(e1.toString());
		}
		/**
		 * Check for flooding, except from localhost (used for stress-testing)
		 */
		if( request.getRemoteAddr().equals("127.0.0.1") == false &&
			System.getProperty("allow.flooding").equals(Boolean.FALSE.toString()) ) {
			if (recentPosts.containsKey(request.getRemoteAddr())) {
				Long time = recentPosts.get(request.getRemoteAddr());
				if (time + MIN_REGISTRATION_INTERVAL_MS > System.currentTimeMillis()) {
					response.setStatus(HttpServletResponse.SC_FORBIDDEN);
					logger.warning("Flooding from: " + request.getRemoteAddr() + " (last request: " + (new java.util.Date(time)).toString() + ", now: " + (new java.util.Date()) + ")");
					try {
						response.getOutputStream().write(CommunityConstants.REGISTRATION_RATE_LIMITED.getBytes());
						response.getOutputStream().flush();
						response.getOutputStream().close();
					} catch (IOException e) {
						logger.warning(e.toString());
					}
					// Refresh the timestamp so a continuous flood stays blocked.
					recentPosts.put(request.getRemoteAddr(), System.currentTimeMillis());
					return;
				}
			}
			recentPosts.put(request.getRemoteAddr(), System.currentTimeMillis());
		}
		/**
		 * Actual request processing
		 */
		registerUser(request, response);
	}
	/**
	 * Registers a (public key, nickname) pair submitted via POST. Both
	 * parameters are required; over-long nicknames are truncated to
	 * MAX_NICK_LENGTH. Responds with a status understood by the client:
	 * success, duplicate-registration conflict, or a limit/authorization
	 * error.
	 */
	private void registerUser(HttpServletRequest request, HttpServletResponse response) {
		String key = request.getParameter(CommunityConstants.BASE64_PUBLIC_KEY);
		String nick = request.getParameter(CommunityConstants.NICKNAME);
		String remote_ip = request.getRemoteAddr();
		String username = request.getRemoteUser();
		logger.info("Registration request, username: "+ username + " / nick: " + nick);
		if (key == null) {
			logger.warning("Dropping registration request with null key from " + remote_ip);
			return;
		}
		if (nick == null) {
			logger.warning("Dropping registration request with null nick from " + remote_ip);
			return;
		}
		// Unauthenticated (open) servers attribute registrations to "admin".
		if( username == null) {
			logger.finer("Open server, using admin to register");
			username = "admin";
		}
		if (nick.length() > CommunityConstants.MAX_NICK_LENGTH) {
			logger.warning("Truncating lengthy nick: " + nick);
			nick = nick.substring(0, CommunityConstants.MAX_NICK_LENGTH);
		}
		logger.finer("Registration request: key=" + key + " remote_ip=" + remote_ip);
		try {
			BufferedWriter out = new BufferedWriter(new OutputStreamWriter(response.getOutputStream()));
			try {
				CommunityDAO.get().registerUser(key, nick, remote_ip, username);
				response.setStatus(HttpServletResponse.SC_OK);
				out.append(CommunityConstants.REGISTRATION_SUCCESS);
				logger.finer("Successfully registered " + nick + " from " + request.getRemoteAddr());
			} catch (DuplicateRegistrationException e) {
				logger.finer("Duplicate registration " + nick + " from " + request.getRemoteAddr());
				response.setStatus(HttpServletResponse.SC_CONFLICT);
				out.append(CommunityConstants.REGISTRATION_DUPLICATE);
			} catch( TooManyRegistrationsException e ) {
				logger.finer(e.toString() + " / " + remote_ip);
				response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
				// NOTE(review): the exception text is echoed directly to the
				// client as the response body.
				out.append(e.toString());
			} catch (Exception e) {
				e.printStackTrace();
				logger.warning(e.toString());
				response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
			} finally {
				out.flush();
				out.close();
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}
}
| 15,400 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
CapabilitiesServlet.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/CapabilitiesServlet.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import edu.washington.cs.oneswarm.community2.server.EmbeddedServer.StartupSetting;
/**
 * Serves the server-capabilities XML document that OneSwarm clients fetch to
 * discover supported features: peer exchange and publish paths, splash page,
 * server name, optional RSS feed and plaintext port, and the search-filter
 * keyword list (loaded once at construction from an optional filter file).
 */
public class CapabilitiesServlet extends javax.servlet.http.HttpServlet {
	private static final long serialVersionUID = 1L;
	private static Logger logger = Logger.getLogger(CapabilitiesServlet.class.getName());
	// Search-filter keywords read from the configured filter file at startup,
	// or null when no filter file is configured (or it could not be read).
	String [] filterKeywords = null;
	public CapabilitiesServlet() {
		logger.info("Capabilities servlet started.");
		String filterFileName = System.getProperty(EmbeddedServer.StartupSetting.SEARCH_FILTER_FILE.getKey());
		if( filterFileName == null ) {
			filterKeywords = null;
		} else {
			FileInputStream fis = null;
			try {
				fis = new FileInputStream(filterFileName);
				BufferedReader in = new BufferedReader(new InputStreamReader(fis));
				List<String> scratch = new ArrayList<String>();
				while( true ) {
					String line = in.readLine();
					if( line == null ) {
						break;
					}
					line = line.trim();
					// Every whitespace-separated token on a line is a keyword.
					String [] toks = line.split("\\s+");
					for( String s : toks ) {
						scratch.add(s);
						logger.fine("Filter keyword: " + s);
					}
				}
				filterKeywords = scratch.toArray(new String[0]);
			} catch( IOException e ) {
				logger.warning("Error reading filter keywords: " + e.toString());
				e.printStackTrace();
			} finally {
				if( fis != null ) {
					try {
						fis.close();
					} catch( IOException e ) {}
				}
			}
		}
	}
	/**
	 * Minimal XML escaping for values interpolated into the capabilities
	 * document. BUG FIX: previously the server name and filter keywords were
	 * emitted verbatim, producing malformed XML whenever they contained
	 * &, <, >, or quote characters.
	 */
	private static String xmlEscape(String s) {
		if (s == null) {
			return "";
		}
		return s.replace("&", "&amp;")
				.replace("<", "&lt;")
				.replace(">", "&gt;")
				.replace("\"", "&quot;")
				.replace("'", "&apos;");
	}
	/**
	 * Emits the capabilities XML. Output is assembled with simple println
	 * calls; all dynamic values pass through {@link #xmlEscape(String)}.
	 */
	public void doGet(HttpServletRequest request, HttpServletResponse response) {
		try {
			PrintStream out = new PrintStream(response.getOutputStream());
			out.println("<capabilities>");
			out.println("<peers path=\"community/\"/>");
			// always give publish -- if clients don't have perms, they will see an error (and can fix) vs. the pain
			// of not having publish
			// if( request.getUserPrincipal() != null ) {
				out.println("<publish path=\"publish/\"/>");
			// }
			out.println("<splash path=\"files.jsp\"/>");
			out.println("<id name=\"" + xmlEscape(System.getProperty(EmbeddedServer.Setting.SERVER_NAME.getKey())) + "\"/>");
			if( System.getProperty(EmbeddedServer.Setting.RSS_BASE_URL.getKey()) != null ) {
				out.println("<rss path=\"/rss\"/>");
			}
			if( System.getProperty(EmbeddedServer.StartupSetting.UNENCRYPTED_PORT.getKey()) != null ) {
				int alt_port = Integer.parseInt(System.getProperty(StartupSetting.UNENCRYPTED_PORT.getKey()));
				out.println("<nossl port=\"" + alt_port + "\"/>");
			}
			if( filterKeywords != null ) {
				out.println("<searchfilter>");
				for( String keyword : filterKeywords ) {
					out.println("<keyword>" + xmlEscape(keyword) + "</keyword>");
				}
				out.println("</searchfilter>");
			}
			out.println("</capabilities>");
			out.flush();
			out.close();
		} catch( IOException e ) {
			logger.warning(e.toString());
			e.printStackTrace();
			response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
		}
	}
}
| 3,267 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
DuplicateSwarmRegistrationException.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/DuplicateSwarmRegistrationException.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.IOException;
/**
 * Thrown when a client attempts to publish a swarm whose torrent hash is
 * already registered with this community server. The offending hash string
 * is carried as the exception message.
 */
public class DuplicateSwarmRegistrationException extends IOException {
	private static final long serialVersionUID = 1L;
	public DuplicateSwarmRegistrationException(String torrentHashStr) {
		super(torrentHashStr);
	}
}
| 305 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
CommunityAccount.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/CommunityAccount.java | package edu.washington.cs.oneswarm.community2.server;
import java.security.Principal;
import edu.washington.cs.oneswarm.community2.server.CommunityDAO.UserRole;
/**
 * A community-server user account. Implements {@link Principal} so instances
 * can be returned directly from the authentication realm. Account identity
 * (equals/hashCode) is determined solely by the username.
 */
public class CommunityAccount implements Principal {
	private String username;
	private String pw_hash;
	private String[] roles;
	private int registrations;
	private int max_registrations;
	private long uid;
	public CommunityAccount( String username, String pw_hash, String [] roles, int registrations, int max_registrations, long uid ) {
		this.username = username;
		this.pw_hash = pw_hash;
		this.roles = roles;
		this.registrations = registrations;
		this.max_registrations = max_registrations;
		this.uid = uid;
	}
	/** No-arg constructor for serialization support. */
	public CommunityAccount() {}
	/** Database ID of this account. */
	public long getID() {
		return uid;
	}
	public String getName() {
		return username;
	}
	/** The stored password hash for this account. */
	public String getHash() {
		return pw_hash;
	}
	public String [] getRoles() {
		return roles;
	}
	public void setRoles( String [] roles) {
		this.roles = roles;
	}
	/** Number of keys this account has registered so far. */
	public int getRegistrations() {
		return registrations;
	}
	public void setRegistrations(int registrations) {
		this.registrations = registrations;
	}
	/** Per-account cap on key registrations. */
	public int getMaxRegistrations() {
		return max_registrations;
	}
	public void setMaxRegistrations(int max_registrations) {
		this.max_registrations = max_registrations;
	}
	/** True when any role grants moderation rights (admin or moderator). */
	public boolean canModerate() {
		for( String role : roles ) {
			if( role.equals(UserRole.ADMIN.getTag()) || role.equals(UserRole.MODERATOR.getTag()) ) {
				return true;
			}
		}
		return false;
	}
	/** True when any role is the admin role. */
	public boolean isAdmin() {
		for( String role : roles ) {
			if( role.equals(UserRole.ADMIN.getTag()) ) {
				return true;
			}
		}
		return false;
	}
	public int hashCode() {
		// Identity is username-only, matching equals().
		return username.hashCode();
	}
	public boolean equals( Object rhs ) {
		return rhs instanceof CommunityAccount
				&& ((CommunityAccount) rhs).username.equals(username);
	}
	public String toString() {
		StringBuilder roleText = new StringBuilder();
		for( String r : getRoles() ) {
			roleText.append(r).append(" ");
		}
		return "Name: " + getName() + " Roles: " + roleText.toString() + " ID: " + getID();
	}
}
| 2,172 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
EmbeddedServer.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/EmbeddedServer.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URLEncoder;
import java.security.KeyStore;
import java.security.MessageDigest;
import java.security.Principal;
import java.security.UnrecoverableKeyException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Formatter;
import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.LogManager;
import java.util.logging.Logger;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Handler;
import org.mortbay.jetty.NCSARequestLog;
import org.mortbay.jetty.Request;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.handler.RequestLogHandler;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.security.Constraint;
import org.mortbay.jetty.security.ConstraintMapping;
import org.mortbay.jetty.security.HashUserRealm;
import org.mortbay.jetty.security.SecurityHandler;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.FilterHolder;
import org.mortbay.jetty.servlet.ServletHolder;
import org.mortbay.jetty.webapp.JettyWebXmlConfiguration;
import org.mortbay.jetty.webapp.WebAppContext;
import org.mortbay.jetty.webapp.WebInfConfiguration;
import org.mortbay.jetty.webapp.WebXmlConfiguration;
import org.mortbay.thread.QueuedThreadPool;
import com.sun.org.apache.xerces.internal.impl.dv.util.Base64;
import edu.washington.cs.oneswarm.community2.shared.KeyRegistrationRecord;
import edu.washington.cs.oneswarm.community2.utils.GangliaStat;
import edu.washington.cs.oneswarm.community2.utils.IPFilter;
import edu.washington.cs.oneswarm.community2.utils.IPServletFilter;
import edu.washington.cs.oneswarm.community2.utils.GangliaStat.StatReporter;
public class EmbeddedServer {
public enum StartupSetting {
MAX_THREADS("max.threads", Integer.valueOf(30)),
GANGLIA_HOST("ganglia.host", null),
GANGLIA_PORT("ganglia.port", Integer.valueOf(8649)),
IP_WHITELIST("ip.whitelist", null),
IP_BLACKLIST("ip.blacklist", null),
HOST("host", null),
PORT("port", Integer.valueOf(8080)),
SSL("ssl", null),
KEYSTORE_PASSWORD("keystore.password", null),
UNENCRYPTED_PORT("unencrypted.port", null),
INFRASTRUCTURE_PEERS("infrastructure.peers", null),
REQUIRE_AUTH_FOR_KEY_REGISTRATION("require.auth.for.key.registration", Boolean.FALSE),
REQUIRE_AUTH_FOR_PUBLISH("require.auth.for.publish", Boolean.TRUE),
REQUEST_LOG_DIRECTORY("request.log.directory", null),
REQUEST_LOG_RETAIN_DAYS("request.log.retain.days", Integer.valueOf(7)),
SEARCH_FILTER_FILE("search.filter.file", null),
JDBC_PROPS("jdbc.properties", "/tmp/jdbc.properties");
;
private String key;
private Object defaultValue;
public String getKey() {
return key;
}
public Object getDefaultValue() {
return defaultValue;
}
private StartupSetting(String key, Object defaultValue) {
this.key = key;
this.defaultValue = defaultValue;
}
}
public enum Setting {
REFRESH_INTERVAL("oneswarm.community.refresh.interval", "The interval between client refreshes of friend lists.", new Integer(10)),
MAX_FRIENDS_RETURNED("max.friends.to.return", "The number of friends to return.", new Integer(26)),
KEY_EXPIRATION_SECONDS("user.expiration.seconds", "The interval after which inactive keys expire.", new Integer(86400)),
REQUIRE_SWARM_MODERATION("require.swarm.moderation", "Don't show submitted swarms until reviewed by a moderator.", Boolean.FALSE),
STORE_TORRENTS("store.torrents", "Store magnet links only. Discard piece data, etc.", Boolean.FALSE),
DISCARD_PREVIEWS("discard.previews", "Discard submitted previews.", Boolean.FALSE),
DONT_DISPLAY_PREVIEWS("dont.display.previews", "Store submitted previews, but do not show them to users.", Boolean.FALSE),
RETAIN_ACCOUNT_INFO("retain.account.info", "Log account names when swarms are submitted.", Boolean.TRUE),
DISABLE_COMMENTS("disable.user.comments", "Disallow all comments, even for registered users.", Boolean.FALSE),
KEEP_COMMENT_IPS("keep.comment.ips", "Retain the IP address of users making comments.", Boolean.TRUE),
DISPLAY_COMMENT_IPS_MODERATORS("display.comment.ips.moderators", "Display comment IPs, if saved, to moderators.", Boolean.TRUE),
KEY_REG_LIMIT_IP("key.registration.limit.ip.default", "The default number of keys that can be registered by a single IP.", new Integer(5)),
KEY_REG_LIMIT_ACCOUNT("key.registration.limit.account.default", "The default key registration limit per account.", new Integer(5)),
SWARMS_PER_PAGE("swarms.per.page", "Number of swarms per-page in category results and files.jsp page.", new Integer(30)),
SWARMS_PER_SEARCH("swarms.per.search.result.page", "Number of swarms displayed per-page in search results.", new Integer(30)),
ALLOW_SIGNUPS("allow.signup", "Allow account creation (other than by administrator).", Boolean.TRUE),
REQUIRE_CAPTCHA("signup.requires.captcha", "Require users to complete a CAPTCHA during signup.", Boolean.TRUE),
SERVER_NAME("community.server.name", "The server name shown to users.", "OneSwarm Community Server"),
MOTD("motd", "Message of the day.", null),
ENABLE_RSS("enable.rss.feeds", "Provide RSS feeds.", Boolean.FALSE),
RSS_BASE_URL("rss.base.url", "The base URL to use when generating RSS feeds.", ""),
INCLUDE_USERNAME_WITH_NICKNAME("include.username.with.nickname", "When returning friends, include both nickname and username.", Boolean.FALSE),
ALLOW_USER_PUBLISHING("allow.user.publishing", "Allow users to publish. (If false, only moderators can publish.)", Boolean.TRUE),
ALLOW_FLOODING("allow.flooding", "Allow very rapid requests rates. (Otherwise, request floods are dropped.)", Boolean.FALSE),
;
private String key, help;
private Object defaultValue;
public String getKey() {
return key;
}
public String getHelp() {
return help;
}
public Object getDefaultValue() {
return defaultValue;
}
private Setting(String key, String help, Object defaultValue) {
this.key = key;
this.help = help;
this.defaultValue = defaultValue;
}
};
private static Logger logger = Logger.getLogger(EmbeddedServer.class.getName());
Server mServer = null;
static final class OurHashRealm extends HashUserRealm {
public OurHashRealm() {
super("OneSwarm Community Server");
}
public Principal authenticate(String username, Object credentials, Request request) {
if (credentials instanceof String) {
Principal p = CommunityDAO.get().authenticate(username, (String) credentials);
if( logger.isLoggable(Level.FINER) ) {
logger.finer("OurHashRealm authenticate(), got principal: " + p);
}
return p;
} else {
if( credentials != null ) {
logger.warning("Got authenticate call, but credentials is not a string. Instead; " + credentials.getClass().getCanonicalName() + " (user: " + username + ")" );
} else {
logger.warning("Authenticate called with null credentials for user: " + username);
}
}
return null;
}
public boolean isUserInRole(Principal p, String role) {
if (p instanceof CommunityAccount) {
logger.finer("isUserInRole " + p + " / " + role);
CommunityAccount cast = (CommunityAccount)p;
if( cast.isAdmin() && role.equals("bozo") == false ) {
logger.finest("Admin " + p + " is also " + role + ", returning true.");
return true;
}
if( cast.canModerate() && role.equals("user") ) {
logger.finest("Moderator " + p + " is also user, returning true.");
return true;
}
for( String userRole : cast.getRoles() ) {
if( userRole.equals(role) ) {
logger.finest("User " + p + " in role " + role + ", returning true.");
return true;
}
}
} else {
logger.warning("isUserInRole without a community account. " +
p != null ? (p + " / " + p.getClass().getName()) :
"null" );
}
logger.finest("User " + p + " not in role, returning false.");
return false;
}
}
	/**
	 * Builds the Jetty server: a bounded daemon thread pool, an HTTP or HTTPS
	 * connector (HTTPS when keystorePath is non-null), the key-registration
	 * and publish contexts (each with optional BASIC-auth constraints backed
	 * by the community DAO realm), the JSP web application, IP white/blacklist
	 * filters on every context, and an optional NCSA request log. Call
	 * {@link #start()} to begin serving.
	 */
	public EmbeddedServer(String inHost, int inPort, int inMaxThreads, String keystorePath, final List<IPFilter> whitelist, final List<IPFilter> blacklist) {
		// Force DAO (and database) initialization before accepting requests.
		CommunityDAO.get();
		mServer = new Server();
		QueuedThreadPool threadPool = new QueuedThreadPool();
		threadPool.setMinThreads(2);
		threadPool.setMaxThreads(inMaxThreads);
		threadPool.setName("Jetty embedded server thread pool");
		threadPool.setDaemon(true);
		logger.info("max_threads: " + inMaxThreads);
		mServer.setThreadPool(threadPool);
		Connector connector;
		/**
		 * We'll at least use this SecurityHandler for the administrator
		 * interface -- we may also use it for user authentication (if this is
		 * an authorized-users-only server)
		 */
		// Constraint constraint = new Constraint();
		// constraint.setName(Constraint.__BASIC_AUTH);
		// constraint.setRoles(new String[] { ADMIN_ROLE });
		// constraint.setAuthenticate(true);
		// cm.setConstraint(constraint);
		// cm.setPathSpec(ADMIN_SERVLET_PATH + "/*");
		// constraintMappings.add(cm);
		//
		// cm = new ConstraintMapping();
		// cm.setConstraint(constraint);
		// cm.setPathSpec("/CommunityServerAdmin.html");
		// constraintMappings.add(cm);
		boolean requireAuthentication = true;
		/**
		 * Install the key registration servlet
		 */
		Context registrationContext = new Context(mServer, "/community");
		SecurityHandler secHandler = new SecurityHandler();
		secHandler.setUserRealm(new OurHashRealm());
		/**
		 * We need to support POST messages at /community to retain backwards compatibility
		 * with 0.6.5 clients.
		 */
		registrationContext.setAllowNullPathInfo(true);
		// NOTE(review): this one list is reused for both contexts below, so the
		// publish handler also receives the registration constraint (harmless
		// today since both constraints use identical paths/roles) -- confirm
		// this accumulation is intended.
		List<ConstraintMapping> constraintMappings = new ArrayList<ConstraintMapping>();
		ConstraintMapping cm = null;
		registrationContext.setSecurityHandler(secHandler);
		registrationContext.addServlet(new ServletHolder(new KeyRegistrationServlet()), "/");
		registrationContext.addFilter(new FilterHolder(new IPServletFilter(whitelist, blacklist)), "/*", org.mortbay.jetty.Handler.ALL);
		/**
		 * Authentication constraint for requesting peers
		 */
		if (System.getProperty(StartupSetting.REQUIRE_AUTH_FOR_KEY_REGISTRATION.getKey()).equals(Boolean.TRUE.toString())) {
			logger.info("Authentication required for key registration / requests");
			Constraint constraint = new Constraint();
			constraint.setName(Constraint.__BASIC_AUTH);
			constraint.setRoles(new String[] { "user", "admin", "moderator" });
			constraint.setAuthenticate(true);
			cm = new ConstraintMapping();
			cm.setConstraint(constraint);
			cm.setPathSpec("/*");
			constraintMappings.add(cm);
		}
		secHandler.setConstraintMappings(constraintMappings.toArray(new ConstraintMapping[0]));
		/**
		 * Install the publishing servlet
		 */
		Context publishContext = new Context(mServer, "/publish");
		secHandler = new SecurityHandler();
		secHandler.setUserRealm(new OurHashRealm());
		publishContext.setSecurityHandler(secHandler);
		publishContext.addServlet(new ServletHolder(new SwarmPublishServlet()), "/");
		publishContext.addFilter(new FilterHolder(new IPServletFilter(whitelist, blacklist)), "/*", org.mortbay.jetty.Handler.ALL);
		/**
		 * Authentication constraint for publishing swarms
		 */
		if (System.getProperty(StartupSetting.REQUIRE_AUTH_FOR_PUBLISH.getKey()).equals(Boolean.TRUE.toString())) {
			logger.info("Authentication required for swarm publishing");
			Constraint constraint = new Constraint();
			constraint.setName(Constraint.__BASIC_AUTH);
			constraint.setRoles(new String[] { "user", "admin", "moderator" });
			constraint.setAuthenticate(true);
			cm = new ConstraintMapping();
			cm.setConstraint(constraint);
			cm.setPathSpec("/*");
			constraintMappings.add(cm);
		}
		secHandler.setConstraintMappings(constraintMappings.toArray(new ConstraintMapping[0]));
		/**
		 * Install the JSP interface.
		 */
		WebAppContext app = new WebAppContext();
		app.setContextPath("/");
		app.setWar("./war");
		app.setConfigurationClasses(new String[] { JettyWebXmlConfiguration.class.getName(), WebInfConfiguration.class.getName(), WebXmlConfiguration.class.getName() });
		app.setParentLoaderPriority(true);
		// Disable directory listings and response caching for JSP content.
		app.getInitParams().put("org.mortbay.jetty.servlet.Default.dirAllowed", "false");
		app.getInitParams().put("org.mortbay.jetty.servlet.Default.maxCacheSize", "0");
		app.getInitParams().put("org.mortbay.jetty.servlet.Default.cacheControl", "no-store,no-cache,must-revalidate");
		app.addFilter(new FilterHolder(new IPServletFilter(whitelist, blacklist)), "/*", org.mortbay.jetty.Handler.ALL);
		if (keystorePath != null) {
			logger.info("Using SSL...");
			/**
			 * see: http://docs.codehaus.org/display/JETTY/How+to+configure+SSL
			 */
			connector = new SslSocketConnector();
			final SslSocketConnector sslconnector = (SslSocketConnector) connector;
			sslconnector.setKeystore(keystorePath);
			sslconnector.setPassword(System.getProperty("jetty.ssl.password"));
			sslconnector.setKeyPassword(System.getProperty("jetty.ssl.keypassword"));
			sslconnector.setNeedClientAuth(false);
			sslconnector.setWantClientAuth(false);
			sslconnector.setAcceptors(1);
			sslconnector.setAcceptQueueSize(100);
			logger.fine(new Formatter().format("acceptors: %d acceptQueueSize: %d handshakeTimeout: %d\n",
					sslconnector.getAcceptors(),
					sslconnector.getAcceptQueueSize(),
					sslconnector.getHandshakeTimeout()).toString());
		} else {
			connector = new SelectChannelConnector();
		}
		connector.setMaxIdleTime(3000);
		connector.setLowResourceMaxIdleTime(1000);
		if (inHost != null) {
			connector.setHost(inHost);
			logger.info("host: " + inHost);
		}
		connector.setPort(inPort);
		mServer.addConnector(connector);
		// Optionally wrap the handler chain with an NCSA request log.
		Handler[] handlers = null;
		if (System.getProperty(StartupSetting.REQUEST_LOG_DIRECTORY.getKey()) != null) {
			RequestLogHandler requestLogHandler = new RequestLogHandler();
			NCSARequestLog requestLog = new NCSARequestLog(System.getProperty(StartupSetting.REQUEST_LOG_DIRECTORY.getKey()) + "/communityserver-yyyy_mm_dd.request.log");
			requestLog.setRetainDays(Integer.parseInt(System.getProperty(StartupSetting.REQUEST_LOG_RETAIN_DAYS.getKey())));
			requestLog.setAppend(true);
			requestLog.setExtended(true);
			requestLog.setLogTimeZone("GMT");
			requestLogHandler.setRequestLog(requestLog);
			handlers = new Handler[] { registrationContext, publishContext, app, requestLogHandler };
		} else {
			handlers = new Handler[] { registrationContext, publishContext, app };
		}
		mServer.setHandlers(handlers);
		// if( System.getProperty("mxbeans") != null ) {
		// System.clearProperty("mxbeans");
		// MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
		// MBeanContainer mBeanContainer = new MBeanContainer(mBeanServer);
		// mServer.getContainer().addEventListener(mBeanContainer);
		// mBeanContainer.start();
		// }
		logger.info("port: " + inPort);
	}
	/**
	 * Starts the embedded Jetty server. Failures are logged rather than
	 * rethrown; an UnrecoverableKeyException almost always means the SSL
	 * keystore password properties were not supplied.
	 */
	public void start() {
		try {
			mServer.start();
			logger.info("started embedded server" );
		} catch (UnrecoverableKeyException e) {
			e.printStackTrace();
			logger.severe(e.toString());
			System.err.println("Did you remember to set the keystore password property?");
		} catch (Exception e) {
			e.printStackTrace();
			logger.severe(e.toString());
		}
	}
private static final void set_default_settings() {
for (Setting s : Setting.values()) {
if (s.getDefaultValue() != null) {
System.setProperty(s.getKey(), s.getDefaultValue() != null ? s.getDefaultValue().toString() : null);
} else {
System.clearProperty(s.getKey());
}
}
for (StartupSetting s : StartupSetting.values()) {
if (s.getDefaultValue() != null) {
System.setProperty(s.getKey(), s.getDefaultValue().toString());
} else {
System.clearProperty(s.getKey());
}
}
}
/**
 * Load the community server configuration file into system properties, then
 * write out the derived JDBC realm properties file (path taken from the
 * "jdbc.properties" property). Exits the process on a config read failure.
 *
 * @param path path of the configuration file to load
 */
public static final void load_config(String path) {
    Properties config = new Properties();
    FileInputStream fis = null;
    try {
        fis = new FileInputStream(path);
        config.load(fis);
    } catch (Exception e ) {
        // A missing/unreadable config is fatal by design.
        System.err.println("Error loading configuration file: " + path + "\n" + e.toString());
        System.exit(-1);
    } finally {
        try {
            if( fis != null ) {
                fis.close();
            }
        } catch( IOException e ) {}
    }
    // Promote every configuration entry to a system property.
    // (stringPropertyNames() replaces the previous unchecked Enumeration cast.)
    for (String k : config.stringPropertyNames()) {
        System.setProperty(k, config.getProperty(k));
    }
    /**
     * Now write out the revised JDBC properties file
     */
    Properties jdbc = new Properties();
    jdbc.setProperty("jdbcdriver", "com.mysql.jdbc.Driver");
    jdbc.setProperty("url", "jdbc:mysql://" + System.getProperty("db.host") + ":" + System.getProperty("db.port") + "/" + System.getProperty("db.name"));
    jdbc.setProperty("username", System.getProperty("db.user"));
    jdbc.setProperty("password", System.getProperty("db.password"));
    jdbc.setProperty("usertable", "valid_accounts");
    jdbc.setProperty("usertablekey", "uid");
    jdbc.setProperty("usertableuserfield", "username");
    jdbc.setProperty("usertablepasswordfield", "password_hash");
    jdbc.setProperty("roletable", "roles");
    jdbc.setProperty("roletablekey", "role_id");
    jdbc.setProperty("roletablerolefield", "role");
    jdbc.setProperty("userroletable", "user_roles");
    jdbc.setProperty("userroletableuserkey", "uid");
    jdbc.setProperty("userroletablerolekey", "role_id");
    jdbc.setProperty("cachetime", "0");
    FileOutputStream outStream = null;
    try {
        outStream = new FileOutputStream(System.getProperty("jdbc.properties"));
        jdbc.store(outStream, null);
    } catch (IOException e) {
        // FileNotFoundException is an IOException; a single catch covers both.
        e.printStackTrace();
    } finally {
        // Guard against NPE when the FileOutputStream was never opened
        // (previously this relied on catching the NPE silently).
        if (outStream != null) {
            try {
                outStream.close();
            } catch( Exception e ) {}
        }
    }
}
/**
 * Entry point. Loads defaults + config file, optional IP white/blacklists,
 * VPN/infrastructure peer keys and SSL keystore settings, then starts the
 * embedded server(s) and parks forever.
 * Usage: EmbeddedServer [config file, default "community.conf"]
 */
public static final void main(String[] args) {
// Seed defaults first, then overlay the configuration file on top.
set_default_settings();
load_config(args.length == 0 ? "community.conf" : args[0]);
String host = System.getProperty(StartupSetting.HOST.getKey());
int port = Integer.parseInt(System.getProperty(StartupSetting.PORT.getKey()));
int maxThreads = Integer.parseInt(System.getProperty(StartupSetting.MAX_THREADS.getKey()));
String keystorePath = null;
String keystore_password = null;
Set<String> vpn_ids = new HashSet<String>();
List<IPFilter> whitelist = new ArrayList<IPFilter>();
List<IPFilter> blacklist = new ArrayList<IPFilter>();
// Ganglia reporting is enabled only when a ganglia host is configured.
String gangliaHost = System.getProperty(StartupSetting.GANGLIA_HOST.getKey());
int gangliaPort = gangliaHost != null ? Integer.parseInt(System.getProperty(StartupSetting.GANGLIA_PORT.getKey())) : -1;
// java.util.logging configuration is optional; failure here is non-fatal.
try {
LogManager.getLogManager().readConfiguration(new FileInputStream("logging.properties"));
System.out.println("read log configuration");
} catch (Exception e) {
System.err.println("error reading log config: " + e.toString());
}
// Optional IP black/whitelist files, parsed one filter per line.
if (System.getProperty(StartupSetting.IP_BLACKLIST.getKey()) != null) {
try {
blacklist = parseIPList(System.getProperty(StartupSetting.IP_BLACKLIST.getKey()));
} catch (IOException e) {
e.printStackTrace();
}
}
if (System.getProperty(StartupSetting.IP_WHITELIST.getKey()) != null) {
try {
whitelist = parseIPList(System.getProperty(StartupSetting.IP_WHITELIST.getKey()));
} catch (IOException e) {
e.printStackTrace();
}
}
keystore_password = System.getProperty(StartupSetting.KEYSTORE_PASSWORD.getKey());
keystorePath = System.getProperty(StartupSetting.SSL.getKey());
// Optional file of infrastructure ("VPN") peer keys to pre-register with the DAO.
if (System.getProperty(StartupSetting.INFRASTRUCTURE_PEERS.getKey()) != null) {
try {
vpn_ids = read_vpn_keys(System.getProperty(StartupSetting.INFRASTRUCTURE_PEERS.getKey()));
} catch (IOException e) {
e.printStackTrace();
}
}
CommunityDAO.get().registerVPN(vpn_ids);
if (keystore_password != null) {
System.setProperty("jetty.ssl.keypassword", keystore_password);
System.setProperty("jetty.ssl.password", keystore_password);
}
/**
 * Show the HTTPS URL to the user, if using this
 */
if (keystorePath != null) {
try {
KeyStore ks = KeyStore.getInstance("JKS");
// NOTE(review): if an SSL keystore path is configured without a keystore
// password, keystore_password.toCharArray() NPEs here; it is caught by the
// catch below (printed + return) -- confirm that is the intended failure mode.
ks.load(new FileInputStream(keystorePath), keystore_password.toCharArray());
String alias;
/**
 * If it has the default, use it. Otherwise, just use the first
 * one.
 */
if (ks.containsAlias("community")) {
alias = "community";
} else {
alias = ks.aliases().nextElement();
}
logger.info("Using alias: " + alias);
// The URL embeds the SHA-1 of the server certificate as a "certhash" query parameter.
MessageDigest digest = MessageDigest.getInstance("SHA-1");
digest.update(ks.getCertificate(alias).getEncoded());
String encodedBase64Hash = URLEncoder.encode(Base64.encode(digest.digest()), "UTF-8");
String oururl = "https://" + (host == null ? "127.0.0.1" : host) + ((port != 443) ? (":" + port) : "") + "/?certhash=" + encodedBase64Hash;
CommunityDAO.get().setURL(oururl);
logger.info("SSL url with hash\n\n" + oururl + "\n\n");
} catch (Exception e) {
e.printStackTrace();
return;
}
} else {
String ourUrl = "http://" + (host == null ? "127.0.0.1" : host) + ((port != 80) ? (":" + port) : "") ;
CommunityDAO.get().setURL(ourUrl);
}
EmbeddedServer embeddedServer = new EmbeddedServer(host, port, maxThreads, keystorePath, whitelist, blacklist);
embeddedServer.start();
if (gangliaHost != null) {
startStatCollector(gangliaHost, gangliaPort, embeddedServer.getConnector());
}
// Optionally run a second, unencrypted server alongside the SSL one.
if( System.getProperty(StartupSetting.UNENCRYPTED_PORT.getKey()) != null &&
keystorePath != null ) {
int alt_port = Integer.parseInt(System.getProperty(StartupSetting.UNENCRYPTED_PORT.getKey()));
logger.info("Starting non-ssl server on port: " + alt_port);
(new EmbeddedServer(host, alt_port, maxThreads, null, whitelist, blacklist)).start();
}
// Park the main thread forever; the servers run on their own threads.
try {
while( true ) {
Thread.sleep(1000);
}
} catch( Exception e ) {}
}
/**
 * The server's primary (first) connector, used by the stats collector.
 */
private Connector getConnector() {
    Connector[] connectors = mServer.getConnectors();
    return connectors[0];
}
/**
 * Enable connector statistics and register periodic server metrics with a
 * Ganglia reporter (GangliaStat with intervals 30/60 -- see GangliaStat for units).
 */
private static void startStatCollector(String host, int port, final Connector connector) {
connector.setStatsOn(true);
logger.fine("Starting stats collector: " + host + ":" + port);
GangliaStat statCollector = new GangliaStat(host, port, 30, 60);
// Total number of registered public keys in the DAO.
statCollector.addMetric(new StatReporter("os_cs_keys_registered", "keys") {
@Override
public double getValue() {
int length = CommunityDAO.get().getRegisteredKeys().length;
logger.finest("Stats collector: reg users=" + length);
return length;
}
});
// Currently open connections on the Jetty connector.
statCollector.addMetric(new StatReporter("os_connections_opened", "Connections open") {
public double getValue() {
return connector.getConnectionsOpen();
}});
// Connection count since the last statsReset() (see duration reporter below).
statCollector.addMetric(new StatReporter("os_connections", "Connections") {
public double getValue() {
return connector.getConnections();
}});
// Request count since the last statsReset().
statCollector.addMetric(new StatReporter("os_requests", "Requests") {
public double getValue() {
return connector.getRequests();
}});
// Average connection duration. Note the side effect: this reporter resets the
// connector stats, which also affects the counters reported above.
statCollector.addMetric(new StatReporter("os_connection_duration", "ms") {
public double getValue() {
double v = connector.getConnectionsDurationAve();
connector.statsReset();
return v;
}
});
// Count of peers whose last refresh falls within the online window.
statCollector.addMetric(new StatReporter("os_cs_users_online", "users") {
@Override
public double getValue() {
/*
 * users are online for 2x the refresh limit + 60 seconds
 */
// NOTE(review): the expression below computes REFRESH_INTERVAL minutes + 60s,
// not 2x the refresh limit as the comment above says -- confirm which is intended.
long online_limit = Long.parseLong(System.getProperty(Setting.REFRESH_INTERVAL.getKey())) * 60 * 1000 + 60 * 1000;
int onlineUsers = 0;
KeyRegistrationRecord[] registeredKeys = CommunityDAO.get().getRegisteredKeys();
for (KeyRegistrationRecord rec : registeredKeys) {
if ((System.currentTimeMillis() - rec.getLastRefreshedDate().getTime()) < online_limit) {
onlineUsers++;
}
}
logger.finest("Stats collector: online users=" + onlineUsers);
return onlineUsers;
}
});
// JVM heap in use (no forced GC, so this includes collectable garbage).
statCollector.addMetric(new StatReporter("os_cs_ram_used", "Bytes") {
@Override
public double getValue() {
Runtime runtime = Runtime.getRuntime();
// System.gc();
// System.gc();
long mem_used = runtime.totalMemory() - runtime.freeMemory();
logger.finest("Stats collector: mem used=" + mem_used);
return mem_used;
}
});
}
/**
 * Parse an IP filter list file, one filter expression per line.
 *
 * Fixes vs. the previous version: the reader is now closed (it leaked), and
 * EOF is detected via readLine() == null instead of ready() -- ready() only
 * reports buffered availability and could truncate the list early.
 *
 * @param path file to read
 * @return one IPFilter per line of the file
 * @throws IOException if the file cannot be opened or read
 */
private static List<IPFilter> parseIPList(String path) throws IOException {
    List<IPFilter> outList = new ArrayList<IPFilter>();
    BufferedReader in = new BufferedReader(new FileReader(path));
    try {
        String line;
        while ((line = in.readLine()) != null) {
            outList.add(new IPFilter(line));
        }
    } finally {
        in.close();
    }
    return outList;
}
/**
 * Read infrastructure ("VPN") peer keys from a file, one key per line.
 *
 * Fixes vs. the previous version: the reader is now closed (it leaked), and
 * the loop terminates on readLine() == null rather than trusting ready(),
 * which could both truncate the set and admit a stray null element.
 *
 * @param path file of keys, one per line
 * @return the set of keys (duplicates collapse)
 * @throws IOException if the file cannot be opened or read
 */
private static Set<String> read_vpn_keys(String path) throws IOException {
    Set<String> out = new HashSet<String>();
    BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(path)));
    try {
        String line;
        while ((line = in.readLine()) != null) {
            out.add(line);
        }
    } finally {
        in.close();
    }
    return out;
}
/**
 * Print command-line usage to stdout and terminate the process (exit code 0).
 */
public static final void usage() {
System.out.println("EmbeddedServer: <config file [community.conf]>\n");
System.exit(0);
}
}
| 25,403 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
PublishedSwarm.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/PublishedSwarm.java | package edu.washington.cs.oneswarm.community2.server;
/**
 * Value object describing a swarm published to the community server.
 * All state is supplied at construction; the class only exposes accessors.
 */
public class PublishedSwarm {

    // Identity and content metadata.
    long swarmID;
    String name;
    int fileCount;
    long totalSize;
    long uploadedTimestamp;
    String category;
    String infohash;
    private boolean removed;
    long uploadedBy;
    boolean needs_moderated;
    boolean hasTorrent;

    public PublishedSwarm( long swarmID, String name, int fileCount, long totalSize,
            long uploadedTimestamp, String category, String infohash, boolean removed,
            long uploadedBy, boolean needs_moderated, boolean hasTorrent ) {
        this.swarmID = swarmID;
        this.name = name;
        this.fileCount = fileCount;
        this.totalSize = totalSize;
        this.uploadedTimestamp = uploadedTimestamp;
        this.category = category;
        this.infohash = infohash;
        this.removed = removed;
        this.uploadedBy = uploadedBy;
        this.needs_moderated = needs_moderated;
        this.hasTorrent = hasTorrent;
    }

    // --- accessors ---------------------------------------------------------

    public long getSwarmID() {
        return swarmID;
    }

    public String getName() {
        return name;
    }

    public int getFileCount() {
        return fileCount;
    }

    public long getTotalSize() {
        return totalSize;
    }

    public long getUploadedTimestamp() {
        return uploadedTimestamp;
    }

    public String getCategory() {
        return category;
    }

    public String getInfohash() {
        return infohash;
    }

    public boolean isRemoved() {
        return removed;
    }

    public long getUploadedBy() {
        return uploadedBy;
    }

    public boolean isNeeds_moderated() {
        return needs_moderated;
    }

    public boolean isHasTorrent() {
        return hasTorrent;
    }

    /** Human-readable form: "name (ID: id)". */
    public String toString() {
        StringBuilder out = new StringBuilder();
        out.append(getName()).append(" (ID: ").append(getSwarmID()).append(")");
        return out.toString();
    }
}
| 1,577 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
DownloadServlet.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/DownloadServlet.java | package edu.washington.cs.oneswarm.community2.server;
import java.security.Principal;
import java.util.logging.Logger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
 * Serves .oneswarm swarm downloads by numeric id ("id" request parameter),
 * enforcing per-account permissions via CommunityDAO.
 */
public class DownloadServlet extends javax.servlet.http.HttpServlet {

    private static final long serialVersionUID = 1L;

    private static Logger logger = Logger.getLogger(DownloadServlet.class.getName());

    public DownloadServlet() {
        // Touch the DAO so database problems surface at servlet construction
        // rather than on the first download request.
        CommunityDAO.get();
        logger.info("Started DL servlet");
    }

    public void doGet(HttpServletRequest request, HttpServletResponse response) {
        try {
            CommunityDAO dao = CommunityDAO.get();
            long id = Long.parseLong(request.getParameter("id"));
            PublishedSwarm swarm = dao.getSwarm(id);
            // Anonymous requests have no principal; hasPermissions decides
            // whether a null account may fetch this swarm.
            Principal p = request.getUserPrincipal();
            CommunityAccount acct = null;
            if (p != null) {
                acct = dao.getAccountForName(p.getName());
            }
            if (dao.hasPermissions(acct, swarm) == false) {
                response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
                return;
            }
            response.setContentType("application/x-oneswarm");
            byte[] b = dao.getSwarmBytes(id);
            if (b == null) {
                logger.warning("Problem during swarm download: null swarm bytes");
                response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
                return;
            }
            response.setContentLength(b.length);
            // Swarm names are user-supplied: quote the filename and strip
            // CR/LF/quote characters. The previous unquoted, unsanitized value
            // could break the header or permit response-header injection.
            String safeName = swarm.getName().replaceAll("[\\r\\n\"]", "_");
            response.setHeader("Content-Disposition", "attachment; filename=\"" + safeName + ".oneswarm\"");
            response.getOutputStream().write(b);
        } catch (Exception e) {
            // Bad/missing id, DAO failures, and I/O errors all map to 400.
            logger.warning("Problem during swarm download: " + e.toString());
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
            e.printStackTrace();
        }
    }
}
| 1,714 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
CommunityDAO.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/CommunityDAO.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.BufferedReader;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.Principal;
import java.sql.Blob;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.gudy.azureus2.core3.util.BDecoder;
import org.gudy.azureus2.core3.util.BEncoder;
import org.gudy.azureus2.core3.util.ByteFormatter;
import org.gudy.azureus2.core3.util.SHA1Hasher;
import org.mortbay.util.TypeUtil;
import com.jolbox.bonecp.BoneCP;
import com.jolbox.bonecp.BoneCPConfig;
import com.sun.org.apache.xerces.internal.impl.dv.util.Base64;
import edu.washington.cs.oneswarm.community2.shared.DuplicateAccountException;
import edu.washington.cs.oneswarm.community2.shared.KeyRegistrationRecord;
import edu.washington.cs.oneswarm.community2.shared.NoSuchUserException;
import edu.washington.cs.oneswarm.community2.test.CryptoUtils;
import edu.washington.cs.oneswarm.community2.utils.MutableLong;
/**
* This class manages the linkage between soft state and the persistent storage of community
* server info.
*
* While it may appear that we try to achieve consistency between on-disk and in-memory state,
* don't be fooled. While we try to maintain
* consistency, it's never going to happen. The problem is that there may be
* several in-flight requests that have just been issued challenges. Also, we
* might have lazy updates, etc. that pruning of old entries will obviate.
*
* Instead of introducing a lot of locks and trying to make everything consistent,
* we're just going to accept that state may be inconsistent at times and fail gracefully.
* Just returning nothing to users is okay -- a missed update isn't the end of the world.
*/
public final class CommunityDAO {
// System property key: how long an idle key registration survives (seconds).
public static final String EXPIRATION_PROPERTY = EmbeddedServer.Setting.KEY_EXPIRATION_SECONDS.getKey();
private static Logger logger = Logger.getLogger(CommunityDAO.class.getName());
// JDBC connection pool; initialized in the constructor.
BoneCP connectionPool = null;
// ---- In-memory "soft state" mirrors of the DB; rebuilt wholesale by load(). ----
// All registered peers (loaded in key_hash order).
private List<KeyRegistrationRecord> peers = Collections.synchronizedList(new ArrayList<KeyRegistrationRecord>());
// base64 public key -> registration record.
private Map<String, KeyRegistrationRecord> key_to_record = new ConcurrentHashMap<String, KeyRegistrationRecord>();
// db_id -> registration record.
private Map<Long, KeyRegistrationRecord> id_to_record = new ConcurrentHashMap<Long, KeyRegistrationRecord>();
// peer -> set of linked peers; links are kept symmetric (see createFriendLink).
private Map<KeyRegistrationRecord, Set<KeyRegistrationRecord>> topology = new ConcurrentHashMap<KeyRegistrationRecord, Set<KeyRegistrationRecord>>();
// registration IP -> number of keys registered from that IP.
private Map<String, Integer> ips_to_key_counts = Collections.synchronizedMap(new HashMap<String, Integer>());
// Expiration window in milliseconds (EXPIRATION_PROPERTY seconds * 1000).
private long userTimeout;
private String DRIVER = "com.mysql.jdbc.Driver";
// Public keys of infrastructure/VPN peers, populated via registerVPN().
private Set<String> mVPN_ids = new HashSet<String>();
// Deferred best-effort DB writes, drained by the "Lazy DB drainer" thread.
private LinkedBlockingQueue<Runnable> mLazyDBQueue = new LinkedBlockingQueue<Runnable>();
// Externally visible URL of this server (set from EmbeddedServer.main via setURL).
private String mURL;
/**
 * Account roles. The string tag matches the value inserted into the "roles"
 * table; the numeric id mirrors the insertion order used at schema creation.
 */
public enum UserRole {
    ADMIN("admin", 1),
    USER("user", 2),
    MODERATOR("moderator", 3),
    // "bozo" -- presumably a restricted/penalized account; confirm with callers.
    BOZO("bozo", 4);

    private String tag;
    private int id;

    private UserRole( String tag, int id ) {
        this.tag = tag;
        this.id = id;
    }

    public String getTag() { return tag; }

    public int getID() { return id; }

    /** Linear lookup by tag; null when nothing matches. */
    public static UserRole roleForString( String inString ) {
        for (UserRole candidate : values()) {
            if (candidate.tag.equals(inString)) {
                return candidate;
            }
        }
        return null;
    }

    /** Linear lookup by numeric id; null when nothing matches. */
    public static UserRole roleForID( int inID ) {
        for (UserRole candidate : values()) {
            if (candidate.id == inID) {
                return candidate;
            }
        }
        return null;
    }
};
// Tables in drop order: FK-holding children appear before the tables they
// reference (e.g. user_roles before roles, comments before published_swarms,
// registered_keys before valid_accounts) so drop_tables() avoids FK violations.
private static final String [] TABLES = new String[] {
"user_roles",
"roles",
"topology",
"banned_ips",
"swarm_extras",
"comments",
"published_swarms",
"categories",
"registered_keys",
"valid_accounts"
};
// DDL executed in order by create_tables(); statements are MySQL-specific
// (TYPE=INNODB/MyISAM, SERIAL, triggers). Parent tables precede children so
// FOREIGN KEY references resolve.
private static final String [] CREATE_TABLES = new String[]{
// Login accounts; password_hash stores SHA1(username + password).
"CREATE TABLE valid_accounts " +
"(" +
" username VARCHAR(128) CHARSET utf8 NOT NULL UNIQUE KEY, " +
" password_hash CHAR(37), " + // hash is SHA1(username + password)
" registered_keys INTEGER DEFAULT 0, " + // how many keys have been registered by this identity?
" max_registrations INTEGER DEFAULT 5, " + // how many keys can this account register?
" uid SERIAL PRIMARY KEY, " +
" CONSTRAINT unique_username UNIQUE(username) " +
") TYPE=INNODB",
// Role names (populated from the UserRole enum at schema creation).
"CREATE TABLE roles " +
"( " +
" role_id SERIAL PRIMARY KEY, " +
" role VARCHAR(32)" +
") TYPE=INNODB",
// Many-to-many account <-> role mapping.
"CREATE TABLE user_roles " +
"(" +
" uid BIGINT UNSIGNED, " +
" role_id BIGINT UNSIGNED, " +
"" +
" FOREIGN KEY(role_id) REFERENCES roles(role_id) ON DELETE CASCADE, " +
" PRIMARY KEY(uid, role_id)" +
") TYPE=INNODB",
// One row per registered public key; pruned when last_refresh_timestamp ages out.
"CREATE TABLE registered_keys " +
"( " +
" public_key VARCHAR(4096) CHARSET ascii NOT NULL, " +
" key_hash INTEGER, " +
" created_by_account BIGINT UNSIGNED, " +
" nick VARCHAR(256) CHARSET utf8 NOT NULL, " + // length needs to be at least CommunityConstants.MAX_NICK_LENGTH
" registration_timestamp TIMESTAMP DEFAULT NOW(), " +
" last_refresh_timestamp TIMESTAMP DEFAULT '1980-10-10 10:00:00', " + // overcome the only one DEFAULT NOW issue, and need a valid timestamp here.
" registration_ip VARCHAR(16) NOT NULL, " +
" db_id SERIAL PRIMARY KEY, " +
" CONSTRAINT UniqueKey UNIQUE(public_key(767)), " +
" FOREIGN KEY(created_by_account) REFERENCES valid_accounts(uid) ON DELETE CASCADE " +
") TYPE=INNODB",
// to support efficient range queries on the hash index
"CREATE INDEX hash_index ON registered_keys(key_hash)",
// Keep valid_accounts.registered_keys in sync with inserts/deletes.
"CREATE TRIGGER registration_account_increment AFTER INSERT ON registered_keys \n" +
"FOR EACH ROW \n" +
"UPDATE valid_accounts SET registered_keys = registered_keys + 1 WHERE valid_accounts.uid = NEW.created_by_account ",
"CREATE TRIGGER registration_account_decrement AFTER DELETE ON registered_keys \n" +
"FOR EACH ROW \n" +
"UPDATE valid_accounts SET registered_keys = registered_keys - 1 WHERE valid_accounts.uid = OLD.created_by_account ",
// Symmetric friend links, stored once with A_id < B_id (see symmetry_inequality).
"CREATE TABLE topology " +
"(" +
" A_id BIGINT UNSIGNED NOT NULL, \n" +
" B_id BIGINT UNSIGNED NOT NULL, \n" +
" created_timestamp TIMESTAMP DEFAULT NOW(), \n" +
"" +
" PRIMARY KEY(A_id, B_id), \n" +
"" +
" CONSTRAINT symmetry_inequality CHECK( A_id < B_id ), \n" +
" " +
" FOREIGN KEY(A_id) REFERENCES registered_keys(db_id) ON DELETE CASCADE, \n" +
" FOREIGN KEY(B_id) REFERENCES registered_keys(db_id) ON DELETE CASCADE \n" +
") TYPE=INNODB",
// Swarm categories (seeded with DEFAULT_CATEGORIES).
"CREATE TABLE categories " +
"(" +
" category VARCHAR(128) CHARSET utf8 NOT NULL PRIMARY KEY" +
") TYPE=INNODB",
// Published swarm metadata plus the torrent blob itself ("bin").
"CREATE TABLE published_swarms \n" +
"(" +
" swarmid SERIAL PRIMARY KEY, \n" +
"" +
" name VARCHAR(512) CHARSET utf8 NOT NULL, \n" +
"" +
" num_files INTEGER, \n" +
" total_size BIGINT, \n" +
"" +
" category VARCHAR(128) CHARSET utf8 DEFAULT NULL, " +
"" +
" uploaded_by BIGINT UNSIGNED, \n" +
"" +
" infohash CHAR(40), \n" +
" date_uploaded TIMESTAMP DEFAULT NOW(), \n" +
"" +
" has_torrent BOOLEAN NOT NULL, \n" +
" bin MEDIUMBLOB, \n" +
"" +
" removed BOOLEAN DEFAULT FALSE, \n" +
" needs_moderated BOOLEAN DEFAULT TRUE, " +
"" +
" ip VARCHAR(16) NOT NULL, \n" +
" FOREIGN KEY(uploaded_by) REFERENCES valid_accounts(uid) ON DELETE CASCADE, \n" +
" FOREIGN KEY(category) REFERENCES categories(category) ON DELETE SET NULL, " +
" CONSTRAINT unique_infohash UNIQUE(infohash) " +
") TYPE=INNODB",
"CREATE INDEX uploaded_by_index ON published_swarms(uploaded_by)",
"CREATE INDEX infohash_index ON published_swarms(infohash)",
"CREATE INDEX name_index ON published_swarms(name)",
"CREATE INDEX unmoderated_index ON published_swarms(needs_moderated)",
"CREATE TABLE banned_ips " +
"( " +
" ip VARCHAR(16) NOT NULL PRIMARY KEY" +
") TYPE=MyISAM",
// Optional per-swarm extras (description, votes, preview image, download count).
"CREATE TABLE swarm_extras " +
"( " +
" swarmid BIGINT UNSIGNED PRIMARY KEY, " +
" description TEXT CHARSET utf8, \n" +
" downloads INT UNSIGNED DEFAULT 0, " +
" language CHAR(10) DEFAULT NULL, " + // ISO 639-1 ?
"" +
" upvotes INTEGER DEFAULT 0, " +
" downvotes INTEGER DEFAULT 0, " +
"" +
" previewpng MEDIUMBLOB DEFAULT NULL, " +
"" +
" FOREIGN KEY(swarmid) REFERENCES published_swarms(swarmid) ON DELETE CASCADE" +
") TYPE=INNODB",
// Threaded comments on swarms (reply_to references another commentid).
"CREATE TABLE comments " +
"( " +
" swarmid BIGINT UNSIGNED, " +
" commentid SERIAL PRIMARY KEY, " +
"" +
" accountname VARCHAR(128) CHARSET utf8 NOT NULL, " +
"" +
" time TIMESTAMP DEFAULT NOW(), " +
"" +
" reply_to BIGINT UNSIGNED DEFAULT NULL, " +
"" +
" upvote INTEGER DEFAULT 0, " +
" downvote INTEGER DEFAULT 0, " +
"" +
" ip VARCHAR(16) NOT NULL, " +
"" +
" body TEXT CHARSET utf8 NOT NULL, " +
"" +
" removed BOOLEAN DEFAULT FALSE, " +
"" +
" FOREIGN KEY(accountname) REFERENCES valid_accounts(username) ON DELETE CASCADE, " +
" FOREIGN KEY(swarmid) REFERENCES published_swarms(swarmid) ON DELETE CASCADE " +
" " +
") TYPE=INNODB",
"CREATE INDEX comment_dates ON comments(time)"
};
// Lazily created singleton; guarded by the class lock in get().
private static CommunityDAO mInst = null;
// Synthetic admin principal -- presumably used for internal privileged calls; confirm usages.
protected static final Principal SUPERUSER = new CommunityAccount("admin", null, new String[]{"admin"}, 0, 0, 0);
// Categories seeded into the DB when the schema is first created (see create_tables()).
private static final String[] DEFAULT_CATEGORIES = new String[]{"Other", "Video", "Audio", "Pictures", "Documents"};
/**
 * Singleton accessor; the class-level lock serializes racing first callers
 * so the (expensive, DB-connecting) constructor runs at most once.
 */
public synchronized static CommunityDAO get() {
    if (mInst != null) {
        return mInst;
    }
    mInst = new CommunityDAO();
    return mInst;
}
/**
 * Snapshot of all currently registered keys. Returns a fresh array, so
 * callers may iterate without holding the DAO lock.
 */
public synchronized KeyRegistrationRecord [] getRegisteredKeys() {
return peers.toArray(new KeyRegistrationRecord[0]);
}
/**
 * Connects to MySQL via a BoneCP pool, (re)creates the schema if needed,
 * loads the soft state, and starts two background workers: a periodic
 * stale-key pruner and the lazy DB-write drainer.
 */
private CommunityDAO() {
// Load the MySQL JDBC driver (DRIVER is com.mysql.jdbc.Driver; an earlier
// comment here said "Derby", which is stale).
try
{
Class.forName(DRIVER);
}
catch( ClassNotFoundException e )
{
logger.severe(e.toString());
}
try
{
// Build the JDBC URL from db.host/db.port/db.name properties, with
// defaults :3306 and community_db; credentials go on the pool config.
StringBuilder connect = new StringBuilder();
connect.append("jdbc:mysql://");
connect.append(System.getProperty("db.host"));
connect.append(System.getProperty("db.port") == null ? ":3306" : (":"+System.getProperty("db.port")));
connect.append("/"+(System.getProperty("db.name")==null ? "community_db" : System.getProperty("db.name")));
// connect.append("?user=" + System.getProperty("db.user"));
// connect.append("&password=" + System.getProperty("db.password"));
connect.append("?characterEncoding=UTF8&characterSetResults=UTF8&useUnicode=true");
logger.finest("DB connect string: " + connect.toString());
// con = DriverManager.getConnection(connect.toString());
BoneCPConfig config = new BoneCPConfig();
config.setJdbcUrl(connect.toString());
config.setUsername(System.getProperty("db.user"));
config.setPassword(System.getProperty("db.password"));
config.setMinConnectionsPerPartition(5);
config.setMaxConnectionsPerPartition(30);
config.setPartitionCount(1);
connectionPool = new BoneCP(config);
}
catch( SQLException e )
{
logger.severe(e.toString());
e.printStackTrace();
}
// drop_tables();
check_create_tables();
load();
/**
 * Prune expired peers periodically. Note: the task runs every 15 minutes
 * (first run after 1s), despite what older comments claimed.
 */
(new Timer("prune old peers", true)).schedule(new TimerTask(){
public void run() {
// Re-read the expiration property each run so it can change at runtime.
if( System.getProperty(EXPIRATION_PROPERTY) != null ) {
try {
// userTimeout is stored in milliseconds (property is in seconds);
// the log message below says "seconds" -- the value logged is ms.
userTimeout = Long.parseLong(System.getProperty(EXPIRATION_PROPERTY)) * 1000;
logger.info("Using user timeout: " + userTimeout + " seconds");
} catch( Exception e ) {
logger.warning("Invalid user expiration timeout: " + System.getProperty(EXPIRATION_PROPERTY));
}
}
int removedDB = (new SQLStatementProcessor<Integer>("DELETE FROM registered_keys WHERE last_refresh_timestamp < ?") {
Integer process(PreparedStatement s) throws SQLException {
s.setTimestamp(1, new java.sql.Timestamp(System.currentTimeMillis() - userTimeout));
int removed = s.executeUpdate();
return removed;
}
}).doit();
logger.info("Pruned " + removedDB + " old peers");
// Rebuild soft state so pruned keys disappear from memory too.
load();
}}, 1000, 15 * 60 * 1000);
/**
 * Drain the lazy DB queue
 */
Thread lazyDrain = new Thread("Lazy DB drainer") {
public void run() {
while( true ) {
try {
mLazyDBQueue.take().run();
}catch( Exception e ) {
e.printStackTrace();
try {
/**
 * Slow down if bad things are happening
 */
Thread.sleep(100);
} catch (InterruptedException e1) {}
}
}
}
};
lazyDrain.setDaemon(true);
lazyDrain.start();
}
/**
 * Create the schema when the database is empty; when the table count differs
 * from the expected set, attempt a best-effort re-create (create_tables logs
 * and skips statements for tables that already exist).
 */
private void check_create_tables() {
List<String> tables = (new SQLStatementProcessor<List<String>>("SHOW TABLES") {
List<String> process(PreparedStatement s) throws SQLException {
ResultSet rs = s.executeQuery();
List<String> out = new ArrayList<String>();
while( rs.next() ) {
out.add(rs.getString(1));
}
return out;
}
}).doit();
if( tables.size() == 0 ) {
logger.info("Creating DB schema...");
create_tables();
} else if( tables.size() != TABLES.length ) {
// Count-based check only: it cannot detect renamed or altered tables.
logger.warning("DB schema seems out of date. Trying to recreate tables. (This may or may not work)");
create_tables();
}
}
/**
* (Re)load the database and update the soft state once completed.
*/
private void load() {
long startTime = System.currentTimeMillis();
logger.info("Starting reload of soft state...");
// the next version of the soft state
final Map<KeyRegistrationRecord, Set<KeyRegistrationRecord>> topology = new ConcurrentHashMap<KeyRegistrationRecord, Set<KeyRegistrationRecord>>();
final Map<String, KeyRegistrationRecord> key_to_record = new ConcurrentHashMap<String, KeyRegistrationRecord>();
final Map<Long, KeyRegistrationRecord> id_to_record = new ConcurrentHashMap<Long, KeyRegistrationRecord>();
final List<KeyRegistrationRecord> peers = Collections.synchronizedList(new ArrayList<KeyRegistrationRecord>());
final Map<String, Integer> ips_to_key_counts = Collections.synchronizedMap(new HashMap<String, Integer>());
(new SQLStatementProcessor<Void>("SELECT public_key, nick, created_by_account, registration_ip, registration_timestamp, last_refresh_timestamp, db_id FROM registered_keys ORDER BY key_hash ASC") {
Void process(PreparedStatement s) throws SQLException {
ResultSet rs = s.executeQuery();
while( rs.next() ) {
KeyRegistrationRecord fr = new KeyRegistrationRecord( rs.getString("public_key"), rs.getString("nick"), new Date(rs.getTimestamp("registration_timestamp").getTime()),
new Date(rs.getTimestamp("last_refresh_timestamp").getTime()), rs.getString("registration_ip"),
rs.getLong("created_by_account"), rs.getLong("db_id") );
peers.add(fr);
key_to_record.put(fr.getBase64PublicKey(), fr);
id_to_record.put(fr.getID(), fr);
topology.put(fr, Collections.synchronizedSet(new HashSet<KeyRegistrationRecord>()));
}
return null;
}
}).doit();
(new SQLStatementProcessor<Void>("SELECT registration_ip, COUNT(*) FROM registered_keys GROUP BY registration_ip") {
Void process(PreparedStatement s) throws SQLException {
ResultSet rs = s.executeQuery();
while( rs.next() ) {
String reg_ip = rs.getString(1);
Integer count = rs.getInt(2);
ips_to_key_counts.put(reg_ip, count);
logger.finest("ip: " + reg_ip + " registered " + count);
}
return null;
}
}).doit();
// (new SQLStatementProcessor<Void>("SELECT username, registered_keys FROM valid_accounts") {
// Void process(PreparedStatement s) throws SQLException {
// ResultSet rs = s.executeQuery();
// while( rs.next() ) {
// String reg_username = rs.getString(1);
// Integer count = rs.getInt(2);
//
// logger.finest("user: " + reg_username + " registered " + count);
// }
// return null;
// }
// }).doit();
(new SQLStatementProcessor<Void>("SELECT * FROM topology") {
Void process(PreparedStatement s) throws SQLException {
ResultSet rs = s.executeQuery();
while( rs.next() ) {
KeyRegistrationRecord a = id_to_record.get(rs.getLong("A_id"));
KeyRegistrationRecord b = id_to_record.get(rs.getLong("B_id"));
if( a == null ) {
logger.severe("Null soft state key registration record for ID: " + rs.getLong("A_id"));
continue;
}
if( b == null ) {
logger.severe("Null soft state key registration record for ID: " + rs.getLong("B_id"));
}
createFriendLink(topology, a, b, false);
}
return null;
}
}).doit();
synchronized(CommunityDAO.this) {
CommunityDAO.this.key_to_record = key_to_record;
CommunityDAO.this.id_to_record = id_to_record;
CommunityDAO.this.topology = topology;
CommunityDAO.this.peers = peers;
CommunityDAO.this.ips_to_key_counts = ips_to_key_counts;
}
logger.info("db sync took: " + (System.currentTimeMillis() - startTime) + " for " + peers.size());
}
/**
 * Record a symmetric friend link between a and b in the given topology map.
 * Both peers must already have entries in the map (load() guarantees this).
 * When writeToDB is set, the insert is queued on the lazy DB writer rather
 * than executed inline.
 */
private synchronized void createFriendLink( final Map<KeyRegistrationRecord, Set<KeyRegistrationRecord>> topology, final KeyRegistrationRecord a, final KeyRegistrationRecord b, boolean writeToDB ) {
Set<KeyRegistrationRecord> a_peers = topology.get(a);
Set<KeyRegistrationRecord> b_peers = topology.get(b);
a_peers.add(b);
b_peers.add(a);
if( writeToDB ) {
/**
 * Might these db_ids no longer be valid by the time we actually get around to writing this update?
 * Sure -- but that's okay, the referential integrity checks will cause a rollback, we'll print an
 * error message that can be ignored, and the next time this client refreshes, it will get the correct entries.
 */
try {
mLazyDBQueue.put(new Runnable(){
public void run() {
(new SQLStatementProcessor<Void>("INSERT INTO topology (A_id, B_id) VALUES (?, ?)") {
Void process( PreparedStatement s ) throws SQLException {
// Store with A_id < B_id to satisfy the table's symmetry_inequality constraint.
if( a.getID() < b.getID() ) {
s.setLong(1, a.getID());
s.setLong(2, b.getID());
} else {
s.setLong(1, b.getID());
s.setLong(2, a.getID());
}
s.executeUpdate();
return null;
}
}).doit();
}
});
} catch (InterruptedException e) {
e.printStackTrace();
logger.warning(e.toString());
}
}
}
/**
 * Drop every known table (best effort; TABLES is ordered children-first so
 * FK constraints don't block the drops) and clear the in-memory soft state.
 *
 * Fixes vs. the previous version: the finally block no longer NPEs when
 * getConnection() failed (c was null), the Statement is closed even when a
 * drop throws, and the outer failure is logged instead of silently swallowed.
 */
public synchronized void drop_tables() {
    Connection c = null;
    try {
        c = connectionPool.getConnection();
        clearSoftState();
        Statement stmt = c.createStatement();
        try {
            for( String t : TABLES )
            {
                try {
                    stmt.executeUpdate("DROP TABLE IF EXISTS " + t);
                } catch( Exception e ) {
                    // A single failed drop shouldn't abort the rest.
                    logger.warning(e.toString());
                }
            }
        } finally {
            stmt.close();
        }
    } catch( Exception e ) {
        logger.warning("drop_tables failed: " + e.toString());
    }
    finally {
        if( c != null ) {
            try {
                c.close();
            } catch( SQLException e ) {}
        }
    }
}
/**
 * Execute the CREATE_TABLES DDL (logging and continuing past statements that
 * fail, e.g. tables that already exist), seed the roles table from UserRole,
 * create the default admin account, and add the default categories.
 *
 * Fixes vs. the previous version: the finally block no longer NPEs when
 * getConnection() failed (c was null), statements are closed in finally, the
 * dead "show tables" query was removed, and the role insert is parameterized.
 */
public synchronized void create_tables()
{
    Connection c = null;
    try
    {
        clearSoftState();
        c = connectionPool.getConnection();
        Statement s = c.createStatement();
        try {
            for( String t : CREATE_TABLES )
            {
                try {
                    s.execute(t);
                } catch( Exception e ) {
                    // Log the failure together with the offending DDL and keep going.
                    logger.warning(e.toString());
                    logger.warning(t);
                }
            }
        } finally {
            s.close();
        }
        /**
         * Create roles
         */
        PreparedStatement stmt = c.prepareStatement("INSERT INTO roles (role) VALUES (?)");
        try {
            for( UserRole role : UserRole.values() ) {
                stmt.setString(1, role.getTag());
                stmt.executeUpdate();
            }
        } finally {
            stmt.close();
        }
    } catch( SQLException e ) {
        e.printStackTrace();
    } finally {
        if( c != null ) {
            try{
                c.close();
            } catch( SQLException e ) {}
        }
    }
    try {
        createAccount("admin", "", UserRole.ADMIN);
        logger.info("Created admin account with blank password -- change the password!");
    } catch( DuplicateAccountException e ) {
        logger.info("Admin account already exists, skipped creation");
    } catch( IOException e ) {
        e.printStackTrace();
        logger.warning(e.toString());
    }
    for( String cat : DEFAULT_CATEGORIES ) {
        addCategory(cat);
    }
}
/**
 * Insert a new swarm category. Duplicates violate the categories primary key
 * and surface as SQL errors inside the statement processor.
 */
public void addCategory( final String category ) {
    SQLStatementProcessor<Void> insert =
            new SQLStatementProcessor<Void>("INSERT INTO categories (category) VALUES (?)") {
                public Void process( PreparedStatement s ) throws SQLException {
                    s.setString(1, category);
                    s.executeUpdate();
                    return null;
                }
            };
    insert.doit();
}
/**
 * Wipe every in-memory index; the database remains the source of truth and a
 * subsequent load() repopulates all of these structures.
 */
private synchronized void clearSoftState() {
    topology.clear();
    ips_to_key_counts.clear();
    id_to_record.clear();
    key_to_record.clear();
    peers.clear();
}
/**
 * The set of random-topology neighbors for the given peer, or null if the
 * peer is absent from the current topology snapshot. Returns the internal
 * (synchronized) set, not a copy.
 */
public synchronized Set<KeyRegistrationRecord> getCurrentRandomPeers( KeyRegistrationRecord inFriend ) {
return topology.get(inFriend);
}
	/**
	 * Returns random peers for the given key, first reusing the key's
	 * existing topology entries and then adding shuffled candidates until
	 * the peering-policy cap is reached. Returns null if the key isn't
	 * registered.
	 *
	 * NOTE(review): unlike getNearestPeers(), this method is not declared
	 * synchronized although it reads and mutates the shared peers/topology
	 * state -- confirm callers hold the lock or that this is intentional.
	 *
	 * @param inBase64Key the requesting peer's public key
	 * @param inDesired desired number of peers to return
	 * @param inMax per-candidate cap: skip candidates already at/over this many links
	 * @param updateRefreshTime should we update the peer's last refreshed time?
	 */
	public List<KeyRegistrationRecord> getRandomPeers( final String inBase64Key, final int inDesired, final int inMax, boolean updateRefreshTime ) {
		if( isRegistered(inBase64Key) == false ) {
			return null;
		}
		if( updateRefreshTime ) {
			updateRefreshTime(inBase64Key);
		}
		/**
		 * First retrieve any existing friends.
		 */
		KeyRegistrationRecord me = key_to_record.get(inBase64Key);
		logger.finer("Get random peers for: " + me.getNickname());
		Set<KeyRegistrationRecord> friends = topology.get(me);
		logger.finer("\t" + friends.size() + " existing random peers in topo");
		/**
		 * Peering policy is to add friends on request until exceeding Max(85% of inNumber, 5).
		 * The goal here is to avoid having strongly connected components that cannot be broken into by new peers, so we
		 * always keep a few slots open for the most recent joiner (churn introduces some mixing as well)
		 */
		long start = System.currentTimeMillis();
		// shuffle a snapshot of all registered peers; the shuffle order drives the random matching
		KeyRegistrationRecord [] candidates = peers.toArray(new KeyRegistrationRecord[0]);
		Collections.shuffle(Arrays.asList(candidates));
		logger.finest("\tShuffled " + candidates.length + " candidates in " + (System.currentTimeMillis() - start));
		for( KeyRegistrationRecord candidate : candidates ) {
			// have we found enough?
			if( friends.size() > Math.max(0.85 * (double)inDesired, 5) ) {
				break;
			}
			// skip friends already in the set
			if( friends.contains(candidate) ) {
				continue;
			}
			// skip ourselves
			if( candidate.equals(me) ) {
				continue;
			}
			// candidate is already past the maximum number allowed
			if( topology.get(candidate).size() > inMax ) {
				logger.finest("\tCandidate: " + candidate.getNickname() + " has too many: " + topology.get(candidate).size());
				continue;
			}
			/**
			 * success -- add to output and store in DB. Since we're returning the topology list directly, we
			 * don't need an extra copy here
			 */
			logger.finest("\tAdding candidate: " + candidate.getNickname() + " with " + topology.get(candidate).size());
			// persists the link and updates `friends` (the live topology set) as a side effect
			createFriendLink(topology, me, candidate, true);
		}
		logger.finer("Returning " + friends.size() + " for " + me.getNickname());
		return Arrays.asList(friends.toArray(new KeyRegistrationRecord[0]));
	}
/**
* This method creates a simple ring topology. This has the unfortunate property that nearby peers have significant overlap
* in peer lists, increasing average path length, which is why we're now using random matching in the default case.
*
* For VPN matching, however, dividing up a circular keyspace is much more desirable since we don't need to
* store anything in the DB and the adjacency overlap problem really doesn't apply, so we're keeping this code
* around in support of that.
*
* @param inBase64Key the requesting key
* @param inNumber the number of output friends
* @param updateRefreshTime should we update the peer's last refreshed time?
* @return a list of nearby FriendRecords
*/
public synchronized List<KeyRegistrationRecord> getNearestPeers( final String inBase64Key, final int inNumber, boolean updateRefreshTime ) {
if( isRegistered(inBase64Key) == false ) {
return null;
}
if( updateRefreshTime ) {
updateRefreshTime(inBase64Key);
}
Set<KeyRegistrationRecord> out = new HashSet<KeyRegistrationRecord>();
int ourIndex = Collections.binarySearch(peers, new KeyRegistrationRecord(inBase64Key));
if( ourIndex < 0 ) {
System.err.println("Inconsist DB/memory state wrt peer: " + inBase64Key);
return null;
}
for( int i=1; i<=inNumber/2; i++ ) {
int plus = (ourIndex+i) % peers.size();
if( plus != ourIndex ) {
out.add(peers.get(plus));
}
int minus = (ourIndex-i) % peers.size();
if( minus != ourIndex ) {
out.add(peers.get(minus >= 0 ? minus : (peers.size() - 1 + minus)));
}
}
return Arrays.asList(out.toArray(new KeyRegistrationRecord[0]));
}
	/**
	 * Marks a key as freshly seen: queues a lazy DB write of
	 * last_refresh_timestamp and immediately updates the in-memory record.
	 * The DB write happens asynchronously on the lazy-queue consumer.
	 */
	private void updateRefreshTime(final String inBase64Key) {
		try {
			/**
			 * Might this client no longer exist by the time this update is attempted? Sure, but that's okay. Aside from a
			 * scary error message, no harm done.
			 */
			mLazyDBQueue.put(new Runnable() {
				public void run() {
					(new SQLStatementProcessor<Void>("UPDATE registered_keys SET last_refresh_timestamp = CURRENT_TIMESTAMP WHERE public_key = ?") {
						public Void process( PreparedStatement s ) throws SQLException {
							s.setString(1, inBase64Key);
							if( s.executeUpdate() != 1 ) {
								logger.warning("Couldn't update last_refresh_timestamp for: " + inBase64Key + " not (or no longer) in DB");
							}
							else {
								// keep the cached record in step with the DB write
								KeyRegistrationRecord rec = key_to_record.get(inBase64Key);
								if( rec == null ) {
									logger.warning("Inconsistent DB/cache state in updateRefreshTime() for key: " + inBase64Key);
								}
								else {
									rec.setLastRefresh( new Date() );
								}
							}
							return null;
						}
					}).doit();
				}
			});
			// also update the soft state right away, before the queued DB write runs
			KeyRegistrationRecord rec = key_to_record.get(inBase64Key);
			if( rec == null ) {
				logger.warning("Got update refresh time for a key that wasn't in soft state: " + inBase64Key);
			} else {
				rec.setLastRefresh(new java.util.Date());
			}
		} catch( InterruptedException e ) {
			// NOTE(review): the interrupt flag is not re-asserted here -- confirm intended
			e.printStackTrace();
			logger.warning(e.toString());
		}
	}
	/**
	 * Builds the peer list used in VPN/infrastructure mode: ordinary peers
	 * get the set of registered infrastructure keys; infrastructure peers
	 * additionally get a slice of nearby ordinary peers so that the whole
	 * membership is covered by ~2 infrastructure peers each.
	 */
	private synchronized List<KeyRegistrationRecord> getVPNList(final String inBase64Key) {
		Set<KeyRegistrationRecord> out = new HashSet<KeyRegistrationRecord>();
		/**
		 * If this key is an infrastructure peer, the nearest (total clients / 1/2total servers).
		 * Otherwise, return just the
		 * registered infrastructure peers.
		 */
		for( String infraKey : mVPN_ids ) {
			// skip the requester itself and infra keys that never registered
			if( infraKey.equals(inBase64Key) == false &&
				key_to_record.containsKey(infraKey) )
			{
				out.add(key_to_record.get(infraKey));
				logger.finest("Adding infrastructure peer " + key_to_record.get(infraKey).getNickname() + " / " + infraKey);
			} else {
				logger.finest("Skipping own or unregistered infrastructure key: " + infraKey);
			}
		}
		logger.finest("Added " + out.size() + " infrastructure peers to the VPN-response for: " + inBase64Key);
		updateRefreshTime(inBase64Key);
		/**
		 * Unlike ordinary operation, we want _all_ the peers to be covered.
		 * Divide things up so that each registered peer is covered by ~2 infrastructure peers
		 */
		if( mVPN_ids.contains(inBase64Key) ) {
			int number = key_to_record.size() / (int)Math.round(0.5 * mVPN_ids.size());
			logger.finest("For infrastructure peer, giving: " + number + " / " + key_to_record.size() + " -- " + inBase64Key);
			/**
			 * We've already updated the refresh time above
			 */
			out.addAll(getNearestPeers(inBase64Key, number, false));
		}
		return Arrays.asList(out.toArray(new KeyRegistrationRecord[0]));
	}
// for debugging
private void bench() {
String k = key_to_record.keySet().toArray(new String[0])[0];
long start = System.currentTimeMillis();
for( int i=0; i<3000; i++ ) {
isRegistered(k);
// key_to_record.containsKey(k);
}
System.out.println("took: " + (System.currentTimeMillis()-start));
}
public static void main( String[] args ) throws Exception
{
EmbeddedServer.load_config("community.conf");
CommunityDAO rep = CommunityDAO.get();
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
if( args.length > 0 ) {
if( args[0].equals("-dump") ) {
PrintStream out = new PrintStream(new FileOutputStream(args[1]));
for(String t : new String[]{"keys", "valid_accounts"}) {
out.println(t);
rep.dumpTable(t, out, "\n");
}
}
}
while( true )
{
String line;
System.out.print( "\n> " );
System.out.flush();
line = in.readLine();
String [] toks = line.split("\\s+");
try
{
if( line.equals("create") )
{
rep.create_tables();
}
else if( line.equals("bench") ) {
rep.bench();
} else if( line.startsWith("drop") ) {
rep.drop_tables();
}
else if( line.startsWith("show") )
{
rep.dumpTable(toks[1], System.out, "\n");
}
else if( line.equals("newaccount") ) {
if( toks.length != 4 ) {
System.err.println("newaccount <user> <pass> <role>");
continue;
}
try {
CommunityDAO.get().createAccount(toks[1], toks[2], UserRole.roleForString(toks[3]));
} catch( Exception e ) {
e.printStackTrace();
}
}
else if( line.equals("test") )
{
rep.drop_tables();
rep.create_tables();
CryptoUtils c = new CryptoUtils();
long start = System.currentTimeMillis();
for( int i=0; i<100; i++ ) {
String kname = Base64.encode(c.getPair().getPublic().getEncoded()).replaceAll("\n","");
rep.registerUser(kname, "user123-" + kname.substring(40, 45), "1.2.3.4", "admin");
}
}
else if( line.equals("q") )
break;
else
{
Connection con = rep.connectionPool.getConnection();
Statement s = con.createStatement();
if( line.toLowerCase().startsWith("select") )
{
int count =0;
ResultSet rs = s.executeQuery(line);
while( rs.next() )
count++;
System.out.println("count: " + count);
}
else
System.out.println( s.execute(line) + "" );
s.close();
con.close();
}
}
catch( SQLException e )
{
System.err.println(e);
e.printStackTrace();
}
}
}
	/**
	 * Writes the entire contents of a table to the given stream: a header
	 * of column labels, one row per line (columns right-padded to the
	 * label width), and a trailing row count.
	 *
	 * NOTE(review): the table name is concatenated into the SQL; callers
	 * must only pass trusted table names (admin tooling only).
	 */
	public void dumpTable(final String table, final PrintStream inOut, final String newline) {
		(new SQLStatementProcessor<Void>("SELECT * FROM " + table) {
			Void process( PreparedStatement s ) throws SQLException {
				ResultSet rs = s.executeQuery();
				ResultSetMetaData md = rs.getMetaData();
				for( int i=1; i<=md.getColumnCount(); i++ )
				{
					inOut.print( md.getColumnLabel(i) + " " );
				}
				inOut.print(newline);
				PrintWriter out = new PrintWriter(new OutputStreamWriter(inOut));
				int rowCount=0;
				while( rs.next() )
				{
					rowCount++;
					for( int i=1; i<=md.getColumnCount(); i++ )
					{
						// pad each cell to its column label's width
						out.printf( "%" + md.getColumnLabel(i).length() + "s ", rs.getObject(i) == null ? "null" : rs.getObject(i).toString() );
						out.flush();
					}
					out.flush();
					out.print(newline);
				}
				out.print("-- " + rowCount + " total --" + newline);
				out.flush();
				return null;
			}
		}).doit();
	}
/**
* To replace boilerplate SQL code.
*/
abstract class SQLStatementProcessor<V> {
String mSQL;
protected Connection con = null;
public SQLStatementProcessor( String sql ) {
mSQL = sql;
}
public V doit() {
PreparedStatement stmt = null;
V out = null;
con = null;
try {
con = connectionPool.getConnection();
stmt = con.prepareStatement(mSQL);
out = process(stmt);
} catch( SQLException e ) {
e.printStackTrace();
logger.warning(e.toString());
}
finally {
try {
stmt.close();
} catch( SQLException e ) {
e.printStackTrace();
logger.warning(e.toString());
}
try {
con.close();
} catch( SQLException e ) {
e.printStackTrace();
logger.warning(e.toString());
}
}
return out;
}
abstract V process(PreparedStatement s) throws SQLException;
}
	/**
	 * Registers a new public key: enforces per-IP limits (for "admin"/open
	 * registrations) or per-account limits (for authenticated servers),
	 * inserts the key row, and updates all in-memory caches, keeping the
	 * sorted peers list in order via binary-search insertion.
	 *
	 * @throws DuplicateRegistrationException if the key is already registered
	 * @throws TooManyRegistrationsException if an IP or account limit is hit
	 */
	public synchronized void registerUser(final String key, final String nick, final String remote_ip, final String username) throws DuplicateRegistrationException, TooManyRegistrationsException {
		if( isRegistered(key) ) {
			throw new DuplicateRegistrationException(key);
		}
		logger.finest("Registration request: u: " + username + " ip: " + remote_ip + " n: " + nick);
		int maxRegistrationsPerIP = 5;
		try {
			maxRegistrationsPerIP = Integer.parseInt(System.getProperty(EmbeddedServer.Setting.KEY_REG_LIMIT_IP.getKey()));
		} catch( Exception e ) {
			e.printStackTrace();
		}
		/**
		 * "admin" is used to credit registrations on open servers and has no limit -- but, we do
		 * enforce a limit on the number of registrations per-IP so that people can't
		 * crawl the server's entire membership.
		 */
		final MutableLong user_id = new MutableLong(-1); // something sure to fail in case there is some intermediate SQL error
		if( username.equals("admin") ) {
			user_id.set(1);
			if( ips_to_key_counts.containsKey(remote_ip) ) {
				if( ips_to_key_counts.get(remote_ip) > maxRegistrationsPerIP &&
					remote_ip.equals("127.0.0.1") == false ) { // unlimited registrations from localhost
					throw new TooManyRegistrationsException(ips_to_key_counts .get(remote_ip));
				}
			}
		}
		/**
		 * else this is an authenticated server and we have per-username registration limits to enforce
		 */
		else {
			boolean too_many_per_account = (new SQLStatementProcessor<Boolean>("SELECT registered_keys, max_registrations, uid FROM valid_accounts WHERE username = ?") {
				Boolean process( PreparedStatement s ) throws SQLException {
					s.setString(1, username);
					ResultSet rs = s.executeQuery();
					if( rs.next() == false ) {
						throw new SQLException("No valid_accounts record for username: " + username + " and should have been detected earlier.");
					}
					// side effect: capture the account's uid for the insert below
					user_id.set((int)rs.getLong("uid"));
					logger.finer("Checking max registrations for " + username + " " + rs.getInt("registered_keys") + " / " + rs.getInt("max_registrations"));
					return rs.getInt("registered_keys") + 1 > rs.getInt("max_registrations");
				}
			}).doit();
			if( too_many_per_account ) {
				throw new TooManyRegistrationsException();
			}
		}
		(new SQLStatementProcessor<Void>("INSERT INTO registered_keys (public_key, registration_ip, key_hash, nick, created_by_account) VALUES (?, ?, ?, ?, ?)") {
			Void process( PreparedStatement s ) throws SQLException {
				s.setString(1, key);
				s.setString(2, remote_ip);
				s.setInt(3, key.hashCode());
				s.setString(4, nick);
				s.setLong(5, user_id.get());
				s.executeUpdate();
				return null;
			}
		}).doit();
		// read back the DB-assigned id of the row we just inserted
		Long new_id = (new SQLStatementProcessor<Long>("SELECT db_id FROM registered_keys WHERE public_key = ?") {
			Long process( PreparedStatement s ) throws SQLException {
				s.setString(1, key);
				ResultSet rs = s.executeQuery();
				rs.next();
				return rs.getLong(1);
			}
		}).doit();
		/**
		 * Update all the soft state.
		 */
		KeyRegistrationRecord neu = new KeyRegistrationRecord(key, nick, new Date(), new Date(), remote_ip, user_id.get(), new_id);
		// binarySearch returns (-(insertion point) - 1) for a missing key;
		// +1 then Math.abs recovers the insertion point itself
		int index = Collections.binarySearch(peers, neu) + 1;
		if( Math.abs(index) == peers.size() ) {
			peers.add(neu);
		} else {
			peers.add(Math.abs(index), neu);
		}
		key_to_record.put(neu.getBase64PublicKey(), neu);
		id_to_record.put(neu.getID(), neu);
		topology.put(neu, new HashSet<KeyRegistrationRecord>());
		if( ips_to_key_counts.get(remote_ip) == null ) {
			ips_to_key_counts.put(remote_ip, 1);
		} else {
			ips_to_key_counts.put(remote_ip, ips_to_key_counts.get(remote_ip)+1);
		}
	}
public boolean isRegistered(final String key) {
return key_to_record.containsKey(key);
// return (new SQLStatementProcessor<Boolean>("SELECT db_id FROM registered_keys WHERE public_key = ?") {
// Boolean process( PreparedStatement s ) throws SQLException {
// s.setString(1, key);
// return s.executeQuery().next();
// }
// }).doit();
}
	/**
	 * Materializes a CommunityAccount from the current row of a
	 * valid_accounts ResultSet, joining in the user's role tags from
	 * user_roles. NOTE(review): the role statement leaks if the query
	 * throws -- consider closing it in a finally block.
	 */
	private CommunityAccount accountFromResultSet( Connection con, ResultSet rs ) throws SQLException {
		long id = rs.getLong("uid");
		/**
		 * Grab roles from other table. Don't use the wrapper since this is only called
		 * from process() (i.e., when we already have the lock)
		 */
		PreparedStatement s = con.prepareStatement("SELECT role_id FROM user_roles WHERE uid = ?");
		s.setLong(1, id);
		ResultSet roleSet = s.executeQuery();
		List<Long> roles = new ArrayList<Long>();
		while( roleSet.next() ) {
			roles.add(roleSet.getLong(1));
		}
		s.close();
		String [] converted = new String[roles.size()];
		for( int i=0; i<converted.length; i++ ) {
			// -1 corrects for SQL 1-based indexing
			converted[i] = UserRole.values()[roles.get(i).intValue()-1].getTag();
		}
		CommunityAccount out = new CommunityAccount(rs.getString("username"),
				rs.getString("password_hash"),
				converted,
				rs.getInt("registered_keys"),
				rs.getInt("max_registrations"),
				rs.getLong("uid"));
		if( logger.isLoggable(Level.FINEST) ) {
			logger.finest("Converted account: " + out);
		}
		return out;
	}
public Principal authenticate(final String username, final String credentials) {
logger.info("authenticate called for user: " + username );
CommunityAccount purported = (new SQLStatementProcessor<CommunityAccount>("SELECT * FROM valid_accounts WHERE username = ?") {
CommunityAccount process( PreparedStatement s ) throws SQLException {
s.setString(1, username);
ResultSet rs = s.executeQuery();
if( rs.next() ) {
return accountFromResultSet(con, rs);
}
return null;
}
}).doit();
if( purported == null ) {
logger.warning("Unknown user: " + username + " / returning null.");
return null;
}
try {
if( Arrays.equals(getPasswordHash(credentials.toString()),
TypeUtil.fromHexString(purported.getHash().substring("MD5:".length()))) ) {
logger.finest("Passwords match for: " + username);
return purported;
}
} catch (IOException e) {
logger.warning(e.toString());
e.printStackTrace();
}
logger.fine("Password did not match for user: " + username);
return null;
}
public void createAccount(String username, String password, UserRole role) throws DuplicateAccountException, IOException {
/**
* Don't use the SQLStatementProcessor here since we're actually expecting SQLExceptions when there
* are duplicate nicks
*/
PreparedStatement stmt = null;
Connection con = null;
try {
con = connectionPool.getConnection();
stmt = con.prepareStatement("INSERT INTO valid_accounts (username, password_hash, max_registrations) VALUES (?, ?, ?)");
String passHash = "MD5:" + TypeUtil.toHexString(getPasswordHash(password));
stmt.setString(1, username);
stmt.setString(2, passHash);
try {
stmt.setInt(3, Integer.parseInt(System.getProperty(EmbeddedServer.Setting.KEY_REG_LIMIT_ACCOUNT.getKey())));
} catch( NumberFormatException e ) {
e.printStackTrace();
}
stmt.executeUpdate();
stmt.close();
stmt = con.prepareStatement("SELECT uid FROM valid_accounts WHERE username = ?");
stmt.setString(1, username);
ResultSet rs = stmt.executeQuery();
rs.next();
long newID = rs.getLong(1);
stmt.close();
stmt = con.prepareStatement("INSERT INTO user_roles (uid, role_id) VALUES (?, ?)");
stmt.setLong(1, newID);
stmt.setLong(2, role.getID());
stmt.executeUpdate();
stmt.close();
} catch( SQLException e ) {
if( e.toString().toLowerCase().contains("duplicate key value") || e.toString().toLowerCase().contains("duplicate entry") ) {
throw new DuplicateAccountException();
}
e.printStackTrace();
logger.warning(e.toString());
throw new IOException(e.toString());
}
finally {
try {
con.close();
} catch( SQLException e ) {
e.printStackTrace();
logger.warning(e.toString());
}
}
}
private byte[] getPasswordHash(String password) throws IOException {
MessageDigest digest;
try {
digest = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
throw new IOException("No MD5!");
}
digest.update((password).getBytes());
byte [] hash_bytes = digest.digest();
return hash_bytes;
}
public int changePassword(final String username, final String pw) {
return (new SQLStatementProcessor<Integer>("UPDATE valid_accounts SET password_hash = ? WHERE username = ?") {
Integer process( PreparedStatement s ) throws SQLException {
try {
String passHash = "MD5:" + TypeUtil.toHexString(getPasswordHash(pw));
s.setString(1, passHash);
s.setString(2, username);
return s.executeUpdate();
} catch( IOException e ) {
e.printStackTrace();
return 0;
}
}
}).doit();
}
	/**
	 * This method results in the entire DB being reloaded -- call sparingly!
	 *
	 * Deletes an account and all keys it registered: 1) resolve uid,
	 * 2) delete the uid's keys, 3) delete the account row, 4) reload all
	 * soft state from the DB if anything was removed. The "admin" account
	 * can never be deleted.
	 *
	 * @throws IOException if the user doesn't exist, is "admin", or the
	 *         final account delete removed no rows
	 */
	public void deleteAccount(final String username) throws IOException {
		if( username.equals("admin") ) {
			throw new IOException("Cannot delete admin");
		}
		// used as a mutable boolean flag settable from the anonymous class below
		final StringBuffer doReload = new StringBuffer();
		/**
		 * For some reason, ON DELETE CASCADE referencing valid_accounts produces a weird derby error,
		 * so we do this manually. 1) get uid, 2) delete uid's keys, 3) delete uid's account
		 */
		final Long uid = (new SQLStatementProcessor<Long>("SELECT uid FROM valid_accounts WHERE username = ?") {
			Long process( PreparedStatement s ) throws SQLException {
				s.setString(1, username);
				ResultSet rs = s.executeQuery();
				if( !rs.next() ) {
					return -1L;
				}
				return rs.getLong(1);
			}
		}).doit();
		if( uid == -1 ) {
			throw new IOException("no such user");
		}
		logger.fine("got uid: " + uid);
		(new SQLStatementProcessor<Void>("DELETE FROM registered_keys WHERE created_by_account = ?") {
			Void process( PreparedStatement s ) throws SQLException {
				s.setLong(1, uid);
				int deleted = 0;
				/**
				 * Best effort consistency -- this delete might take a while, so we might as well delay
				 * requests that would be inconsistent if possible before the reload
				 */
				synchronized(CommunityDAO.this) {
					deleted = s.executeUpdate();
				}
				logger.info("Deleted " + deleted + " before deleting account: " + username);
				return null;
			}
		}).doit();
		IOException out = (new SQLStatementProcessor<IOException>("DELETE FROM valid_accounts WHERE username = ?") {
			IOException process( PreparedStatement s ) throws SQLException {
				s.setString(1, username);
				logger.finest("Executing delete user update: " + username);
				int updated = s.executeUpdate();
				if( updated == 0 ) {
					return new IOException("No such user? " + updated);
				} else {
					/**
					 * we probably just removed a bunch of keys, re-sync
					 */
					logger.fine("Removed user, setting reload");
					doReload.append("t");
				}
				return null;
			}
		}).doit();
		if( out != null ) {
			throw out;
		}
		if( doReload.length() > 0 ) {
			logger.fine("Reloading...");
			load();
		}
	}
public KeyRegistrationRecord getKeyForID( final long id ) {
return id_to_record.get(id);
}
	/**
	 * This method results in the entire DB being reloaded -- call sparingly!
	 *
	 * This is called from the admin servlet when manually deleting. Pruning of the table at least
	 * batches the removals and only reloads once.
	 *
	 * NOTE(review): id_to_record/topology are not pruned here directly --
	 * presumably load() rebuilds them; confirm it is always reached when
	 * the DB delete succeeds.
	 *
	 * @return the number of DB rows removed (0 means cache/DB disagreement)
	 * @throws NoSuchUserException if the key isn't in the in-memory peer list
	 */
	public synchronized int deregisterKey(final String base64PublicKey) throws NoSuchUserException {
		if( peers.remove(new KeyRegistrationRecord(base64PublicKey)) == false ) {
			throw new NoSuchUserException(base64PublicKey);
		}
		if( key_to_record.remove(base64PublicKey) == null ) {
			logger.warning("Inconsistent key->record / peers list state: " + base64PublicKey);
		}
		int updated = (new SQLStatementProcessor<Integer>("DELETE FROM registered_keys WHERE public_key = ?") {
			Integer process( PreparedStatement s ) throws SQLException {
				s.setString(1, base64PublicKey);
				int updated = s.executeUpdate();
				if( updated == 0 ) {
					logger.warning("Inconsist DB/peer cache state wrt " + base64PublicKey);
				}
				return updated;
			}
		}).doit();
		if( updated > 0 ) {
			// full reload re-syncs all soft state with the DB
			load();
			logger.info("Admin removed key: " + base64PublicKey);
		}
		return updated;
	}
public void registerVPN(Set<String> vpn_ids) {
mVPN_ids = vpn_ids;
logger.info("Registered: " + vpn_ids.size() + " infrastructure keys");
}
public List<KeyRegistrationRecord> getPeers(String inBase64Key) {
long start = System.currentTimeMillis();
List<KeyRegistrationRecord> out;
int maxFriendsToReturn = 26;
try {
maxFriendsToReturn = Integer.parseInt(System.getProperty(EmbeddedServer.Setting.MAX_FRIENDS_RETURNED.getKey()));
} catch( Exception e ) {
e.printStackTrace();
}
if( mVPN_ids.size() == 0 ) {
// out = getNearestPeers(inBase64Key, maxFriendsToReturn, true);
out = getRandomPeers(inBase64Key, maxFriendsToReturn, (int)Math.round(1.5 * (double)maxFriendsToReturn), true);
} else {
out = getVPNList(inBase64Key);
}
logger.finer("getPeers() took: " + (System.currentTimeMillis()-start));
return out;
}
public List<CommunityAccount> getAccounts() {
return (new SQLStatementProcessor<List<CommunityAccount>>("SELECT * FROM valid_accounts") {
List<CommunityAccount> process( PreparedStatement s ) throws SQLException {
List<CommunityAccount> out = new ArrayList<CommunityAccount>();
ResultSet rs = s.executeQuery();
while( rs.next() ) {
out.add(accountFromResultSet(con, rs));
}
return out;
}
}).doit();
}
public CommunityAccount getAccountInfo(final long uid) {
return (new SQLStatementProcessor<CommunityAccount>("SELECT * FROM valid_accounts WHERE uid = ?") {
CommunityAccount process( PreparedStatement s ) throws SQLException {
s.setLong(1, uid);
ResultSet rs = s.executeQuery();
if( rs.next() ) {
return accountFromResultSet(con, rs);
}
return null;
}
}).doit();
}
public int setMaxRegistrationsForUID( final int maxRegs, final long uid) {
return (new SQLStatementProcessor<Integer>("UPDATE valid_accounts SET max_registrations = ? WHERE uid = ?") {
Integer process( PreparedStatement s ) throws SQLException {
s.setInt(1, maxRegs);
s.setLong(2, uid);
return s.executeUpdate();
}
}).doit();
}
public CommunityAccount getAccountForID( final long uid ) {
return (new SQLStatementProcessor<CommunityAccount>("SELECT * FROM valid_accounts WHERE uid = ?") {
CommunityAccount process( PreparedStatement s ) throws SQLException {
s.setLong(1, uid);
ResultSet rs = s.executeQuery();
if( rs.next() ) {
return accountFromResultSet(con, rs);
}
return null;
}
}).doit();
}
public CommunityAccount getAccountForName( final String name ) {
return (new SQLStatementProcessor<CommunityAccount>("SELECT * FROM valid_accounts WHERE username = ?") {
CommunityAccount process( PreparedStatement s ) throws SQLException {
s.setString(1, name);
ResultSet rs = s.executeQuery();
if( rs.next() ) {
return accountFromResultSet(con, rs);
}
return null;
}
}).doit();
}
public boolean hasInfrastructurePeers() {
return mVPN_ids.size() > 0;
}
	/** @return the number of pending lazy DB write tasks queued by updateRefreshTime(). */
	public int getLazyUpdateQueueSize() {
		return mLazyDBQueue.size();
	}
	/** Records this community server's own URL. */
	public void setURL(String ourUrl) {
		mURL = ourUrl;
	}
	/** @return this community server's own URL as set via setURL(). */
	public String getURL() {
		return mURL;
	}
public void update_preview( final long id, final byte [] previewpng ) {
(new SQLStatementProcessor<Void>("UPDATE swarm_extras SET previewpng = ? WHERE swarmid = ?") {
public Void process( PreparedStatement s ) throws SQLException {
s.setLong(2, id);
s.setBytes(1, previewpng);
s.executeUpdate();
return null;
}}).doit();
}
	/**
	 * Publishes a torrent: decodes the bencoded metadata, computes the
	 * infohash, rejects duplicates, optionally discards the torrent body
	 * and/or preview per server settings, then inserts the swarm row and
	 * its extras (preview + description).
	 *
	 * @throws DuplicateSwarmRegistrationException if the infohash already exists
	 * @throws IOException if metadata is null, the insert fails, or any
	 *         other error occurs (wrapped)
	 */
	public synchronized void publish_swarm(byte[] torrentbin, byte[] previewpng, final String description,
			final String category, final CommunityAccount submitter,
			final String fromIp ) throws DuplicateSwarmRegistrationException, IOException {
		try {
			if( torrentbin == null ) {
				throw new IOException("Swarm metadata was null.");
			}
			// infohash = SHA1 of the bencoded "info" dictionary
			Map metainfo = BDecoder.decode(torrentbin);
			Map info = (Map) metainfo.get("info");
			SHA1Hasher s = new SHA1Hasher();
			final byte [] torrent_hash_bytes = s.calculateHash(BEncoder.encode(info));
			final String torrent_hash_str = ByteFormatter.encodeString(torrent_hash_bytes);
			boolean duplicate = (new SQLStatementProcessor<Boolean>("SELECT infohash FROM published_swarms WHERE infohash = ?") {
				Boolean process( PreparedStatement s ) throws SQLException {
					s.setString(1, torrent_hash_str);
					ResultSet rs = s.executeQuery();
					if( rs.next() ) {
						return true;
					}
					return false;
				}
			}).doit();
			if( duplicate ) {
				throw new DuplicateSwarmRegistrationException(torrent_hash_str);
			}
			final String torrent_name = new String((byte[])info.get("name"), "UTF-8");
			// single-file torrents have no "files" list and carry a top-level "length"
			List flist = (List) info.get("files");
			final int num_files = flist == null ? 1 : flist.size();
			Long length = (Long)info.get("length");
			if( length == null ) {
				// multi-file: total size is the sum of the per-file lengths
				long acc = 0;
				for( int i=0; i<flist.size(); i++ ) {
					Map file_map = (Map)flist.get(i);
					acc += (Long)file_map.get("length");
				}
				length = acc;
			}
			if( System.getProperty(EmbeddedServer.Setting.STORE_TORRENTS.getKey()).equals(
					Boolean.FALSE.toString()) ) {
				torrentbin = null;
				logger.finer("Discarding torrent info due to server setting... (" + torrent_name + ")");
			}
			if( System.getProperty(EmbeddedServer.Setting.DISCARD_PREVIEWS.getKey()).equals(
					Boolean.TRUE.toString()) ) {
				previewpng = null;
				logger.finer("Discarding preview due to server setting... (for torrent: " + torrent_name + ")");
			}
			// effectively-final copies for capture by the anonymous classes below
			final Long length_shadow = length;
			final byte [] torrentbin_shadow = torrentbin;
			final byte [] previewpng_shadow = previewpng;
			boolean success = (new SQLStatementProcessor<Boolean>(
					"INSERT INTO published_swarms (name, num_files, total_size, " +
					"uploaded_by, infohash, bin, ip, category, has_torrent, needs_moderated) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") {
				Boolean process( PreparedStatement s ) throws SQLException {
					try {
						s.setString(1, torrent_name);
						s.setInt(2, num_files);
						s.setLong(3, length_shadow);
						s.setLong(4, submitter == null ? 1 : submitter.getID()); // if no principal is given, guest uploads are attributed to the admin
						s.setString(5, torrent_hash_str);
						s.setBytes(6, torrentbin_shadow);
						s.setString(7, fromIp);
						s.setString(8, category);
						s.setBoolean(9, torrentbin_shadow != null);
						if( submitter != null ) {
							s.setBoolean(10, !submitter.canModerate()); // moderator submissions don't need to be reviewed.
						} else {
							s.setBoolean(10, true);
						}
						s.executeUpdate();
						return true;
					} catch( SQLException e ) {
						logger.warning(e.toString());
						return false;
					}
				}
			}).doit();
			if( success != true ) {
				throw new IOException("Insert failed (see previous errors)");
			}
			/**
			 * Get the id of the newly inserted swarm in preparation for inserting the extras
			 */
			final long swarmid = (new SQLStatementProcessor<Long>("SELECT * FROM published_swarms WHERE infohash = ?") {
				Long process( PreparedStatement s ) throws SQLException {
					s.setString(1, torrent_hash_str);
					ResultSet rs = s.executeQuery();
					rs.next();
					return rs.getLong(1);
				}
			}).doit();
			/**
			 * Now insert the extras.
			 */
			(new SQLStatementProcessor<Void>("INSERT INTO swarm_extras (swarmid, previewpng, description) VALUES " +
					"(?, ?, ?)") {
				Void process( PreparedStatement s ) throws SQLException {
					s.setLong(1, swarmid);
					s.setBytes(2, previewpng_shadow);
					s.setString(3, description);
					s.executeUpdate();
					return null;
				}
			}).doit();
		} catch( IOException e ) {
			throw e;
		} catch( Exception e ) {
			e.printStackTrace();
			throw new IOException(e.toString());
		}
	}
public PublishedSwarm getSwarm( final long swarmid ) {
return (new SQLStatementProcessor<PublishedSwarm>("SELECT * FROM published_swarms WHERE swarmid = ?" ){
PublishedSwarm process(PreparedStatement s) throws SQLException {
s.setLong(1, swarmid);
ResultSet rs = s.executeQuery();
if( rs.next() ) {
return new PublishedSwarm(
rs.getLong("swarmid"),
rs.getString("name"),
rs.getInt("num_files"),
rs.getLong("total_size"),
rs.getTimestamp("date_uploaded").getTime(),
rs.getString("category"),
rs.getString("infohash"),
rs.getBoolean("removed"),
rs.getLong("uploaded_by"),
rs.getBoolean("needs_moderated"),
rs.getBoolean("has_torrent"));
}
return null;
}}).doit();
}
	/**
	 * Searches published swarms with optional name/category/uploader
	 * filters, moderation visibility, sorting and paging. The query is
	 * assembled dynamically; every user-supplied value is bound as a
	 * prepared-statement parameter (sortBy is whitelisted since ORDER BY
	 * cannot be parameterized).
	 *
	 * @param nameMatch substring match on name (min 3 chars), or null
	 * @param categoryMatch exact category, or null
	 * @param userMatch uploader uid filter (moderators only), or null
	 * @param offset/limit paging; values <= 0 disable the clause
	 * @param sortBy one of name/date_uploaded/total_size/category, or null
	 * @param desc descending sort order when true
	 * @param isModerator moderators also see unmoderated swarms
	 */
	public List<PublishedSwarm> selectSwarms( final String nameMatch, final String categoryMatch, final Long userMatch,
			final int offset, final int limit, final String sortBy, final boolean desc, final boolean isModerator ) {
		if( nameMatch != null ) {
			// too-short searches would scan/return nearly everything
			if( nameMatch.length() <= 2 ) {
				return new ArrayList<PublishedSwarm>();
			}
		}
		StringBuilder sb = new StringBuilder();
		final List<Object> params = new ArrayList<Object>();
		sb.append("SELECT swarmid, name, num_files, total_size, category, date_uploaded, category, " +
				"infohash, removed, uploaded_by, needs_moderated, has_torrent FROM published_swarms");
		boolean appendedWhere = false;
		if( nameMatch != null ) {
			appendedWhere = true;
			sb.append(" WHERE lower(name) LIKE lower(?)" );
			params.add("%"+nameMatch+"%");
		}
		if( categoryMatch != null ) {
			if( !appendedWhere ) {
				appendedWhere = true;
				sb.append( " WHERE");
			} else {
				sb.append( " AND");
			}
			sb.append(" category = ?" );
			params.add(categoryMatch);
		}
		// non-moderators only see moderated content when moderation is required
		if( isModerator == false &&
			System.getProperty(EmbeddedServer.Setting.REQUIRE_SWARM_MODERATION.getKey()).equals(Boolean.TRUE.toString()) ) {
			if( !appendedWhere ) {
				appendedWhere = true;
				sb.append( " WHERE");
			} else {
				sb.append( " AND");
			}
			sb.append( " needs_moderated = FALSE");
		}
		if( isModerator &&
			userMatch != null ) {
			if( !appendedWhere ) {
				appendedWhere = true;
				sb.append( " WHERE");
			} else {
				sb.append( " AND");
			}
			sb.append( " uploaded_by = ?");
			params.add(userMatch);
		}
		/**
		 * We can't use a prepared statement for sort ordering, so we need to
		 * restrict the field value manually.
		 */
		if( sortBy != null ) {
			if( sortBy.equals("name") ||
				sortBy.equals("date_uploaded") ||
				sortBy.equals("total_size") ||
				sortBy.equals("category") ) {
				sb.append(" ORDER BY " + sortBy);
				if( desc ) {
					sb.append(" DESC");
				} else {
					sb.append(" ASC");
				}
			} else {
				logger.warning("Malformed sort request: " + sortBy);
			}
		}
		if( limit > 0 ) {
			sb.append( " LIMIT ?" );
			params.add(Integer.valueOf(limit));
		}
		if( offset > 0 ) {
			sb.append( " OFFSET ?" );
			params.add(Integer.valueOf(offset));
		}
		logger.finest("Query string: " + sb.toString());
		return (new SQLStatementProcessor<List<PublishedSwarm>>(sb.toString()){
			List<PublishedSwarm> process(PreparedStatement s) throws SQLException {
				List<PublishedSwarm> out = new ArrayList<PublishedSwarm>();
				// bind accumulated parameters in the order the clauses were appended
				for( int i=1; i<=params.size(); i++ ) {
					Object param = params.get(i-1);
					if( param instanceof Integer ) {
						s.setInt(i, (Integer)params.get(i-1));
					} else if( param instanceof Long ) {
						s.setLong(i, (Long)param);
					} else if( param instanceof String ) {
						s.setString(i, (String)param);
					} else {
						System.err.println("Parameter of unknown type: " + param.getClass().getName());
					}
				}
				logger.finest(s.toString());
				ResultSet rs = s.executeQuery();
				while( rs.next() ) {
					PublishedSwarm p = new PublishedSwarm(
							rs.getLong("swarmid"),
							rs.getString("name"),
							rs.getInt("num_files"),
							rs.getLong("total_size"),
							rs.getTimestamp("date_uploaded").getTime(),
							rs.getString("category"),
							rs.getString("infohash"),
							rs.getBoolean("removed"),
							rs.getLong("uploaded_by"),
							rs.getBoolean("needs_moderated"),
							rs.getBoolean("has_torrent"));
					out.add(p);
				}
				return out;
			}}).doit();
	}
/**
 * Fetches the extended details (description, download/vote counters, preview
 * image) for a single published swarm from the swarm_extras table.
 *
 * @param swarmid primary key of the swarm.
 * @return the details row, or null when no swarm_extras entry exists
 *         (presumably also null on SQL error via doit() -- TODO confirm).
 */
public PublishedSwarmDetails getSwarmDetails( final long swarmid ) {
    return (new SQLStatementProcessor<PublishedSwarmDetails>( "SELECT * FROM swarm_extras WHERE swarmid = ?" ){
        PublishedSwarmDetails process(PreparedStatement s) throws SQLException {
            s.setLong(1, swarmid);
            ResultSet rs = s.executeQuery();
            if( rs.next() ) {
                Blob blob = rs.getBlob("previewpng");
                return new PublishedSwarmDetails(
                    rs.getLong("swarmid"),
                    rs.getString("description"),
                    rs.getInt("downloads"),
                    rs.getString("language"),
                    rs.getInt("upvotes"),
                    rs.getInt("downvotes"),
                    // previewpng column may be NULL in the DB; propagate null in that case.
                    blob != null ? blob.getBytes(1, (int)blob.length()) : null);
            } else {
                return null;
            }
        }}).doit();
}
/**
 * Loads a single comment by its primary key.
 *
 * @param commentID primary key of the comment row.
 * @return the comment, or null when no row matches. Note: removed comments
 *         are NOT filtered here (unlike selectComments).
 */
public Comment getComment( final long commentID ) {
    return (new SQLStatementProcessor<Comment>( "SELECT * FROM comments WHERE commentid = ?" ){
        Comment process(PreparedStatement s) throws SQLException {
            s.setLong(1, commentID);
            ResultSet rs = s.executeQuery();
            if( rs.next() ) {
                return new Comment(
                    rs.getLong("swarmid"),
                    rs.getLong("commentid"),
                    rs.getString("accountname"),
                    rs.getTimestamp("time").getTime(),
                    rs.getLong("reply_to"),
                    rs.getInt("upvote"),
                    rs.getInt("downvote"),
                    rs.getString("ip"),
                    rs.getString("body"));
            }
            return null;
        }}).doit();
}
/**
 * Returns a page of non-removed comments for a swarm.
 *
 * NOTE(review): the query has no ORDER BY, so with LIMIT/OFFSET the paging
 * order is database-dependent -- presumably insertion order in practice;
 * confirm whether an explicit ORDER BY time is intended.
 *
 * @param swarmID swarm whose comments to list.
 * @param offset  number of rows to skip (paging).
 * @param limit   maximum number of rows to return.
 * @return the matching comments (possibly empty).
 */
public List<Comment> selectComments( final long swarmID, final int offset, final int limit ) {
    StringBuilder q = new StringBuilder();
    q.append("SELECT * FROM comments WHERE swarmid = ? AND removed = FALSE LIMIT ? OFFSET ? ");
    return (new SQLStatementProcessor<List<Comment>>(
        q.toString() ){
        List<Comment> process(PreparedStatement s) throws SQLException {
            s.setLong(1, swarmID);
            s.setInt(2, limit);
            s.setInt(3, offset);
            ResultSet rs = s.executeQuery();
            List<Comment> out = new ArrayList<Comment>();
            while( rs.next() ) {
                out.add(new Comment(
                    rs.getLong("swarmid"),
                    rs.getLong("commentid"),
                    rs.getString("accountname"),
                    rs.getTimestamp("time").getTime(),
                    rs.getLong("reply_to"),
                    rs.getInt("upvote"),
                    rs.getInt("downvote"),
                    rs.getString("ip"),
                    rs.getString("body")));
            }
            return out;
        }}).doit();
}
/**
 * Inserts a new comment on a swarm.
 *
 * Empty comments are silently ignored; comments over 4 KB are rejected.
 * The insert uses a parameterized statement, so the comment body is safe
 * from SQL injection here (HTML escaping is presumably the renderer's job).
 *
 * @param username account name of the poster.
 * @param swarmID  swarm being commented on.
 * @param comment  comment body (1..4096 chars).
 * @param replyTo  commentid this is a reply to.
 * @param ip       poster's IP address, stored for moderation.
 * @throws IOException if the comment is too long or the insert failed.
 */
public void postComment( final String username, final long swarmID, final String comment, final long replyTo, final String ip ) throws IOException {
    if( comment.length() == 0 ) {
        return;
    }
    if( comment.length() > 4*1024 ) {
        throw new IOException("Too long");
    }
    Boolean good = (new SQLStatementProcessor<Boolean>(
        "INSERT INTO comments (swarmid, accountname, reply_to, ip, body) VALUES (?, ?, ?, ?, ?)" ){
        Boolean process(PreparedStatement s) throws SQLException {
            s.setLong(1, swarmID);
            s.setString(2, username);
            s.setLong(3, replyTo);
            s.setString(4, ip);
            s.setString(5, comment);
            s.executeUpdate();
            return true;
        }}).doit();
    // doit() returning null indicates the statement processor swallowed an
    // SQLException; surface that to the caller as an IOException.
    if( good == null ) {
        throw new IOException("SQL error");
    }
}
/**
 * Loads the raw torrent bytes (the "bin" column) for a published swarm.
 *
 * @param id swarm primary key.
 * @return the stored bytes, or null when the swarm does not exist.
 */
public byte[] getSwarmBytes(final long id) {
    return (new SQLStatementProcessor<byte[]>(
        "SELECT bin FROM published_swarms WHERE swarmid = ?") {
        byte[] process(PreparedStatement stmt) throws SQLException {
            stmt.setLong(1, id);
            ResultSet results = stmt.executeQuery();
            // Guard clause: no matching row means no bytes to return.
            if( !results.next() ) {
                return null;
            }
            return results.getBytes(1);
        }}).doit();
}
/**
 * Returns every known swarm category name from the categories table.
 *
 * @return category names, possibly empty.
 */
public List<String> getCategories() {
    return (new SQLStatementProcessor<List<String>>("SELECT * FROM categories") {
        public List<String> process( PreparedStatement stmt ) throws SQLException {
            ResultSet results = stmt.executeQuery();
            List<String> names = new ArrayList<String>();
            // First column of each row is the category name.
            while( results.next() ) {
                names.add(results.getString(1));
            }
            return names;
        }}).doit();
}
/**
 * Soft-deletes a comment: the row is kept but flagged as removed, so
 * selectComments() will no longer return it.
 *
 * @param id primary key of the comment to hide.
 */
public void removeComment( final long id ) {
    (new SQLStatementProcessor<Void>("UPDATE comments SET removed = TRUE WHERE commentid = ?") {
        public Void process( PreparedStatement stmt ) throws SQLException {
            stmt.setLong(1, id);
            stmt.executeUpdate();
            return null;
        }}).doit();
}
/**
 * Sets or clears the soft-delete flag on a published swarm.
 *
 * @param id        swarm primary key.
 * @param isRemoved new value for the removed flag.
 */
public void markSwarmRemoved( final long id, final boolean isRemoved ) {
    (new SQLStatementProcessor<Void>("UPDATE published_swarms SET removed = ? WHERE swarmid = ?") {
        public Void process( PreparedStatement stmt ) throws SQLException {
            stmt.setBoolean(1, isRemoved);
            stmt.setLong(2, id);
            stmt.executeUpdate();
            return null;
        }}).doit();
}
/**
 * Permanently deletes a published swarm row (hard delete, unlike
 * markSwarmRemoved). Rows in related tables are presumably cleaned up by
 * FK cascade or elsewhere -- TODO confirm.
 *
 * @param id swarm primary key.
 */
public void deleteSwarm( final long id ) {
    (new SQLStatementProcessor<Void>("DELETE FROM published_swarms WHERE swarmid = ?") {
        public Void process( PreparedStatement stmt ) throws SQLException {
            stmt.setLong(1, id);
            stmt.executeUpdate();
            return null;
        }}).doit();
}
/**
 * Updates the category of a published swarm.
 *
 * @param id       swarm primary key.
 * @param category new category name (not validated against the categories
 *                 table here).
 * @throws IOException declared for interface compatibility; this
 *                     implementation does not actually throw it.
 */
public void setSwarmCategory( final long id, final String category ) throws IOException {
    (new SQLStatementProcessor<Void>("UPDATE published_swarms SET category = ? WHERE swarmid = ?") {
        public Void process( PreparedStatement stmt ) throws SQLException {
            stmt.setString(1, category);
            stmt.setLong(2, id);
            stmt.executeUpdate();
            return null;
        }}).doit();
}
/**
 * Changes the role of a user, both in the DB and in the cached account object.
 *
 * The built-in admin account (uid 1) can never be downgraded. Unknown role
 * strings are logged and ignored.
 *
 * @param id      uid of the user whose role is changed.
 * @param roleStr textual role tag, resolved via UserRole.roleForString().
 * @throws IOException declared but not thrown by this implementation.
 */
public void updateRole( final long id, final String roleStr ) throws IOException {
    if( id == 1 ) {
        logger.warning("Can't update roles for admin");
        return;
    }
    final UserRole neuRole = UserRole.roleForString(roleStr);
    if( neuRole == null ) {
        logger.warning("Unknown role.");
        return;
    }
    (new SQLStatementProcessor<Void>("UPDATE user_roles SET role_id = ? WHERE uid = ?") {
        public Void process( PreparedStatement s ) throws SQLException {
            s.setLong(1, neuRole.getID());
            s.setLong(2, id);
            s.executeUpdate();
            return null;
        }}).doit();
    /**
     * Only once we've updated the DB!
     */
    // NOTE(review): if getAccountForID(id) returns null (unknown uid) this
    // throws an NPE after the DB was already updated -- verify callers only
    // pass existing uids.
    getAccountForID(id).setRoles(new String[]{neuRole.getTag()});
}
/**
 * Returns the approximate number of rows in a table via the MySQL-specific
 * "SHOW TABLE STATUS" command (the "rows" value is an estimate for InnoDB).
 *
 * NOTE(review): if doit() maps an SQLException to null, unboxing the null
 * Integer into the primitive int return would throw an NPE -- confirm
 * doit()'s error semantics.
 *
 * @param table table name to look up.
 * @return estimated row count, or -1 if the table was not found.
 */
public int getApproximateRowCount( final String table ) {
    return (new SQLStatementProcessor<Integer>("SHOW TABLE STATUS LIKE ?") {
        public Integer process( PreparedStatement s ) throws SQLException {
            s.setString(1, table);
            ResultSet rs = s.executeQuery();
            if( rs.next() ) {
                return rs.getInt("rows");
            }
            return -1;
        }}).doit();
}
/**
 * Marks a swarm as moderated (or not).
 *
 * Note the inversion: the DB stores a "needs_moderated" flag, so passing
 * isModerated == true CLEARS needs_moderated, and vice versa.
 *
 * @param id          swarm primary key.
 * @param isModerated true once a moderator has approved the swarm.
 */
public void setSwarmModerated( final long id, final boolean isModerated ) {
    (new SQLStatementProcessor<Void>("UPDATE published_swarms SET needs_moderated = ? WHERE swarmid = ?") {
        public Void process( PreparedStatement s ) throws SQLException {
            s.setBoolean(1, !isModerated);
            s.setLong(2, id);
            s.executeUpdate();
            return null;
        }}).doit();
}
/**
 * Removes a category from the categories table. Swarms already tagged with
 * this category are not touched here.
 *
 * @param category category name to delete.
 */
public void deleteCategory( final String category ) {
    (new SQLStatementProcessor<Void>("DELETE FROM categories WHERE category = ?") {
        public Void process( PreparedStatement stmt ) throws SQLException {
            stmt.setString(1, category);
            stmt.executeUpdate();
            return null;
        }}).doit();
}
/**
 * Replaces the free-text description of a swarm in the swarm_extras table.
 *
 * @param id          swarm primary key.
 * @param description new description text.
 */
public void updateDescription( final long id, final String description ) {
    (new SQLStatementProcessor<Void>("UPDATE swarm_extras SET description = ? WHERE swarmid = ?") {
        public Void process( PreparedStatement stmt ) throws SQLException {
            stmt.setString(1, description);
            stmt.setLong(2, id);
            stmt.executeUpdate();
            return null;
        }}).doit();
}
/**
 * Decides whether the given (possibly anonymous) account may view the given
 * swarm.
 *
 * Moderators see everything. Everyone else is denied access to removed
 * swarms, and -- when the REQUIRE_SWARM_MODERATION setting is enabled --
 * to swarms that have not yet been moderated.
 *
 * @param who   the requesting account, or null for anonymous users.
 * @param swarm the swarm in question, or null (treated as no access).
 * @return true if access is allowed.
 */
public boolean hasPermissions( CommunityAccount who, PublishedSwarm swarm ) {
    if( swarm == null ) {
        return false;
    }
    boolean canModerate = false;
    if( who != null ) {
        canModerate = who.canModerate();
    }
    if( canModerate ) {
        return true;
    }
    // Bug fix: System.getProperty() returns null when the setting is absent,
    // which previously caused an NPE on .equals(). Boolean.parseBoolean(null)
    // is safely false, i.e. moderation is not required unless configured.
    boolean moderationRequired = Boolean.parseBoolean(
        System.getProperty(EmbeddedServer.Setting.REQUIRE_SWARM_MODERATION.getKey()));
    if( swarm.isNeeds_moderated() && moderationRequired ) {
        return false;
    }
    if( swarm.isRemoved() ) {
        return false;
    }
    return true;
}
/**
 * Returns the id of one swarm still awaiting moderation, or -1 if none.
 *
 * NOTE(review): if doit() maps SQL errors to null, unboxing into the
 * primitive long return would throw an NPE -- confirm doit()'s semantics.
 */
public long getNextUnmoderatedID() {
    return (new SQLStatementProcessor<Long>("SELECT swarmid FROM published_swarms WHERE needs_moderated = TRUE LIMIT 1") {
        public Long process( PreparedStatement s ) throws SQLException {
            ResultSet rs = s.executeQuery();
            if( rs.next() ) {
                return rs.getLong(1);
            }
            return -1L;
        }}).doit();
}
}
| 65,884 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
RSSServlet.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/RSSServlet.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.logging.Logger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.sun.syndication.feed.synd.SyndContent;
import com.sun.syndication.feed.synd.SyndContentImpl;
import com.sun.syndication.feed.synd.SyndEntry;
import com.sun.syndication.feed.synd.SyndEntryImpl;
import com.sun.syndication.feed.synd.SyndFeed;
import com.sun.syndication.feed.synd.SyndFeedImpl;
import com.sun.syndication.io.FeedException;
import com.sun.syndication.io.SyndFeedOutput;
import edu.washington.cs.oneswarm.community2.utils.StringTools;
/**
 * Serves an RSS 2.0 feed of recently uploaded swarms, optionally filtered
 * by category via the "cat" request parameter. The feed base URL must be
 * configured through the RSS_BASE_URL setting.
 */
public class RSSServlet extends javax.servlet.http.HttpServlet {

    private static final long serialVersionUID = 1L;

    private static Logger logger = Logger.getLogger(RSSServlet.class.getName());

    public RSSServlet() {
        logger.info("RSS Generation servlet started.");
    }

    public void doGet(HttpServletRequest request, HttpServletResponse response) {
        response.setContentType("application/rss+xml");
        // Bug fix: emit the feed as UTF-8 instead of the platform default
        // charset (the old code wrapped the raw output stream in a PrintWriter).
        response.setCharacterEncoding("UTF-8");
        final CommunityDAO dao = CommunityDAO.get();

        final String baseURL = System.getProperty(EmbeddedServer.Setting.RSS_BASE_URL.getKey());
        if (baseURL == null) {
            logger.warning("No base URL specified. Cannot generate RSS feed.");
            response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
            return;
        }

        String cat = request.getParameter("cat");

        // Moderators also get swarms that are still awaiting moderation.
        boolean canModerate = false;
        if (request.getUserPrincipal() != null) {
            CommunityAccount user = dao.getAccountForName(request.getUserPrincipal().getName());
            // Bug fix: a principal without a matching account previously caused an NPE.
            if (user != null) {
                canModerate = user.canModerate();
            }
        }

        List<PublishedSwarm> swarms = dao.selectSwarms(null, cat, null, 0,
                Integer.parseInt(System.getProperty(EmbeddedServer.Setting.SWARMS_PER_PAGE.getKey())),
                "date_uploaded", true, canModerate);

        SyndFeed feed = new SyndFeedImpl();
        feed.setFeedType("rss_2.0");
        feed.setTitle(System.getProperty(EmbeddedServer.Setting.SERVER_NAME.getKey())
                + (cat != null ? ": " + cat : ""));
        feed.setLink(baseURL);
        feed.setDescription("");

        List<SyndEntry> entries = new ArrayList<SyndEntry>();
        for (PublishedSwarm s : swarms) {
            PublishedSwarmDetails details = dao.getSwarmDetails(s.getSwarmID());

            SyndEntry entry = new SyndEntryImpl();
            entry.setTitle(s.getName());
            entry.setLink(baseURL + "details.jsp?id=" + s.getSwarmID());
            entry.setPublishedDate(new Date(s.getUploadedTimestamp()));

            // Bug fix: a swarm may have no swarm_extras row (details == null);
            // previously this dereferenced null.
            String descText = details != null ? details.getDescription() : "";
            SyndContent description = new SyndContentImpl();
            description.setType("text/plain");
            description.setValue(descText + "\n"
                    + (s.getCategory() != null ? s.getCategory() + " / " : "")
                    + StringTools.formatRate(s.getTotalSize()));
            entry.setDescription(description);
            entries.add(entry);
        }
        feed.setEntries(entries);

        SyndFeedOutput output = new SyndFeedOutput();
        try {
            // Use the container-provided writer so the declared charset applies.
            PrintWriter pw = response.getWriter();
            output.output(feed, pw);
            pw.flush();
        } catch (IOException e) {
            e.printStackTrace();
            response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        } catch (FeedException e) {
            e.printStackTrace();
            response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        }
    }
}
| 3,424 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
CategoriesServlet.java | /FileExtraction/Java_unseen/CSEMike_OneSwarm-Community-Server/src/edu/washington/cs/oneswarm/community2/server/CategoriesServlet.java | package edu.washington.cs.oneswarm.community2.server;
import java.io.IOException;
import java.io.PrintStream;
import java.util.List;
import java.util.logging.Logger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
 * Serves the list of known swarm categories as a small XML document:
 * {@code <categories><category name="..."/>...</categories>}.
 */
public class CategoriesServlet extends javax.servlet.http.HttpServlet {

    private static final long serialVersionUID = 1L;

    private static Logger logger = Logger.getLogger(CategoriesServlet.class.getName());

    public CategoriesServlet() {}

    public void doGet(HttpServletRequest request, HttpServletResponse response) {
        try {
            List<String> cats = CommunityDAO.get().getCategories();
            // Bug fix: declare what we actually send; previously neither a
            // content type nor a charset was set and the platform default
            // encoding was used.
            response.setContentType("text/xml");
            response.setCharacterEncoding("UTF-8");
            PrintStream out = new PrintStream(response.getOutputStream(), false, "UTF-8");
            out.println("<categories>");
            for( String category : cats ) {
                // Security fix: escape the category name so '"', '<' and '&'
                // cannot break out of the XML attribute (XML injection /
                // malformed output).
                out.println("<category name=\"" + escapeXmlAttribute(category) + "\"/>");
            }
            out.println("</categories>");
            out.flush();
            out.close();
        } catch( IOException e ) {
            logger.warning(e.toString());
            e.printStackTrace();
            response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        }
    }

    /** Minimal escaping of the five predefined XML entities for attribute values. */
    private static String escapeXmlAttribute(String s) {
        StringBuilder sb = new StringBuilder(s.length());
        for (int i = 0; i < s.length(); i++) {
            char c = s.charAt(i);
            switch (c) {
            case '&': sb.append("&amp;"); break;
            case '<': sb.append("&lt;"); break;
            case '>': sb.append("&gt;"); break;
            case '"': sb.append("&quot;"); break;
            case '\'': sb.append("&apos;"); break;
            default: sb.append(c);
            }
        }
        return sb.toString();
    }
}
| 1,083 | Java | .java | CSEMike/OneSwarm-Community-Server | 10 | 3 | 5 | 2009-11-11T17:20:04Z | 2011-05-19T17:17:41Z |
GalapagosTestConfig.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/GalapagosTestConfig.java | package com.hermesworld.ais.galapagos;
import com.hermesworld.ais.galapagos.security.SecurityConfig;
import com.hermesworld.ais.galapagos.security.impl.OAuthConfigController;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.oauth2.client.registration.ClientRegistrationRepository;
@Configuration
public class GalapagosTestConfig {

    // Shared test configuration: replaces the security / OAuth beans with
    // Mockito mocks so Spring context tests can start without a configured
    // identity provider.

    @MockBean
    private OAuthConfigController mockController;

    @MockBean
    private SecurityConfig securityConfig;

    @MockBean
    private ClientRegistrationRepository clientRegistrationRepository;
}
| 652 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ContextStartupTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/ContextStartupTest.java | package com.hermesworld.ais.galapagos;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Import;
import java.security.Security;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@SpringBootTest
@Import(GalapagosTestConfig.class)
class ContextStartupTest {

    // The fully started application context, injected once startup succeeds.
    @Autowired
    private ApplicationContext context;

    // mock the KafkaClusters implementation as we do not have a live Kafka server here
    @MockBean
    private KafkaClusters kafkaClusters;

    /** Unlocks strong crypto and registers BouncyCastle before the context boots. */
    @BeforeAll
    static void setupSecurity() {
        Security.setProperty("crypto.policy", "unlimited");
        Security.addProvider(new BouncyCastleProvider());
    }

    /** Smoke test: the Spring context starts and the expected beans exist. */
    @Test
    void testStartupContext() {
        assertNotNull(kafkaClusters);
        assertNotNull(context);
    }
}
| 1,195 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicControllerTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/topics/controller/TopicControllerTest.java | package com.hermesworld.ais.galapagos.topics.controller;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.events.GalapagosEventManagerMock;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.config.GalapagosTopicConfig;
import com.hermesworld.ais.galapagos.topics.service.ValidatingTopicService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import org.json.JSONObject;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.http.HttpStatus;
import org.springframework.web.server.ResponseStatusException;
import java.time.LocalDate;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.nullable;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link TopicController}, driven entirely by Mockito mocks
 * (this is a plain JUnit test, not a Spring Boot test).
 */
class TopicControllerTest {

    // Fix: this field previously carried @MockBean, which has no effect
    // outside a Spring test context and was misleading -- the field is
    // (re)assigned with mock(...) in feedMocks() like all the others.
    private KafkaClusters kafkaClusters;

    private ApplicationsService applicationsService;

    private NamingService namingService;

    private CurrentUserService userService;

    private SubscriptionService subscriptionService;

    private GalapagosTopicConfig topicConfig;

    private GalapagosEventManagerMock eventManager;

    private KafkaCluster kafkaTestCluster;

    private TopicBasedRepositoryMock<TopicMetadata> topicRepository;

    /** Creates fresh mocks and wires up a single "test" environment. */
    @BeforeEach
    void feedMocks() {
        kafkaClusters = mock(KafkaClusters.class);
        applicationsService = mock(ApplicationsService.class);
        namingService = mock(NamingService.class);
        userService = mock(CurrentUserService.class);
        subscriptionService = mock(SubscriptionService.class);
        topicConfig = mock(GalapagosTopicConfig.class);
        eventManager = new GalapagosEventManagerMock();
        kafkaTestCluster = mock(KafkaCluster.class);
        topicRepository = new TopicBasedRepositoryMock<>();

        when(kafkaTestCluster.getId()).thenReturn("test");
        when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test"));
        when(kafkaTestCluster.getRepository("topics", TopicMetadata.class)).thenReturn(topicRepository);
        when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(kafkaTestCluster));
    }

    @Test
    @DisplayName("it should not change the deprecation if topic description is changed")
    void testDontResetDeprecationWhenTopicDescChanges() {
        TopicMetadata topic = new TopicMetadata();
        topic.setOwnerApplicationId("app-1");
        topic.setName("topic-1");
        topic.setDeprecated(true);
        topic.setEolDate(LocalDate.of(2299, 12, 4));
        topic.setDescription("this topic is not a nice one :(");

        ValidatingTopicService topicService = mock(ValidatingTopicService.class);
        when(topicService.getTopic("test", "topic-1")).thenReturn(Optional.of(topic));

        UpdateTopicDto dto = new UpdateTopicDto(null, null, "updated description goes here", true);
        when(topicService.updateTopicDescription("test", "topic-1", "updated description goes here"))
                .thenReturn(FutureUtil.noop());
        when(applicationsService.isUserAuthorizedFor("app-1")).thenReturn(true);

        TopicController controller = new TopicController(topicService, kafkaClusters, applicationsService,
                namingService, userService);
        controller.updateTopic("test", "topic-1", dto);

        verify(topicService, times(1)).updateTopicDescription("test", "topic-1", "updated description goes here");
        verify(topicService, times(0)).unmarkTopicDeprecated(nullable(String.class));
    }

    @Test
    @DisplayName("it should change the owner if current user is authorized")
    void testChangeTopicOwner_positive() throws Exception {
        TopicMetadata topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");

        ValidatingTopicService topicService = mock(ValidatingTopicService.class);
        when(topicService.getTopic("test", "topic-1")).thenReturn(Optional.of(topic));
        when(applicationsService.isUserAuthorizedFor("app-1")).thenReturn(true);

        TopicController controller = new TopicController(topicService, kafkaClusters, applicationsService,
                namingService, userService);
        when(topicService.changeTopicOwner("test", "topic-1", "producer1")).thenReturn(FutureUtil.noop());

        ChangeTopicOwnerDto dto = new ChangeTopicOwnerDto();
        dto.setProducerApplicationId("producer1");
        controller.changeTopicOwner("test", "topic-1", dto);

        verify(topicService, times(1)).changeTopicOwner("test", "topic-1", "producer1");
    }

    @Test
    @DisplayName("it should not change the owner if current user is not authorized")
    void testChangeTopicOwner_negative() {
        TopicMetadata topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);

        ValidatingTopicService topicService = mock(ValidatingTopicService.class);
        when(topicService.getTopic("test", "topic-1")).thenReturn(Optional.of(topic));

        TopicController controller = new TopicController(topicService, kafkaClusters, applicationsService,
                namingService, userService);

        ChangeTopicOwnerDto dto = new ChangeTopicOwnerDto();
        dto.setProducerApplicationId("producer1");
        try {
            controller.changeTopicOwner("test", "topic-1", dto);
            fail("should fail because current user is not authorized");
        }
        catch (ResponseStatusException e) {
            assertEquals(HttpStatus.FORBIDDEN, e.getStatusCode());
        }
    }

    @Test
    @DisplayName("Can add producers for which I am not authorized")
    void testAddTopicProducer_notAuthorizedForProducer_positive() {
        ValidatingTopicService topicService = mock(ValidatingTopicService.class);
        TopicController controller = new TopicController(topicService, kafkaClusters, applicationsService,
                namingService, userService);

        // WHEN I am authorized for the topic owning application, but not the producer application
        when(applicationsService.isUserAuthorizedFor("app-1")).thenReturn(true);
        when(applicationsService.isUserAuthorizedFor("app-9")).thenReturn(false);

        TopicMetadata metadata = new TopicMetadata();
        metadata.setName("testtopic");
        metadata.setOwnerApplicationId("app-1");
        when(topicService.getTopic("test", "testtopic")).thenReturn(Optional.of(metadata));
        when(topicService.addTopicProducer("test", "testtopic", "app-9")).thenReturn(FutureUtil.noop());

        // THEN adding the producer must still succeed
        AddProducerDto producerDto = new AddProducerDto();
        producerDto.setProducerApplicationId("app-9");
        controller.addProducerToTopic("test", "testtopic", producerDto);

        verify(topicService, times(1)).addTopicProducer("test", "testtopic", "app-9");
    }

    @Test
    @DisplayName("Cannot add producer if not authorized for topic (but for producer)")
    void testAddTopicProducer_notAuthorizedForTopic_negative() {
        ValidatingTopicService topicService = mock(ValidatingTopicService.class);
        TopicController controller = new TopicController(topicService, kafkaClusters, applicationsService,
                namingService, userService);

        // WHEN I am authorized for the producer, but not the topic owning application
        when(applicationsService.isUserAuthorizedFor("app-9")).thenReturn(true);
        when(applicationsService.isUserAuthorizedFor("app-1")).thenReturn(false);

        TopicMetadata metadata = new TopicMetadata();
        metadata.setName("testtopic");
        metadata.setOwnerApplicationId("app-1");
        when(topicService.getTopic("test", "testtopic")).thenReturn(Optional.of(metadata));
        when(topicService.addTopicProducer("test", "testtopic", "app-9")).thenReturn(FutureUtil.noop());

        // THEN adding the producer must fail
        AddProducerDto producerDto = new AddProducerDto();
        producerDto.setProducerApplicationId("app-9");
        try {
            controller.addProducerToTopic("test", "testtopic", producerDto);
            fail("ResponseStatusException expected, but adding producer succeeded");
        }
        catch (ResponseStatusException e) {
            assertEquals(HttpStatus.FORBIDDEN, e.getStatusCode());
        }
    }

    @Test
    @DisplayName("Can remove producers for which I am not authorized")
    void testRemoveTopicProducer_notAuthorizedForProducer_positive() {
        ValidatingTopicService topicService = mock(ValidatingTopicService.class);
        TopicController controller = new TopicController(topicService, kafkaClusters, applicationsService,
                namingService, userService);

        // WHEN I am authorized for the topic owning application, but not the producer application
        when(applicationsService.isUserAuthorizedFor("app-1")).thenReturn(true);
        when(applicationsService.isUserAuthorizedFor("app-9")).thenReturn(false);

        TopicMetadata metadata = new TopicMetadata();
        metadata.setName("testtopic");
        metadata.setOwnerApplicationId("app-1");
        when(topicService.getTopic("test", "testtopic")).thenReturn(Optional.of(metadata));
        when(topicService.removeTopicProducer("test", "testtopic", "app-9")).thenReturn(FutureUtil.noop());

        // THEN adding the producer must still succeed
        controller.removeProducerFromTopic("test", "testtopic", "app-9");

        verify(topicService, times(1)).removeTopicProducer("test", "testtopic", "app-9");
    }

    @Test
    @DisplayName("user cant skip compability check if not admin")
    void testSkipCombatCheckForSchemas_userNotAuthorized() {
        ValidatingTopicService topicService = mock(ValidatingTopicService.class);
        TopicController controller = new TopicController(topicService, kafkaClusters, applicationsService,
                namingService, userService);

        AddSchemaVersionDto dto = new AddSchemaVersionDto();
        dto.setJsonSchema(new JSONObject(Map.of("a", "1", "b", "2")).toString());

        TopicMetadata metadata = new TopicMetadata();
        metadata.setName("testtopic");
        metadata.setOwnerApplicationId("app-1");
        when(topicService.listTopics("test")).thenReturn(List.of(metadata));
        when(userService.isAdmin()).thenReturn(false);
        when(applicationsService.isUserAuthorizedFor("app-1")).thenReturn(true);

        try {
            controller.addTopicSchemaVersion("test", "testtopic", true, dto);
            fail("HttpStatus.FORBIDDEN expected, but skipping check succeeded");
        }
        catch (ResponseStatusException e) {
            assertEquals(HttpStatus.FORBIDDEN, e.getStatusCode());
        }
    }

    @Test
    @DisplayName("Cannot remove producer if not authorized for topic (but for producer)")
    void testRemoveTopicProducer_notAuthorizedForTopic_negative() {
        ValidatingTopicService topicService = mock(ValidatingTopicService.class);
        TopicController controller = new TopicController(topicService, kafkaClusters, applicationsService,
                namingService, userService);

        // WHEN I am authorized for the producer, but not the topic owning application
        when(applicationsService.isUserAuthorizedFor("app-9")).thenReturn(true);
        when(applicationsService.isUserAuthorizedFor("app-1")).thenReturn(false);

        TopicMetadata metadata = new TopicMetadata();
        metadata.setName("testtopic");
        metadata.setOwnerApplicationId("app-1");
        when(topicService.getTopic("test", "testtopic")).thenReturn(Optional.of(metadata));
        when(topicService.removeTopicProducer("test", "testtopic", "app-9")).thenReturn(FutureUtil.noop());

        // THEN removing the producer must fail
        try {
            controller.removeProducerFromTopic("test", "testtopic", "app-9");
            fail("ResponseStatusException expected, but removing producer succeeded");
        }
        catch (ResponseStatusException e) {
            assertEquals(HttpStatus.FORBIDDEN, e.getStatusCode());
        }
    }
}
| 12,931 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ValidatingTopicServiceImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/topics/impl/ValidatingTopicServiceImplTest.java | package com.hermesworld.ais.galapagos.topics.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.config.GalapagosTopicConfig;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import com.hermesworld.ais.galapagos.topics.service.impl.ValidatingTopicServiceImpl;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import java.time.LocalDate;
import java.time.Period;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
class ValidatingTopicServiceImplTest {
private GalapagosTopicConfig topicConfig;
/** Common setup: topic config mock with a 10-day minimum deprecation period. */
@BeforeEach
void init() {
    topicConfig = mock(GalapagosTopicConfig.class);
    when(topicConfig.getMinDeprecationTime()).thenReturn(Period.ofDays(10));
}
/** A topic with at least one active subscription must not be deletable. */
@Test
void testCannotDeleteSubscribedTopic() {
    TopicService topicService = mock(TopicService.class);
    SubscriptionService subscriptionService = mock(SubscriptionService.class);
    KafkaClusters clusters = mock(KafkaClusters.class);

    TopicMetadata meta1 = new TopicMetadata();
    meta1.setName("testtopic");
    meta1.setOwnerApplicationId("1");
    meta1.setType(TopicType.EVENTS);

    // One subscriber ("2") on the topic blocks deletion.
    SubscriptionMetadata subscription = new SubscriptionMetadata();
    subscription.setId("99");
    subscription.setTopicName("testtopic");
    subscription.setClientApplicationId("2");

    when(topicService.getTopic("_env1", "testtopic")).thenReturn(Optional.of(meta1));
    when(subscriptionService.getSubscriptionsForTopic("_env1", "testtopic", false))
            .thenReturn(List.of(subscription));

    ValidatingTopicServiceImpl service = new ValidatingTopicServiceImpl(topicService, subscriptionService,
            mock(ApplicationsService.class), clusters, topicConfig, false);

    assertFalse(service.canDeleteTopic("_env1", "testtopic"));
}
/**
 * A public topic that already exists on a later stage (_env2) must not be
 * deletable on the earlier stage (_env1); on the last stage it can go.
 */
@Test
void testCannotDeleteStagedPublicTopic() {
    TopicService topicService = mock(TopicService.class);
    SubscriptionService subscriptionService = mock(SubscriptionService.class);
    KafkaClusters clusters = mock(KafkaClusters.class);

    TopicMetadata meta1 = new TopicMetadata();
    meta1.setName("testtopic");
    meta1.setOwnerApplicationId("1");
    meta1.setType(TopicType.EVENTS);
    // Same topic staged to the second environment.
    TopicMetadata meta2 = new TopicMetadata(meta1);

    when(topicService.getTopic("_env1", "testtopic")).thenReturn(Optional.of(meta1));
    when(topicService.getTopic("_env2", "testtopic")).thenReturn(Optional.of(meta2));
    when(clusters.getEnvironmentIds()).thenReturn(List.of("_env1", "_env2"));

    ValidatingTopicServiceImpl service = new ValidatingTopicServiceImpl(topicService, subscriptionService,
            mock(ApplicationsService.class), clusters, topicConfig, false);

    assertFalse(service.canDeleteTopic("_env1", "testtopic"));
    assertTrue(service.canDeleteTopic("_env2", "testtopic"));
}
/** An internal topic on a non-staging-only environment can be deleted. */
@Test
void canDeleteTopic_internal_positiv() {
    TopicService topicService = mock(TopicService.class);
    SubscriptionService subscriptionService = mock(SubscriptionService.class);
    KafkaClusters clusters = mock(KafkaClusters.class);

    TopicMetadata meta1 = new TopicMetadata();
    meta1.setName("testtopic");
    meta1.setOwnerApplicationId("1");
    meta1.setType(TopicType.INTERNAL);

    KafkaEnvironmentConfig envMeta = mock(KafkaEnvironmentConfig.class);
    when(envMeta.isStagingOnly()).thenReturn(false);

    when(topicService.getTopic("_env1", "testtopic")).thenReturn(Optional.of(meta1));
    when(clusters.getEnvironmentIds()).thenReturn(List.of("_env1"));
    when(clusters.getEnvironmentMetadata("_env1")).thenReturn(Optional.of(envMeta));

    ValidatingTopicServiceImpl service = new ValidatingTopicServiceImpl(topicService, subscriptionService,
            mock(ApplicationsService.class), clusters, topicConfig, false);

    assertTrue(service.canDeleteTopic("_env1", "testtopic"));
}
/** An internal topic on a staging-only environment must not be deletable. */
@Test
void canDeleteTopic_internal_negative() {
    TopicService topicService = mock(TopicService.class);
    SubscriptionService subscriptionService = mock(SubscriptionService.class);
    KafkaClusters clusters = mock(KafkaClusters.class);

    TopicMetadata meta1 = new TopicMetadata();
    meta1.setName("testtopic");
    meta1.setOwnerApplicationId("1");
    meta1.setType(TopicType.INTERNAL);

    KafkaEnvironmentConfig envMeta = mock(KafkaEnvironmentConfig.class);
    // Only difference to the positive case: environment is staging-only.
    when(envMeta.isStagingOnly()).thenReturn(true);

    when(topicService.getTopic("_env1", "testtopic")).thenReturn(Optional.of(meta1));
    when(clusters.getEnvironmentIds()).thenReturn(List.of("_env1"));
    when(clusters.getEnvironmentMetadata("_env1")).thenReturn(Optional.of(envMeta));

    ValidatingTopicServiceImpl service = new ValidatingTopicServiceImpl(topicService, subscriptionService,
            mock(ApplicationsService.class), clusters, topicConfig, false);

    assertFalse(service.canDeleteTopic("_env1", "testtopic"));
}
@Test
@DisplayName("Should throw Exception when trying to add Producer to Topic on staging-only Stage")
void addTopicProducerOnOnlyStagingEnv_negative() {
    TopicService topicService = mock(TopicService.class);
    SubscriptionService subscriptionService = mock(SubscriptionService.class);
    KafkaClusters clusters = mock(KafkaClusters.class);

    TopicMetadata meta1 = new TopicMetadata();
    meta1.setName("testtopic");
    meta1.setOwnerApplicationId("1");
    meta1.setType(TopicType.EVENTS);

    KafkaEnvironmentConfig envMeta = mock(KafkaEnvironmentConfig.class);
    when(envMeta.isStagingOnly()).thenReturn(true);

    when(topicService.getTopic("_env1", "testtopic")).thenReturn(Optional.of(meta1));
    when(clusters.getEnvironmentIds()).thenReturn(List.of("_env1"));
    when(clusters.getEnvironmentMetadata("_env1")).thenReturn(Optional.of(envMeta));

    ValidatingTopicServiceImpl service = new ValidatingTopicServiceImpl(topicService, subscriptionService,
            mock(ApplicationsService.class), clusters, topicConfig, false);

    try {
        // Expect the returned future to complete exceptionally with an
        // IllegalStateException wrapped in an ExecutionException.
        service.addTopicProducer("_env1", "testtopic", "producer1").get();
        fail("Expected exception trying to add Producer to Topic on staging-only Stage");
    }
    catch (ExecutionException | InterruptedException e) {
        assertTrue(e.getCause() instanceof IllegalStateException);
    }
}
@Test
@DisplayName("Should throw Exception when trying to delete Producer from Topic on staging-only Stage")
void deleteProducerFromTopicOnOnlyStagingEnv_negative() {
TopicService topicService = mock(TopicService.class);
SubscriptionService subscriptionService = mock(SubscriptionService.class);
KafkaClusters clusters = mock(KafkaClusters.class);
// EVENTS topic that already has producer1 registered
TopicMetadata meta1 = new TopicMetadata();
meta1.setName("testtopic");
meta1.setOwnerApplicationId("1");
meta1.setType(TopicType.EVENTS);
meta1.setProducers(List.of("producer1"));
// environment is staging-only, so producer removal must be rejected
KafkaEnvironmentConfig envMeta = mock(KafkaEnvironmentConfig.class);
when(envMeta.isStagingOnly()).thenReturn(true);
when(topicService.getTopic("_env1", "testtopic")).thenReturn(Optional.of(meta1));
when(clusters.getEnvironmentIds()).thenReturn(List.of("_env1"));
when(clusters.getEnvironmentMetadata("_env1")).thenReturn(Optional.of(envMeta));
ValidatingTopicServiceImpl service = new ValidatingTopicServiceImpl(topicService, subscriptionService,
mock(ApplicationsService.class), clusters, topicConfig, false);
try {
service.removeTopicProducer("_env1", "testtopic", "producer1").get();
fail("Expected exception trying to remove Producer from Topic on staging-only Stage");
}
catch (ExecutionException | InterruptedException e) {
// NOTE(review): in the InterruptedException case getCause() would likely be null and the
// interrupt flag is not restored - consider catching ExecutionException only.
assertTrue(e.getCause() instanceof IllegalStateException);
}
}
@Test
void canDeleteTopic_withSubscribersAndEolDatePast() {
    // Given: a deprecated topic whose end-of-life date (2020-09-10) already lies in the past,
    // and which still has an active subscriber.
    TopicService topicService = mock(TopicService.class);
    SubscriptionService subscriptionService = mock(SubscriptionService.class);
    KafkaClusters clusters = mock(KafkaClusters.class);
    TopicMetadata deprecatedTopic = new TopicMetadata();
    deprecatedTopic.setName("testtopic");
    deprecatedTopic.setOwnerApplicationId("1");
    deprecatedTopic.setDeprecated(true);
    deprecatedTopic.setDeprecationText("deprecated now");
    deprecatedTopic.setType(TopicType.EVENTS);
    deprecatedTopic.setEolDate(LocalDate.of(2020, 9, 10));
    SubscriptionMetadata activeSubscription = new SubscriptionMetadata();
    activeSubscription.setId("99");
    activeSubscription.setTopicName("testtopic");
    activeSubscription.setClientApplicationId("2");
    when(topicService.getTopic("_env1", "testtopic")).thenReturn(Optional.of(deprecatedTopic));
    when(subscriptionService.getSubscriptionsForTopic("_env1", "testtopic", false))
            .thenReturn(List.of(activeSubscription));
    ValidatingTopicServiceImpl service = new ValidatingTopicServiceImpl(topicService, subscriptionService,
            mock(ApplicationsService.class), clusters, topicConfig, false);
    // Then: subscribers do not block deletion once the EOL date has passed.
    assertTrue(service.canDeleteTopic("_env1", "testtopic"));
}
// A deprecated topic with subscribers must NOT be deletable while its EOL date is still in the future.
@Test
void canDeleteTopic_withSubscribersAndEolDateInFuture() {
TopicService topicService = mock(TopicService.class);
SubscriptionService subscriptionService = mock(SubscriptionService.class);
KafkaClusters clusters = mock(KafkaClusters.class);
TopicMetadata meta1 = new TopicMetadata();
meta1.setName("testtopic");
meta1.setOwnerApplicationId("1");
meta1.setDeprecated(true);
meta1.setDeprecationText("deprecated now");
meta1.setType(TopicType.EVENTS);
// EOL date far in the future - the only difference to the ...EolDatePast sibling test
meta1.setEolDate(LocalDate.of(2999, 9, 10));
// one active subscriber on the topic
SubscriptionMetadata subscription = new SubscriptionMetadata();
subscription.setId("99");
subscription.setTopicName("testtopic");
subscription.setClientApplicationId("2");
when(subscriptionService.getSubscriptionsForTopic("_env1", "testtopic", false))
.thenReturn(List.of(subscription));
when(topicService.getTopic("_env1", "testtopic")).thenReturn(Optional.of(meta1));
ValidatingTopicServiceImpl service = new ValidatingTopicServiceImpl(topicService, subscriptionService,
mock(ApplicationsService.class), clusters, topicConfig, false);
assertFalse(service.canDeleteTopic("_env1", "testtopic"));
}
}
| 11,402 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicServiceImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/topics/service/impl/TopicServiceImplTest.java | package com.hermesworld.ais.galapagos.topics.service.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.applications.impl.KnownApplicationImpl;
import com.hermesworld.ais.galapagos.events.GalapagosEventManagerMock;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.naming.InvalidTopicNameException;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.schemas.IncompatibleSchemaException;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.*;
import com.hermesworld.ais.galapagos.topics.config.GalapagosTopicConfig;
import com.hermesworld.ais.galapagos.topics.config.TopicSchemaConfig;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import org.json.JSONObject;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.mockito.invocation.InvocationOnMock;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.core.io.ClassPathResource;
import org.springframework.util.StreamUtils;
import java.nio.charset.StandardCharsets;
import java.time.LocalDate;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.*;
class TopicServiceImplTest {
// Collaborator mocks and in-memory repositories; all are (re-)created in feedMocks() before each test.
// NOTE(review): @MockBean is a Spring Boot Test annotation, but this field is reassigned via mock()
// in feedMocks(), so the annotation appears to have no effect here - confirm and consider removing.
@MockBean
private KafkaClusters kafkaClusters;
private ApplicationsService applicationsService;
private NamingService namingService;
private CurrentUserService userService;
private GalapagosTopicConfig topicConfig;
private GalapagosEventManagerMock eventManager;
// mock of the cluster with id "test", wired into kafkaClusters in feedMocks()
private KafkaCluster kafkaTestCluster;
// in-memory stand-ins for the "topics" and "schemas" topic-based repositories
private TopicBasedRepositoryMock<TopicMetadata> topicRepository;
private TopicBasedRepositoryMock<SchemaMetadata> schemaRepository;
// Builds the common mock fixture: a single cluster "test" with 5 brokers, in-memory repositories,
// one known application "app-1" the current user ("testuser") is authorized for, and topic config
// limits (max 10 / default 6 partitions, replication factor 2 standard / 4 critical).
@BeforeEach
void feedMocks() {
kafkaClusters = mock(KafkaClusters.class);
applicationsService = mock(ApplicationsService.class);
namingService = mock(NamingService.class);
userService = mock(CurrentUserService.class);
topicConfig = mock(GalapagosTopicConfig.class);
eventManager = new GalapagosEventManagerMock();
kafkaTestCluster = mock(KafkaCluster.class);
topicRepository = new TopicBasedRepositoryMock<>();
schemaRepository = new TopicBasedRepositoryMock<>();
// cluster "test" exposes the two in-memory repositories and reports 5 active brokers
when(kafkaTestCluster.getId()).thenReturn("test");
when(kafkaTestCluster.getRepository("topics", TopicMetadata.class)).thenReturn(topicRepository);
when(kafkaTestCluster.getRepository("schemas", SchemaMetadata.class)).thenReturn(schemaRepository);
when(kafkaTestCluster.getActiveBrokerCount()).thenReturn(CompletableFuture.completedFuture(5));
when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(kafkaTestCluster));
when(userService.getCurrentUserName()).thenReturn(Optional.of("testuser"));
// application "app-1" is known and owned by the current user
ApplicationMetadata app1 = new ApplicationMetadata();
app1.setApplicationId("app-1");
when(applicationsService.getApplicationMetadata("test", "app-1")).thenReturn(Optional.of(app1));
KnownApplication kapp1 = new KnownApplicationImpl("app-1", "App 1");
when(applicationsService.getKnownApplication("app-1")).thenReturn(Optional.of(kapp1));
when(applicationsService.isUserAuthorizedFor("app-1")).thenReturn(true);
// partition / replication limits referenced by the createTopic tests below
when(topicConfig.getMaxPartitionCount()).thenReturn(10);
when(topicConfig.getDefaultPartitionCount()).thenReturn(6);
when(topicConfig.getStandardReplicationFactor()).thenReturn(2);
when(topicConfig.getCriticalReplicationFactor()).thenReturn(4);
when(topicConfig.getSchemas()).thenReturn(new TopicSchemaConfig());
}
// Happy path: createTopic() persists the metadata, forwards name/partitions/configs to the
// cluster, applies the standard replication factor (2), and fires a TopicCreated event.
@Test
void testCreateTopic_positive() throws Exception {
// capture every createTopic() call on the cluster mock for later inspection
List<InvocationOnMock> createInvs = new ArrayList<>();
when(kafkaTestCluster.createTopic(any(), any())).then(inv -> {
createInvs.add(inv);
return FutureUtil.noop();
});
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setDescription("Desc");
topic1.setOwnerApplicationId("app-1");
topic1.setSubscriptionApprovalRequired(true);
topic1.setType(TopicType.EVENTS);
// 8 partitions requested (below the max of 10), plus a custom topic config entry
service.createTopic("test", topic1, 8, Map.of("some.property", "some.value")).get();
assertEquals(1, createInvs.size());
@SuppressWarnings("OptionalGetWithoutIsPresent")
TopicMetadata savedTopic = topicRepository.getObject("topic-1").get();
// metadata was stored as given
assertEquals("Desc", savedTopic.getDescription());
assertEquals("app-1", savedTopic.getOwnerApplicationId());
assertEquals(TopicType.EVENTS, savedTopic.getType());
assertTrue(savedTopic.isSubscriptionApprovalRequired());
assertEquals("topic-1", createInvs.get(0).getArgument(0));
TopicCreateParams params = createInvs.get(0).getArgument(1);
// standard replication factor from feedMocks(); requested partition count passed through
assertEquals(2, params.getReplicationFactor());
assertEquals(8, params.getNumberOfPartitions());
assertEquals("some.value", params.getTopicConfigs().get("some.property"));
assertEquals(1, eventManager.getSinkInvocations().size());
assertEquals("handleTopicCreated", eventManager.getSinkInvocations().get(0).getMethod().getName());
}
// Requesting more partitions (14) than the configured maximum (10) must not be honored.
// NOTE(review): the method name suggests clamping down to the MAX (10), but the assertion
// (and original comment) expect the DEFAULT (6) - confirm which clamp behavior is intended.
@Test
void testCreateTopic_downToMaxPartitions() throws Exception {
List<InvocationOnMock> createInvs = new ArrayList<>();
when(kafkaTestCluster.createTopic(any(), any())).then(inv -> {
createInvs.add(inv);
return FutureUtil.noop();
});
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setDescription("Desc");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
// 14 requested partitions exceed getMaxPartitionCount() == 10
service.createTopic("test", topic1, 14, Map.of()).get();
assertEquals(1, createInvs.size());
TopicCreateParams params = createInvs.get(0).getArgument(1);
// must be set to default partitions (see feedMocks)
assertEquals(6, params.getNumberOfPartitions());
}
// A topic flagged CRITICAL must be created with the configured critical replication
// factor (4 in feedMocks()) instead of the standard one.
@Test
void testCreateTopic_criticalReplicationFactor() throws Exception {
    // Record each createTopic() call on the cluster mock.
    List<InvocationOnMock> topicCreations = new ArrayList<>();
    when(kafkaTestCluster.createTopic(any(), any())).then(invocation -> {
        topicCreations.add(invocation);
        return FutureUtil.noop();
    });
    TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
            topicConfig, eventManager);
    TopicMetadata criticalTopic = new TopicMetadata();
    criticalTopic.setName("topic-1");
    criticalTopic.setDescription("Desc");
    criticalTopic.setOwnerApplicationId("app-1");
    criticalTopic.setType(TopicType.EVENTS);
    criticalTopic.setCriticality(Criticality.CRITICAL);
    service.createTopic("test", criticalTopic, 3, Map.of()).get();
    assertEquals(1, topicCreations.size());
    TopicCreateParams createParams = topicCreations.get(0).getArgument(1);
    // must be set to configured critical replication factor
    assertEquals(4, createParams.getReplicationFactor());
}
// The replication factor can never exceed the number of active brokers, even for critical topics.
@Test
void testCreateTopic_replicationFactor_downToNumBrokers() throws Exception {
List<InvocationOnMock> createInvs = new ArrayList<>();
// 6 is more than the 5 brokers we have, so should be downed to 5
when(topicConfig.getCriticalReplicationFactor()).thenReturn(6);
when(kafkaTestCluster.createTopic(any(), any())).then(inv -> {
createInvs.add(inv);
return FutureUtil.noop();
});
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setDescription("Desc");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
// CRITICAL criticality selects the (overridden) critical replication factor of 6
topic1.setCriticality(Criticality.CRITICAL);
service.createTopic("test", topic1, 3, Map.of()).get();
assertEquals(1, createInvs.size());
TopicCreateParams params = createInvs.get(0).getArgument(1);
// must be equal to number of brokers, as higher is not allowed
assertEquals(5, params.getReplicationFactor());
}
// When no partition count is requested (null), the configured default (6) must be used.
@Test
void testCreateTopic_useDefaultPartitions() throws Exception {
    // Record each createTopic() call on the cluster mock.
    List<InvocationOnMock> topicCreations = new ArrayList<>();
    when(kafkaTestCluster.createTopic(any(), any())).then(invocation -> {
        topicCreations.add(invocation);
        return FutureUtil.noop();
    });
    TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
            topicConfig, eventManager);
    TopicMetadata eventsTopic = new TopicMetadata();
    eventsTopic.setName("topic-1");
    eventsTopic.setDescription("Desc");
    eventsTopic.setOwnerApplicationId("app-1");
    eventsTopic.setType(TopicType.EVENTS);
    service.createTopic("test", eventsTopic, null, Map.of()).get();
    assertEquals(1, topicCreations.size());
    TopicCreateParams createParams = topicCreations.get(0).getArgument(1);
    // must be set to default partitions (see feedMocks)
    assertEquals(6, createParams.getNumberOfPartitions());
}
// When the NamingService rejects the topic name, createTopic() must fail with
// InvalidTopicNameException and no topic may be created on the cluster.
@Test
void testCreateTopic_nameValidationFails() throws Exception {
List<InvocationOnMock> createInvs = new ArrayList<>();
when(kafkaTestCluster.createTopic(any(), any())).then(inv -> {
createInvs.add(inv);
return FutureUtil.noop();
});
// force name validation to fail for any input
doThrow(new InvalidTopicNameException("Invalid!")).when(namingService).validateTopicName(any(), any(), any());
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setDescription("Desc");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
try {
service.createTopic("test", topic1, null, Map.of()).get();
fail("Expected exception when creating topic for which name validation fails");
}
catch (ExecutionException e) {
assertTrue(e.getCause() instanceof InvalidTopicNameException);
}
// the cluster must not have been asked to create anything
assertEquals(0, createInvs.size());
}
@Test
@DisplayName("should add producer to topic")
void addTopicProducerTest_positive() throws Exception {
    TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
            topicConfig, eventManager);
    // Given: an EVENTS topic owned by app-1, persisted in the repository.
    TopicMetadata topic1 = new TopicMetadata();
    topic1.setName("topic-1");
    topic1.setOwnerApplicationId("app-1");
    topic1.setType(TopicType.EVENTS);
    // FIX: wait for the save to complete; it was fire-and-forget here while every
    // sibling test waits via .get(), which could make the test order-dependent.
    topicRepository.save(topic1).get();
    // When: a producer is registered for the topic.
    service.addTopicProducer("test", "topic-1", "producer1").get();
    // Then: the stored metadata lists the new producer (orElseThrow avoids an unchecked Optional.get()).
    TopicMetadata savedTopic = topicRepository.getObject("topic-1").orElseThrow();
    assertFalse(savedTopic.getProducers().isEmpty());
    assertEquals("producer1", savedTopic.getProducers().get(0));
}
@Test
@DisplayName("should fail adding a producer to commands topic")
void addTopicProducerTest_negative() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
// COMMANDS topics do not support explicit producers (the subscribers write to them)
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.COMMANDS);
topicRepository.save(topic1).get();
try {
service.addTopicProducer("test", "topic-1", "producer1").get();
fail("Expected exception when adding a producer to commands topic");
}
catch (ExecutionException e) {
// failure is surfaced as IllegalStateException wrapped by the future
assertTrue(e.getCause() instanceof IllegalStateException);
}
}
@Test
@DisplayName("should delete producer from topic")
void deleteTopicProducersTest_positive() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
// EVENTS topic with four registered producers
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setProducers(List.of("producer1", "producer2", "producer3", "producer4"));
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topicRepository.save(topic1).get();
service.removeTopicProducer("test", "topic-1", "producer3").get();
// exactly the removed producer is gone, the other three remain
TopicMetadata savedTopic = topicRepository.getObject("topic-1").get();
assertEquals(3, savedTopic.getProducers().size());
assertFalse(savedTopic.getProducers().contains("producer3"));
}
@Test
@DisplayName("should not be able to delete producer from commands topic")
void deleteTopicProducersTest_negative() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
// COMMANDS topic - producer management is not allowed for this type
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setProducers(List.of("producer1", "producer2", "producer3", "producer4"));
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.COMMANDS);
topicRepository.save(topic1).get();
try {
service.removeTopicProducer("test", "topic-1", "producer3").get();
fail("Expected exception when deleting producer from commands topic");
}
catch (ExecutionException e) {
assertTrue(e.getCause() instanceof IllegalStateException);
// producer list must be unchanged after the rejected removal
TopicMetadata savedTopic = topicRepository.getObject("topic-1").get();
assertEquals(4, savedTopic.getProducers().size());
assertTrue(savedTopic.getProducers().contains("producer3"));
}
}
@Test
@DisplayName("should promote a producer to new Topic owner")
void changeOwnerOfTopicTest_positive() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
// EVENTS topic owned by app-1 with producer1..4 registered
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topic1.setProducers(List.of("producer1", "producer2", "producer3", "producer4"));
topicRepository.save(topic1).get();
when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test", "test2", "test3"));
service.changeTopicOwner("test", "topic-1", "producer1").get();
TopicMetadata savedTopic = topicRepository.getObject("topic-1").get();
// producer1 became the owner and the previous owner (app-1) joined the producer list,
// keeping the producer count at 4
assertEquals(4, savedTopic.getProducers().size());
assertEquals("producer1", savedTopic.getOwnerApplicationId());
assertTrue(savedTopic.getProducers().contains("app-1"));
}
@Test
@DisplayName("should not promote a producer to new Topic owner for internal topics")
void changeOwnerOfTopicTest_negative() throws Exception {
    TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
            topicConfig, eventManager);
    // Given: an INTERNAL topic - ownership changes are not allowed for this type.
    TopicMetadata topic1 = new TopicMetadata();
    topic1.setName("topic-1");
    topic1.setOwnerApplicationId("app-1");
    topic1.setType(TopicType.INTERNAL);
    topic1.setProducers(List.of("producer1", "producer2", "producer3", "producer4"));
    topicRepository.save(topic1).get();
    when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test", "test2", "test3"));
    try {
        service.changeTopicOwner("test", "topic-1", "producer1").get();
        // FIX: message typo ("trying no change owner")
        fail("exception expected when trying to change owner of internal topic");
    }
    // FIX: narrowed from catch (Exception) - only the wrapped failure is expected here;
    // an InterruptedException now propagates via the method's throws clause, matching
    // the sibling tests' style.
    catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof IllegalStateException);
    }
}
// The latest schema version (here: version 2, created by the current user) can be deleted
// when the topic is not staged to any other environment.
@Test
void testDeleteLatestSchemaVersion() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topicRepository.save(topic1).get();
// schema version 1 by another user
SchemaMetadata schema1 = new SchemaMetadata();
schema1.setId("1234");
schema1.setTopicName("topic-1");
schema1.setCreatedBy("otheruser");
schema1.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
schema1.setSchemaVersion(1);
// schema version 2 (the latest) by the current test user
SchemaMetadata latestSchema = new SchemaMetadata();
latestSchema.setId("9999");
latestSchema.setTopicName("topic-1");
latestSchema.setCreatedBy("testuser");
latestSchema.setJsonSchema(buildJsonSchema(List.of("propA", "propB"), List.of("string", "string")));
latestSchema.setSchemaVersion(2);
schemaRepository.save(schema1).get();
schemaRepository.save(latestSchema).get();
service.deleteLatestTopicSchemaVersion("test", "topic-1").get();
// only the latest version is removed from the repository
assertFalse(schemaRepository.getObject(latestSchema.getId()).isPresent());
}
// Deleting the latest schema version must be refused when the same schema is already
// staged to another environment ("prod" here); the schema must remain untouched.
@Test
void testDeleteLatestSchemaVersionStaged_negative() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
// second environment "prod" with its own repositories
KafkaCluster prodCluster = mock(KafkaCluster.class);
when(kafkaClusters.getEnvironment("prod")).thenReturn(Optional.of(prodCluster));
when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test", "prod"));
TopicBasedRepositoryMock<TopicMetadata> prodTopicRepository = new TopicBasedRepositoryMock<>();
TopicBasedRepositoryMock<SchemaMetadata> prodSchemaRepository = new TopicBasedRepositoryMock<>();
when(prodCluster.getRepository("topics", TopicMetadata.class)).thenReturn(prodTopicRepository);
when(prodCluster.getRepository("schemas", SchemaMetadata.class)).thenReturn(prodSchemaRepository);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
// topic exists on both environments
topicRepository.save(topic1).get();
prodTopicRepository.save(topic1).get();
SchemaMetadata schema = new SchemaMetadata();
schema.setId("1234");
schema.setTopicName("topic-1");
schema.setCreatedBy("otheruser");
schema.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
schema.setSchemaVersion(1);
// the schema is staged: present on "test" AND on "prod"
schemaRepository.save(schema).get();
prodSchemaRepository.save(schema).get();
try {
service.deleteLatestTopicSchemaVersion("test", "topic-1").get();
fail("Exception expected, but none thrown");
}
catch (Exception e) {
assertTrue(e.getCause() instanceof IllegalStateException);
}
// schema on "test" must still be there after the rejected deletion
assertTrue(schemaRepository.getObject(schema.getId()).isPresent());
}
// The validating wrapper must refuse deleting the latest schema version while the topic
// still has subscribers; the schema must remain in the repository.
@Test
void testDeleteLatestSchemaVersionWithSubscriber_negative() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topicRepository.save(topic1).get();
SchemaMetadata schema = new SchemaMetadata();
schema.setId("1234");
schema.setTopicName("topic-1");
schema.setCreatedBy("otheruser");
schema.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
schema.setSchemaVersion(1);
// one active subscription on the topic - this is what blocks the deletion
SubscriptionMetadata subscription = new SubscriptionMetadata();
subscription.setId("50");
subscription.setTopicName("topic-1");
subscription.setClientApplicationId("2");
SubscriptionService subscriptionService = mock(SubscriptionService.class);
when(subscriptionService.getSubscriptionsForTopic("test", "topic-1", false)).thenReturn(List.of(subscription));
// wrap the plain service in the validating decorator under test
ValidatingTopicServiceImpl validatingService = new ValidatingTopicServiceImpl(service, subscriptionService,
applicationsService, kafkaClusters, topicConfig, false);
schemaRepository.save(schema).get();
try {
validatingService.deleteLatestTopicSchemaVersion("test", "topic-1").get();
fail("Exception expected, but none thrown");
}
catch (Exception e) {
assertTrue(e.getCause() instanceof IllegalStateException);
}
assertTrue(schemaRepository.getObject(schema.getId()).isPresent());
}
// Registering a schema that is structurally identical to the already registered
// version must be rejected with IllegalArgumentException.
@Test
void testAddSchemaVersion_sameSchema() throws Exception {
    TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
            topicConfig, eventManager);
    // Given: an EVENTS topic with exactly one schema version.
    TopicMetadata eventsTopic = new TopicMetadata();
    eventsTopic.setName("topic-1");
    eventsTopic.setOwnerApplicationId("app-1");
    eventsTopic.setType(TopicType.EVENTS);
    topicRepository.save(eventsTopic).get();
    SchemaMetadata existingSchema = new SchemaMetadata();
    existingSchema.setId("1234");
    existingSchema.setTopicName("topic-1");
    existingSchema.setCreatedBy("otheruser");
    existingSchema.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
    existingSchema.setSchemaVersion(1);
    schemaRepository.save(existingSchema).get();
    // When: the identical schema is submitted again.
    String duplicateSchema = buildJsonSchema(List.of("propA"), List.of("string"));
    try {
        service.addTopicSchemaVersion("test", "topic-1", duplicateSchema, null, SchemaCompatCheckMode.CHECK_SCHEMA).get();
        fail("addTopicSchemaVersion() should have failed because same schema should not be added again");
    }
    catch (ExecutionException e) {
        // Then: the future fails, wrapping an IllegalArgumentException.
        assertTrue(e.getCause() instanceof IllegalArgumentException);
    }
}
// A new schema version that is not backward compatible with the existing one
// (different property, different type) must be rejected with IncompatibleSchemaException.
@Test
void testAddSchemaVersion_incompatibleSchema() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topicRepository.save(topic1).get();
// existing version 1: { propA: string }
SchemaMetadata schema1 = new SchemaMetadata();
schema1.setId("1234");
schema1.setTopicName("topic-1");
schema1.setCreatedBy("otheruser");
schema1.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
schema1.setSchemaVersion(1);
schemaRepository.save(schema1).get();
// candidate: { propB: integer } - incompatible with version 1
String newSchema = buildJsonSchema(List.of("propB"), List.of("integer"));
try {
service.addTopicSchemaVersion("test", "topic-1", newSchema, null, SchemaCompatCheckMode.CHECK_SCHEMA).get();
fail("addTopicSchemaVersion() should have failed for incompatible schema");
}
catch (ExecutionException e) {
assertTrue(e.getCause() instanceof IncompatibleSchemaException);
}
}
@Test
// FIX: display-name grammar ("should not to check" -> "should not check")
@DisplayName("should not check for compatibility if skipCompatCheck is set to true")
void testAddSchemaVersion_skipCompatibleSchemaCheckForAdmins() throws Exception {
    TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
            topicConfig, eventManager);
    TopicMetadata topic1 = new TopicMetadata();
    topic1.setName("topic-1");
    topicRepository.save(topic1).get();
    // existing version 1: { propA: string }
    SchemaMetadata schema1 = new SchemaMetadata();
    schema1.setTopicName("topic-1");
    schema1.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
    schema1.setSchemaVersion(1);
    schemaRepository.save(schema1).get();
    when(userService.isAdmin()).thenReturn(true);
    // incompatible candidate { propB: integer } - must still succeed because the
    // compatibility check is explicitly skipped
    String newSchema = buildJsonSchema(List.of("propB"), List.of("integer"));
    service.addTopicSchemaVersion("test", "topic-1", newSchema, "some change decs",
            SchemaCompatCheckMode.SKIP_SCHEMA_CHECK).get();
}
// The metadata-based overload of addTopicSchemaVersion() must accept a compatible version 2
// and return the stored metadata (id, version, schema content preserved).
@Test
void testAddSchemaVersion_withMetadata() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topicRepository.save(topic1).get();
// existing version 1: { propA: string }
SchemaMetadata schema1 = new SchemaMetadata();
schema1.setId("1234");
schema1.setTopicName("topic-1");
schema1.setCreatedBy("otheruser");
schema1.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
schema1.setSchemaVersion(1);
schemaRepository.save(schema1).get();
// compatible version 2: adds propB as string
SchemaMetadata schema2 = new SchemaMetadata();
schema2.setId("9999");
schema2.setTopicName("topic-1");
schema2.setCreatedBy("testuser");
schema2.setJsonSchema(buildJsonSchema(List.of("propA", "propB"), List.of("string", "string")));
schema2.setSchemaVersion(2);
schema2.setChangeDescription("some nice description :)");
SchemaMetadata newSchemaMetadata = service
.addTopicSchemaVersion("test", schema2, SchemaCompatCheckMode.CHECK_SCHEMA).get();
assertEquals("9999", newSchemaMetadata.getId());
assertEquals(2, newSchemaMetadata.getSchemaVersion());
assertTrue(newSchemaMetadata.getJsonSchema().contains("propB"));
// NOTE(review): this asserts on the INPUT object (schema2), not on newSchemaMetadata -
// it can only fail if the service mutated the input; probably newSchemaMetadata.getCreatedBy()
// was intended. Confirm.
assertEquals("testuser", schema2.getCreatedBy());
}
// Submitting version #2 while NO schema version exists yet for the topic must be rejected.
@Test
void testAddSchemaVersion_withMetadata_illegalVersionNo_empty() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topicRepository.save(topic1).get();
// note: this schema is NOT saved to the repository - the topic has no versions at all
SchemaMetadata schema1 = new SchemaMetadata();
schema1.setId("1234");
schema1.setTopicName("topic-1");
schema1.setCreatedBy("otheruser");
schema1.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
schema1.setSchemaVersion(2);
try {
service.addTopicSchemaVersion("test", schema1, SchemaCompatCheckMode.CHECK_SCHEMA).get();
fail("addTopicSchemaVersion() should have failed because version #2 and no version existing for topic");
}
catch (ExecutionException e) {
assertTrue(e.getCause() instanceof IllegalArgumentException);
}
}
// The submitted version number must be exactly one above the latest existing version:
// #3 after only #1 must be rejected.
@Test
void testAddSchemaVersion_withMetadata_illegalVersionNo_notMatching() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topicRepository.save(topic1).get();
// existing latest version is #1
SchemaMetadata schema1 = new SchemaMetadata();
schema1.setId("1234");
schema1.setTopicName("topic-1");
schema1.setCreatedBy("otheruser");
schema1.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
schema1.setSchemaVersion(1);
schemaRepository.save(schema1).get();
// candidate claims version #3, skipping #2
SchemaMetadata schema2 = new SchemaMetadata();
schema2.setId("1235");
schema2.setTopicName("topic-1");
schema2.setCreatedBy("otheruser");
schema2.setJsonSchema(buildJsonSchema(List.of("propA", "propB"), List.of("string", "string")));
schema2.setSchemaVersion(3);
try {
service.addTopicSchemaVersion("test", schema2, SchemaCompatCheckMode.CHECK_SCHEMA).get();
fail("addTopicSchemaVersion() should have failed because version #3 and only version #1 existing for topic");
}
catch (ExecutionException e) {
assertTrue(e.getCause() instanceof IllegalArgumentException);
}
}
// Valid JSON that is not a valid JSON Schema (title must be a string, not a number)
// must be rejected with IllegalArgumentException.
@Test
void testAddSchemaVersion_invalidSchema() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topicRepository.save(topic1).get();
try {
service.addTopicSchemaVersion("test", "topic-1", "{ \"title\": 17 }", null,
SchemaCompatCheckMode.CHECK_SCHEMA).get();
fail("addTopicSchemaVersion() should have failed because JSON is no JSON schema");
}
catch (ExecutionException e) {
assertTrue(e.getCause() instanceof IllegalArgumentException);
}
}
// Unparseable JSON ("{") must be rejected with IllegalArgumentException.
@Test
void testAddSchemaVersion_invalidJson() throws Exception {
    TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
            topicConfig, eventManager);
    // Given: an EVENTS topic without any schema.
    TopicMetadata eventsTopic = new TopicMetadata();
    eventsTopic.setName("topic-1");
    eventsTopic.setOwnerApplicationId("app-1");
    eventsTopic.setType(TopicType.EVENTS);
    topicRepository.save(eventsTopic).get();
    // When / Then: submitting a truncated JSON document must fail.
    try {
        service.addTopicSchemaVersion("test", "topic-1", "{", null, SchemaCompatCheckMode.CHECK_SCHEMA).get();
        fail("addTopicSchemaVersion() should have failed because no valid JSON");
    }
    catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof IllegalArgumentException);
    }
}
@Test
void testAddSchemaVersion_DataObjectSimpleAtJSONSchema() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
String testJsonSchema = StreamUtils.copyToString(
new ClassPathResource("/schema-compatibility/dataObjectSimple.schema.json").getInputStream(),
StandardCharsets.UTF_8);
topicRepository.save(topic1).get();
try {
service.addTopicSchemaVersion("test", "topic-1", testJsonSchema, null, SchemaCompatCheckMode.CHECK_SCHEMA)
.get();
fail("addTopicSchemaVersion() should have failed because there is a Data-Object in JSON Schema");
}
catch (ExecutionException e) {
assertTrue(e.getCause() instanceof IllegalArgumentException);
}
}
@Test
void testAddSchemaVersion_DataObjectNestedAtJSONSchema() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
String testJsonSchema = StreamUtils.copyToString(
new ClassPathResource("/schema-compatibility/dataObjectNested.schema.json").getInputStream(),
StandardCharsets.UTF_8);
topicRepository.save(topic1).get();
service.addTopicSchemaVersion("test", "topic-1", testJsonSchema, null, SchemaCompatCheckMode.CHECK_SCHEMA)
.get();
}
@Test
void testAddSchemaVersion_NoSchemaProp() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
String testJsonSchema = StreamUtils.copyToString(
new ClassPathResource("/schema-compatibility/noSchemaProp.schema.json").getInputStream(),
StandardCharsets.UTF_8);
topicRepository.save(topic1).get();
try {
service.addTopicSchemaVersion("test", "topic-1", testJsonSchema, null, SchemaCompatCheckMode.CHECK_SCHEMA)
.get();
fail("addTopicSchemaVersion() should have failed because there is no schema prop in JSON Schema");
}
catch (ExecutionException e) {
assertTrue(e.getCause() instanceof IllegalArgumentException);
}
}
    @Test
    void testSetSubscriptionApprovalRequired_positive() throws Exception {
        // Verifies that toggling the subscriptionApprovalRequired flag on an EVENTS topic
        // persists the new value AND publishes exactly one event per change.
        TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
                topicConfig, eventManager);
        TopicMetadata topic1 = new TopicMetadata();
        topic1.setName("topic-1");
        topic1.setOwnerApplicationId("app-1");
        topic1.setType(TopicType.EVENTS);
        topicRepository.save(topic1).get();
        // Flag false -> true: one event, flag persisted as true
        service.setSubscriptionApprovalRequiredFlag("test", "topic-1", true).get();
        assertEquals(1, eventManager.getSinkInvocations().size());
        assertEquals("handleTopicSubscriptionApprovalRequiredFlagChanged",
                eventManager.getSinkInvocations().get(0).getMethod().getName());
        topic1 = topicRepository.getObject("topic-1").orElseThrow();
        assertTrue(topic1.isSubscriptionApprovalRequired());
        // Flag true -> false: a second event, flag persisted as false
        service.setSubscriptionApprovalRequiredFlag("test", "topic-1", false).get();
        assertEquals(2, eventManager.getSinkInvocations().size());
        assertEquals("handleTopicSubscriptionApprovalRequiredFlagChanged",
                eventManager.getSinkInvocations().get(1).getMethod().getName());
        topic1 = topicRepository.getObject("topic-1").orElseThrow();
        assertFalse(topic1.isSubscriptionApprovalRequired());
    }
@Test
void testSetSubscriptionApprovalRequired_internalTopic() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.INTERNAL);
topicRepository.save(topic1).get();
try {
service.setSubscriptionApprovalRequiredFlag("test", "topic-1", true).get();
fail("Expected exception when trying to set subscriptionApprovalRequired flag on internal topic");
}
catch (ExecutionException e) {
assertTrue(e.getCause() instanceof IllegalStateException);
}
assertEquals(0, eventManager.getSinkInvocations().size());
}
    @Test
    void testSetSubscriptionApprovalRequired_noop() throws Exception {
        // Setting the flag to the value it already has must not publish any event.
        TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
                topicConfig, eventManager);
        TopicMetadata topic1 = new TopicMetadata();
        topic1.setName("topic-1");
        topic1.setOwnerApplicationId("app-1");
        // NOTE(review): the topic type is INTERNAL here, which the _internalTopic test shows is normally
        // rejected with IllegalStateException. This test succeeding implies the no-op check (flag already
        // true) runs before the internal-topic guard -- confirm this ordering is intended.
        topic1.setType(TopicType.INTERNAL);
        topic1.setSubscriptionApprovalRequired(true);
        topicRepository.save(topic1).get();
        service.setSubscriptionApprovalRequiredFlag("test", "topic-1", true).get();
        // No event sink invocation expected for a no-op change
        assertEquals(0, eventManager.getSinkInvocations().size());
    }
    @Test
    @DisplayName("should stage new owner on all stages immediately")
    void testChangeOwnerStaging() throws Exception {
        TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
                topicConfig, eventManager);
        // Second environment "test2" with its own topic repository, wired into the KafkaClusters mock
        KafkaCluster testCluster2 = mock(KafkaCluster.class);
        when(testCluster2.getId()).thenReturn("test2");
        TopicBasedRepositoryMock<TopicMetadata> topicRepository2 = new TopicBasedRepositoryMock<>();
        when(testCluster2.getRepository("topics", TopicMetadata.class)).thenReturn(topicRepository2);
        when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test", "test2"));
        when(kafkaClusters.getEnvironment("test2")).thenReturn(Optional.of(testCluster2));
        // Same topic exists on both environments; on "test" the future owner is already a producer
        TopicMetadata topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);
        topic.setProducers(new ArrayList<>(List.of("producer1")));
        topicRepository.save(topic).get();
        topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);
        topicRepository2.save(topic).get();
        service.changeTopicOwner("test", "topic-1", "producer1").get();
        // Ownership change must be applied on BOTH environments immediately,
        // and the previous owner (app-1) becomes a producer on both.
        assertEquals("producer1", service.getTopic("test", "topic-1").get().getOwnerApplicationId());
        assertTrue(service.getTopic("test", "topic-1").get().getProducers().contains("app-1"));
        assertEquals("producer1", service.getTopic("test2", "topic-1").get().getOwnerApplicationId());
        assertTrue(service.getTopic("test2", "topic-1").get().getProducers().contains("app-1"));
    }
    @Test
    void testDeprecateTopic_positive() throws Exception {
        // Three environments; the topic exists on "test" and "test2" but NOT on "test3".
        KafkaCluster testCluster2 = mock(KafkaCluster.class);
        when(testCluster2.getId()).thenReturn("test2");
        KafkaCluster testCluster3 = mock(KafkaCluster.class);
        when(testCluster3.getId()).thenReturn("test3");
        TopicBasedRepositoryMock<TopicMetadata> topicRepository2 = new TopicBasedRepositoryMock<>();
        TopicBasedRepositoryMock<TopicMetadata> topicRepository3 = new TopicBasedRepositoryMock<>();
        when(testCluster2.getRepository("topics", TopicMetadata.class)).thenReturn(topicRepository2);
        when(testCluster3.getRepository("topics", TopicMetadata.class)).thenReturn(topicRepository3);
        when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test", "test2", "test3"));
        when(kafkaClusters.getEnvironment("test2")).thenReturn(Optional.of(testCluster2));
        when(kafkaClusters.getEnvironment("test3")).thenReturn(Optional.of(testCluster3));
        TopicMetadata topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);
        topicRepository.save(topic).get();
        topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);
        topicRepository2.save(topic).get();
        TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
                topicConfig, eventManager);
        service.markTopicDeprecated("topic-1", "Because test", LocalDate.of(2020, 10, 1)).get();
        // Deprecation must be applied on every environment where the topic exists;
        // "test3" (no such topic there) must stay untouched.
        assertTrue(service.getTopic("test", "topic-1").map(TopicMetadata::isDeprecated).orElse(false));
        assertTrue(service.getTopic("test2", "topic-1").map(TopicMetadata::isDeprecated).orElse(false));
        assertFalse(service.getTopic("test3", "topic-1").isPresent());
    }
    @Test
    void testDeprecateTopic_noSuchTopic() throws Exception {
        // Same three-environment setup as the positive test; "topic-1" exists on "test" and "test2".
        KafkaCluster testCluster2 = mock(KafkaCluster.class);
        when(testCluster2.getId()).thenReturn("test2");
        KafkaCluster testCluster3 = mock(KafkaCluster.class);
        when(testCluster3.getId()).thenReturn("test3");
        TopicBasedRepositoryMock<TopicMetadata> topicRepository2 = new TopicBasedRepositoryMock<>();
        TopicBasedRepositoryMock<TopicMetadata> topicRepository3 = new TopicBasedRepositoryMock<>();
        when(testCluster2.getRepository("topics", TopicMetadata.class)).thenReturn(topicRepository2);
        when(testCluster3.getRepository("topics", TopicMetadata.class)).thenReturn(topicRepository3);
        when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test", "test2", "test3"));
        when(kafkaClusters.getEnvironment("test2")).thenReturn(Optional.of(testCluster2));
        when(kafkaClusters.getEnvironment("test3")).thenReturn(Optional.of(testCluster3));
        TopicMetadata topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);
        topicRepository.save(topic).get();
        topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);
        topicRepository2.save(topic).get();
        TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
                topicConfig, eventManager);
        // Deprecating a topic that exists on NO environment must fail with NoSuchElementException.
        try {
            service.markTopicDeprecated("topic-2", "Because test", LocalDate.of(2020, 10, 1)).get();
            fail("Exception expected when marking not existing topic as deprecated, but succeeded");
        }
        catch (ExecutionException e) {
            assertTrue(e.getCause() instanceof NoSuchElementException);
        }
    }
    @Test
    // NOTE(review): method name should be testUnmarkTopicDeprecated (camelCase) for consistency
    // with the other tests; left unchanged here.
    void testunmarkTopicDeprecated() throws Exception {
        // Three environments; a deprecated topic exists only on "test".
        KafkaCluster testCluster2 = mock(KafkaCluster.class);
        when(testCluster2.getId()).thenReturn("test2");
        KafkaCluster testCluster3 = mock(KafkaCluster.class);
        when(testCluster3.getId()).thenReturn("test3");
        TopicBasedRepositoryMock<TopicMetadata> topicRepository2 = new TopicBasedRepositoryMock<>();
        TopicBasedRepositoryMock<TopicMetadata> topicRepository3 = new TopicBasedRepositoryMock<>();
        when(testCluster2.getRepository("topics", TopicMetadata.class)).thenReturn(topicRepository2);
        when(testCluster3.getRepository("topics", TopicMetadata.class)).thenReturn(topicRepository3);
        when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test", "test2", "test3"));
        when(kafkaClusters.getEnvironment("test2")).thenReturn(Optional.of(testCluster2));
        when(kafkaClusters.getEnvironment("test3")).thenReturn(Optional.of(testCluster3));
        TopicMetadata topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setDeprecated(true);
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);
        topicRepository.save(topic).get();
        TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
                topicConfig, eventManager);
        // Un-deprecating must clear the flag on the environment where the topic exists.
        service.unmarkTopicDeprecated("topic-1").get();
        assertFalse(service.getTopic("test", "topic-1").get().isDeprecated());
    }
@Test
void testChangeDescOfTopic() throws Exception {
TopicMetadata topic = new TopicMetadata();
topic.setName("topic-1");
topic.setDescription("this topic is not a nice one :(");
topic.setOwnerApplicationId("app-1");
topic.setType(TopicType.EVENTS);
topicRepository.save(topic).get();
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
service.updateTopicDescription("test", "topic-1", "this topic is now a nice one :)").get();
TopicMetadata savedTopic = topicRepository.getObject("topic-1").get();
assertEquals("this topic is now a nice one :)", savedTopic.getDescription());
}
@Test
void testAddSchemaVersion_DataObjectNestedAtJSONSchemaAndDataTopic() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.DATA);
String testJsonSchema = StreamUtils.copyToString(
new ClassPathResource("/schema-compatibility/dataObjectNested.schema.json").getInputStream(),
StandardCharsets.UTF_8);
topicRepository.save(topic1).get();
service.addTopicSchemaVersion("test", "topic-1", testJsonSchema, null, SchemaCompatCheckMode.CHECK_SCHEMA)
.get();
}
    @Test
    void testAddSchemaVersion_WithChangeDesc() throws Exception {
        // A change description on a FOLLOW-UP schema version (here: version 2) must be persisted.
        TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
                topicConfig, eventManager);
        TopicMetadata topic1 = new TopicMetadata();
        topic1.setName("topic-1");
        topic1.setOwnerApplicationId("app-1");
        topic1.setType(TopicType.DATA);
        topicRepository.save(topic1).get();
        // Existing version 1 of the schema
        SchemaMetadata schema1 = new SchemaMetadata();
        schema1.setId("1234");
        schema1.setTopicName("topic-1");
        schema1.setCreatedBy("otheruser");
        schema1.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
        schema1.setSchemaVersion(1);
        schemaRepository.save(schema1).get();
        // New version 2, backward-compatible (adds propB), with a change description
        SchemaMetadata newSchema = new SchemaMetadata();
        newSchema.setId("9999");
        newSchema.setTopicName("topic-1");
        newSchema.setCreatedBy("testuser");
        newSchema.setJsonSchema(buildJsonSchema(List.of("propA", "propB"), List.of("string", "string")));
        newSchema.setSchemaVersion(2);
        newSchema.setChangeDescription("Added new schema which is better");
        service.addTopicSchemaVersion("test", newSchema, SchemaCompatCheckMode.CHECK_SCHEMA).get();
        String changedDesc = schemaRepository.getObject("9999").get().getChangeDescription();
        assertEquals("Added new schema which is better", changedDesc);
    }
    @Test
    void testAddSchemaVersion_WithChangeDesc_negative() throws Exception {
        // A change description on the FIRST schema version must be rejected
        // (there is no previous version the description could refer to).
        TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
                topicConfig, eventManager);
        TopicMetadata topic1 = new TopicMetadata();
        topic1.setName("topic-1");
        topic1.setOwnerApplicationId("app-1");
        topic1.setType(TopicType.DATA);
        topicRepository.save(topic1).get();
        SchemaMetadata newSchema = new SchemaMetadata();
        newSchema.setId("9999");
        newSchema.setTopicName("topic-1");
        newSchema.setCreatedBy("testuser");
        newSchema.setJsonSchema(buildJsonSchema(List.of("propA", "propB"), List.of("string", "string")));
        newSchema.setSchemaVersion(1);
        newSchema.setChangeDescription("Added new schema which is better");
        try {
            service.addTopicSchemaVersion("test", newSchema, SchemaCompatCheckMode.CHECK_SCHEMA).get();
            fail("Exception expected when adding change description for first published schema");
        }
        catch (ExecutionException e) {
            assertTrue(e.getCause() instanceof IllegalArgumentException);
        }
    }
    @Test
    void testDeleteSchemaWithSub_positive() throws Exception {
        // With schemaDeleteWithSub enabled (last ctor arg = true), the latest schema version
        // may be deleted even though the topic has a subscriber.
        TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
                topicConfig, eventManager);
        TopicMetadata topic1 = new TopicMetadata();
        topic1.setName("topic-1");
        topic1.setOwnerApplicationId("app-1");
        topic1.setType(TopicType.EVENTS);
        topicRepository.save(topic1).get();
        SchemaMetadata schema = new SchemaMetadata();
        schema.setId("1234");
        schema.setTopicName("topic-1");
        schema.setCreatedBy("otheruser");
        schema.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
        schema.setSchemaVersion(1);
        schemaRepository.save(schema).get();
        // One active subscription on the topic
        SubscriptionMetadata subscription = new SubscriptionMetadata();
        subscription.setId("50");
        subscription.setTopicName("topic-1");
        subscription.setClientApplicationId("2");
        SubscriptionService subscriptionService = mock(SubscriptionService.class);
        when(subscriptionService.getSubscriptionsForTopic("test", "topic-1", false)).thenReturn(List.of(subscription));
        ValidatingTopicServiceImpl validatingService = new ValidatingTopicServiceImpl(service, subscriptionService,
                applicationsService, kafkaClusters, topicConfig, true);
        validatingService.deleteLatestTopicSchemaVersion("test", "topic-1").get();
        // Schema must be gone from the repository
        assertFalse(schemaRepository.getObject("1234").isPresent());
    }
@Test
void testDeleteSchemaWithSub_negative() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topicRepository.save(topic1).get();
SchemaMetadata schema = new SchemaMetadata();
schema.setId("1234");
schema.setTopicName("topic-1");
schema.setCreatedBy("otheruser");
schema.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
schema.setSchemaVersion(1);
schemaRepository.save(schema).get();
SubscriptionMetadata subscription = new SubscriptionMetadata();
subscription.setId("50");
subscription.setTopicName("topic-1");
subscription.setClientApplicationId("2");
SubscriptionService subscriptionService = mock(SubscriptionService.class);
when(subscriptionService.getSubscriptionsForTopic("test", "topic-1", false)).thenReturn(List.of(subscription));
ValidatingTopicServiceImpl validatingService = new ValidatingTopicServiceImpl(service, subscriptionService,
applicationsService, kafkaClusters, topicConfig, false);
assertTrue(schemaRepository.getObject("1234").isPresent());
try {
validatingService.deleteLatestTopicSchemaVersion("test", "topic-1").get();
fail("Exception expected when trying to delete schema with subscribers when schemaDeleteWithSub is set to false");
}
catch (Exception e) {
assertTrue(e.getCause() instanceof IllegalStateException);
}
}
@Test
void testDeleteLatestSchemaVersionStagedSchemaDeleteSub_negative() throws Exception {
TopicServiceImpl service = new TopicServiceImpl(kafkaClusters, applicationsService, namingService, userService,
topicConfig, eventManager);
SubscriptionService subscriptionService = mock(SubscriptionService.class);
ValidatingTopicServiceImpl validatingService = new ValidatingTopicServiceImpl(service, subscriptionService,
applicationsService, kafkaClusters, topicConfig, true);
KafkaCluster prodCluster = mock(KafkaCluster.class);
when(kafkaClusters.getEnvironment("prod")).thenReturn(Optional.of(prodCluster));
when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test", "prod"));
KafkaEnvironmentConfig config = mock(KafkaEnvironmentConfig.class);
when(config.isStagingOnly()).thenReturn(true);
when(kafkaClusters.getEnvironmentMetadata("prod")).thenReturn(Optional.of(config));
TopicBasedRepositoryMock<TopicMetadata> prodTopicRepository = new TopicBasedRepositoryMock<>();
TopicBasedRepositoryMock<SchemaMetadata> prodSchemaRepository = new TopicBasedRepositoryMock<>();
when(prodCluster.getRepository("topics", TopicMetadata.class)).thenReturn(prodTopicRepository);
when(prodCluster.getRepository("schemas", SchemaMetadata.class)).thenReturn(prodSchemaRepository);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
SubscriptionMetadata subscription = new SubscriptionMetadata();
subscription.setId("50");
subscription.setTopicName("topic-1");
subscription.setClientApplicationId("2");
when(subscriptionService.getSubscriptionsForTopic("prod", "topic-1", false)).thenReturn(List.of(subscription));
prodTopicRepository.save(topic1).get();
SchemaMetadata schema = new SchemaMetadata();
schema.setId("1234");
schema.setTopicName("topic-1");
schema.setCreatedBy("otheruser");
schema.setJsonSchema(buildJsonSchema(List.of("propA"), List.of("string")));
schema.setSchemaVersion(1);
prodSchemaRepository.save(schema).get();
try {
validatingService.deleteLatestTopicSchemaVersion("prod", "topic-1").get();
fail("Exception expected, but none thrown");
}
catch (Exception e) {
assertTrue(e.getCause() instanceof IllegalStateException);
}
}
private static String buildJsonSchema(List<String> propertyNames, List<String> propertyTypes) {
JSONObject props = new JSONObject();
for (int i = 0; i < propertyNames.size(); i++) {
String pn = propertyNames.get(i);
String tp = propertyTypes.get(i);
JSONObject prop = new JSONObject();
prop.put("type", tp);
props.put(pn, prop);
}
JSONObject schema = new JSONObject();
schema.put("$schema", "someUrl");
schema.put("properties", props);
return schema.toString();
}
}
| 55,999 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicServiceImplIntegrationTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/topics/service/impl/TopicServiceImplIntegrationTest.java | package com.hermesworld.ais.galapagos.topics.service.impl;
import com.hermesworld.ais.galapagos.GalapagosTestConfig;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.changes.ChangeData;
import com.hermesworld.ais.galapagos.events.*;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.notifications.NotificationService;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.config.GalapagosTopicConfig;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import com.hermesworld.ais.galapagos.util.HasKey;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.context.annotation.Import;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.stereotype.Component;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests that Security Context data is passed correctly to event context, even when performing the quite complex,
 * chained <code>markTopicDeprecated</code> and <code>unmarkTopicDeprecated</code> operations.
 */
@SpringBootTest
@Import({ GalapagosTestConfig.class, TopicServiceImplIntegrationTest.TestEventListener.class })
class TopicServiceImplIntegrationTest {

    // Real, Spring-managed collaborators under test
    @Autowired
    private CurrentUserService currentUserService;

    @Autowired
    private GalapagosEventManager eventManager;

    @Autowired
    private TestEventListener eventListener;

    // Mocked infrastructure beans (wired in feedMocks())
    @MockBean
    private KafkaClusters clusters;

    @MockBean
    private NotificationService notificationService;

    // Gives each worker thread a recognizable name ("inttest-N") for easier debugging
    private final AtomicInteger threadNo = new AtomicInteger();

    private final ExecutorService executorService = Executors
            .newCachedThreadPool(r -> new Thread(r, "inttest-" + threadNo.incrementAndGet()));

    // One topic repository per simulated environment ("dev" / "int"); see
    // DecoupledTopicBasedRepositoryMock below: saves complete asynchronously, like the real thing.
    private final TopicBasedRepositoryMock<TopicMetadata> topicRepository1 = new DecoupledTopicBasedRepositoryMock<>(
            executorService);

    private final TopicBasedRepositoryMock<TopicMetadata> topicRepository2 = new DecoupledTopicBasedRepositoryMock<>(
            executorService);

    @BeforeEach
    void feedMocks() {
        // Wire two mock Kafka clusters ("dev" and "int", with "int" as production) into the KafkaClusters mock.
        KafkaCluster cluster1 = mock(KafkaCluster.class);
        KafkaCluster cluster2 = mock(KafkaCluster.class);
        when(cluster1.getId()).thenReturn("dev");
        when(cluster2.getId()).thenReturn("int");
        when(cluster1.getRepository("topics", TopicMetadata.class)).thenReturn(topicRepository1);
        when(cluster2.getRepository("topics", TopicMetadata.class)).thenReturn(topicRepository2);
        when(cluster1.getRepository("changelog", ChangeData.class))
                .thenReturn(new DecoupledTopicBasedRepositoryMock<>(executorService));
        when(cluster2.getRepository("changelog", ChangeData.class))
                .thenReturn(new DecoupledTopicBasedRepositoryMock<>(executorService));
        when(clusters.getEnvironmentIds()).thenReturn(List.of("dev", "int"));
        when(clusters.getProductionEnvironmentId()).thenReturn("int");
        when(clusters.getEnvironment("dev")).thenReturn(Optional.of(cluster1));
        when(clusters.getEnvironment("int")).thenReturn(Optional.of(cluster2));
        // Notifications are irrelevant for these tests; complete immediately.
        when(notificationService.notifySubscribers(any(), any(), any(), any())).thenReturn(FutureUtil.noop());
    }

    @AfterEach
    void shutdown() throws Exception {
        // Ensure all asynchronous repository saves have finished before the next test starts.
        executorService.shutdown();
        assertTrue(executorService.awaitTermination(1, TimeUnit.MINUTES));
    }

    @Test
    void testTopicDeprecated_passesCurrentUser() throws Exception {
        ApplicationsService applicationsService = mock(ApplicationsService.class);
        NamingService namingService = mock(NamingService.class);
        GalapagosTopicConfig topicSettings = mock(GalapagosTopicConfig.class);
        // Same topic exists on both environments, so deprecation triggers one event per environment.
        TopicMetadata topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);
        topicRepository1.save(topic).get();
        topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);
        topicRepository2.save(topic).get();
        TopicServiceImpl service = new TopicServiceImpl(clusters, applicationsService, namingService,
                currentUserService, topicSettings, eventManager);
        // Install a security context with user "testUser" on the calling thread.
        SecurityContext securityContext = mock(SecurityContext.class);
        Authentication auth = mock(Authentication.class);
        when(auth.getPrincipal()).thenReturn(new Object());
        when(auth.getName()).thenReturn("testUser");
        when(securityContext.getAuthentication()).thenReturn(auth);
        SecurityContextHolder.setContext(securityContext);
        service.markTopicDeprecated("topic-1", "deprecated", LocalDate.of(2999, 1, 1)).get();
        // Both events (one per environment) must carry the user name, even for the second,
        // asynchronously-chained environment where the security context is no longer thread-bound.
        assertEquals(2, eventListener.deprecationEvents.size());
        assertEquals("testUser",
                eventListener.deprecationEvents.get(1).getContext().getContextValue("username").orElse(null));
    }

    @Test
    void testTopicUndeprecated_passesCurrentUser() throws Exception {
        ApplicationsService applicationsService = mock(ApplicationsService.class);
        NamingService namingService = mock(NamingService.class);
        GalapagosTopicConfig topicSettings = mock(GalapagosTopicConfig.class);
        // Same deprecated topic on both environments.
        TopicMetadata topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);
        topic.setDeprecated(true);
        topicRepository1.save(topic).get();
        topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-1");
        topic.setType(TopicType.EVENTS);
        topic.setDeprecated(true);
        topicRepository2.save(topic).get();
        TopicServiceImpl service = new TopicServiceImpl(clusters, applicationsService, namingService,
                currentUserService, topicSettings, eventManager);
        // Install a security context with user "testUser" on the calling thread.
        SecurityContext securityContext = mock(SecurityContext.class);
        Authentication auth = mock(Authentication.class);
        when(auth.getPrincipal()).thenReturn(new Object());
        when(auth.getName()).thenReturn("testUser");
        when(securityContext.getAuthentication()).thenReturn(auth);
        SecurityContextHolder.setContext(securityContext);
        service.unmarkTopicDeprecated("topic-1").get();
        // Both undeprecation events must carry the user name from the original security context.
        assertEquals(2, eventListener.undeprecationEvents.size());
        assertEquals("testUser",
                eventListener.undeprecationEvents.get(1).getContext().getContextValue("username").orElse(null));
    }

    /**
     * Event listener registered via @Import; records deprecation / undeprecation events for the
     * assertions above and rejects every other event type, so unexpected events fail the test.
     */
    @Component
    public static class TestEventListener implements TopicEventsListener {

        private final List<TopicEvent> deprecationEvents = new ArrayList<>();

        private final List<TopicEvent> undeprecationEvents = new ArrayList<>();

        @Override
        public CompletableFuture<Void> handleTopicCreated(TopicCreatedEvent event) {
            throw new UnsupportedOperationException("Unexpected event received during test");
        }

        @Override
        public CompletableFuture<Void> handleTopicDeleted(TopicEvent event) {
            throw new UnsupportedOperationException("Unexpected event received during test");
        }

        @Override
        public CompletableFuture<Void> handleTopicDescriptionChanged(TopicEvent event) {
            throw new UnsupportedOperationException("Unexpected event received during test");
        }

        @Override
        public CompletableFuture<Void> handleTopicDeprecated(TopicEvent event) {
            deprecationEvents.add(event);
            return FutureUtil.noop();
        }

        @Override
        public CompletableFuture<Void> handleTopicUndeprecated(TopicEvent event) {
            undeprecationEvents.add(event);
            return FutureUtil.noop();
        }

        @Override
        public CompletableFuture<Void> handleTopicSchemaAdded(TopicSchemaAddedEvent event) {
            throw new UnsupportedOperationException("Unexpected event received during test");
        }

        @Override
        public CompletableFuture<Void> handleTopicSchemaDeleted(TopicSchemaRemovedEvent event) {
            throw new UnsupportedOperationException("Unexpected event received during test");
        }

        @Override
        public CompletableFuture<Void> handleTopicSubscriptionApprovalRequiredFlagChanged(TopicEvent event) {
            throw new UnsupportedOperationException("Unexpected event received during test");
        }

        @Override
        public CompletableFuture<Void> handleAddTopicProducer(TopicAddProducerEvent event) {
            throw new UnsupportedOperationException("Unexpected event received during test");
        }

        @Override
        public CompletableFuture<Void> handleRemoveTopicProducer(TopicRemoveProducerEvent event) {
            throw new UnsupportedOperationException("Unexpected event received during test");
        }

        @Override
        public CompletableFuture<Void> handleTopicOwnerChanged(TopicOwnerChangeEvent event) {
            throw new UnsupportedOperationException("Unexpected event received during test");
        }
    }

    /**
     * A mock implementation for a TopicBasedRepository which behaves more like the original, Kafka-based one, in that
     * it completes each save operation asynchronously, on a new Thread.
     *
     * @param <T> Type of the elements stored in the repository.
     */
    private static class DecoupledTopicBasedRepositoryMock<T extends HasKey> extends TopicBasedRepositoryMock<T> {

        private final ExecutorService executorService;

        public DecoupledTopicBasedRepositoryMock(ExecutorService executorService) {
            this.executorService = executorService;
        }

        @Override
        public CompletableFuture<Void> save(T value) {
            // Complete the future on a separate thread with a short delay, so follow-up stages of
            // the service's CompletableFuture chain run on a thread WITHOUT the test's security context.
            return super.save(value).thenCompose(o -> CompletableFuture.runAsync(() -> {
                try {
                    Thread.sleep(200);
                }
                catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }, executorService));
        }
    }
}
| 11,504 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
SubscriptionsControllerTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/subscriptions/SubscriptionsControllerTest.java | package com.hermesworld.ais.galapagos.subscriptions;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.invocation.InvocationOnMock;
import org.springframework.http.HttpStatus;
import org.springframework.web.server.ResponseStatusException;

import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.subscriptions.controller.SubscriptionsController;
import com.hermesworld.ais.galapagos.subscriptions.controller.UpdateSubscriptionDto;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
/**
 * Unit tests for {@link SubscriptionsController}, verifying the authorization and validation logic of the endpoint
 * for updating the state of a subscription.
 */
class SubscriptionsControllerTest {

    private KafkaClusters kafkaClusters;

    private SubscriptionService subscriptionService;

    private ApplicationsService applicationsService;

    private TopicService topicService;

    /** Pending subscription of application "app-1" to "topic-1", shared by all test cases. */
    private final SubscriptionMetadata subscription = new SubscriptionMetadata();

    @BeforeEach
    void initMocks() {
        subscription.setId("sub-1");
        subscription.setTopicName("topic-1");
        subscription.setClientApplicationId("app-1");
        subscription.setState(SubscriptionState.PENDING);
        kafkaClusters = mock(KafkaClusters.class);
        subscriptionService = mock(SubscriptionService.class);
        applicationsService = mock(ApplicationsService.class);
        topicService = mock(TopicService.class);
    }

    @Test
    void testUpdateSubscription_positive() throws Exception {
        // topic owned by app-2, requiring explicit approval of subscriptions
        TopicMetadata topic1 = new TopicMetadata();
        topic1.setName("topic-1");
        topic1.setOwnerApplicationId("app-2");
        topic1.setSubscriptionApprovalRequired(true);

        // record all invocations of updateSubscriptionState() for verification
        List<InvocationOnMock> invocations = new ArrayList<>();
        when(subscriptionService.getSubscriptionsForTopic("test", "topic-1", true)).thenReturn(List.of(subscription));
        when(subscriptionService.updateSubscriptionState(any(), any(), any())).then(inv -> {
            invocations.add(inv);
            return FutureUtil.noop();
        });
        when(topicService.getTopic("test", "topic-1")).thenReturn(Optional.of(topic1));
        // current user owns the topic owner application, but not the client application
        when(applicationsService.isUserAuthorizedFor("app-2")).thenReturn(true);
        when(applicationsService.isUserAuthorizedFor("app-1")).thenReturn(false);

        SubscriptionsController controller = new SubscriptionsController(subscriptionService, applicationsService,
                topicService, kafkaClusters);
        UpdateSubscriptionDto updateData = new UpdateSubscriptionDto();
        updateData.setNewState(SubscriptionState.APPROVED);
        controller.updateApplicationSubscription("test", "topic-1", "sub-1", updateData);

        // exactly one state update to the new state must have been performed
        assertEquals(1, invocations.size());
        assertEquals(SubscriptionState.APPROVED, invocations.get(0).getArgument(2));
    }

    @Test
    void testUpdateSubscription_invalidUser_clientAppOwner() {
        TopicMetadata topic = new TopicMetadata();
        topic.setName("topic-1");
        topic.setOwnerApplicationId("app-2");
        when(subscriptionService.getSubscriptionsForTopic("test", "topic-1", true)).thenReturn(List.of(subscription));
        when(subscriptionService.updateSubscriptionState(any(), any(), any()))
                .thenThrow(UnsupportedOperationException.class);
        when(topicService.getTopic("test", "topic-1")).thenReturn(Optional.of(topic));
        // current user only owns the CLIENT application, which must not suffice for approval
        when(applicationsService.isUserAuthorizedFor("app-1")).thenReturn(true);
        when(applicationsService.isUserAuthorizedFor("app-2")).thenReturn(false);

        SubscriptionsController controller = new SubscriptionsController(subscriptionService, applicationsService,
                topicService, kafkaClusters);
        UpdateSubscriptionDto updateData = new UpdateSubscriptionDto();
        updateData.setNewState(SubscriptionState.APPROVED);

        ResponseStatusException e = assertThrows(ResponseStatusException.class,
                () -> controller.updateApplicationSubscription("test", "topic-1", "sub-1", updateData),
                "Expected FORBIDDEN for owner of CLIENT application");
        assertEquals(HttpStatus.FORBIDDEN, e.getStatusCode());
    }

    @Test
    void testUpdateSubscription_invalidTopic_forSubscription() {
        TopicMetadata topic1 = new TopicMetadata();
        topic1.setName("topic-1");
        topic1.setOwnerApplicationId("app-2");
        TopicMetadata topic2 = new TopicMetadata();
        topic2.setName("topic-2");
        topic2.setOwnerApplicationId("app-2");
        // the subscription belongs to topic-1, but the request will reference topic-2
        when(subscriptionService.getSubscriptionsForTopic("test", "topic-1", true)).thenReturn(List.of(subscription));
        when(subscriptionService.updateSubscriptionState(any(), any(), any()))
                .thenThrow(UnsupportedOperationException.class);
        when(topicService.getTopic("test", "topic-1")).thenReturn(Optional.of(topic1));
        when(topicService.getTopic("test", "topic-2")).thenReturn(Optional.of(topic2));
        when(applicationsService.isUserAuthorizedFor("app-2")).thenReturn(true);

        SubscriptionsController controller = new SubscriptionsController(subscriptionService, applicationsService,
                topicService, kafkaClusters);
        UpdateSubscriptionDto updateData = new UpdateSubscriptionDto();
        updateData.setNewState(SubscriptionState.APPROVED);

        ResponseStatusException e = assertThrows(ResponseStatusException.class,
                () -> controller.updateApplicationSubscription("test", "topic-2", "sub-1", updateData),
                "Expected NOT_FOUND as subscription does not match topic");
        assertEquals(HttpStatus.NOT_FOUND, e.getStatusCode());
    }

    @Test
    void testUpdateSubscription_topicHasFlagNotSet() {
        TopicMetadata topic1 = new TopicMetadata();
        topic1.setName("topic-1");
        topic1.setOwnerApplicationId("app-2");
        // topic does NOT require subscription approval, so a state update must be rejected
        topic1.setSubscriptionApprovalRequired(false);
        when(subscriptionService.getSubscriptionsForTopic("test", "topic-1", true)).thenReturn(List.of(subscription));
        when(topicService.getTopic("test", "topic-1")).thenReturn(Optional.of(topic1));
        when(applicationsService.isUserAuthorizedFor("app-2")).thenReturn(true);

        SubscriptionsController controller = new SubscriptionsController(subscriptionService, applicationsService,
                topicService, kafkaClusters);
        UpdateSubscriptionDto updateData = new UpdateSubscriptionDto();
        updateData.setNewState(SubscriptionState.APPROVED);

        ResponseStatusException e = assertThrows(ResponseStatusException.class,
                () -> controller.updateApplicationSubscription("test", "topic-1", "sub-1", updateData),
                "Expected BAD_REQUEST for updating subscription state for topic which does not require subscription approval");
        assertEquals(HttpStatus.BAD_REQUEST, e.getStatusCode());
    }
}
| 7,427 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
SubscriptionServiceImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/subscriptions/service/impl/SubscriptionServiceImplTest.java | package com.hermesworld.ais.galapagos.subscriptions.service.impl;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.events.GalapagosEventManagerMock;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionState;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import static org.junit.jupiter.api.Assertions.*;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link SubscriptionServiceImpl}, using an in-memory repository mock and a recording event manager.
 */
class SubscriptionServiceImplTest {

    private KafkaClusters kafkaClusters;

    private ApplicationsService applicationsService;

    private TopicService topicService;

    private GalapagosEventManagerMock eventManager;

    private TopicBasedRepositoryMock<SubscriptionMetadata> repository;

    @BeforeEach
    void setupMocks() {
        kafkaClusters = mock(KafkaClusters.class);
        KafkaCluster testCluster = mock(KafkaCluster.class);
        repository = new TopicBasedRepositoryMock<>();
        when(testCluster.getRepository("subscriptions", SubscriptionMetadata.class)).thenReturn(repository);
        when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));

        ApplicationMetadata app1 = new ApplicationMetadata();
        app1.setApplicationId("app-1");
        ApplicationMetadata app2 = new ApplicationMetadata();
        app2.setApplicationId("app-2");
        applicationsService = mock(ApplicationsService.class);
        when(applicationsService.getApplicationMetadata("test", "app-1")).thenReturn(Optional.of(app1));
        when(applicationsService.getApplicationMetadata("test", "app-2")).thenReturn(Optional.of(app2));

        // topic-1: subscribable without approval; topic-2: internal (not subscribable);
        // topic-3: requires subscription approval. All owned by app-2.
        TopicMetadata topic1 = new TopicMetadata();
        topic1.setName("topic-1");
        topic1.setType(TopicType.EVENTS);
        topic1.setOwnerApplicationId("app-2");
        topic1.setSubscriptionApprovalRequired(false);
        TopicMetadata topic2 = new TopicMetadata();
        topic2.setName("topic-2");
        topic2.setType(TopicType.INTERNAL);
        topic2.setOwnerApplicationId("app-2");
        TopicMetadata topic3 = new TopicMetadata();
        topic3.setName("topic-3");
        topic3.setType(TopicType.EVENTS);
        topic3.setOwnerApplicationId("app-2");
        topic3.setSubscriptionApprovalRequired(true);
        topicService = mock(TopicService.class);
        when(topicService.getTopic("test", "topic-1")).thenReturn(Optional.of(topic1));
        when(topicService.getTopic("test", "topic-2")).thenReturn(Optional.of(topic2));
        when(topicService.getTopic("test", "topic-3")).thenReturn(Optional.of(topic3));

        eventManager = new GalapagosEventManagerMock();
    }

    @Test
    void testAddSubscription_positive() throws Exception {
        SubscriptionServiceImpl service = newService();

        SubscriptionMetadata metadata = service.addSubscription("test", "topic-1", "app-1", "Some Desc").get();

        // topic-1 does not require approval, so the subscription must directly be APPROVED
        assertNotNull(metadata.getId());
        assertEquals("topic-1", metadata.getTopicName());
        assertEquals("app-1", metadata.getClientApplicationId());
        assertEquals(SubscriptionState.APPROVED, metadata.getState());
        assertEquals("Some Desc", metadata.getDescription());
        assertNotNull(repository.getObject(metadata.getId()));
        assertEquals(1, eventManager.getSinkInvocations().size());
        assertEquals("handleSubscriptionCreated", eventManager.getSinkInvocations().get(0).getMethod().getName());
    }

    @Test
    void testAddSubscription_pending() throws Exception {
        SubscriptionServiceImpl service = newService();

        SubscriptionMetadata metadata = service.addSubscription("test", "topic-3", "app-1", null).get();

        // topic-3 requires approval, so the subscription must start as PENDING
        assertNotNull(metadata.getId());
        assertEquals("topic-3", metadata.getTopicName());
        assertEquals("app-1", metadata.getClientApplicationId());
        assertEquals(SubscriptionState.PENDING, metadata.getState());
        assertNull(metadata.getDescription());
        assertNotNull(repository.getObject(metadata.getId()));
        assertEquals(1, eventManager.getSinkInvocations().size());
        assertEquals("handleSubscriptionCreated", eventManager.getSinkInvocations().get(0).getMethod().getName());
    }

    @Test
    void testAddSubscription_fail_internalTopic() {
        SubscriptionServiceImpl service = newService();

        ExecutionException e = assertThrows(ExecutionException.class,
                () -> service.addSubscription("test", "topic-2", "app-1", null).get(),
                "Expected exception when trying to subscribe an internal topic");
        assertTrue(e.getCause() instanceof IllegalArgumentException);
        assertEquals(0, eventManager.getSinkInvocations().size());
    }

    @Test
    void testAddSubscription_directMetadata() throws Exception {
        SubscriptionMetadata sub = newSubscription("123", SubscriptionState.APPROVED, "app-1", "topic-3");

        SubscriptionServiceImpl service = newService();
        SubscriptionMetadata createdMeta = service.addSubscription("test", sub).get();

        // a fresh ID must be assigned; all other properties must be taken over
        assertNotEquals(sub.getId(), createdMeta.getId());
        assertEquals(sub.getState(), createdMeta.getState());
        assertEquals(sub.getClientApplicationId(), createdMeta.getClientApplicationId());
        assertEquals(sub.getTopicName(), createdMeta.getTopicName());
    }

    @Test
    void testUpdateSubscriptionState_positive() throws Exception {
        repository.save(newSubscription("123", SubscriptionState.PENDING, "app-1", "topic-3")).get();

        SubscriptionServiceImpl service = newService();
        service.updateSubscriptionState("test", "123", SubscriptionState.APPROVED).get();

        assertEquals(1, eventManager.getSinkInvocations().size());
        assertEquals("handleSubscriptionUpdated", eventManager.getSinkInvocations().get(0).getMethod().getName());
        SubscriptionMetadata savedSub = repository.getObject("123").orElseThrow();
        assertEquals(SubscriptionState.APPROVED, savedSub.getState());
    }

    @Test
    void testUpdateSubscriptionState_rejected_deletes() throws Exception {
        repository.save(newSubscription("123", SubscriptionState.PENDING, "app-1", "topic-3")).get();

        SubscriptionServiceImpl service = newService();
        service.updateSubscriptionState("test", "123", SubscriptionState.REJECTED).get();

        assertEquals(1, eventManager.getSinkInvocations().size());
        assertEquals("handleSubscriptionUpdated", eventManager.getSinkInvocations().get(0).getMethod().getName());
        // subscription should instantly be deleted
        assertFalse(repository.getObject("123").isPresent());
    }

    @Test
    void testUpdateSubscriptionState_canceled_deletes() throws Exception {
        repository.save(newSubscription("123", SubscriptionState.APPROVED, "app-1", "topic-3")).get();

        SubscriptionServiceImpl service = newService();
        service.updateSubscriptionState("test", "123", SubscriptionState.CANCELED).get();

        assertEquals(1, eventManager.getSinkInvocations().size());
        assertEquals("handleSubscriptionUpdated", eventManager.getSinkInvocations().get(0).getMethod().getName());
        // subscription should instantly be deleted
        assertFalse(repository.getObject("123").isPresent());
    }

    @Test
    void testUpdateSubscriptionState_rejected_mapsToCanceled() throws Exception {
        repository.save(newSubscription("123", SubscriptionState.APPROVED, "app-1", "topic-3")).get();

        SubscriptionServiceImpl service = newService();
        service.updateSubscriptionState("test", "123", SubscriptionState.REJECTED).get();

        assertEquals(1, eventManager.getSinkInvocations().size());
        assertEquals("handleSubscriptionUpdated", eventManager.getSinkInvocations().get(0).getMethod().getName());
        // REJECTED on an already approved subscription must be reported as CANCELED
        SubscriptionMetadata metadata = eventManager.getSinkInvocations().get(0).getArgument(0);
        assertEquals(SubscriptionState.CANCELED, metadata.getState());
        // subscription should instantly be deleted
        assertFalse(repository.getObject("123").isPresent());
    }

    @Test
    void testUpdateSubscriptionState_noop() throws Exception {
        SubscriptionMetadata sub = newSubscription("123", SubscriptionState.APPROVED, "app-1", "topic-3");
        repository.save(sub).get();

        SubscriptionServiceImpl service = newService();
        service.updateSubscriptionState("test", "123", SubscriptionState.APPROVED).get();

        // no state change -> no event, and the stored object must be untouched
        assertEquals(0, eventManager.getSinkInvocations().size());
        assertSame(repository.getObject("123").orElseThrow(), sub); // intentionally object identity
    }

    @Test
    void testDeleteSubscription() throws Exception {
        repository.save(newSubscription("123", SubscriptionState.APPROVED, "app-1", "topic-3")).get();

        SubscriptionServiceImpl service = newService();
        service.deleteSubscription("test", "123").get();

        assertEquals(1, eventManager.getSinkInvocations().size());
        assertEquals("handleSubscriptionDeleted", eventManager.getSinkInvocations().get(0).getMethod().getName());
        assertFalse(repository.getObject("123").isPresent());
    }

    @Test
    void testGetSubscriptionsForTopic() throws Exception {
        repository.save(newSubscription("123", SubscriptionState.APPROVED, "app-1", "topic-3")).get();
        repository.save(newSubscription("124", SubscriptionState.PENDING, "app-3", "topic-3")).get();

        SubscriptionServiceImpl service = newService();

        // without includePending, only the approved subscription must be returned
        List<SubscriptionMetadata> result = service.getSubscriptionsForTopic("test", "topic-3", false);
        assertEquals(1, result.size());
        assertEquals("123", result.get(0).getId());

        result = service.getSubscriptionsForTopic("test", "topic-3", true);
        assertEquals(2, result.size());
        assertEquals("123", result.get(0).getId());
    }

    @Test
    void testGetSubscriptionsOfApplication() throws Exception {
        repository.save(newSubscription("123", SubscriptionState.APPROVED, "app-1", "topic-3")).get();
        repository.save(newSubscription("124", SubscriptionState.PENDING, "app-3", "topic-3")).get();

        SubscriptionServiceImpl service = newService();

        List<SubscriptionMetadata> result = service.getSubscriptionsOfApplication("test", "app-1", false);
        assertEquals(1, result.size());
        assertEquals("123", result.get(0).getId());

        // pending subscriptions must only be returned when explicitly requested
        result = service.getSubscriptionsOfApplication("test", "app-3", false);
        assertEquals(0, result.size());
        result = service.getSubscriptionsOfApplication("test", "app-3", true);
        assertEquals(1, result.size());
        assertEquals("124", result.get(0).getId());
    }

    /** Creates the service under test, wired with all mocks of this test class. */
    private SubscriptionServiceImpl newService() {
        return new SubscriptionServiceImpl(kafkaClusters, applicationsService, topicService, eventManager);
    }

    /** Creates a subscription metadata object with the given properties. */
    private static SubscriptionMetadata newSubscription(String id, SubscriptionState state, String clientApplicationId,
            String topicName) {
        SubscriptionMetadata sub = new SubscriptionMetadata();
        sub.setId(id);
        sub.setState(state);
        sub.setClientApplicationId(clientApplicationId);
        sub.setTopicName(topicName);
        return sub;
    }
}
| 14,160 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ViewAclsJobTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/adminjobs/impl/ViewAclsJobTest.java | package com.hermesworld.ais.galapagos.adminjobs.impl;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.util.List;
import java.util.Optional;
import java.util.function.Function;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import org.json.JSONArray;
import org.json.JSONObject;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.boot.ApplicationArguments;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.util.FutureUtil;
/**
 * Unit tests for {@code ViewAclsJob}, verifying that the ACL bindings of a cluster are printed as a JSON array on
 * STDOUT.
 */
class ViewAclsJobTest {

    private final ByteArrayOutputStream stdoutData = new ByteArrayOutputStream();

    private PrintStream oldOut;

    @BeforeEach
    void setup() {
        // redirect STDOUT into a buffer so the job's console output can be inspected
        oldOut = System.out;
        System.setOut(new PrintStream(stdoutData));
    }

    @AfterEach
    void cleanup() {
        System.setOut(oldOut);
    }

    @Test
    void testJsonMapping() throws Exception {
        KafkaClusters clusters = mock(KafkaClusters.class);
        KafkaCluster cluster = mock(KafkaCluster.class);
        // visitAcls() feeds two fixed ACL bindings to the visitor function
        when(cluster.visitAcls(any())).then(inv -> {
            Function<AclBinding, Boolean> fn = inv.getArgument(0);
            AclBinding binding1 = new AclBinding(new ResourcePattern(ResourceType.GROUP, "group1", PatternType.LITERAL),
                    new AccessControlEntry("dummy", "localhost", AclOperation.ALTER, AclPermissionType.ALLOW));
            AclBinding binding2 = new AclBinding(
                    new ResourcePattern(ResourceType.TOPIC, "topic1", PatternType.PREFIXED),
                    new AccessControlEntry("alice", "otherhost", AclOperation.READ, AclPermissionType.DENY));
            fn.apply(binding1);
            fn.apply(binding2);
            return FutureUtil.noop();
        });
        when(clusters.getEnvironment("test")).thenReturn(Optional.of(cluster));

        ViewAclsJob job = new ViewAclsJob(clusters);
        ApplicationArguments args = mock(ApplicationArguments.class);
        when(args.getOptionValues("kafka.environment")).thenReturn(List.of("test"));
        job.run(args);

        String stdout = stdoutData.toString();
        assertTrue(stdout.contains("[{\""));
        String jsonData = stdout.substring(stdout.indexOf("[{\""));
        // Cut off anything after the first line break, so only the JSON array itself is parsed.
        // (Bug fix: the calculated index was previously never applied to the string.)
        int endIndex = jsonData.indexOf('\r');
        if (endIndex == -1) {
            endIndex = jsonData.indexOf('\n');
        }
        if (endIndex != -1) {
            jsonData = jsonData.substring(0, endIndex);
        }
        JSONArray arr = new JSONArray(jsonData);
        assertEquals(2, arr.length());

        // only some checks for now
        JSONObject obj1 = arr.getJSONObject(0);
        assertEquals("LITERAL", obj1.getJSONObject("pattern").getString("patternType"));
        assertEquals("localhost", obj1.getJSONObject("entry").getString("host"));
        JSONObject obj2 = arr.getJSONObject(1);
        assertEquals("topic1", obj2.getJSONObject("pattern").getString("name"));
        assertEquals("DENY", obj2.getJSONObject("entry").getString("permissionType"));
    }
}
| 3,645 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ImportBackupJobTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/adminjobs/impl/ImportBackupJobTest.java | package com.hermesworld.ais.galapagos.adminjobs.impl;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.springframework.boot.ApplicationArguments;
import java.io.File;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link ImportBackupJob}, importing a JSON backup file from the test resources into mocked
 * environment repositories.
 */
class ImportBackupJobTest {

    private ImportBackupJob job;

    private KafkaClusters kafkaClusters;

    private KafkaCluster testCluster;

    private final File testFile = new File("src/test/resources/backup-test.json");

    private TopicBasedRepositoryMock<TopicMetadata> topicRepository;

    @BeforeEach
    void setUp() {
        kafkaClusters = mock(KafkaClusters.class);
        testCluster = mock(KafkaCluster.class);
        when(testCluster.getId()).thenReturn("prod");
        when(kafkaClusters.getEnvironment("prod")).thenReturn(Optional.of(testCluster));
        when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("prod"));
        job = new ImportBackupJob(kafkaClusters);
        topicRepository = newTopicRepository();
    }

    @Test
    @DisplayName("should import backup from import file")
    void importBackupTest() throws Exception {
        // given
        ApplicationArguments args = mock(ApplicationArguments.class);
        when(args.getOptionValues("import.file")).thenReturn(List.of(testFile.getPath()));
        when(args.getOptionValues("clearRepos")).thenReturn(List.of("false"));
        when(testCluster.getRepositories()).thenReturn(List.of(topicRepository));
        // when
        job.run(args);
        // then
        assertTrue(topicRepository.getObject("de.hlg.events.sales.my-topic-nice").isPresent());
    }

    @Test
    @DisplayName("should not clear non-imported environments")
    void importBackup_noClearOnOtherEnv() throws Exception {
        ApplicationArguments args = mock(ApplicationArguments.class);
        when(args.getOptionValues("import.file")).thenReturn(List.of(testFile.getPath()));
        when(args.getOptionValues("clearRepos")).thenReturn(List.of("true"));
        when(testCluster.getRepositories()).thenReturn(List.of(topicRepository));

        // additional "dev" environment whose repository fails the test when cleared
        KafkaCluster devCluster = mock(KafkaCluster.class);
        when(devCluster.getId()).thenReturn("dev");
        TopicBasedRepositoryMock<TopicMetadata> devRepository = new TopicBasedRepositoryMock<>() {
            @Override
            public Class<TopicMetadata> getValueClass() {
                return TopicMetadata.class;
            }

            @Override
            public String getTopicName() {
                return "topics";
            }

            @Override
            public CompletableFuture<Void> delete(TopicMetadata value) {
                return CompletableFuture.failedFuture(
                        new UnsupportedOperationException("Should not call delete() on dev environment during import"));
            }
        };
        TopicMetadata meta = new TopicMetadata();
        meta.setName("devtopic");
        devRepository.save(meta).get();
        when(devCluster.getRepositories()).thenReturn(List.of(devRepository));
        when(kafkaClusters.getEnvironment("dev")).thenReturn(Optional.of(devCluster));
        when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("dev", "prod"));

        job.run(args);
    }

    @Test
    @DisplayName("should import backup from import file and old metadata in repos should still be present")
    void importBackupWithoutClearingExistingRepos() throws Exception {
        // given
        ApplicationArguments args = mock(ApplicationArguments.class);
        topicRepository.save(buildTopicMetadata()).get();
        // when
        when(args.getOptionValues("import.file")).thenReturn(List.of(testFile.getPath()));
        when(args.getOptionValues("clearRepos")).thenReturn(List.of("false"));
        when(testCluster.getRepositories()).thenReturn(List.of(topicRepository));
        // then
        job.run(args);
        assertTrue(topicRepository.getObject("de.hlg.events.sales.my-topic-nice").isPresent());
        assertEquals(2, topicRepository.getObjects().size());
    }

    @Test
    @DisplayName("should import backup from import file and old metadata in repos should be not present")
    void importBackupWithClearingExistingRepos() throws Exception {
        // given
        ApplicationArguments args = mock(ApplicationArguments.class);
        topicRepository.save(buildTopicMetadata()).get();
        // when
        when(args.getOptionValues("import.file")).thenReturn(List.of(testFile.getPath()));
        when(args.getOptionValues("clearRepos")).thenReturn(List.of("true"));
        when(testCluster.getRepositories()).thenReturn(List.of(topicRepository));
        // then
        job.run(args);
        assertTrue(topicRepository.getObject("de.hlg.events.sales.my-topic-nice").isPresent());
        assertEquals(1, topicRepository.getObjects().size());
    }

    @Test
    @DisplayName("should throw exception because no import file is set")
    void importBackupTest_noFileOption() throws Exception {
        // given
        ApplicationArguments args = mock(ApplicationArguments.class);
        topicRepository.save(buildTopicMetadata()).get();
        // when
        when(args.getOptionValues("clearRepos")).thenReturn(List.of("true"));
        when(testCluster.getRepositories()).thenReturn(List.of(topicRepository));
        // then
        assertThrows(IllegalArgumentException.class, () -> job.run(args),
                "job.run() should have thrown an error since there is no import file given");
    }

    @Test
    @DisplayName("should throw exception because no clearRepos option given")
    void importBackupTest_noClearReposOption() throws Exception {
        // given
        ApplicationArguments args = mock(ApplicationArguments.class);
        topicRepository.save(buildTopicMetadata()).get();
        // when
        when(args.getOptionValues("import.file")).thenReturn(List.of(testFile.getPath()));
        when(testCluster.getRepositories()).thenReturn(List.of(topicRepository));
        // then
        assertThrows(IllegalArgumentException.class, () -> job.run(args),
                "job.run() should have thrown an error since there is no clearRepos option given");
    }

    @Test
    @DisplayName("should return correct job name")
    void importBackupTest_correctJobName() {
        assertEquals("import-backup", job.getJobName());
    }

    /**
     * Creates a repository mock for topic metadata; the import job matches repositories by topic name and value
     * class.
     */
    private TopicBasedRepositoryMock<TopicMetadata> newTopicRepository() {
        return new TopicBasedRepositoryMock<>() {
            @Override
            public Class<TopicMetadata> getValueClass() {
                return TopicMetadata.class;
            }

            @Override
            public String getTopicName() {
                return "topics";
            }
        };
    }

    /** Creates a simple topic metadata object representing pre-existing repository content. */
    private TopicMetadata buildTopicMetadata() {
        TopicMetadata metadata = new TopicMetadata();
        metadata.setName("testtopic");
        metadata.setDescription("Testtopic description");
        metadata.setOwnerApplicationId("123");
        metadata.setType(TopicType.EVENTS);
        return metadata;
    }
}
| 7,658 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
GenerateToolingCertificateJobTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/adminjobs/impl/GenerateToolingCertificateJobTest.java | package com.hermesworld.ais.galapagos.adminjobs.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationConfig;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.KafkaUser;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentsConfig;
import com.hermesworld.ais.galapagos.kafka.util.AclSupport;
import com.hermesworld.ais.galapagos.naming.ApplicationPrefixes;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.util.CertificateUtil;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.boot.ApplicationArguments;
import org.springframework.core.io.ClassPathResource;
import org.springframework.util.StreamUtils;
import java.io.*;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.Security;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import java.util.Base64;
import java.util.Enumeration;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.*;
@ExtendWith(MockitoExtension.class)
class GenerateToolingCertificateJobTest {
@Mock
private KafkaClusters kafkaClusters;
@Mock
private KafkaCluster testCluster;
@Mock
private KafkaEnvironmentsConfig kafkaConfig;
@Mock
private NamingService namingService;
@Mock
private AclSupport aclSupport;
private final File testFile = new File("target/test.p12");
private ByteArrayOutputStream stdoutData;
private PrintStream oldOut;
private static final String DATA_MARKER = "CERTIFICATE DATA: ";
    @BeforeEach
    void feedMocks() throws Exception {
        // Allow strong cryptography and register BouncyCastle for the certificate operations below.
        Security.setProperty("crypto.policy", "unlimited");
        Security.addProvider(new BouncyCastleProvider());
        // Environment "test" accepts ACL updates and reports certificate-based authentication.
        when(testCluster.getId()).thenReturn("test");
        when(testCluster.updateUserAcls(any())).thenReturn(CompletableFuture.completedFuture(null));
        when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
        KafkaEnvironmentConfig config = mock(KafkaEnvironmentConfig.class);
        when(config.getAuthenticationMode()).thenReturn("certificates");
        when(kafkaClusters.getEnvironmentMetadata("test")).thenReturn(Optional.of(config));
        // Real (non-mocked) authentication module, backed by the test CA from the test resources.
        CertificatesAuthenticationConfig certConfig = new CertificatesAuthenticationConfig();
        certConfig.setApplicationCertificateValidity("P30D");
        certConfig.setCaCertificateFile(new ClassPathResource("/certificates/ca.cer"));
        certConfig.setCaKeyFile(new ClassPathResource("/certificates/ca.key"));
        certConfig.setCertificatesWorkdir("target/certificates");
        certConfig.setClientDn("cn=galapagos_test_user");
        KafkaAuthenticationModule authModule = new CertificatesAuthenticationModule("test", certConfig);
        authModule.init().get();
        when(kafkaClusters.getAuthenticationModule("test")).thenReturn(Optional.of(authModule));
        // Prefix from the Galapagos config; testStandard() verifies this one is used (not the naming service one).
        when(kafkaConfig.getMetadataTopicsPrefix()).thenReturn("galapagos.testing.");
        ApplicationPrefixes testPrefixes = mock(ApplicationPrefixes.class);
        lenient().when(testPrefixes.getInternalTopicPrefixes()).thenReturn(List.of("test.galapagos.internal."));
        when(testPrefixes.getTransactionIdPrefixes()).thenReturn(List.of("test.galapagos.internal."));
        when(testPrefixes.getConsumerGroupPrefixes()).thenReturn(List.of("galapagos."));
        when(namingService.getAllowedPrefixes(any())).thenReturn(testPrefixes);
        lenient().when(aclSupport.getRequiredAclBindings(any(), any(), any(), anyBoolean())).thenReturn(List.of());
        // redirect STDOUT to String
        oldOut = System.out;
        stdoutData = new ByteArrayOutputStream();
        System.setOut(new PrintStream(stdoutData));
    }
/**
 * Restores the original System.out and removes the keystore file a test may have created.
 */
@AfterEach
void cleanup() {
    System.setOut(oldOut);
    // noinspection ResultOfMethodCallIgnored
    testFile.delete();
}
/**
 * Runs the job with an explicit output file and verifies that the generated certificate is
 * written to that file (and not to STDOUT), and that the internal topic prefix from the
 * Kafka config ("galapagos.testing.") is used for the ACL bindings.
 */
@Test
void testStandard() throws Exception {
    GenerateToolingCertificateJob job = new GenerateToolingCertificateJob(kafkaClusters, aclSupport, namingService,
            kafkaConfig);
    ApplicationArguments args = mock(ApplicationArguments.class);
    when(args.getOptionValues("output.filename")).thenReturn(List.of(testFile.getPath()));
    when(args.getOptionValues("kafka.environment")).thenReturn(List.of("test"));
    job.run(args);
    // Fix: close the stream deterministically (the FileInputStream was previously leaked).
    byte[] readData;
    try (FileInputStream fis = new FileInputStream(testFile)) {
        readData = StreamUtils.copyToByteArray(fis);
    }
    X509Certificate cert = extractCertificate(readData);
    assertEquals("galapagos", CertificateUtil.extractCn(cert.getSubjectX500Principal().getName()));
    // and no data on STDOUT
    assertFalse(stdoutData.toString().contains(DATA_MARKER));
    // verify that correct internal prefix has been used (from config!)
    ArgumentCaptor<KafkaUser> userCaptor = ArgumentCaptor.forClass(KafkaUser.class);
    ArgumentCaptor<ApplicationMetadata> captor = ArgumentCaptor.forClass(ApplicationMetadata.class);
    verify(testCluster, times(1)).updateUserAcls(userCaptor.capture());
    // NOTE(review): return value is ignored — presumably this only triggers evaluation of the
    // bindings; confirm whether this call is still needed.
    userCaptor.getValue().getRequiredAclBindings();
    verify(aclSupport, atLeast(1)).getRequiredAclBindings(eq("test"), captor.capture(), any(), anyBoolean());
    assertEquals(1, captor.getValue().getInternalTopicPrefixes().size());
    assertEquals("galapagos.testing.", captor.getValue().getInternalTopicPrefixes().get(0));
}
/**
 * Without an output file argument, the job must print the Base64-encoded certificate data to
 * STDOUT, prefixed by the data marker, and the decoded keystore must contain the expected
 * "galapagos" certificate.
 */
@Test
void testDataOnStdout() throws Exception {
    GenerateToolingCertificateJob job = new GenerateToolingCertificateJob(kafkaClusters, aclSupport, namingService,
            kafkaConfig);
    ApplicationArguments args = mock(ApplicationArguments.class);
    when(args.getOptionValues("kafka.environment")).thenReturn(List.of("test"));
    job.run(args);
    // data must be on STDOUT
    String capturedOutput = stdoutData.toString();
    assertTrue(capturedOutput.contains(DATA_MARKER));
    // extract the Base64 payload following the marker, up to the end of that line
    String afterMarker = capturedOutput.substring(capturedOutput.indexOf(DATA_MARKER));
    String base64Data = afterMarker.substring(DATA_MARKER.length(), afterMarker.indexOf('\n'));
    // Windows hack: strip a trailing carriage return, if present
    if (base64Data.endsWith("\r")) {
        base64Data = base64Data.substring(0, base64Data.length() - 1);
    }
    X509Certificate cert = extractCertificate(Base64.getDecoder().decode(base64Data));
    assertEquals("galapagos", CertificateUtil.extractCn(cert.getSubjectX500Principal().getName()));
}
/**
 * Reads the single certificate from the given PKCS#12 keystore data (password "changeit").
 * Throws IllegalStateException as soon as a second alias is encountered.
 */
private X509Certificate extractCertificate(byte[] p12Data)
        throws KeyStoreException, CertificateException, IOException, NoSuchAlgorithmException {
    KeyStore keyStore = KeyStore.getInstance("pkcs12");
    keyStore.load(new ByteArrayInputStream(p12Data), "changeit".toCharArray());
    X509Certificate result = null;
    for (Enumeration<String> aliases = keyStore.aliases(); aliases.hasMoreElements();) {
        String alias = aliases.nextElement();
        if (result != null) {
            throw new IllegalStateException("More than one certificate in .p12 data");
        }
        result = (X509Certificate) keyStore.getCertificate(alias);
    }
    return result;
}
}
| 8,160 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ImportKnownApplicationsJobTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/adminjobs/impl/ImportKnownApplicationsJobTest.java | package com.hermesworld.ais.galapagos.adminjobs.impl;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.hermesworld.ais.galapagos.applications.impl.KnownApplicationImpl;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.boot.ApplicationArguments;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests for ImportKnownApplicationsJob: importing application metadata from JSON files into
 * the global "known-applications" repository.
 */
class ImportKnownApplicationsJobTest {
// mocked cluster registry, re-created for every test in setUp()
private KafkaClusters kafkaClusters;
// import file where applications have NO info URL set
private final File fileWithOutInfoUrl = new File("src/test/resources/test-applications.json");
// same applications, but with an info URL (used to test re-import of changed apps)
private final File fileWithInfoUrl = new File("src/test/resources/test-applications-infoUrl.json");
// global repository registered at the mocked KafkaClusters in setUp()
private TopicBasedRepositoryMock<KnownApplicationImpl> appRepository;
private ObjectMapper mapper;
/**
 * Creates a mocked KafkaClusters with one "test" cluster and registers a fresh (empty)
 * global known-applications repository, which the tests populate as needed.
 */
@BeforeEach
void setUp() {
mapper = JsonUtil.newObjectMapper();
kafkaClusters = mock(KafkaClusters.class);
KafkaCluster testCluster = mock(KafkaCluster.class);
when(testCluster.getId()).thenReturn("test");
when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
appRepository = new TopicBasedRepositoryMock<>();
when(kafkaClusters.getGlobalRepository("known-applications", KnownApplicationImpl.class))
.thenReturn(appRepository);
}
/**
 * Imports a file where a known application gained an info URL. Exactly one application must
 * be reported as imported, and the stored info URL must be updated.
 */
@Test
void reImportAfterAppChanges() throws Exception {
    List<KnownApplicationImpl> knownApplications = mapper.readValue(fileWithOutInfoUrl, new TypeReference<>() {
    });
    ImportKnownApplicationsJob job = new ImportKnownApplicationsJob(kafkaClusters);
    ApplicationArguments args = mock(ApplicationArguments.class);
    when(args.getOptionValues("applications.import.file")).thenReturn(List.of(fileWithInfoUrl.getPath()));
    // pre-populate the repository with the variants WITHOUT info URL
    for (KnownApplicationImpl app : knownApplications) {
        safeGet(appRepository.save(app));
    }
    // redirect STDOUT to check update count
    PrintStream originalOut = System.out;
    ByteArrayOutputStream stdoutBuffer = new ByteArrayOutputStream();
    try {
        System.setOut(new PrintStream(stdoutBuffer));
        job.run(args);
    }
    finally {
        System.setOut(originalOut);
    }
    String output = stdoutBuffer.toString(StandardCharsets.UTF_8);
    assertTrue(output.contains("\n1 new application(s) imported."));
    // noinspection OptionalGetWithoutIsPresent
    assertEquals("https://www.google.com", appRepository.getObject("app-1").get().getInfoUrl().toString());
}
/**
 * Importing a file whose contents already match the repository must report zero imported
 * applications.
 */
@Test
void importApps_alreadyIdentical() throws Exception {
    List<KnownApplicationImpl> knownApplications = mapper.readValue(fileWithOutInfoUrl, new TypeReference<>() {
    });
    ImportKnownApplicationsJob job = new ImportKnownApplicationsJob(kafkaClusters);
    ApplicationArguments args = mock(ApplicationArguments.class);
    when(args.getOptionValues("applications.import.file")).thenReturn(List.of(fileWithOutInfoUrl.getPath()));
    // Fix: use the repository already registered at kafkaClusters in setUp(), instead of
    // shadowing the appRepository field with a local mock and re-stubbing kafkaClusters.
    knownApplications.forEach(app -> safeGet(appRepository.save(app)));
    // redirect STDOUT to check update count
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    PrintStream oldOut = System.out;
    try {
        System.setOut(new PrintStream(buffer));
        job.run(args);
    }
    finally {
        System.setOut(oldOut);
    }
    String output = buffer.toString(StandardCharsets.UTF_8);
    assertTrue(output.contains("\n0 new application(s) imported."));
}
/**
 * Importing into an empty repository must import all five applications from the test file.
 */
@Test
void importApps_positiv() throws Exception {
    ImportKnownApplicationsJob job = new ImportKnownApplicationsJob(kafkaClusters);
    ApplicationArguments args = mock(ApplicationArguments.class);
    when(args.getOptionValues("applications.import.file")).thenReturn(List.of(fileWithOutInfoUrl.getPath()));
    // Fix: rely on the (empty) repository registered in setUp() instead of shadowing the
    // appRepository field with a fresh local mock and re-stubbing kafkaClusters.
    // redirect STDOUT to check update count
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    PrintStream oldOut = System.out;
    try {
        System.setOut(new PrintStream(buffer));
        job.run(args);
    }
    finally {
        System.setOut(oldOut);
    }
    String output = buffer.toString(StandardCharsets.UTF_8);
    assertTrue(output.contains("\n5 new application(s) imported."));
    assertEquals(5, appRepository.getObjects().size());
    assertTrue(appRepository.getObject("2222").isPresent());
    // noinspection OptionalGetWithoutIsPresent
    assertEquals("High Five", appRepository.getObject("F.I.V.E").get().getName());
}
/**
 * Waits for the given future, rethrowing any failure as an unchecked exception so the call
 * can be used inside lambdas.
 */
private void safeGet(CompletableFuture<?> future) {
    try {
        future.get();
    }
    catch (InterruptedException e) {
        // Fix: restore the interrupt flag before rethrowing (it was silently swallowed).
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
    catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
} | 6,014 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CreateBackupJobTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/adminjobs/impl/CreateBackupJobTest.java | package com.hermesworld.ais.galapagos.adminjobs.impl;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import com.hermesworld.ais.galapagos.applications.RequestState;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionState;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.springframework.boot.ApplicationArguments;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.*;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.*;
/**
 * Tests for CreateBackupJob: verifies that the job serializes the metadata of all mocked
 * environments ("test" and "prod") into backup.json.
 */
class CreateBackupJobTest {
// mocked cluster registry, built in setUp()
private KafkaClusters kafkaClusters;
// repositories served by the mocked "test" cluster, keyed by topic name
private final Map<String, TopicBasedRepositoryMock<?>> testRepo = new HashMap<>();
// repositories served by the mocked "prod" cluster, keyed by topic name
private final Map<String, TopicBasedRepositoryMock<?>> prodRepo = new HashMap<>();
private ObjectMapper mapper;
/**
 * Builds one mocked "test" and one mocked "prod" cluster, each serving a "topics",
 * "subscriptions" and "application-owner-requests" repository containing exactly one object.
 * Refactored: the six near-identical anonymous repository mocks are now produced by the
 * generic {@link #singleObjectRepository} factory plus small metadata builders.
 */
@BeforeEach
void setUp() {
    testRepo.put("topics", singleObjectRepository("topics", TopicMetadata.class, topic("topic-1", "app-1")));
    testRepo.put("subscriptions", singleObjectRepository("subscriptions", SubscriptionMetadata.class,
            subscription("123", "app-1", "topic-1")));
    testRepo.put("application-owner-requests", singleObjectRepository("application-owner-requests",
            ApplicationOwnerRequest.class, ownerRequest("1", "app-1", "myUser")));
    prodRepo.put("topics", singleObjectRepository("topics", TopicMetadata.class, topic("topic-2", "app-2")));
    prodRepo.put("subscriptions", singleObjectRepository("subscriptions", SubscriptionMetadata.class,
            subscription("12323", "app-12", "topic-2")));
    prodRepo.put("application-owner-requests", singleObjectRepository("application-owner-requests",
            ApplicationOwnerRequest.class, ownerRequest("2", "app-2", "myUser2")));
    mapper = JsonUtil.newObjectMapper();
    kafkaClusters = mock(KafkaClusters.class);
    KafkaCluster testCluster = mock(KafkaCluster.class);
    KafkaCluster prodCluster = mock(KafkaCluster.class);
    when(testCluster.getId()).thenReturn("test");
    when(prodCluster.getId()).thenReturn("prod");
    doReturn(testRepo.values()).when(testCluster).getRepositories();
    doReturn(prodRepo.values()).when(prodCluster).getRepositories();
    // noinspection SuspiciousMethodCalls
    when(testCluster.getRepository(any(), any())).then(inv -> testRepo.get(inv.getArgument(0)));
    // noinspection SuspiciousMethodCalls
    when(prodCluster.getRepository(any(), any())).then(inv -> prodRepo.get(inv.getArgument(0)));
    when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
    when(kafkaClusters.getEnvironment("prod")).thenReturn(Optional.of(prodCluster));
    when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test", "prod"));
}

// Builds a repository mock exposing exactly one object under the given topic name.
private static <T> TopicBasedRepositoryMock<T> singleObjectRepository(String topicName, Class<T> valueClass,
        T object) {
    return new TopicBasedRepositoryMock<T>() {
        @Override
        public Class<T> getValueClass() {
            return valueClass;
        }

        @Override
        public String getTopicName() {
            return topicName;
        }

        @Override
        public Collection<T> getObjects() {
            return List.of(object);
        }
    };
}

// Builds an EVENTS topic metadata object with the given name and owner.
private static TopicMetadata topic(String name, String ownerApplicationId) {
    TopicMetadata meta = new TopicMetadata();
    meta.setName(name);
    meta.setOwnerApplicationId(ownerApplicationId);
    meta.setType(TopicType.EVENTS);
    return meta;
}

// Builds an APPROVED subscription with the given id, client application, and topic.
private static SubscriptionMetadata subscription(String id, String clientApplicationId, String topicName) {
    SubscriptionMetadata sub = new SubscriptionMetadata();
    sub.setId(id);
    sub.setClientApplicationId(clientApplicationId);
    sub.setTopicName(topicName);
    sub.setState(SubscriptionState.APPROVED);
    return sub;
}

// Builds an APPROVED application owner request with the given id, application, and user.
private static ApplicationOwnerRequest ownerRequest(String id, String applicationId, String userName) {
    ApplicationOwnerRequest req = new ApplicationOwnerRequest();
    req.setApplicationId(applicationId);
    req.setId(id);
    req.setUserName(userName);
    req.setState(RequestState.APPROVED);
    return req;
}
/**
 * Runs the backup job and verifies that backup.json contains the metadata of both
 * environments. Note: values are compared via JsonNode.toString(), hence the quoted
 * expected strings below.
 */
@Test
@DisplayName("it should create a backup from all the metadata currently saved within Galapagos")
void createBackUp_success() throws Exception {
    CreateBackupJob job = new CreateBackupJob(kafkaClusters);
    ApplicationArguments args = mock(ApplicationArguments.class);
    when(args.getOptionValues("create.backup.file")).thenReturn(List.of("true"));
    // Fix: let exceptions fail the test instead of swallowing them with printStackTrace(),
    // which previously allowed the test to continue (and fail confusingly) after a job error.
    job.run(args);
    String backUpJson = Files.readString(Path.of("backup.json"));
    JsonNode jsonNode = mapper.readTree(backUpJson);
    String topicName = jsonNode.get("test").get("topics").get("topic-1").get("name").toString();
    String topicType = jsonNode.get("test").get("topics").get("topic-1").get("type").toString();
    String clientApplicationIdSub = jsonNode.get("test").get("subscriptions").get("123").get("clientApplicationId")
            .toString();
    String subId = jsonNode.get("test").get("subscriptions").get("123").get("id").toString();
    String aorId = jsonNode.get("test").get("application-owner-requests").get("1").get("id").toString();
    String aorState = jsonNode.get("test").get("application-owner-requests").get("1").get("state").toString();
    String username = jsonNode.get("test").get("application-owner-requests").get("1").get("userName").toString();
    String topicNameProd = jsonNode.get("prod").get("topics").get("topic-2").get("name").toString();
    String topicTypeProd = jsonNode.get("prod").get("topics").get("topic-2").get("type").toString();
    String clientApplicationIdSubProd = jsonNode.get("prod").get("subscriptions").get("12323")
            .get("clientApplicationId").toString();
    String subIdProd = jsonNode.get("prod").get("subscriptions").get("12323").get("id").toString();
    String aorIdProd = jsonNode.get("prod").get("application-owner-requests").get("2").get("id").toString();
    String aorStateProd = jsonNode.get("prod").get("application-owner-requests").get("2").get("state").toString();
    String usernameProd = jsonNode.get("prod").get("application-owner-requests").get("2").get("userName")
            .toString();
    // test data
    assertEquals("\"topic-1\"", topicName);
    assertEquals("\"EVENTS\"", topicType);
    assertEquals("\"app-1\"", clientApplicationIdSub);
    assertEquals("\"123\"", subId);
    assertEquals("\"1\"", aorId);
    assertEquals("\"APPROVED\"", aorState);
    assertEquals("\"myUser\"", username);
    // prod data
    assertEquals("\"topic-2\"", topicNameProd);
    assertEquals("\"EVENTS\"", topicTypeProd);
    assertEquals("\"app-12\"", clientApplicationIdSubProd);
    assertEquals("\"12323\"", subIdProd);
    assertEquals("\"2\"", aorIdProd);
    assertEquals("\"APPROVED\"", aorStateProd);
    assertEquals("\"myUser2\"", usernameProd);
}
}
| 10,061 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
SchemaCompatibilityValidatorTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/schema/SchemaCompatibilityValidatorTest.java | package com.hermesworld.ais.galapagos.schema;
import com.hermesworld.ais.galapagos.schemas.ConsumerCompatibilityErrorHandler;
import com.hermesworld.ais.galapagos.schemas.IncompatibleSchemaException;
import com.hermesworld.ais.galapagos.schemas.ProducerCompatibilityErrorHandler;
import com.hermesworld.ais.galapagos.schemas.SchemaCompatibilityValidator;
import org.everit.json.schema.Schema;
import org.everit.json.schema.loader.SchemaLoader;
import org.json.JSONObject;
import org.junit.jupiter.api.Test;
import org.springframework.util.StreamUtils;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Tests for SchemaCompatibilityValidator. Each test loads a pair of JSON schemas from the
 * "schema-compatibility" test resources and checks whether the validator accepts or rejects
 * the new schema relative to the old one.
 */
public class SchemaCompatibilityValidatorTest {

    @Test
    public void testAddAdditionalPropertiesOnObject_fail() throws Exception {
        assertConsumerIncompatible("test01a", "test01b");
    }

    @Test
    public void testAddRequiredPropertyOnObject_success() throws Exception {
        assertConsumerCompatible("test02a", "test02b");
    }

    @Test
    public void testRemoveOneOfSchema_success() throws Exception {
        assertConsumerCompatible("test03a", "test03b");
    }

    @Test
    public void testAddOneOfSchema_fail() throws Exception {
        assertConsumerIncompatible("test03a", "test03c");
    }

    @Test
    public void testAddArrayRestriction_success() throws Exception {
        assertConsumerCompatible("test03a", "test03d");
    }

    @Test
    public void testRelaxArrayRestriction_fail() throws Exception {
        assertConsumerIncompatible("test03d", "test03e");
    }

    @Test
    public void testAddPropertyWithAdditionalProperties_success() throws Exception {
        assertConsumerCompatible("test04a", "test04b");
    }

    @Test
    public void testAddStringLimits_success() throws Exception {
        assertConsumerCompatible("test04a", "test04c");
    }

    @Test
    public void testRelaxStringLimits_fail() throws Exception {
        assertConsumerIncompatible("test04c", "test04d");
    }

    @Test
    public void testRemoveEnumValue_success() throws Exception {
        assertConsumerCompatible("test05a", "test05b");
    }

    @Test
    public void testAddEnumValue_fail() throws Exception {
        assertConsumerIncompatible("test05a", "test05c");
    }

    @Test
    public void testNotMoreLiberal_success() throws Exception {
        assertConsumerCompatible("test06a", "test06b");
    }

    @Test
    public void testNotMoreStrict_fail() throws Exception {
        assertConsumerIncompatible("test06a", "test06c");
    }

    @Test
    public void testTotallyDifferent_fail() throws Exception {
        assertConsumerIncompatible("test01a", "test03a");
    }

    @Test
    public void testAnyOfReplacedBySubschema_success() throws Exception {
        assertConsumerCompatible("test07a", "test07b");
    }

    @Test
    public void testAnyOfReplacedByIncompatibleSchema_fail() throws Exception {
        assertConsumerIncompatible("test07a", "test07c");
    }

    @Test
    public void testIntegerToNumber_fail() throws Exception {
        assertConsumerIncompatible("test08a", "test08b");
    }

    @Test
    public void testIntegerStaysInteger_success() throws Exception {
        assertConsumerCompatible("test08a", "test08c");
    }

    @Test
    public void testNumberToInteger_success() throws Exception {
        assertConsumerCompatible("test08b", "test08a");
    }

    @Test
    public void testRemoveOptionalWithNoAdditionalProperties_success() throws Exception {
        assertConsumerCompatible("test09a", "test09b");
    }

    @Test
    public void testRemoveOptionalWithAdditionalProperties_fail() throws Exception {
        assertConsumerIncompatible("test09a", "test09c");
    }

    @Test
    public void testPatternField_success() throws Exception {
        assertConsumerCompatible("test-pattern-field", "test-pattern-field-with-another-prop");
    }

    @Test
    public void testProducerCompatible_failWithAdditionalProperty() throws Exception {
        assertThrows(IncompatibleSchemaException.class,
                () -> new SchemaCompatibilityValidator(readSchema("test10b"), readSchema("test10a"),
                        new ProducerCompatibilityErrorHandler(false)).validate());
    }

    @Test
    public void testProducerCompatible_liberalAdditionalProperty() throws Exception {
        new SchemaCompatibilityValidator(readSchema("test10b"), readSchema("test10a"),
                new ProducerCompatibilityErrorHandler(true)).validate();
    }

    @Test
    public void testConsumerCompatible_liberalRemoveProperty() throws Exception {
        new SchemaCompatibilityValidator(readSchema("test10b"), readSchema("test10a"),
                new ConsumerCompatibilityErrorHandler(true)).validate();
    }

    // Expects the strict consumer compatibility check to reject the new schema.
    private static void assertConsumerIncompatible(String oldId, String newId) {
        assertThrows(IncompatibleSchemaException.class,
                () -> verifyConsumerCompatibleTo(readSchema(oldId), readSchema(newId)));
    }

    // Expects the strict consumer compatibility check to accept the new schema.
    private static void assertConsumerCompatible(String oldId, String newId) throws IncompatibleSchemaException {
        verifyConsumerCompatibleTo(readSchema(oldId), readSchema(newId));
    }

    // Loads a JSON schema from the schema-compatibility test resources by its base file name.
    private static Schema readSchema(String id) {
        String resource = "schema-compatibility/" + id + ".schema.json";
        try (InputStream in = SchemaCompatibilityValidatorTest.class.getClassLoader().getResourceAsStream(resource)) {
            String json = StreamUtils.copyToString(in, StandardCharsets.UTF_8);
            return SchemaLoader.load(new JSONObject(json));
        }
        catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    private static void verifyConsumerCompatibleTo(Schema oldSchema, Schema newSchema)
            throws IncompatibleSchemaException {
        new SchemaCompatibilityValidator(oldSchema, newSchema, new ConsumerCompatibilityErrorHandler(false)).validate();
    }
}
| 7,131 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ChangeBaseTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/changes/impl/ChangeBaseTest.java | package com.hermesworld.ais.galapagos.changes.impl;
import java.beans.PropertyDescriptor;
import java.io.File;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import com.hermesworld.ais.galapagos.changes.ApplyChangeContext;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionState;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.springframework.beans.BeanUtils;
class ChangeBaseTest {
/**
 * Tests that each private field in all of the subclasses of ChangeBase have a Getter for all of their private
 * fields, because this getter is highly important for correct serialization of the changes. This test makes heavy
 * use of Reflection!
 */
@Test
void testGettersForFields() {
String packageName = ChangeBase.class.getPackageName();
ClassLoader cl = ChangeBaseTest.class.getClassLoader();
// get class candidates
// NOTE: scans compiled .class files directly from target/classes, so this test depends on a
// prior Maven-style compile into that directory.
File f = new File("target/classes");
f = new File(f, packageName.replace('.', '/'));
File[] classFiles = f.listFiles((dir, name) -> name.endsWith(".class"));
assertNotNull(classFiles);
for (File cf : classFiles) {
String className = packageName + "." + cf.getName().replace(".class", "");
Class<?> clazz;
try {
clazz = cl.loadClass(className);
}
catch (Throwable t) {
// ignore! May be something else
continue;
}
// only check true subclasses of ChangeBase, not ChangeBase itself
if (ChangeBase.class.isAssignableFrom(clazz) && !clazz.getName().equals(ChangeBase.class.getName())) {
assertGettersForFields(clazz);
}
}
}
// Fails the test if any private non-static field of the given class lacks a readable getter.
private void assertGettersForFields(Class<?> clazz) {
Field[] fields = clazz.getDeclaredFields();
for (Field f : fields) {
int mods = f.getModifiers();
if (Modifier.isPrivate(mods) && !Modifier.isStatic(mods)) {
PropertyDescriptor pd = BeanUtils.getPropertyDescriptor(clazz, f.getName());
if (pd == null || pd.getReadMethod() == null) {
fail("No getter for property " + f.getName() + " in class " + clazz.getName());
}
}
}
}
/**
 * Verifies that applying a "subscribe topic" change calls the two-argument
 * addSubscription overload. The overloads that must NOT be used are stubbed to throw
 * UnsupportedOperationException.
 */
@Test
void testStageSubscription() throws Exception {
SubscriptionMetadata sub1 = new SubscriptionMetadata();
sub1.setId("123");
sub1.setClientApplicationId("app-1");
sub1.setTopicName("topic-1");
sub1.setState(SubscriptionState.PENDING);
AtomicBoolean createCalled = new AtomicBoolean();
SubscriptionService subscriptionService = mock(SubscriptionService.class);
when(subscriptionService.addSubscription(any(), any(), any(), any()))
.thenThrow(UnsupportedOperationException.class);
when(subscriptionService.addSubscription(any(), any())).then(inv -> {
createCalled.set(true);
SubscriptionMetadata sub = new SubscriptionMetadata();
sub.setId("999");
sub.setState(SubscriptionState.APPROVED);
sub.setTopicName("topic-1");
sub.setClientApplicationId("app-1");
return CompletableFuture.completedFuture(sub);
});
when(subscriptionService.updateSubscriptionState(any(), any(), any()))
.thenThrow(UnsupportedOperationException.class);
ApplyChangeContext context = mock(ApplyChangeContext.class);
when(context.getTargetEnvironmentId()).thenReturn("target");
when(context.getSubscriptionService()).thenReturn(subscriptionService);
ChangeBase.subscribeTopic(sub1).applyTo(context).get();
assertTrue(createCalled.get());
}
}
| 4,138 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ChangesServiceImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/changes/impl/ChangesServiceImplTest.java | package com.hermesworld.ais.galapagos.changes.impl;
import java.time.LocalDate;
import java.time.ZonedDateTime;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import com.hermesworld.ais.galapagos.changes.ChangeData;
import com.hermesworld.ais.galapagos.changes.ChangeType;
import com.hermesworld.ais.galapagos.events.GalapagosEventContext;
import com.hermesworld.ais.galapagos.events.TopicCreatedEvent;
import com.hermesworld.ais.galapagos.events.TopicEvent;
import com.hermesworld.ais.galapagos.events.TopicSchemaAddedEvent;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.security.AuditPrincipal;
import com.hermesworld.ais.galapagos.topics.SchemaMetadata;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.util.HasKey;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
class ChangesServiceImplTest {
private GalapagosEventContext context;
private ChangesServiceImpl impl;
private AuditPrincipal principal;
@BeforeEach
void buildMocks() {
KafkaClusters clusters = mock(KafkaClusters.class);
impl = new ChangesServiceImpl(clusters);
MockCluster cluster = new MockCluster("_test");
when(clusters.getEnvironment("_test")).thenReturn(Optional.of(cluster.getCluster()));
principal = new AuditPrincipal("testuser", "Test User");
context = mock(GalapagosEventContext.class);
when(context.getKafkaCluster()).thenReturn(cluster.getCluster());
when(context.getContextValue("principal")).thenReturn(Optional.of(principal));
}
@Test
void testChangeListener_createTopic() {
TopicMetadata metadata = buildTopicMetadata();
TopicCreateParams params = new TopicCreateParams(2, 2);
TopicCreatedEvent event = new TopicCreatedEvent(context, metadata, params);
impl.handleTopicCreated(event);
List<ChangeData> log = impl.getChangeLog("_test");
assertEquals(1, log.size());
ChangeData data = log.get(0);
assertEquals(principal.getFullName(), data.getPrincipalFullName());
assertEquals(ChangeType.TOPIC_CREATED, data.getChange().getChangeType());
}
@Test
void testChangeListener_deleteTopic() {
TopicMetadata metadata = buildTopicMetadata();
TopicEvent event = new TopicEvent(context, metadata);
impl.handleTopicDeleted(event);
List<ChangeData> log = impl.getChangeLog("_test");
assertEquals(1, log.size());
ChangeData data = log.get(0);
assertEquals(principal.getFullName(), data.getPrincipalFullName());
assertEquals(ChangeType.TOPIC_DELETED, data.getChange().getChangeType());
}
@Test
void testChangeListener_topicDescriptionChanged() {
TopicMetadata metadata = buildTopicMetadata();
TopicEvent event = new TopicEvent(context, metadata);
impl.handleTopicDescriptionChanged(event);
List<ChangeData> log = impl.getChangeLog("_test");
assertEquals(1, log.size());
ChangeData data = log.get(0);
assertEquals(principal.getFullName(), data.getPrincipalFullName());
assertEquals(ChangeType.TOPIC_DESCRIPTION_CHANGED, data.getChange().getChangeType());
}
@Test
void testChangeListener_topicDeprecated() {
TopicMetadata metadata = buildTopicMetadata();
metadata.setDeprecated(true);
metadata.setDeprecationText("do not use");
metadata.setEolDate(LocalDate.of(2099, 1, 1));
TopicEvent event = new TopicEvent(context, metadata);
impl.handleTopicDeprecated(event);
List<ChangeData> log = impl.getChangeLog("_test");
assertEquals(1, log.size());
ChangeData data = log.get(0);
assertEquals(principal.getFullName(), data.getPrincipalFullName());
assertEquals(ChangeType.TOPIC_DEPRECATED, data.getChange().getChangeType());
}
@Test
void testChangeListener_topicUndeprecated() {
TopicMetadata metadata = buildTopicMetadata();
TopicEvent event = new TopicEvent(context, metadata);
impl.handleTopicUndeprecated(event);
List<ChangeData> log = impl.getChangeLog("_test");
assertEquals(1, log.size());
ChangeData data = log.get(0);
assertEquals(principal.getFullName(), data.getPrincipalFullName());
assertEquals(ChangeType.TOPIC_UNDEPRECATED, data.getChange().getChangeType());
}
@Test
void testChangeListener_topicSchemaVersionPublished() {
    TopicMetadata topicMeta = buildTopicMetadata();

    // schema version 1 for the test topic, freshly created by "testuser"
    SchemaMetadata schemaMeta = new SchemaMetadata();
    schemaMeta.setId("99");
    schemaMeta.setCreatedBy("testuser");
    schemaMeta.setCreatedAt(ZonedDateTime.now());
    schemaMeta.setJsonSchema("{}");
    schemaMeta.setSchemaVersion(1);
    schemaMeta.setTopicName("testtopic");

    impl.handleTopicSchemaAdded(new TopicSchemaAddedEvent(context, topicMeta, schemaMeta));

    // publishing a schema version must produce exactly one matching change log entry
    List<ChangeData> changes = impl.getChangeLog("_test");
    assertEquals(1, changes.size());

    ChangeData entry = changes.get(0);
    assertEquals(principal.getFullName(), entry.getPrincipalFullName());
    assertEquals(ChangeType.TOPIC_SCHEMA_VERSION_PUBLISHED, entry.getChange().getChangeType());
}
/** Creates the minimal topic metadata shared by the change listener tests. */
private TopicMetadata buildTopicMetadata() {
    TopicMetadata result = new TopicMetadata();
    result.setName("testtopic");
    result.setDescription("Testtopic description");
    result.setOwnerApplicationId("123");
    result.setType(TopicType.EVENTS);
    return result;
}
/**
 * Test helper wrapping a mocked {@link KafkaCluster} that hands out in-memory
 * topic-based repositories, one per repository name, created lazily on first access.
 */
private static class MockCluster {

    // both fields are assigned once in the constructor and never reassigned
    private final KafkaCluster cluster;

    private final Map<String, TopicBasedRepositoryMock<?>> repositories = new HashMap<>();

    public MockCluster(String id) {
        cluster = mock(KafkaCluster.class);
        when(cluster.getId()).thenReturn(id);
        // first invocation argument is the repository name; reuse the same mock per name
        when(cluster.getRepository(any(), any())).then(inv -> repositories.computeIfAbsent(inv.getArgument(0),
                key -> new TopicBasedRepositoryMock<HasKey>()));
    }

    public KafkaCluster getCluster() {
        return cluster;
    }
}
}
| 6,758 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ChangeDeserializerTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/changes/impl/ChangeDeserializerTest.java | package com.hermesworld.ais.galapagos.changes.impl;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.hermesworld.ais.galapagos.changes.Change;
import com.hermesworld.ais.galapagos.changes.ChangeType;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.topics.SchemaMetadata;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import org.junit.jupiter.api.Test;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
class ChangeDeserializerTest {

    @Test
    void testCompoundChangeDeser() throws Exception {
        // ChangesDeserializer is registered in the ObjectMapper by this method
        ObjectMapper objectMapper = JsonUtil.newObjectMapper();

        // a topic creation change ...
        TopicMetadata topicMetadata = new TopicMetadata();
        topicMetadata.setName("topic-1");
        topicMetadata.setOwnerApplicationId("123");
        topicMetadata.setType(TopicType.EVENTS);
        ChangeBase createChange = ChangeBase.createTopic(topicMetadata, new TopicCreateParams(2, 1));

        // ... plus a schema publication change, combined into one compound change
        SchemaMetadata schemaMetadata = new SchemaMetadata();
        schemaMetadata.setId("999");
        schemaMetadata.setCreatedAt(ZonedDateTime.of(2020, 5, 26, 16, 19, 10, 0, ZoneOffset.UTC));
        schemaMetadata.setCreatedBy("testuser");
        schemaMetadata.setJsonSchema("{ }");
        schemaMetadata.setSchemaVersion(1);
        ChangeBase schemaChange = ChangeBase.publishTopicSchemaVersion("topic-1", schemaMetadata);

        ChangeBase compoundChange = ChangeBase.compoundChange(createChange, List.of(schemaChange));

        // round-trip through JSON and verify the change type survives deserialization
        String serialized = objectMapper.writeValueAsString(compoundChange);
        ChangeBase roundTripped = (ChangeBase) objectMapper.readValue(serialized, Change.class);
        assertEquals(ChangeType.COMPOUND_CHANGE, roundTripped.getChangeType());
    }
}
| 1,869 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationOwnerRequestTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/request/ApplicationOwnerRequestTest.java | package com.hermesworld.ais.galapagos.request;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import com.hermesworld.ais.galapagos.applications.RequestState;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import org.junit.jupiter.api.Test;
class ApplicationOwnerRequestTest {

    /**
     * Verifies that {@link ApplicationOwnerRequest} survives a JSON round trip, including
     * special characters, non-ASCII letters, and Windows newlines in field values.
     */
    @Test
    void testApplicationOwnerRequestSerializable() throws Exception {
        ObjectMapper mapper = JsonUtil.newObjectMapper();
        ApplicationOwnerRequest request = new ApplicationOwnerRequest();
        request.setApplicationId("{strange\" ÄpplicationId!");
        request.setUserName("User");
        request.setState(RequestState.APPROVED);
        request.setComments("Very long string\nwith windows newlines\r\n");

        String json = mapper.writeValueAsString(request);
        request = mapper.readValue(json, ApplicationOwnerRequest.class);

        assertEquals("{strange\" ÄpplicationId!", request.getApplicationId());
        assertEquals("User", request.getUserName());
        // assertEquals reports both values on failure, unlike assertTrue on an == check
        assertEquals(RequestState.APPROVED, request.getState());
        assertEquals("Very long string\nwith windows newlines\r\n", request.getComments());
    }
}
| 1,355 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CaseStrategyConverterBindingIntegrationTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/naming/config/CaseStrategyConverterBindingIntegrationTest.java | package com.hermesworld.ais.galapagos.naming.config;
import com.hermesworld.ais.galapagos.GalapagosTestConfig;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.context.annotation.Import;
import org.springframework.test.context.TestPropertySource;
import static org.junit.jupiter.api.Assertions.assertEquals;
@SpringBootTest
@TestPropertySource(locations = "classpath:test-case-strategies.properties")
@Import(GalapagosTestConfig.class)
// Spring Boot integration test: verifies that the CaseStrategyConverterBinding is applied
// when binding NamingConfig from properties (see test-case-strategies.properties).
class CaseStrategyConverterBindingIntegrationTest {
@Autowired
private NamingConfig config;
// mocked so the application context does not try to connect to real Kafka clusters
@SuppressWarnings("unused")
@MockBean
private KafkaClusters clusters;
@Test
void testConversion() {
// the "PascalCase" string from the properties file must bind to the PASCAL_CASE strategy
assertEquals(CaseStrategy.PASCAL_CASE, config.getNormalizationStrategy());
}
}
| 1,009 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CaseStrategyConverterBindingTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/naming/config/CaseStrategyConverterBindingTest.java | package com.hermesworld.ais.galapagos.naming.config;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
class CaseStrategyConverterBindingTest {

    /** Each supported strategy must be recognized by its canonical spelling. */
    @Test
    void testValidValues() {
        CaseStrategyConverterBinding converter = new CaseStrategyConverterBinding();
        assertEquals(CaseStrategy.PASCAL_CASE, converter.convert("PascalCase"));
        assertEquals(CaseStrategy.CAMEL_CASE, converter.convert("camelCase"));
        assertEquals(CaseStrategy.KEBAB_CASE, converter.convert("kebab-case"));
        assertEquals(CaseStrategy.SNAKE_CASE, converter.convert("SNAKE_CASE"));
        assertEquals(CaseStrategy.LOWERCASE, converter.convert("lowercase"));
    }

    /** Spellings are case-sensitive: "pascalCase" is not a valid strategy name. */
    @Test
    void testNoCaseConversion() {
        // converter created outside the lambda so assertThrows covers only convert()
        CaseStrategyConverterBinding converter = new CaseStrategyConverterBinding();
        assertThrows(IllegalArgumentException.class, () -> converter.convert("pascalCase"));
    }

    /** The enum constant names themselves must not be accepted as input. */
    @Test
    void testNoEnumNameUse() {
        CaseStrategyConverterBinding converter = new CaseStrategyConverterBinding();
        assertThrows(IllegalArgumentException.class, () -> converter.convert("PASCAL_CASE"));
    }

    /** Arbitrary unknown values must be rejected. */
    @Test
    void testInvalidValue() {
        CaseStrategyConverterBinding converter = new CaseStrategyConverterBinding();
        assertThrows(IllegalArgumentException.class, () -> converter.convert("someValue"));
    }
}
| 1,553 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
NamingServiceImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/naming/impl/NamingServiceImplTest.java | package com.hermesworld.ais.galapagos.naming.impl;
import com.hermesworld.ais.galapagos.applications.BusinessCapability;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.naming.ApplicationPrefixes;
import com.hermesworld.ais.galapagos.naming.InvalidTopicNameException;
import com.hermesworld.ais.galapagos.naming.config.AdditionNamingRules;
import com.hermesworld.ais.galapagos.naming.config.CaseStrategy;
import com.hermesworld.ais.galapagos.naming.config.NamingConfig;
import com.hermesworld.ais.galapagos.naming.config.TopicNamingConfig;
import com.hermesworld.ais.galapagos.topics.TopicType;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.util.List;
import java.util.Set;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
class NamingServiceImplTest {

    private AdditionNamingRules rules;
    private NamingConfig config;
    private KnownApplication app;
    private BusinessCapability cap1;
    private BusinessCapability cap2;

    /** Sets up a naming config for EVENTS topics plus a mocked application with two capabilities. */
    @BeforeEach
    void feedMocks() {
        rules = new AdditionNamingRules();
        rules.setAllowedSeparators(".");
        rules.setAllowPascalCase(true);
        rules.setAllowKebabCase(true);
        rules.setAllowCamelCase(false);
        TopicNamingConfig eventConfig = new TopicNamingConfig();
        eventConfig.setNameFormat("de.hlg.events.{business-capability}.{addition}");
        eventConfig.setAdditionRules(rules);
        config = new NamingConfig();
        config.setEvents(eventConfig);
        config.setInternalTopicPrefixFormat("{app-or-alias}.internal.");
        config.setConsumerGroupPrefixFormat("groups.{app-or-alias}.");
        config.setTransactionalIdPrefixFormat("transactions.{app-or-alias}.");
        app = mock(KnownApplication.class);
        when(app.getName()).thenReturn("Track & Trace");
        when(app.getAliases()).thenReturn(Set.of());
        cap1 = mock(BusinessCapability.class);
        when(cap1.getName()).thenReturn("Orders");
        cap2 = mock(BusinessCapability.class);
        when(cap2.getName()).thenReturn("Sales");
        when(app.getBusinessCapabilities()).thenReturn(List.of(cap1, cap2));
    }

    @Test
    void testAllowInternalTopicNamesForConsumerGroups() {
        // when enabled, internal topic prefixes are also valid consumer group prefixes
        config.setAllowInternalTopicNamesAsConsumerGroups(true);
        NamingServiceImpl service = new NamingServiceImpl(config);
        ApplicationPrefixes prefixes = service.getAllowedPrefixes(app);
        assertEquals(1, prefixes.getInternalTopicPrefixes().size());
        assertEquals(2, prefixes.getConsumerGroupPrefixes().size());
        assertTrue(prefixes.getInternalTopicPrefixes().contains("track-trace.internal."));
        assertTrue(prefixes.getConsumerGroupPrefixes().contains("track-trace.internal."));
        assertTrue(prefixes.getConsumerGroupPrefixes().contains("groups.track-trace."));
        // when disabled, only the dedicated consumer group prefix remains
        config.setAllowInternalTopicNamesAsConsumerGroups(false);
        service = new NamingServiceImpl(config);
        prefixes = service.getAllowedPrefixes(app);
        assertEquals(1, prefixes.getInternalTopicPrefixes().size());
        assertEquals(1, prefixes.getConsumerGroupPrefixes().size());
        assertTrue(prefixes.getInternalTopicPrefixes().contains("track-trace.internal."));
        assertTrue(prefixes.getConsumerGroupPrefixes().contains("groups.track-trace."));
    }

    @Test
    void testGetTopicNameSuggestion() {
        NamingServiceImpl service = new NamingServiceImpl(config);
        assertEquals("de.hlg.events.orders.my-topic", service.getTopicNameSuggestion(TopicType.EVENTS, app, cap1));
        assertEquals("de.hlg.events.sales.my-topic", service.getTopicNameSuggestion(TopicType.EVENTS, app, cap2));
        assertEquals("track-trace.internal.my-topic", service.getTopicNameSuggestion(TopicType.INTERNAL, app, null));
        // check that different normalization is considered correctly
        config.setNormalizationStrategy(CaseStrategy.SNAKE_CASE);
        assertEquals("de.hlg.events.ORDERS.MY_TOPIC", service.getTopicNameSuggestion(TopicType.EVENTS, app, cap1));
        assertEquals("TRACK_TRACE.internal.MY_TOPIC", service.getTopicNameSuggestion(TopicType.INTERNAL, app, null));
    }

    @Test
    void testGetAllowedPrefixes() {
        // with an alias, prefixes are generated for both the application name and the alias
        when(app.getAliases()).thenReturn(Set.of("tt"));
        NamingServiceImpl service = new NamingServiceImpl(config);
        ApplicationPrefixes prefixes = service.getAllowedPrefixes(app);
        assertEquals(2, prefixes.getInternalTopicPrefixes().size());
        assertEquals(2, prefixes.getConsumerGroupPrefixes().size());
        assertEquals(2, prefixes.getTransactionIdPrefixes().size());
        assertTrue(prefixes.getInternalTopicPrefixes().contains("tt.internal."));
        assertTrue(prefixes.getInternalTopicPrefixes().contains("track-trace.internal."));
        assertTrue(prefixes.getConsumerGroupPrefixes().contains("groups.track-trace."));
        assertTrue(prefixes.getConsumerGroupPrefixes().contains("groups.tt."));
        assertTrue(prefixes.getTransactionIdPrefixes().contains("transactions.track-trace."));
        assertTrue(prefixes.getTransactionIdPrefixes().contains("transactions.tt."));
        // {application} (instead of {app-or-alias}) must NOT expand to aliases
        config.setConsumerGroupPrefixFormat("groups.{application}.");
        service = new NamingServiceImpl(config);
        prefixes = service.getAllowedPrefixes(app);
        assertEquals(2, prefixes.getInternalTopicPrefixes().size());
        assertEquals(1, prefixes.getConsumerGroupPrefixes().size());
        assertEquals(2, prefixes.getTransactionIdPrefixes().size());
        assertTrue(prefixes.getConsumerGroupPrefixes().contains("groups.track-trace."));
    }

    @Test
    void testValidateTopicName() throws InvalidTopicNameException {
        NamingServiceImpl service = new NamingServiceImpl(config);
        service.validateTopicName("de.hlg.events.orders.my-custom-order", TopicType.EVENTS, app);
        service.validateTopicName("de.hlg.events.sales.MyCustomOrder", TopicType.EVENTS, app);
        service.validateTopicName("de.hlg.events.orders.My.Custom.Order", TopicType.EVENTS, app);
        // must be valid - could be "kebab-case" with only one word
        service.validateTopicName("de.hlg.events.sales.mycustomorder", TopicType.EVENTS, app);
        // camelCase is disabled in the rules, and snake_case was never enabled
        assertThrows(InvalidTopicNameException.class,
                () -> service.validateTopicName("de.hlg.events.orders.myCustomOrder", TopicType.EVENTS, app));
        assertThrows(InvalidTopicNameException.class,
                () -> service.validateTopicName("de.hlg.events.orders.My-Custom-Order", TopicType.EVENTS, app));
        assertThrows(InvalidTopicNameException.class,
                () -> service.validateTopicName("de.hlg.events.orders.MY_CUSTOM_ORDER", TopicType.EVENTS, app));
        // wrong prefix
        assertThrows(InvalidTopicNameException.class,
                () -> service.validateTopicName("de.hlg.orders.my-custom-order", TopicType.EVENTS, app));
        // invalid business capability
        assertThrows(InvalidTopicNameException.class,
                () -> service.validateTopicName("de.hlg.events.shipment.my-custom-order", TopicType.EVENTS, app));
        // with all case styles disabled, only single lowercase words remain valid
        rules.setAllowPascalCase(false);
        rules.setAllowKebabCase(false);
        service.validateTopicName("de.hlg.events.orders.mycustomorder", TopicType.EVENTS, app);
        assertThrows(InvalidTopicNameException.class,
                () -> service.validateTopicName("de.hlg.events.orders.my-custom-order", TopicType.EVENTS, app));
        assertThrows(InvalidTopicNameException.class,
                () -> service.validateTopicName("de.hlg.events.orders.MyCustomOrder", TopicType.EVENTS, app));
        rules.setAllowSnakeCase(true);
        service.validateTopicName("de.hlg.events.orders.MY_CUSTOM_ORDER", TopicType.EVENTS, app);
        assertThrows(InvalidTopicNameException.class,
                () -> service.validateTopicName("de.hlg.events.orders.mycustomorder", TopicType.EVENTS, app));
    }

    @Test
    void testNormalize_simpleCases() {
        NamingConfig config = mock(NamingConfig.class);
        when(config.getNormalizationStrategy()).thenReturn(CaseStrategy.PASCAL_CASE);
        NamingServiceImpl service = new NamingServiceImpl(config);
        assertEquals("TrackTrace", service.normalize("Track & Trace"));
        when(config.getNormalizationStrategy()).thenReturn(CaseStrategy.CAMEL_CASE);
        assertEquals("trackTrace", service.normalize("Track & Trace"));
        when(config.getNormalizationStrategy()).thenReturn(CaseStrategy.KEBAB_CASE);
        assertEquals("track-trace", service.normalize("Track & Trace"));
        when(config.getNormalizationStrategy()).thenReturn(CaseStrategy.SNAKE_CASE);
        assertEquals("TRACK_TRACE", service.normalize("Track & Trace"));
        when(config.getNormalizationStrategy()).thenReturn(CaseStrategy.LOWERCASE);
        assertEquals("tracktrace", service.normalize("Track & Trace"));
    }

    @Test
    void testNormalize_lead_trail() {
        // leading / trailing whitespace and special characters must be stripped
        NamingConfig config = mock(NamingConfig.class);
        when(config.getNormalizationStrategy()).thenReturn(CaseStrategy.KEBAB_CASE);
        NamingServiceImpl service = new NamingServiceImpl(config);
        assertEquals("lead-space", service.normalize(" Lead Space"));
        assertEquals("lead-special", service.normalize("!!Lead Special"));
        assertEquals("lead-and-trail-space", service.normalize(" Lead and Trail Space "));
        assertEquals("trail-special", service.normalize("Trail Special?# "));
    }

    @Test
    void testNormalize_localization() {
        // umlauts and accented characters must be transliterated to ASCII
        NamingConfig config = mock(NamingConfig.class);
        when(config.getNormalizationStrategy()).thenReturn(CaseStrategy.KEBAB_CASE);
        NamingServiceImpl service = new NamingServiceImpl(config);
        assertEquals("neue-auftraege", service.normalize("Neue Aufträge"));
        assertEquals("tres-bien", service.normalize("Trés bien"));
        assertEquals("neue-aenderungen", service.normalize("Neue »Änderungen«"));
        when(config.getNormalizationStrategy()).thenReturn(CaseStrategy.PASCAL_CASE);
        assertEquals("NeueAenderungen", service.normalize("Neue änderungen"));
    }
}
| 11,075 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
SecurityConfigIntegrationTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/security/SecurityConfigIntegrationTest.java | package com.hermesworld.ais.galapagos.security;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.controller.ApplicationsController;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.security.config.GalapagosSecurityProperties;
import com.hermesworld.ais.galapagos.staging.StagingService;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.security.oauth2.client.servlet.OAuth2ClientAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.boot.test.web.client.TestRestTemplate;
import org.springframework.boot.test.web.server.LocalServerPort;
import org.springframework.http.*;
import org.springframework.security.oauth2.jwt.Jwt;
import org.springframework.security.oauth2.jwt.JwtDecoder;
import java.net.URI;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
@SpringBootTest(classes = { SecurityConfig.class, ApplicationsController.class,
GalapagosSecurityProperties.class }, webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
@EnableAutoConfiguration(exclude = OAuth2ClientAutoConfiguration.class)
// Integration test for the API security configuration: checks that endpoints require
// authentication and enforce USER / ADMIN roles.
class SecurityConfigIntegrationTest {
@LocalServerPort
private int port;
@Autowired
private TestRestTemplate restTemplate;
// the following beans are mocked only so the application context can start
@SuppressWarnings("unused")
@MockBean
private ApplicationsService applicationsService;
@SuppressWarnings("unused")
@MockBean
private StagingService stagingService;
@SuppressWarnings("unused")
@MockBean
private KafkaClusters kafkaClusters;
@MockBean
private JwtDecoder jwtDecoder;
@BeforeEach
void initJwtStuff() {
// Fake decoder: the raw bearer token string is interpreted as dot-separated role
// names (e.g. token "USER.ADMIN" -> my_roles claim "USER ADMIN"), so tests can
// select roles simply by choosing the token value.
when(jwtDecoder.decode(any())).thenAnswer(inv -> {
String token = inv.getArgument(0);
Map<String, Object> headers = Map.of("alg", "HS256", "typ", "JWT");
Map<String, Object> claims = Map.of("sub", "abc123", "iat", "123", "my_roles", token.replace(".", " "));
return new Jwt(token, Instant.now(), Instant.now().plus(1, ChronoUnit.DAYS), headers, claims);
});
}
@Test
void test_apiAccessProtected() {
// no Authorization header at all -> 401
ResponseEntity<String> response = restTemplate.getForEntity("http://localhost:" + port + "/api/me/requests",
String.class);
assertEquals(HttpStatus.UNAUTHORIZED.value(), response.getStatusCode().value());
}
@Test
void test_apiAccess_missingUserRole() {
testApiWithRole("/api/me/requests", "NOT_A_USER", HttpStatus.FORBIDDEN.value());
}
@Test
void test_apiAccess_withUserRole() {
testApiWithRole("/api/me/requests", "USER", HttpStatus.OK.value());
}
@Test
void test_apiAccess_adminEndpoint_withUserRole() {
testApiWithRole("/api/admin/requests", "USER", HttpStatus.FORBIDDEN.value());
}
@Test
void test_apiAccess_adminEndpoint_withAdminRole() {
testApiWithRole("/api/admin/requests", "USER.ADMIN", HttpStatus.OK.value());
}
// Performs a GET on the endpoint with a bearer token encoding the given role(s)
// (see initJwtStuff) and asserts the expected HTTP status code.
private void testApiWithRole(String endpoint, String roleName, int expectedCode) {
String url = "http://localhost:" + port + endpoint;
HttpHeaders headers = new HttpHeaders();
headers.add(HttpHeaders.AUTHORIZATION, "Bearer " + roleName);
HttpEntity<String> request = new RequestEntity<>(headers, HttpMethod.GET, URI.create(url));
ResponseEntity<String> response = restTemplate.exchange(url, HttpMethod.GET, request, String.class);
assertEquals(expectedCode, response.getStatusCode().value());
}
}
| 4,026 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
OAuthConfigControllerIntegrationTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/security/impl/OAuthConfigControllerIntegrationTest.java | package com.hermesworld.ais.galapagos.security.impl;
import com.github.tomakehurst.wiremock.client.ResponseDefinitionBuilder;
import com.github.tomakehurst.wiremock.client.WireMock;
import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo;
import com.github.tomakehurst.wiremock.junit5.WireMockTest;
import com.hermesworld.ais.galapagos.security.SecurityConfig;
import com.hermesworld.ais.galapagos.security.config.GalapagosSecurityProperties;
import org.json.JSONObject;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
import org.springframework.boot.autoconfigure.security.oauth2.client.servlet.OAuth2ClientAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.boot.test.web.client.TestRestTemplate;
import org.springframework.boot.test.web.server.LocalServerPort;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.security.oauth2.jwt.JwtDecoder;
import org.springframework.util.StreamUtils;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import static java.net.HttpURLConnection.HTTP_OK;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.when;
@SpringBootTest(classes = { OAuthConfigController.class, SecurityConfig.class,
GalapagosSecurityProperties.class }, webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
@EnableAutoConfiguration(exclude = OAuth2ClientAutoConfiguration.class)
@WireMockTest
// Integration test for the /oauth2/config.json endpoint. A WireMock server plays the
// role of the identity provider and serves the OpenID discovery document.
class OAuthConfigControllerIntegrationTest {
@LocalServerPort
private int port;
@Autowired
private TestRestTemplate restTemplate;
@MockBean
private OAuth2ClientProperties oauthProperties;
// mocked only so the security auto-configuration can start; not used by the test
@MockBean
@SuppressWarnings("unused")
private JwtDecoder jwtDecoder;
@BeforeEach
void initOauthPropertiesAndServer(WireMockRuntimeInfo wireMockInfo) {
// serve the OpenID discovery document under the well-known Keycloak realm path
WireMock wireMock = wireMockInfo.getWireMock();
wireMock.register(WireMock.get("/auth/realms/galapagos/.well-known/openid-configuration")
.willReturn(okForPlainJson(readOpenidConfig(wireMockInfo.getHttpPort()))));
// register a single OAuth client ("keycloak") whose issuer points at WireMock
Map<String, OAuth2ClientProperties.Registration> oauthMap = new HashMap<>();
OAuth2ClientProperties.Registration reg = new OAuth2ClientProperties.Registration();
reg.setClientId("test-webapp");
reg.setProvider("keycloak");
reg.setScope(Set.of("email", "openid", "profile"));
oauthMap.put("keycloak", reg);
Map<String, OAuth2ClientProperties.Provider> providerMap = new HashMap<>();
OAuth2ClientProperties.Provider provider = new OAuth2ClientProperties.Provider();
provider.setIssuerUri("http://localhost:" + wireMockInfo.getHttpPort() + "/auth/realms/galapagos");
providerMap.put("keycloak", provider);
when(oauthProperties.getRegistration()).thenReturn(oauthMap);
when(oauthProperties.getProvider()).thenReturn(providerMap);
}
@Test
void test_getOauthConfig() {
// the endpoint must expose claim names and client ID derived from the OAuth config
ResponseEntity<String> response = restTemplate.getForEntity("http://localhost:" + port + "/oauth2/config.json",
String.class);
assertTrue(response.getStatusCode().is2xxSuccessful());
JSONObject config = new JSONObject(response.getBody());
assertEquals("test_username", config.get("userNameClaim"));
assertEquals("my_roles", config.get("rolesClaim"));
assertEquals("display_name", config.get("displayNameClaim"));
assertEquals("test-webapp", config.get("clientId"));
}
// Loads the openid-config.json test resource and rewrites its host references to the
// WireMock port, so all discovery URLs point at the mocked server.
private String readOpenidConfig(int httpPort) {
try (InputStream in = OAuthConfigControllerIntegrationTest.class.getClassLoader()
.getResourceAsStream("openid-config.json")) {
return StreamUtils.copyToString(in, StandardCharsets.UTF_8).replace("http://keycloak/",
"http://localhost:" + httpPort + "/");
}
catch (IOException e) {
throw new RuntimeException(e);
}
}
// Builds a 200 response carrying the given JSON body with the proper content type.
private static ResponseDefinitionBuilder okForPlainJson(String jsonSource) {
return ResponseDefinitionBuilder.responseDefinition().withStatus(HTTP_OK).withBody(jsonSource)
.withHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE);
}
}
CustomLinksConfigTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/uisupport/controller/CustomLinksConfigTest.java | package com.hermesworld.ais.galapagos.uisupport.controller;
import com.hermesworld.ais.galapagos.uisupport.controller.CustomLinkConfig;
import static org.junit.jupiter.api.Assertions.assertThrows;
import com.hermesworld.ais.galapagos.uisupport.controller.CustomLinksConfig;
import com.hermesworld.ais.galapagos.uisupport.controller.LinkType;
import org.junit.jupiter.api.Test;
import java.util.List;
class CustomLinksConfigTest {

    @Test
    void testCustomLinksConfigID() {
        // a link without an ID must be rejected
        assertThrows(RuntimeException.class, () -> {
            CustomLinkConfig link = newLinkConfig(null, "www.test.de", "Test-Label", LinkType.OTHER);
            new CustomLinksConfig().setLinks(List.of(link));
        });
    }

    @Test
    void testCustomLinksConfigH_Ref() {
        // a link without an href must be rejected
        assertThrows(RuntimeException.class, () -> {
            CustomLinkConfig link = newLinkConfig("42", null, "Test-Label", LinkType.OTHER);
            new CustomLinksConfig().setLinks(List.of(link));
        });
    }

    @Test
    void testCustomLinksConfigLabel() {
        // a link without a label must be rejected
        assertThrows(RuntimeException.class, () -> {
            CustomLinkConfig link = newLinkConfig("42", "www.test.de", null, LinkType.OTHER);
            new CustomLinksConfig().setLinks(List.of(link));
        });
    }

    @Test
    void testCustomLinksConfigLinkType() {
        // a link without a link type must be rejected
        assertThrows(RuntimeException.class, () -> {
            CustomLinkConfig link = newLinkConfig("42", "www.test.de", "Test-Label", null);
            new CustomLinksConfig().setLinks(List.of(link));
        });
    }

    @Test
    void testCustomLinkConfigPositive() {
        // a fully populated link must pass validation without throwing
        CustomLinkConfig link = newLinkConfig("42", "www.test.de", "Test-Label", LinkType.OTHER);
        new CustomLinksConfig().setLinks(List.of(link));
    }

    /** Builds a CustomLinkConfig with the given field values; any argument may be null. */
    private CustomLinkConfig newLinkConfig(String id, String href, String label, LinkType linkType) {
        CustomLinkConfig link = new CustomLinkConfig();
        link.setId(id);
        link.setHref(href);
        link.setLabel(label);
        link.setLinkType(linkType);
        return link;
    }
}
| 2,404 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
UISupportControllerTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/uisupport/controller/UISupportControllerTest.java | package com.hermesworld.ais.galapagos.uisupport.controller;
import com.hermesworld.ais.galapagos.GalapagosTestConfig;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.context.annotation.Import;
import java.util.List;
import static org.junit.jupiter.api.Assertions.*;
@SpringBootTest
@Import(GalapagosTestConfig.class)
class UISupportControllerTest {
// mocked so the application context does not try to connect to real Kafka clusters
@SuppressWarnings("unused")
@MockBean
private KafkaClusters kafkaClusters;
@Autowired
private UISupportController testController;
@Test
void testCustomLinks() {
// every configured custom link must have a non-blank ID, href, label, and link type
List<CustomLinkConfig> links = testController.getCustomLinks();
assertNotNull(links);
for (CustomLinkConfig link : links) {
assertNotNull(link.getId());
assertNotNull(link.getHref());
assertFalse(link.getHref().isBlank());
assertNotNull(link.getLabel());
assertFalse(link.getLabel().isBlank());
assertNotNull(link.getLinkType());
}
}
@Test
void testKafkaDoc() {
// getSupportedKafkaConfigs() does not touch any dependency, so a controller
// instance constructed with all-null collaborators is sufficient here
List<KafkaConfigDescriptionDto> result = new UISupportController(null, null, null, null, null, null, null)
.getSupportedKafkaConfigs();
assertNotNull(result);
// expect a reasonable number of documented Kafka configs with substantial descriptions
assertTrue(result.size() > 10);
assertTrue(result.stream().filter(d -> d.getConfigDescription().length() > 20).count() > 10);
}
}
| 1,626 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
GalapagosEventManagerMock.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/events/GalapagosEventManagerMock.java | package com.hermesworld.ais.galapagos.events;
import static org.mockito.Mockito.mock;
import java.util.ArrayList;
import java.util.List;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.util.FutureUtil;
/**
 * Test double for {@link GalapagosEventManager}: every created event sink is a Mockito mock
 * whose calls are recorded and answered with an already-completed future, so tests can
 * verify which events were fired without real event processing.
 */
public class GalapagosEventManagerMock implements GalapagosEventManager {

    // assigned once and only appended to; records calls across ALL created sinks
    private final List<InvocationOnMock> sinkInvocations = new ArrayList<>();

    @Override
    public GalapagosEventSink newEventSink(KafkaCluster kafkaCluster) {
        // default answer for every sink method: log the invocation, return a no-op future
        Answer<?> logCall = inv -> {
            sinkInvocations.add(inv);
            return FutureUtil.noop();
        };
        return mock(GalapagosEventSink.class, logCall);
    }

    /** Returns the live (mutable) list of all invocations recorded on any sink. */
    public List<InvocationOnMock> getSinkInvocations() {
        return sinkInvocations;
    }
}
| 856 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationsControllerTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/applications/ApplicationsControllerTest.java | package com.hermesworld.ais.galapagos.applications;
import com.hermesworld.ais.galapagos.applications.controller.ApplicationsController;
import com.hermesworld.ais.galapagos.applications.controller.CertificateRequestDto;
import com.hermesworld.ais.galapagos.applications.controller.CertificateResponseDto;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.staging.Staging;
import com.hermesworld.ais.galapagos.staging.StagingResult;
import com.hermesworld.ais.galapagos.staging.StagingService;
import com.hermesworld.ais.galapagos.staging.impl.StagingServiceImpl;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.SchemaMetadata;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import org.junit.jupiter.api.Test;
import java.io.OutputStream;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.*;
/**
 * Tests for {@link ApplicationsController}: certificate file naming on registration, and staging
 * behaviour when an API topic is missing a schema.
 */
class ApplicationsControllerTest {

    private final ApplicationsService applicationsService = mock(ApplicationsService.class);

    private final StagingService stagingService = mock(StagingService.class);

    private final KafkaClusters kafkaClusters = mock(KafkaClusters.class);

    @Test
    void testUpdateApplicationCertificateDependentOnStageName() {
        // Arrange
        String applicationId = "testapp-1";
        String environmentId = "devtest";
        CertificateRequestDto certificateRequestDto = new CertificateRequestDto();
        certificateRequestDto.setGenerateKey(true);
        KnownApplication knownApp = mock(KnownApplication.class);
        when(knownApp.getName()).thenReturn("TestApp");
        KafkaEnvironmentConfig kafkaEnvironmentConfig = mock(KafkaEnvironmentConfig.class);
        ApplicationsController controller = new ApplicationsController(applicationsService, stagingService,
                kafkaClusters);
        when(applicationsService.getKnownApplication(any())).thenReturn(Optional.of(knownApp));
        when(applicationsService.isUserAuthorizedFor(any())).thenReturn(true);
        when(kafkaClusters.getEnvironmentMetadata(environmentId)).thenReturn(Optional.of(kafkaEnvironmentConfig));
        // Registration writes the generated certificate bytes to the OutputStream passed as 4th argument.
        when(applicationsService.registerApplicationOnEnvironment(any(), any(), any(), any())).then(inv -> {
            OutputStream os = inv.getArgument(3);
            os.write(new byte[] { 1, 2, 3, 4 });
            os.flush();
            return FutureUtil.noop();
        });

        // Act
        CertificateResponseDto testee = controller.updateApplicationCertificate(applicationId, environmentId,
                certificateRequestDto);

        // Assert: file name combines application name (lower-cased "TestApp") and environment ID
        assertEquals("testapp_devtest.p12", testee.getFileName());
    }

    @Test
    void testStagingWithoutSchema_include_failure() throws Exception {
        TopicService topicService = mock(TopicService.class);
        SubscriptionService subscriptionService = mock(SubscriptionService.class);

        // Two environments: staging goes dev -> test
        KafkaEnvironmentConfig env1 = mock(KafkaEnvironmentConfig.class);
        KafkaEnvironmentConfig env2 = mock(KafkaEnvironmentConfig.class);
        when(env1.getId()).thenReturn("dev");
        when(env2.getId()).thenReturn("test");
        List<? extends KafkaEnvironmentConfig> ls = List.of(env1, env2);
        doReturn(ls).when(kafkaClusters).getEnvironmentsMetadata();

        // topic-1 has NO schema, topic-2 has one
        TopicMetadata topic1 = new TopicMetadata();
        topic1.setName("app1.internal.topic-1");
        topic1.setOwnerApplicationId("app-1");
        topic1.setType(TopicType.EVENTS);
        TopicMetadata topic2 = new TopicMetadata();
        topic2.setName("app1.internal.topic-2");
        topic2.setOwnerApplicationId("app-1");
        topic2.setType(TopicType.EVENTS);
        SchemaMetadata schema1 = new SchemaMetadata();
        schema1.setId("schema-1");
        schema1.setTopicName("app1.internal.topic-2");
        schema1.setJsonSchema("{ }");
        schema1.setSchemaVersion(1);

        ApplicationMetadata appMetadata = new ApplicationMetadata();
        appMetadata.setApplicationId("app-1");
        appMetadata.setInternalTopicPrefixes(List.of("app1.internal."));

        when(topicService.listTopics("dev")).thenReturn(List.of(topic1, topic2));
        when(topicService.getTopicSchemaVersions("dev", "app1.internal.topic-2")).thenReturn(List.of(schema1));
        when(topicService.buildTopicCreateParams("dev", "app1.internal.topic-2"))
                .thenReturn(CompletableFuture.completedFuture(new TopicCreateParams(2, 1)));
        when(topicService.createTopic(any(), any(), any(), any())).thenReturn(CompletableFuture.completedFuture(null));
        when(topicService.addTopicSchemaVersion(any(), any(), any()))
                .thenReturn(CompletableFuture.completedFuture(null));
        when(applicationsService.isUserAuthorizedFor("app-1")).thenReturn(true);
        when(applicationsService.getApplicationMetadata("test", "app-1")).thenReturn(Optional.of(appMetadata));

        // Use a REAL staging service implementation here; renamed so it does not shadow
        // the mock field "stagingService" of this test class.
        StagingService realStagingService = new StagingServiceImpl(kafkaClusters, applicationsService, topicService,
                subscriptionService);
        ApplicationsController controller = new ApplicationsController(applicationsService, realStagingService,
                kafkaClusters);

        Staging staging = controller.describeStaging("dev", "app-1");
        assertEquals(2, staging.getChanges().size());

        // must not succeed for first topic because no schema for API topic
        List<StagingResult> result = controller.performStaging("dev", "app-1",
                JsonUtil.newObjectMapper().writeValueAsString(staging.getChanges()));
        assertEquals(2, result.size());
        assertFalse(result.get(0).isStagingSuccessful());
        assertTrue(result.get(1).isStagingSuccessful());
    }
}
| 6,311 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationsControllerTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/applications/controller/ApplicationsControllerTest.java | package com.hermesworld.ais.galapagos.applications.controller;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.staging.StagingService;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.util.List;
import java.util.Optional;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests for {@link ApplicationsController#getRegisteredApplications(String)}.
 */
class ApplicationsControllerTest {

    private ApplicationsService applicationsService;

    private StagingService stagingService;

    private KafkaClusters kafkaClusters;

    @BeforeEach
    void feedMocks() {
        applicationsService = mock(ApplicationsService.class);
        stagingService = mock(StagingService.class);
        kafkaClusters = mock(KafkaClusters.class);
    }

    @Test
    void getRegisteredApplications_knownAppMissing() {
        ApplicationsController controller = new ApplicationsController(applicationsService, stagingService,
                kafkaClusters);

        // GIVEN two registered applications, of which only "ex1" is also a "known" application
        ApplicationMetadata knownMeta = new ApplicationMetadata();
        knownMeta.setApplicationId("ex1");
        ApplicationMetadata unknownMeta = new ApplicationMetadata();
        unknownMeta.setApplicationId("nex1");
        when(applicationsService.getAllApplicationMetadata("test")).thenReturn(List.of(knownMeta, unknownMeta));

        KnownApplication existingApp = mock(KnownApplication.class);
        when(existingApp.getName()).thenReturn("Existing App");
        when(existingApp.getId()).thenReturn("ex1");
        when(applicationsService.getKnownApplication("ex1")).thenReturn(Optional.of(existingApp));
        // "nex1" is "unknown", e.g. due to an erroneous application import
        when(applicationsService.getKnownApplication("nex1")).thenReturn(Optional.empty());

        // WHEN listing the registered applications
        List<KnownApplicationDto> result = controller.getRegisteredApplications("test");

        // THEN only the known application is returned
        assertEquals(1, result.size());
        assertEquals("ex1", result.get(0).getId());
    }
}
| 2,337 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationsServiceRequestStatesImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/applications/impl/ApplicationsServiceRequestStatesImplTest.java | package com.hermesworld.ais.galapagos.applications.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import com.hermesworld.ais.galapagos.applications.RequestState;
import com.hermesworld.ais.galapagos.events.GalapagosEventManager;
import com.hermesworld.ais.galapagos.events.GalapagosEventSink;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.impl.ConnectedKafkaClusters;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.util.TimeService;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests for the state machine of {@link ApplicationOwnerRequest} objects, as implemented by
 * {@link ApplicationsServiceImpl}. Each test seeds the repository with a request in a given state (or with no
 * request at all), performs exactly one operation (submit, update, or cancel), and verifies the resulting
 * repository contents. Common construction and verification logic is factored into private helpers.
 */
public class ApplicationsServiceRequestStatesImplTest {

    private static final String testUserName = "Alice";

    private static final String testAppId = "1";

    private final GalapagosEventManager eventManager = mock(GalapagosEventManager.class);

    private KafkaClusters kafkaEnvironments;

    private CurrentUserService currentUserService;

    private final TopicBasedRepositoryMock<ApplicationOwnerRequest> repository = new TopicBasedRepositoryMock<>();

    @BeforeEach
    void feedCommonMocks() {
        currentUserService = mock(CurrentUserService.class);
        when(currentUserService.getCurrentUserName()).thenReturn(Optional.of(testUserName));
        KnownApplicationImpl knownApp = new KnownApplicationImpl(testAppId, "App1");
        TopicBasedRepositoryMock<KnownApplicationImpl> appRepository = new TopicBasedRepositoryMock<>();
        appRepository.save(knownApp);
        when(eventManager.newEventSink(any())).thenReturn(mock(GalapagosEventSink.class));
        kafkaEnvironments = mock(ConnectedKafkaClusters.class);
        when(kafkaEnvironments.getGlobalRepository("application-owner-requests", ApplicationOwnerRequest.class))
                .thenReturn(repository);
        when(kafkaEnvironments.getGlobalRepository("known-applications", KnownApplicationImpl.class))
                .thenReturn(appRepository);
    }

    @Test
    public void testFromNothingToSubmitted() throws Exception {
        createService().submitApplicationOwnerRequest(testAppId, "Moin, bin neu hier.");

        List<ApplicationOwnerRequest> savedRequests = savedRequests();
        assertEquals(1, savedRequests.size());
        assertNotNull(savedRequests.get(0).getId());
        assertEquals(RequestState.SUBMITTED, savedRequests.get(0).getState());
    }

    @Test
    public void testFromSubmittedToSubmitted() throws Throwable {
        ApplicationOwnerRequest request = saveRequest(RequestState.SUBMITTED);
        try {
            ApplicationOwnerRequest appOwnReq = createService()
                    .submitApplicationOwnerRequest(request.getApplicationId(), "").get();
            // re-submitting must keep the existing request (same ID, same state)
            assertEquals(RequestState.SUBMITTED, appOwnReq.getState());
            assertEquals(request.getId(), appOwnReq.getId());
        }
        catch (ExecutionException e) {
            throw e.getCause();
        }
    }

    @Test
    public void testFromSubmittedToRejected() throws Exception {
        ApplicationOwnerRequest request = saveRequest(RequestState.SUBMITTED);
        createService().updateApplicationOwnerRequest(request.getId(), RequestState.REJECTED);
        assertSingleRequest(request.getId(), RequestState.REJECTED);
    }

    @Test
    public void testFromSubmittedToDeletion() throws Exception {
        // cancelling a not-yet-approved request deletes it completely
        ApplicationOwnerRequest request = saveRequest(RequestState.SUBMITTED);
        createService().cancelUserApplicationOwnerRequest(request.getId());
        assertTrue(repository.getObject(request.getId()).isEmpty());
    }

    @Test
    public void testFromSubmittedToApproved() throws Exception {
        ApplicationOwnerRequest request = saveRequest(RequestState.SUBMITTED);
        createService().updateApplicationOwnerRequest(request.getId(), RequestState.APPROVED);
        assertSingleRequest(request.getId(), RequestState.APPROVED);
    }

    @Test
    public void testFromApprovedToSubmitted() throws Throwable {
        ApplicationOwnerRequest request = saveRequest(RequestState.APPROVED);
        try {
            ApplicationOwnerRequest appOwnReq = createService()
                    .submitApplicationOwnerRequest(request.getApplicationId(), "").get();
            // re-submitting an already approved request must NOT downgrade it to SUBMITTED
            assertEquals(RequestState.APPROVED, appOwnReq.getState());
            assertEquals(request.getId(), appOwnReq.getId());
        }
        catch (ExecutionException e) {
            throw e.getCause();
        }
    }

    @Test
    public void testFromApprovedToResigned() throws Exception {
        // cancelling an APPROVED request keeps it, but marks it RESIGNED
        ApplicationOwnerRequest request = saveRequest(RequestState.APPROVED);
        createService().cancelUserApplicationOwnerRequest(request.getId());
        assertSingleRequest(request.getId(), RequestState.RESIGNED);
    }

    @Test
    public void testFromApprovedToRevoked() throws Exception {
        ApplicationOwnerRequest request = saveRequest(RequestState.APPROVED);
        createService().updateApplicationOwnerRequest(request.getId(), RequestState.REVOKED);
        assertSingleRequest(request.getId(), RequestState.REVOKED);
    }

    @Test
    public void testFromResignToSubmit() throws Exception {
        ApplicationOwnerRequest request = saveRequest(RequestState.RESIGNED);
        createService().submitApplicationOwnerRequest(request.getApplicationId(), "");
        assertSingleRequest(request.getId(), RequestState.SUBMITTED);
    }

    @Test
    public void testFromRevokedToApproved() throws Exception {
        ApplicationOwnerRequest request = saveRequest(RequestState.REVOKED);
        createService().updateApplicationOwnerRequest(request.getId(), RequestState.APPROVED);
        assertSingleRequest(request.getId(), RequestState.APPROVED);
    }

    @Test
    public void testFromRevokedToSubmit() throws Exception {
        ApplicationOwnerRequest request = saveRequest(RequestState.REVOKED);
        createService().submitApplicationOwnerRequest(request.getApplicationId(), "");
        assertSingleRequest(request.getId(), RequestState.SUBMITTED);
    }

    @Test
    public void testFromRejectedToApproved() throws Exception {
        ApplicationOwnerRequest request = saveRequest(RequestState.REJECTED);
        createService().updateApplicationOwnerRequest(request.getId(), RequestState.APPROVED);
        assertSingleRequest(request.getId(), RequestState.APPROVED);
    }

    @Test
    public void testFromRejectedToSubmitted() throws Exception {
        ApplicationOwnerRequest request = saveRequest(RequestState.REJECTED);
        createService().submitApplicationOwnerRequest(request.getApplicationId(), "");
        assertSingleRequest(request.getId(), RequestState.SUBMITTED);
    }

    @Test
    public void testImpossibleTransitionRevoked() throws Throwable {
        assertCancelNotAllowed(RequestState.REVOKED);
    }

    @Test
    public void testImpossibleTransitionRejected() throws Throwable {
        assertCancelNotAllowed(RequestState.REJECTED);
    }

    @Test
    public void testImpossibleTransitionResigned() throws Throwable {
        assertCancelNotAllowed(RequestState.RESIGNED);
    }

    /**
     * Creates the service under test. Time and naming services are irrelevant for the request state
     * transitions, so plain mocks are used for them.
     */
    private ApplicationsServiceImpl createService() {
        return new ApplicationsServiceImpl(kafkaEnvironments, currentUserService, mock(TimeService.class),
                mock(NamingService.class), eventManager);
    }

    /**
     * Creates a request in the given state and stores it in the test repository.
     */
    private ApplicationOwnerRequest saveRequest(RequestState state) {
        ApplicationOwnerRequest request = createRequest(state, testCreationTime());
        repository.save(request);
        return request;
    }

    private List<ApplicationOwnerRequest> savedRequests() {
        return List.copyOf(repository.getObjects());
    }

    /**
     * Asserts that the repository contains exactly one request, with the given ID and state.
     */
    private void assertSingleRequest(String expectedId, RequestState expectedState) {
        List<ApplicationOwnerRequest> saved = savedRequests();
        assertEquals(1, saved.size());
        assertEquals(expectedId, saved.get(0).getId());
        assertEquals(expectedState, saved.get(0).getState());
    }

    /**
     * Asserts that cancelling a request which is in the given state fails with an
     * {@link IllegalStateException} (unwrapped from the ExecutionException of the returned future).
     */
    private void assertCancelNotAllowed(RequestState state) {
        assertThrows(IllegalStateException.class, () -> {
            ApplicationOwnerRequest request = saveRequest(state);
            try {
                createService().cancelUserApplicationOwnerRequest(request.getId()).get();
            }
            catch (ExecutionException e) {
                throw e.getCause();
            }
        });
    }

    // Fixed creation timestamp for all tests. The zone is irrelevant for state transitions; UTC is used
    // uniformly (one test formerly used ZoneId.systemDefault(), which has been normalized here).
    private static ZonedDateTime testCreationTime() {
        return ZonedDateTime.of(LocalDateTime.of(2020, 3, 25, 10, 0), ZoneOffset.UTC);
    }

    private static ApplicationOwnerRequest createRequest(RequestState reqState, ZonedDateTime createdAt) {
        ApplicationOwnerRequest dao = new ApplicationOwnerRequest();
        dao.setApplicationId(testAppId);
        dao.setCreatedAt(createdAt);
        dao.setId(UUID.randomUUID().toString());
        dao.setUserName(testUserName);
        dao.setState(reqState);
        return dao;
    }
}
| 17,667 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
UpdateApplicationAclsListenerTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/applications/impl/UpdateApplicationAclsListenerTest.java | package com.hermesworld.ais.galapagos.applications.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.events.*;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.KafkaUser;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.kafka.util.AclSupport;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import org.json.JSONObject;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatchers;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.function.BiFunction;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.*;
@ExtendWith(MockitoExtension.class)
class UpdateApplicationAclsListenerTest {
@Mock
private KafkaClusters kafkaClusters;
@Mock
private KafkaAuthenticationModule authenticationModule;
@Mock
private ApplicationsService applicationsService;
@Mock
private SubscriptionService subscriptionService;
@Mock
private KafkaCluster cluster;
@Mock
private AclSupport aclSupport;
private AclBinding dummyBinding;
@BeforeEach
void feedMocks() {
    when(cluster.getId()).thenReturn("_test");
    lenient().when(kafkaClusters.getEnvironment("_test")).thenReturn(Optional.of(cluster));
    lenient().when(kafkaClusters.getAuthenticationModule("_test")).thenReturn(Optional.of(authenticationModule));

    dummyBinding = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "testtopic", PatternType.LITERAL),
            new AccessControlEntry("me", "*", AclOperation.ALL, AclPermissionType.ALLOW));

    // The ACL support mock answers every request with the dummy binding, re-targeted at the
    // user name passed as third argument of getRequiredAclBindings().
    lenient().when(aclSupport.getRequiredAclBindings(any(), any(), any(), anyBoolean())).then(inv -> {
        String userName = inv.getArgument(2);
        AccessControlEntry renamedEntry = new AccessControlEntry(userName, dummyBinding.entry().host(),
                dummyBinding.entry().operation(), dummyBinding.entry().permissionType());
        return Set.of(new AclBinding(dummyBinding.pattern(), renamedEntry));
    });
}
@Test
void testUpdateApplicationAcls() throws InterruptedException, ExecutionException {
    // Resolve the application's stored authentication JSON ({"dn": "CN=testapp"}) to its Kafka user name.
    when(authenticationModule
            .extractKafkaUserName(ArgumentMatchers.argThat(obj -> obj.getString("dn").equals("CN=testapp"))))
            .thenReturn("User:CN=testapp");
    when(cluster.updateUserAcls(any())).thenReturn(FutureUtil.noop());
    // Guard: registration must only UPDATE ACLs; any removeUserAcls() call would fail the test.
    lenient().when(cluster.removeUserAcls(any())).thenThrow(UnsupportedOperationException.class);

    // Application metadata with all prefix types populated, as present after registration.
    ApplicationMetadata metadata = new ApplicationMetadata();
    metadata.setApplicationId("app01");
    metadata.setAuthenticationJson(new JSONObject(Map.of("dn", "CN=testapp")).toString());
    metadata.setConsumerGroupPrefixes(List.of("group.myapp.", "group2.myapp."));
    metadata.setInternalTopicPrefixes(List.of("de.myapp.", "de.myapp2."));
    metadata.setTransactionIdPrefixes(List.of("de.myapp."));

    GalapagosEventContext context = mock(GalapagosEventContext.class);
    when(context.getKafkaCluster()).thenReturn(cluster);
    ApplicationEvent event = new ApplicationEvent(context, metadata);

    UpdateApplicationAclsListener listener = new UpdateApplicationAclsListener(kafkaClusters, subscriptionService,
            applicationsService, aclSupport);

    listener.handleApplicationRegistered(event).get();

    // Exactly one ACL update for the resolved Kafka user; the binding (provided by the aclSupport
    // stub in feedMocks()) must carry that user as principal.
    ArgumentCaptor<KafkaUser> captor = ArgumentCaptor.forClass(KafkaUser.class);
    verify(cluster, times(1)).updateUserAcls(captor.capture());
    assertEquals("User:CN=testapp", captor.getValue().getKafkaUserName());
    Collection<AclBinding> createdAcls = captor.getValue().getRequiredAclBindings();
    assertEquals(1, createdAcls.size());
    assertEquals("User:CN=testapp", createdAcls.iterator().next().entry().principal());
}
@Test
void testHandleTopicCreated() throws ExecutionException, InterruptedException {
    // Event context pointing at the test cluster
    GalapagosEventContext eventContext = mock(GalapagosEventContext.class);
    when(eventContext.getKafkaCluster()).thenReturn(cluster);
    when(cluster.getId()).thenReturn("_test");
    when(cluster.updateUserAcls(any())).thenReturn(FutureUtil.noop());

    // An EVENTS topic owned by application "producer1"
    TopicMetadata eventsTopic = new TopicMetadata();
    eventsTopic.setName("topic1");
    eventsTopic.setType(TopicType.EVENTS);
    eventsTopic.setOwnerApplicationId("producer1");

    ApplicationMetadata ownerApp = new ApplicationMetadata();
    ownerApp.setApplicationId("producer1");
    ownerApp.setAuthenticationJson(new JSONObject(Map.of("dn", "CN=producer1")).toString());
    when(applicationsService.getApplicationMetadata("_test", "producer1")).thenReturn(Optional.of(ownerApp));
    when(authenticationModule
            .extractKafkaUserName(ArgumentMatchers.argThat(obj -> obj.getString("dn").equals("CN=producer1"))))
            .thenReturn("User:CN=producer1");

    UpdateApplicationAclsListener listener = new UpdateApplicationAclsListener(kafkaClusters, subscriptionService,
            applicationsService, aclSupport);
    listener.handleTopicCreated(new TopicCreatedEvent(eventContext, eventsTopic, new TopicCreateParams(1, 3))).get();

    // The owning application's ACLs must have been refreshed
    ArgumentCaptor<KafkaUser> userCaptor = ArgumentCaptor.forClass(KafkaUser.class);
    verify(cluster).updateUserAcls(userCaptor.capture());
    assertEquals("User:CN=producer1", userCaptor.getValue().getKafkaUserName());
}
@Test
void testHandleAddProducer() throws ExecutionException, InterruptedException {
    GalapagosEventContext context = mock(GalapagosEventContext.class);
    when(context.getKafkaCluster()).thenReturn(cluster);
    when(cluster.getId()).thenReturn("_test");
    when(cluster.updateUserAcls(any())).thenReturn(FutureUtil.noop());

    // The application added as a producer to the topic
    ApplicationMetadata producer1 = new ApplicationMetadata();
    producer1.setApplicationId("producer1");
    producer1.setAuthenticationJson(new JSONObject(Map.of("dn", "CN=producer1")).toString());
    when(applicationsService.getApplicationMetadata("_test", "producer1")).thenReturn(Optional.of(producer1));
    when(authenticationModule
            .extractKafkaUserName(ArgumentMatchers.argThat(obj -> obj.getString("dn").equals("CN=producer1"))))
            .thenReturn("User:CN=producer1");

    TopicAddProducerEvent event = new TopicAddProducerEvent(context, "producer1", new TopicMetadata());
    UpdateApplicationAclsListener listener = new UpdateApplicationAclsListener(kafkaClusters, subscriptionService,
            applicationsService, aclSupport);
    listener.handleAddTopicProducer(event).get();

    // Adding a producer must refresh that producer's ACLs; the binding (from the aclSupport stub
    // in feedMocks()) must be created for the producer's resolved Kafka user.
    ArgumentCaptor<KafkaUser> captor = ArgumentCaptor.forClass(KafkaUser.class);
    verify(cluster, times(1)).updateUserAcls(captor.capture());
    assertEquals("User:CN=producer1", captor.getValue().getKafkaUserName());
    Collection<AclBinding> createdAcls = captor.getValue().getRequiredAclBindings();
    assertEquals(1, createdAcls.size());
    assertEquals("User:CN=producer1", createdAcls.iterator().next().entry().principal());
}
@Test
void testHandleRemoveProducer() throws ExecutionException, InterruptedException {
    // Removing a producer must trigger the same ACL refresh as adding one:
    // updateUserAcls() for the (former) producer application.
    GalapagosEventContext eventContext = mock(GalapagosEventContext.class);
    when(eventContext.getKafkaCluster()).thenReturn(cluster);
    when(cluster.getId()).thenReturn("_test");
    when(cluster.updateUserAcls(any())).thenReturn(FutureUtil.noop());

    ApplicationMetadata producerApp = new ApplicationMetadata();
    producerApp.setApplicationId("producer1");
    producerApp.setAuthenticationJson(new JSONObject(Map.of("dn", "CN=producer1")).toString());
    when(applicationsService.getApplicationMetadata("_test", "producer1")).thenReturn(Optional.of(producerApp));
    when(authenticationModule
            .extractKafkaUserName(ArgumentMatchers.argThat(obj -> obj.getString("dn").equals("CN=producer1"))))
            .thenReturn("User:CN=producer1");

    UpdateApplicationAclsListener listener = new UpdateApplicationAclsListener(kafkaClusters, subscriptionService,
            applicationsService, aclSupport);
    TopicRemoveProducerEvent removeEvent = new TopicRemoveProducerEvent(eventContext, "producer1",
            new TopicMetadata());
    listener.handleRemoveTopicProducer(removeEvent).get();

    ArgumentCaptor<KafkaUser> userCaptor = ArgumentCaptor.forClass(KafkaUser.class);
    verify(cluster).updateUserAcls(userCaptor.capture());
    assertEquals("User:CN=producer1", userCaptor.getValue().getKafkaUserName());
}
@Test
void testSubscriptionCreated() throws ExecutionException, InterruptedException {
// GIVEN a cluster "_test" on which ACL updates complete immediately
GalapagosEventContext context = mock(GalapagosEventContext.class);
when(context.getKafkaCluster()).thenReturn(cluster);
when(cluster.getId()).thenReturn("_test");
when(cluster.updateUserAcls(any())).thenReturn(FutureUtil.noop());
// AND a registered application "app01" whose authentication maps to Kafka user "User:CN=testapp"
ApplicationMetadata app1 = new ApplicationMetadata();
app1.setApplicationId("app01");
app1.setAuthenticationJson(new JSONObject(Map.of("dn", "CN=testapp")).toString());
when(applicationsService.getApplicationMetadata("_test", "app01")).thenReturn(Optional.of(app1));
when(authenticationModule
.extractKafkaUserName(ArgumentMatchers.argThat(obj -> obj.getString("dn").equals("CN=testapp"))))
.thenReturn("User:CN=testapp");
// WHEN that application subscribes to a topic
SubscriptionMetadata metadata = new SubscriptionMetadata();
metadata.setClientApplicationId("app01");
metadata.setTopicName("topic1");
SubscriptionEvent event = new SubscriptionEvent(context, metadata);
UpdateApplicationAclsListener listener = new UpdateApplicationAclsListener(kafkaClusters, subscriptionService,
applicationsService, aclSupport);
listener.handleSubscriptionCreated(event).get();
// THEN the ACLs of the subscribing application's Kafka user must be updated once
ArgumentCaptor<KafkaUser> captor = ArgumentCaptor.forClass(KafkaUser.class);
verify(cluster, times(1)).updateUserAcls(captor.capture());
assertEquals("User:CN=testapp", captor.getValue().getKafkaUserName());
}
@Test
void testNoDeleteAclsWhenUserNameIsSame() throws Exception {
// tests that, when an AuthenticationChanged event occurs but the resulting Kafka User Name is the same, the
// listener does not delete the ACLs of the user after updating them (because that would result in zero ACLs).
GalapagosEventContext context = mock(GalapagosEventContext.class);
when(context.getKafkaCluster()).thenReturn(cluster);
ApplicationMetadata metadata = new ApplicationMetadata();
metadata.setApplicationId("app-1");
metadata.setAuthenticationJson("{ }");
// old and new authentication differ as objects, but both map to the same Kafka user (see stub below)
JSONObject oldAuth = new JSONObject();
JSONObject newAuth = new JSONObject();
ApplicationAuthenticationChangeEvent event = new ApplicationAuthenticationChangeEvent(context, metadata,
oldAuth, newAuth);
KafkaAuthenticationModule authModule = mock(KafkaAuthenticationModule.class);
when(authModule.extractKafkaUserName(any())).thenReturn("User:JohnDoe");
when(kafkaClusters.getAuthenticationModule("_test")).thenReturn(Optional.of(authModule));
when(cluster.updateUserAcls(any())).thenReturn(FutureUtil.noop());
// lenient: this stub SHOULD stay unused; the verify below asserts removeUserAcls is never called
lenient().when(cluster.removeUserAcls(any())).thenReturn(FutureUtil.noop());
UpdateApplicationAclsListener listener = new UpdateApplicationAclsListener(kafkaClusters, subscriptionService,
applicationsService, aclSupport);
listener.handleApplicationAuthenticationChanged(event).get();
verify(cluster).updateUserAcls(any());
verify(cluster, times(0)).removeUserAcls(any());
}
@Test
void testNoDeleteAclsWhenNoPreviousUser() throws Exception {
// tests that no ACL removal is attempted when the OLD authentication does not resolve to any Kafka user
// (i.e. there is no previous user whose ACLs could be removed).
GalapagosEventContext context = mock(GalapagosEventContext.class);
when(context.getKafkaCluster()).thenReturn(cluster);
ApplicationMetadata metadata = new ApplicationMetadata();
metadata.setApplicationId("app-1");
metadata.setAuthenticationJson("{\"foo\": \"bar\" }");
JSONObject newAuth = new JSONObject(metadata.getAuthenticationJson());
// old auth is an empty JSON object (no "foo" key), so extractKafkaUserName returns null for it
ApplicationAuthenticationChangeEvent event = new ApplicationAuthenticationChangeEvent(context, metadata,
new JSONObject(), newAuth);
KafkaAuthenticationModule authModule = mock(KafkaAuthenticationModule.class);
when(authModule.extractKafkaUserName(argThat(arg -> arg != null && arg.has("foo")))).thenReturn("User:JohnDoe");
when(authModule.extractKafkaUserName(argThat(arg -> arg == null || !arg.has("foo")))).thenReturn(null);
when(kafkaClusters.getAuthenticationModule("_test")).thenReturn(Optional.of(authModule));
when(cluster.updateUserAcls(any())).thenReturn(FutureUtil.noop());
// lenient: this stub SHOULD stay unused; the verify below asserts removeUserAcls is never called
lenient().when(cluster.removeUserAcls(any())).thenReturn(FutureUtil.noop());
UpdateApplicationAclsListener listener = new UpdateApplicationAclsListener(kafkaClusters, subscriptionService,
applicationsService, aclSupport);
listener.handleApplicationAuthenticationChanged(event).get();
verify(cluster).updateUserAcls(any());
verify(cluster, times(0)).removeUserAcls(any());
}
@Test
void testNoApplicationAclUpdates() throws Exception {
// GIVEN a configuration where noUpdateApplicationAcls flag is active
KafkaEnvironmentConfig config = mock(KafkaEnvironmentConfig.class);
when(config.isNoUpdateApplicationAcls()).thenReturn(true);
when(kafkaClusters.getEnvironmentMetadata("_test")).thenReturn(Optional.of(config));
// lenient: these stubs must stay unused if the flag is honoured by every handler
lenient().when(cluster.updateUserAcls(any())).thenReturn(FutureUtil.noop());
lenient().when(cluster.removeUserAcls(any())).thenReturn(FutureUtil.noop());
GalapagosEventContext context = mock(GalapagosEventContext.class);
when(context.getKafkaCluster()).thenReturn(cluster);
// topic "topic1" owned by application "producer1", which is also subscribed to it
TopicMetadata topic = new TopicMetadata();
topic.setName("topic1");
topic.setType(TopicType.EVENTS);
topic.setOwnerApplicationId("producer1");
ApplicationMetadata producer1 = new ApplicationMetadata();
producer1.setApplicationId("producer1");
producer1.setAuthenticationJson(new JSONObject(Map.of("dn", "CN=producer1")).toString());
when(applicationsService.getApplicationMetadata("_test", "producer1")).thenReturn(Optional.of(producer1));
SubscriptionMetadata subscription = new SubscriptionMetadata();
subscription.setClientApplicationId("producer1");
subscription.setTopicName("topic1");
when(subscriptionService.getSubscriptionsForTopic("_test", "topic1", true)).thenReturn(List.of(subscription));
UpdateApplicationAclsListener listener = new UpdateApplicationAclsListener(kafkaClusters, subscriptionService,
applicationsService, aclSupport);
// WHEN any permission-related event happens
listener.handleApplicationRegistered(new ApplicationEvent(context, producer1)).get();
listener.handleTopicCreated(new TopicCreatedEvent(context, topic, new TopicCreateParams(1, 3))).get();
listener.handleTopicDeleted(new TopicEvent(context, topic)).get();
listener.handleAddTopicProducer(new TopicAddProducerEvent(context, "producer1", topic)).get();
listener.handleRemoveTopicProducer(new TopicRemoveProducerEvent(context, "producer1", topic)).get();
listener.handleSubscriptionCreated(new SubscriptionEvent(context, subscription)).get();
listener.handleSubscriptionUpdated(new SubscriptionEvent(context, subscription)).get();
listener.handleSubscriptionDeleted(new SubscriptionEvent(context, subscription)).get();
listener.handleApplicationAuthenticationChanged(new ApplicationAuthenticationChangeEvent(context, producer1,
new JSONObject(Map.of("dn", "CN=producer1")), new JSONObject(Map.of("dn", "CN=producer1")))).get();
listener.handleApplicationAuthenticationChanged(new ApplicationAuthenticationChangeEvent(context, producer1,
new JSONObject(Map.of("dn", "CN=producer2")), new JSONObject(Map.of("dn", "CN=producer1")))).get();
listener.handleTopicSubscriptionApprovalRequiredFlagChanged(new TopicEvent(context, topic)).get();
// THEN NONE of these functions may change application ACLs
verify(cluster, times(0)).updateUserAcls(any());
verify(cluster, times(0)).removeUserAcls(any());
// BUT every handler really checked the config (11 handler invocations above)
verify(kafkaClusters, times(11)).getEnvironmentMetadata("_test");
}
}
| 18,258 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ApplicationsServiceImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/applications/impl/ApplicationsServiceImplTest.java | package com.hermesworld.ais.galapagos.applications.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.applications.RequestState;
import com.hermesworld.ais.galapagos.events.GalapagosEventManagerMock;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.auth.CreateAuthenticationResult;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.naming.ApplicationPrefixes;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.naming.config.CaseStrategy;
import com.hermesworld.ais.galapagos.naming.config.NamingConfig;
import com.hermesworld.ais.galapagos.naming.impl.NamingServiceImpl;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.util.TimeService;
import org.json.JSONObject;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.invocation.InvocationOnMock;
import java.io.ByteArrayOutputStream;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.*;
class ApplicationsServiceImplTest {
// mocked cluster registry; re-created and stubbed in feedMocks() before each test
private KafkaClusters kafkaClusters;
// in-memory stand-ins for the Kafka-backed repositories (fresh per test instance)
private final TopicBasedRepository<ApplicationOwnerRequest> requestRepository = new TopicBasedRepositoryMock<>();
private final TopicBasedRepository<KnownApplicationImpl> knownApplicationRepository = new TopicBasedRepositoryMock<>();
// one application-metadata repository per mocked environment ("test" / "test2")
private final TopicBasedRepository<ApplicationMetadata> applicationMetadataRepository = new TopicBasedRepositoryMock<>();
private final TopicBasedRepository<ApplicationMetadata> applicationMetadataRepository2 = new TopicBasedRepositoryMock<>();
// mocked authentication module shared by both environments
private KafkaAuthenticationModule authenticationModule;
@BeforeEach
void feedMocks() {
kafkaClusters = mock(KafkaClusters.class);
// global (cross-environment) repositories
when(kafkaClusters.getGlobalRepository("application-owner-requests", ApplicationOwnerRequest.class))
.thenReturn(requestRepository);
when(kafkaClusters.getGlobalRepository("known-applications", KnownApplicationImpl.class))
.thenReturn(knownApplicationRepository);
// two environments "test" and "test2", each with its own application-metadata repository
KafkaCluster testCluster = mock(KafkaCluster.class);
when(testCluster.getRepository("application-metadata", ApplicationMetadata.class))
.thenReturn(applicationMetadataRepository);
KafkaCluster testCluster2 = mock(KafkaCluster.class);
when(testCluster2.getRepository("application-metadata", ApplicationMetadata.class))
.thenReturn(applicationMetadataRepository2);
when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
when(kafkaClusters.getEnvironment("test2")).thenReturn(Optional.of(testCluster2));
when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test", "test2"));
// authentication module answering createApplicationAuthentication("quattro-1", "quattro", ...)
// with a fixed JSON ({"testentry": true}) and a one-byte secret
authenticationModule = mock(KafkaAuthenticationModule.class);
CreateAuthenticationResult authResult = new CreateAuthenticationResult(
new JSONObject(Map.of("testentry", true)), new byte[] { 1 });
when(authenticationModule.createApplicationAuthentication(eq("quattro-1"), eq("quattro"), any()))
.thenReturn(CompletableFuture.completedFuture(authResult));
when(kafkaClusters.getAuthenticationModule("test")).thenReturn(Optional.of(authenticationModule));
when(kafkaClusters.getAuthenticationModule("test2")).thenReturn(Optional.of(authenticationModule));
}
@Test
void testRemoveOldRequests() {
// Verifies that removeOldRequests() purges requests in "final" states (REJECTED, REVOKED, RESIGNED)
// whose last status change is at least ~30 days in the past, while keeping APPROVED and SUBMITTED
// requests as well as recently changed ones. ("now" is injected via the TimeService lambda below.)
List<ApplicationOwnerRequest> daos = new ArrayList<>();
ZonedDateTime createdAt = ZonedDateTime.of(LocalDateTime.of(2019, 1, 1, 10, 0), ZoneId.systemDefault());
// 30 days + 4 hours before "now" - old enough to be purged (for final states)
ZonedDateTime statusChange1 = ZonedDateTime.of(LocalDateTime.of(2019, 3, 1, 10, 0), ZoneId.systemDefault());
// just under 30 days before "now" - must be kept
ZonedDateTime statusChange2 = ZonedDateTime.of(LocalDateTime.of(2019, 3, 1, 15, 0), ZoneId.systemDefault());
// about 31 days before "now" - old enough to be purged (for final states)
ZonedDateTime statusChange3 = ZonedDateTime.of(LocalDateTime.of(2019, 2, 28, 15, 0), ZoneId.systemDefault());
ZonedDateTime now = ZonedDateTime.of(LocalDateTime.of(2019, 3, 31, 14, 0), ZoneId.systemDefault());
// req1: APPROVED (active) - kept although old
ApplicationOwnerRequest dao = createRequestDao("req1", createdAt, "testuser");
dao.setLastStatusChangeAt(statusChange1);
dao.setState(RequestState.APPROVED);
daos.add(dao);
// req2: REJECTED and old - purged
dao = createRequestDao("req2", createdAt, "testuser");
dao.setLastStatusChangeAt(statusChange1);
dao.setState(RequestState.REJECTED);
daos.add(dao);
// req3: REJECTED but recent - kept
dao = createRequestDao("req3", createdAt, "testuser");
dao.setLastStatusChangeAt(statusChange2);
dao.setState(RequestState.REJECTED);
daos.add(dao);
// req4: REVOKED and old - purged
dao = createRequestDao("req4", createdAt, "testuser");
dao.setLastStatusChangeAt(statusChange3);
dao.setState(RequestState.REVOKED);
daos.add(dao);
// req5: SUBMITTED (active) - kept although old
dao = createRequestDao("req5", createdAt, "testuser");
dao.setLastStatusChangeAt(statusChange3);
dao.setState(RequestState.SUBMITTED);
daos.add(dao);
// req6: RESIGNED and old - purged
dao = createRequestDao("req6", createdAt, "testuser");
dao.setLastStatusChangeAt(statusChange1);
dao.setState(RequestState.RESIGNED);
daos.add(dao);
// req7: RESIGNED but recent - kept
dao = createRequestDao("req7", createdAt, "testuser");
dao.setLastStatusChangeAt(statusChange2);
dao.setState(RequestState.RESIGNED);
daos.add(dao);
daos.forEach(requestRepository::save);
ApplicationsServiceImpl service = new ApplicationsServiceImpl(kafkaClusters, mock(CurrentUserService.class),
() -> now, mock(NamingService.class), new GalapagosEventManagerMock());
service.removeOldRequests();
// old final-state requests are gone...
assertTrue(requestRepository.getObject("req2").isEmpty());
assertTrue(requestRepository.getObject("req4").isEmpty());
assertTrue(requestRepository.getObject("req6").isEmpty());
// ...active and recently changed requests survive
assertFalse(requestRepository.getObject("req1").isEmpty());
assertFalse(requestRepository.getObject("req3").isEmpty());
assertFalse(requestRepository.getObject("req5").isEmpty());
assertFalse(requestRepository.getObject("req7").isEmpty());
}
@Test
void testKnownApplicationAfterSubmitting() {
    // An application for which the current user already has a SUBMITTED ownership request must be
    // excluded from getKnownApplications(true); all other applications remain visible.
    ZonedDateTime now = ZonedDateTime.of(LocalDateTime.of(2020, 3, 25, 10, 0), ZoneOffset.UTC);
    String testUserName = "test";
    String appId = "42";
    ApplicationOwnerRequest request = createRequestWithApplicationID(appId, "1", RequestState.SUBMITTED, now,
            testUserName);

    CurrentUserService currentUserService = mock(CurrentUserService.class);
    when(currentUserService.getCurrentUserName()).thenReturn(Optional.of(request.getUserName()));

    knownApplicationRepository.save(new KnownApplicationImpl(request.getApplicationId(), "App1"));
    knownApplicationRepository.save(new KnownApplicationImpl("43", "App2"));
    requestRepository.save(request);

    ApplicationsServiceImpl service = new ApplicationsServiceImpl(kafkaClusters, currentUserService,
            mock(TimeService.class), mock(NamingService.class), new GalapagosEventManagerMock());

    List<? extends KnownApplication> result = service.getKnownApplications(true);

    // the requested application must not be part of the result; only "43" remains
    assertTrue(result.stream().noneMatch(app -> appId.equals(app.getId())));
    assertEquals(1, result.size());
    assertEquals("43", result.get(0).getId());
}
@Test
void testReplaceCertificate_appChanges() throws Exception {
// Application changed e.g. in external Architecture system
KnownApplicationImpl app = new KnownApplicationImpl("quattro-1", "Quattro");
app.setAliases(List.of("q2"));
knownApplicationRepository.save(app).get();
// But is already registered with Alias Q1 and associated rights
ApplicationMetadata appl = new ApplicationMetadata();
appl.setApplicationId("quattro-1");
appl.setInternalTopicPrefixes(List.of("quattro.internal.", "q1.internal."));
appl.setConsumerGroupPrefixes(List.of("groups.quattro.", "groups.q1."));
appl.setTransactionIdPrefixes(List.of("quattro.internal.", "q1.internal."));
applicationMetadataRepository.save(appl).get();
// The NamingService would return new rights
ApplicationPrefixes newPrefixes = mock(ApplicationPrefixes.class);
when(newPrefixes.combineWith(any())).thenCallRealMethod();
when(newPrefixes.getInternalTopicPrefixes()).thenReturn(List.of("quattro.internal.", "q2.internal."));
when(newPrefixes.getConsumerGroupPrefixes()).thenReturn(List.of("groups.quattro.", "groups.q2."));
when(newPrefixes.getTransactionIdPrefixes()).thenReturn(List.of("quattro.internal.", "q2.internal."));
NamingService namingService = mock(NamingService.class);
when(namingService.getAllowedPrefixes(any())).thenReturn(newPrefixes);
when(namingService.normalize("Quattro")).thenReturn("quattro");
GalapagosEventManagerMock eventManagerMock = new GalapagosEventManagerMock();
ApplicationsServiceImpl applicationServiceImpl = new ApplicationsServiceImpl(kafkaClusters,
mock(CurrentUserService.class), mock(TimeService.class), namingService, eventManagerMock);
// re-register the application on environment "test" (triggers the "update" path)
applicationServiceImpl
.registerApplicationOnEnvironment("test", "quattro-1", new JSONObject(), new ByteArrayOutputStream())
.get();
// noinspection OptionalGetWithoutIsPresent
appl = applicationMetadataRepository.getObject("quattro-1").get();
// the authentication JSON produced by the (mocked) authentication module must have been stored
assertTrue(new JSONObject(appl.getAuthenticationJson()).getBoolean("testentry"));
// resulting rights must contain BOTH old and new prefixes
assertTrue(appl.getInternalTopicPrefixes().contains("quattro.internal."));
assertTrue(appl.getInternalTopicPrefixes().contains("q1.internal."));
assertTrue(appl.getInternalTopicPrefixes().contains("q2.internal."));
assertTrue(appl.getTransactionIdPrefixes().contains("quattro.internal."));
assertTrue(appl.getTransactionIdPrefixes().contains("q1.internal."));
assertTrue(appl.getTransactionIdPrefixes().contains("q2.internal."));
assertTrue(appl.getConsumerGroupPrefixes().contains("groups.quattro."));
assertTrue(appl.getConsumerGroupPrefixes().contains("groups.q1."));
assertTrue(appl.getConsumerGroupPrefixes().contains("groups.q2."));
// also check (while we are here) that event has fired for update
List<InvocationOnMock> invs = eventManagerMock.getSinkInvocations();
assertEquals(1, invs.size());
assertEquals("handleApplicationAuthenticationChanged", invs.get(0).getMethod().getName());
}
@Test
void testRegisterNewFiresEvent() throws Exception {
    // Registering an application on an environment for the first time must fire exactly one
    // "application registered" event at the event sinks.
    KnownApplicationImpl knownApp = new KnownApplicationImpl("quattro-1", "Quattro");
    knownApp.setAliases(List.of("q2"));
    knownApplicationRepository.save(knownApp).get();

    GalapagosEventManagerMock eventManager = new GalapagosEventManagerMock();
    ApplicationsServiceImpl service = new ApplicationsServiceImpl(kafkaClusters, mock(CurrentUserService.class),
            mock(TimeService.class), buildNamingService(), eventManager);

    service.registerApplicationOnEnvironment("test", "quattro-1", new JSONObject(), new ByteArrayOutputStream())
            .get();

    List<InvocationOnMock> sinkInvocations = eventManager.getSinkInvocations();
    assertEquals(1, sinkInvocations.size());
    assertEquals("handleApplicationRegistered", sinkInvocations.get(0).getMethod().getName());
}
@Test
void testPrefix() throws Exception {
// Registers "quattro-1" on environment "test" while its alias is "q2", then changes the alias
// to "q3" and registers it on "test2". The prefixes stored for the second environment must be
// the union of the previously assigned prefixes and the ones derived from the new alias.
KnownApplicationImpl app = new KnownApplicationImpl("quattro-1", "Quattro");
app.setAliases(List.of("q2"));
knownApplicationRepository.save(app).get();
// real NamingServiceImpl (not a mock), so alias-based prefixes are actually computed
NamingService namingService = buildNamingService_forStagePrefixes();
GalapagosEventManagerMock eventManagerMock = new GalapagosEventManagerMock();
// pre-existing registration on "test" with prefixes from the old alias
ApplicationMetadata appl = new ApplicationMetadata();
appl.setApplicationId("quattro-1");
appl.setInternalTopicPrefixes(List.of("quattro.internal.", "q2.internal."));
applicationMetadataRepository.save(appl).get();
ApplicationsServiceImpl applicationServiceImpl = new ApplicationsServiceImpl(kafkaClusters,
mock(CurrentUserService.class), mock(TimeService.class), namingService, eventManagerMock);
applicationServiceImpl
.registerApplicationOnEnvironment("test", "quattro-1", new JSONObject(), new ByteArrayOutputStream())
.get();
// alias changes between the two registrations
app.setAliases(List.of("q3"));
knownApplicationRepository.save(app).get();
ApplicationMetadata appl2 = applicationServiceImpl
.registerApplicationOnEnvironment("test2", "quattro-1", new JSONObject(), new ByteArrayOutputStream())
.get();
assertEquals(Set.of("quattro.internal.", "q2.internal.", "q3.internal."),
new HashSet<>(appl2.getInternalTopicPrefixes()));
}
@Test
void testExtendCertificate() throws Exception {
    // TODO move to CertificateAuthenticationModuleTest: the previous (commented-out) body of this test
    // verified that extending an application certificate via a CSR keeps the existing DN
    // ("CN=quattro,OU=certification_12345"). That logic now belongs to the certificate-based
    // authentication module, so the scenario must be re-implemented in its test class.
    // The dead commented-out implementation was removed; see VCS history if details are needed.
}
@Test
void testUpdateAuthentication() throws Exception {
// WHEN an already registered application is re-registered on an environment...
KnownApplicationImpl app = new KnownApplicationImpl("quattro-1", "Quattro");
knownApplicationRepository.save(app).get();
// existing registration on environment "test" (authentication already present)
ApplicationMetadata appl = new ApplicationMetadata();
appl.setApplicationId("quattro-1");
appl.setAuthenticationJson("{}");
appl.setInternalTopicPrefixes(List.of("quattro.internal.", "q1.internal."));
appl.setConsumerGroupPrefixes(List.of("groups.quattro.", "groups.q1."));
appl.setTransactionIdPrefixes(List.of("quattro.internal.", "q1.internal."));
applicationMetadataRepository.save(appl).get();
ApplicationsServiceImpl applicationServiceImpl = new ApplicationsServiceImpl(kafkaClusters,
mock(CurrentUserService.class), mock(TimeService.class), buildNamingService(),
new GalapagosEventManagerMock());
CreateAuthenticationResult authResult = new CreateAuthenticationResult(new JSONObject(), new byte[] { 1 });
// guard: the "create" path must not be taken for an already registered application
when(authenticationModule.createApplicationAuthentication(any(), any(), any()))
.thenThrow(new IllegalStateException(
"createApplicationAuthentication() should not be called for already registered application"));
when(authenticationModule.updateApplicationAuthentication(any(), any(), any(), any()))
.thenReturn(CompletableFuture.completedFuture(authResult));
applicationServiceImpl
.registerApplicationOnEnvironment("test", "quattro-1", new JSONObject(), new ByteArrayOutputStream())
.get();
// THEN updateApplicationAuthentication instead of create... must be used by the implementation.
verify(authenticationModule).updateApplicationAuthentication(eq("quattro-1"), any(), any(), any());
}
/**
 * Builds a fresh ownership request in initial (SUBMITTED) state.
 */
private static ApplicationOwnerRequest createRequestDao(String id, ZonedDateTime createdAt, String userName) {
    ApplicationOwnerRequest request = new ApplicationOwnerRequest();
    request.setId(id);
    request.setUserName(userName);
    request.setCreatedAt(createdAt);
    request.setState(RequestState.SUBMITTED);
    return request;
}
/**
 * Builds an ownership request for the given application, in the given request state.
 */
private static ApplicationOwnerRequest createRequestWithApplicationID(String appID, String id,
        RequestState reqState, ZonedDateTime createdAt, String userName) {
    ApplicationOwnerRequest request = new ApplicationOwnerRequest();
    request.setId(id);
    request.setApplicationId(appID);
    request.setUserName(userName);
    request.setCreatedAt(createdAt);
    request.setState(reqState);
    return request;
}
// Mocked NamingService returning fixed prefixes; also asserts that it is only ever queried
// for application "quattro-1".
private static NamingService buildNamingService() {
NamingService namingService = mock(NamingService.class);
ApplicationPrefixes prefixes = mock(ApplicationPrefixes.class);
when(namingService.getAllowedPrefixes(any())).then(inv -> {
KnownApplication appl = inv.getArgument(0);
assertEquals("quattro-1", appl.getId());
return prefixes;
});
// combineWith() is a default method - use the real implementation for prefix merging
when(prefixes.combineWith(any())).thenCallRealMethod();
when(prefixes.getInternalTopicPrefixes()).thenReturn(List.of("quattro.internal."));
when(prefixes.getConsumerGroupPrefixes()).thenReturn(List.of("test.group.quattro."));
when(prefixes.getTransactionIdPrefixes()).thenReturn(List.of("quattro.internal."));
when(namingService.normalize("Quattro")).thenReturn("quattro");
return namingService;
}
/**
 * Real NamingServiceImpl configured with "{app-or-alias}" based prefix formats, so alias changes
 * between stages result in additional allowed prefixes.
 */
private static NamingService buildNamingService_forStagePrefixes() {
    NamingConfig namingConfig = new NamingConfig();
    namingConfig.setNormalizationStrategy(CaseStrategy.KEBAB_CASE);
    namingConfig.setInternalTopicPrefixFormat("{app-or-alias}.internal.");
    namingConfig.setConsumerGroupPrefixFormat("{app-or-alias}.group.");
    namingConfig.setTransactionalIdPrefixFormat("{app-or-alias}.tx.");
    return new NamingServiceImpl(namingConfig);
}
}
| 20,847 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CnUtilTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/util/CnUtilTest.java | package com.hermesworld.ais.galapagos.util;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
class CnUtilTest {

    @Test
    void testToAppCn() {
        assertNormalizesTo("alpha", "ALPHA");
        assertNormalizesTo("track_trace", "Track & Trace");
        assertNormalizesTo("elisa", " Elisa ");
        assertNormalizesTo("elisa", " &!Elisa");
        assertNormalizesTo("track_trace", "track_trace");
        assertNormalizesTo("ebay_shipping_client", "Ebay Shipping Client");
    }

    // Asserts that CertificateUtil.toAppCn() maps the raw application name to the expected CN value.
    private static void assertNormalizesTo(String expected, String rawName) {
        assertEquals(expected, CertificateUtil.toAppCn(rawName));
    }
}
| 660 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaSenderImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/kafka/impl/KafkaSenderImplTest.java | package com.hermesworld.ais.galapagos.kafka.impl;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.junit.jupiter.api.Test;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import com.hermesworld.ais.galapagos.kafka.KafkaExecutorFactory;
class KafkaSenderImplTest {

    // Thread factory marking its threads with a "decoupled-" name prefix, so the test
    // can verify that callbacks run on the decoupling executor, not on a Kafka thread.
    private static final ThreadFactory tfDecoupled = r -> new Thread(r, "decoupled-" + System.currentTimeMillis());

    private static final KafkaExecutorFactory executorFactory = () -> Executors
            .newSingleThreadExecutor(tfDecoupled);

    /**
     * Verifies that the future returned by KafkaSenderImpl.send() completes its continuations on a thread
     * provided by the KafkaExecutorFactory (here: named "decoupled-*"), not on the producer callback thread.
     */
    @Test
    void testSendDecoupling() throws Exception {
        KafkaFutureDecoupler decoupler = new KafkaFutureDecoupler(executorFactory);

        @SuppressWarnings("unchecked")
        Producer<String, String> producer = mock(Producer.class);
        // Simulate an asynchronous send: complete the producer callback after a short delay on another thread.
        when(producer.send(any(), any())).then(inv -> CompletableFuture.runAsync(() -> {
            try {
                Thread.sleep(200);
            }
            catch (InterruptedException e) {
                // restore the interrupt status instead of swallowing it (Java concurrency best practice)
                Thread.currentThread().interrupt();
            }
            Callback cb = inv.getArgument(1);
            cb.onCompletion(new RecordMetadata(new TopicPartition("a", 0), 0, 0, 0, null, 0, 0), null);
        }));

        ProducerFactory<String, String> factory = () -> producer;
        KafkaTemplate<String, String> template = new KafkaTemplate<>(factory);
        KafkaSenderImpl sender = new KafkaSenderImpl(template, decoupler);

        StringBuilder threadName = new StringBuilder();
        sender.send("a", "b", "c").thenApply(o -> threadName.append(Thread.currentThread().getName())).get();
        assertTrue(threadName.toString().startsWith("decoupled-"));
    }
}
| 2,444 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicBasedRepositoryImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/kafka/impl/TopicBasedRepositoryImplTest.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.kafka.KafkaSender;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import org.json.JSONObject;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import java.time.Duration;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
class TopicBasedRepositoryImplTest {
private final KafkaSender sender = mock(KafkaSender.class);
private final ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(1);
@AfterEach
void shutdownExecutor() throws Exception {
// Stop the scheduler used by the waitForInitialization tests; give pending tasks up to 1s to finish.
executorService.shutdown();
executorService.awaitTermination(1, TimeUnit.SECONDS);
}
@Test
void testMessageReceived() throws Exception {
// A message arriving for the repository's own Kafka topic must be deserialized and stored.
TopicBasedRepositoryImpl<ApplicationMetadata> repository = new TopicBasedRepositoryImpl<>("galapagos.testtopic",
"testtopic", ApplicationMetadata.class, sender);
assertEquals(ApplicationMetadata.class, repository.getValueClass());
ApplicationMetadata metadata = new ApplicationMetadata();
metadata.setApplicationId("app-1");
metadata.setAuthenticationJson(new JSONObject(Map.of("dn", "CN=app1")).toString());
// repository messages wrap the serialized object in an "obj" property
JSONObject val = new JSONObject(JsonUtil.newObjectMapper().writeValueAsString(metadata));
JSONObject obj = new JSONObject();
obj.put("obj", val);
repository.messageReceived("galapagos.testtopic", "app-1", obj.toString());
// the object must now be retrievable under its key, with its payload intact
assertEquals(1, repository.getObjects().size());
assertTrue(repository.containsObject("app-1"));
assertEquals("CN=app1",
new JSONObject(repository.getObject("app-1").orElseThrow().getAuthenticationJson()).getString("dn"));
}
@Test
void testMessageReceived_wrongTopic() throws Exception {
TopicBasedRepositoryImpl<ApplicationMetadata> repository = new TopicBasedRepositoryImpl<>("galapagos.testtopic",
"testtopic", ApplicationMetadata.class, sender);
ApplicationMetadata metadata = new ApplicationMetadata();
metadata.setApplicationId("app-1");
JSONObject val = new JSONObject(JsonUtil.newObjectMapper().writeValueAsString(metadata));
JSONObject obj = new JSONObject();
obj.put("obj", val);
repository.messageReceived("testtopic", "app-1", obj.toString());
assertEquals(0, repository.getObjects().size());
assertFalse(repository.containsObject("app-1"));
}
@Test
void testWaitForInitialization_emptyRepository() throws Exception {
TopicBasedRepositoryImpl<ApplicationMetadata> repository = new TopicBasedRepositoryImpl<>("galapagos.testtopic",
"testtopic", ApplicationMetadata.class, sender);
long startTime = System.currentTimeMillis();
CompletableFuture<Void> future = repository.waitForInitialization(Duration.ofMillis(200),
Duration.ofMillis(100), executorService);
future.get();
assertTrue(System.currentTimeMillis() >= startTime + 300,
"Implementation only waited " + (System.currentTimeMillis() - startTime) + " ms instead of 300 ms");
assertFalse(System.currentTimeMillis() >= startTime + 1000);
}
@Test
void testWaitForInitialization_positive() throws Exception {
TopicBasedRepositoryImpl<ApplicationMetadata> repository = new TopicBasedRepositoryImpl<>("galapagos.testtopic",
"testtopic", ApplicationMetadata.class, sender);
long startTime = System.currentTimeMillis();
executorService.schedule(() -> repository.messageReceived("galapagos.testtopic", "key", "{\"deleted\": true}"),
250, TimeUnit.MILLISECONDS);
CompletableFuture<Void> future = repository.waitForInitialization(Duration.ofMillis(200),
Duration.ofMillis(100), executorService);
future.get();
assertTrue(System.currentTimeMillis() >= startTime + 350,
"Implementation only waited " + (System.currentTimeMillis() - startTime) + " ms instead of 350 ms");
assertFalse(System.currentTimeMillis() >= startTime + 1000);
}
@Test
void testWaitForInitialization_tooLateDataStart() throws Exception {
TopicBasedRepositoryImpl<ApplicationMetadata> repository = new TopicBasedRepositoryImpl<>("galapagos.testtopic",
"testtopic", ApplicationMetadata.class, sender);
long startTime = System.currentTimeMillis();
executorService.schedule(() -> repository.messageReceived("galapagos.testtopic", "key", "{\"deleted\": true}"),
350, TimeUnit.MILLISECONDS);
CompletableFuture<Void> future = repository.waitForInitialization(Duration.ofMillis(200),
Duration.ofMillis(100), executorService);
future.get();
assertFalse(System.currentTimeMillis() >= startTime + 349, "Repository waited too long");
}
@Test
void testWaitForInitialization_tooLateData() throws Exception {
TopicBasedRepositoryImpl<ApplicationMetadata> repository = new TopicBasedRepositoryImpl<>("galapagos.testtopic",
"testtopic", ApplicationMetadata.class, sender);
long startTime = System.currentTimeMillis();
executorService.schedule(() -> repository.messageReceived("galapagos.testtopic", "key", "{\"deleted\": true}"),
250, TimeUnit.MILLISECONDS);
executorService.schedule(() -> repository.messageReceived("galapagos.testtopic", "key", "{\"deleted\": true}"),
400, TimeUnit.MILLISECONDS);
CompletableFuture<Void> future = repository.waitForInitialization(Duration.ofMillis(200),
Duration.ofMillis(100), executorService);
future.get();
assertTrue(System.currentTimeMillis() >= startTime + 350,
"Implementation only waited " + (System.currentTimeMillis() - startTime) + " ms instead of 350 ms");
assertFalse(System.currentTimeMillis() >= startTime + 399, "Repository waited too long");
}
@Test
void testGetTopicName() {
TopicBasedRepositoryImpl<ApplicationMetadata> repository = new TopicBasedRepositoryImpl<>("galapagos.testtopic",
"testtopic", ApplicationMetadata.class, sender);
assertEquals("testtopic", repository.getTopicName());
}
@Test
void testSave() throws Exception {
TopicBasedRepositoryImpl<ApplicationMetadata> repository = new TopicBasedRepositoryImpl<>("galapagos.testtopic",
"testtopic", ApplicationMetadata.class, sender);
AtomicReference<String> topicName = new AtomicReference<>();
AtomicReference<String> key = new AtomicReference<>();
AtomicReference<String> message = new AtomicReference<>();
when(sender.send(any(), any(), any())).then(inv -> {
topicName.set(inv.getArgument(0));
key.set(inv.getArgument(1));
message.set(inv.getArgument(2));
return FutureUtil.noop();
});
ApplicationMetadata metadata = new ApplicationMetadata();
metadata.setApplicationId("app-1");
repository.save(metadata).get();
assertEquals("galapagos.testtopic", topicName.get());
assertEquals("app-1", key.get());
JSONObject msg = new JSONObject(message.get());
assertEquals("app-1", JsonUtil.newObjectMapper()
.readValue(msg.getJSONObject("obj").toString(), ApplicationMetadata.class).getApplicationId());
}
@Test
void testDelete() throws Exception {
TopicBasedRepositoryImpl<ApplicationMetadata> repository = new TopicBasedRepositoryImpl<>("galapagos.testtopic",
"testtopic", ApplicationMetadata.class, sender);
AtomicReference<String> topicName = new AtomicReference<>();
AtomicReference<String> key = new AtomicReference<>();
AtomicReference<String> message = new AtomicReference<>();
when(sender.send(any(), any(), any())).then(inv -> {
topicName.set(inv.getArgument(0));
key.set(inv.getArgument(1));
message.set(inv.getArgument(2));
return FutureUtil.noop();
});
ApplicationMetadata metadata = new ApplicationMetadata();
metadata.setApplicationId("app-1");
repository.delete(metadata).get();
assertEquals("galapagos.testtopic", topicName.get());
assertEquals("app-1", key.get());
JSONObject msg = new JSONObject(message.get());
assertTrue(msg.getBoolean("deleted"));
}
}
| 9,116 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
KafkaFutureDecouplerTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/kafka/impl/KafkaFutureDecouplerTest.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.kafka.KafkaExecutorFactory;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.kafka.KafkaException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.jupiter.api.Assertions.*;
/**
 * Unit tests for {@code KafkaFutureDecoupler}, verifying that completion stages of Kafka
 * client futures run on separate ("decoupled") Threads instead of the admin client Threads,
 * and that no decoupling executor is created when it is not needed.
 */
class KafkaFutureDecouplerTest {

    private static final ThreadFactory tfAdminClient = r -> new Thread(r, "admin-client-" + System.currentTimeMillis());

    private static final ThreadFactory tfDecoupled = r -> new Thread(r, "decoupled-" + System.currentTimeMillis());

    private static final KafkaExecutorFactory adminClientExecutorFactory = () -> Executors
            .newSingleThreadExecutor(tfAdminClient);

    private static final KafkaExecutorFactory executorFactory = () -> Executors.newSingleThreadExecutor(tfDecoupled);

    private AdminClientStub adminClient;

    @BeforeEach
    void initAdminClient() {
        adminClient = new AdminClientStub();
        adminClient.setKafkaThreadFactory(tfAdminClient);
    }

    @Test
    void testDecoupling_kafkaFuture() throws Exception {
        // first, test that the futures usually would complete on our Threads
        AtomicBoolean onAdminClientThread = new AtomicBoolean();
        adminClient.describeCluster().thenApply(c -> {
            onAdminClientThread.set(Thread.currentThread().getName().startsWith("admin-client-"));
            return null;
        }).get();
        assertTrue(onAdminClientThread.get());

        // after decoupling, future should complete on another Thread
        KafkaFutureDecoupler decoupler = new KafkaFutureDecoupler(executorFactory);
        onAdminClientThread.set(false);
        decoupler.toCompletableFuture(adminClient.describeCluster()).thenCompose(o -> {
            onAdminClientThread.set(Thread.currentThread().getName().startsWith("admin-client-"));
            return CompletableFuture.completedFuture(null);
        }).get();
        assertFalse(onAdminClientThread.get());
    }

    @Test
    void testDecoupling_completableFuture() throws Exception {
        AtomicBoolean onAdminClientThread = new AtomicBoolean();

        // after decoupling, future should complete on another Thread
        KafkaFutureDecoupler decoupler = new KafkaFutureDecoupler(executorFactory);
        CompletableFuture<?> future = CompletableFuture.runAsync(() -> {
            try {
                Thread.sleep(200);
            }
            catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
        }, adminClientExecutorFactory.newExecutor());
        decoupler.toCompletableFuture(future).thenCompose(o -> {
            onAdminClientThread.set(Thread.currentThread().getName().startsWith("admin-client-"));
            return CompletableFuture.completedFuture(null);
        }).get();
        assertFalse(onAdminClientThread.get(), "Future was not decoupled; completion stage ran on admin client Thread");
    }

    @Test
    void testDecoupling_concatenation() throws Exception {
        List<String> threadNames = new ArrayList<>();
        KafkaFutureDecoupler decoupler = new KafkaFutureDecoupler(executorFactory);
        decoupler.toCompletableFuture(adminClient.describeCluster()).thenCompose(o -> {
            threadNames.add(Thread.currentThread().getName());
            return decoupler.toCompletableFuture(adminClient.createAcls(
                    List.of(new AclBinding(new ResourcePattern(ResourceType.TOPIC, "test", PatternType.LITERAL),
                            new AccessControlEntry("testuser", "*", AclOperation.ALL, AclPermissionType.ALLOW)))));
        }).thenApply(o -> {
            threadNames.add(Thread.currentThread().getName());
            return null;
        }).get();

        // every completion stage must run on a decoupled Thread
        for (String tn : threadNames) {
            assertTrue(tn.startsWith("decoupled-"));
        }
        // must be two different Threads!
        assertEquals(2, new HashSet<>(threadNames).size());
    }

    @Test
    void testDecoupling_doneFuture() throws Exception {
        AtomicInteger factoryInvocations = new AtomicInteger();
        KafkaFutureDecoupler decoupler = new KafkaFutureDecoupler(countingExecutorFactory(factoryInvocations));

        KafkaFuture<?> future = adminClient.describeCluster();
        future.get();

        // an already completed future must not create a decoupling executor
        AtomicBoolean applyInvoked = new AtomicBoolean();
        decoupler.toCompletableFuture(future).thenApply(o -> applyInvoked.getAndSet(true)).get();
        assertTrue(applyInvoked.get());
        assertEquals(0, factoryInvocations.get());
    }

    @Test
    void testDecoupling_failingFuture() throws Exception {
        AtomicInteger factoryInvocations = new AtomicInteger();
        KafkaFutureDecoupler decoupler = new KafkaFutureDecoupler(countingExecutorFactory(factoryInvocations));

        adminClient.setFailOnDescribeCluster(true);
        KafkaFuture<?> future = adminClient.describeCluster();
        try {
            decoupler.toCompletableFuture(future).whenComplete((t, ex) -> {
            }).get();
            fail("Decoupled future should have failed");
        }
        catch (ExecutionException e) {
            // future was still running when decoupled, so an executor must have been created
            assertEquals(1, factoryInvocations.get());
            assertTrue(e.getCause() instanceof KafkaException);
        }
    }

    @Test
    void testDecoupling_failedFuture_direct() throws Exception {
        AtomicInteger factoryInvocations = new AtomicInteger();
        KafkaFutureDecoupler decoupler = new KafkaFutureDecoupler(countingExecutorFactory(factoryInvocations));

        adminClient.setFailOnDescribeCluster(true);
        KafkaFuture<?> future = adminClient.describeCluster();
        try {
            future.get();
            fail("Future should have failed");
        }
        catch (ExecutionException e) {
            // OK, Future is failed now
        }
        try {
            decoupler.toCompletableFuture(future).get();
            fail("Decoupled future should have failed");
        }
        catch (ExecutionException e) {
            // an already failed future must not create a decoupling executor
            assertEquals(0, factoryInvocations.get());
            assertTrue(e.getCause() instanceof KafkaException);
        }
    }

    /**
     * Returns an executor factory which counts its invocations in the given counter, so tests
     * can verify whether a decoupling executor was created at all.
     */
    private static KafkaExecutorFactory countingExecutorFactory(AtomicInteger invocations) {
        return () -> {
            invocations.incrementAndGet();
            return Executors.newSingleThreadExecutor(tfDecoupled);
        };
    }
}
| 7,436 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
AdminClientStub.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/kafka/impl/AdminClientStub.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.kafka.KafkaClusterAdminClient;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import org.springframework.kafka.KafkaException;
import java.util.*;
import java.util.concurrent.ThreadFactory;
/**
 * Test stub for {@code KafkaClusterAdminClient}, storing topics and ACL bindings in memory.
 * <p>
 * If a Kafka thread factory is set, returned futures complete asynchronously (after a short
 * delay) on a Thread created by that factory, so callers always receive non-completed futures,
 * which is required by the future decoupling tests.
 */
public class AdminClientStub implements KafkaClusterAdminClient {

    private final List<AclBinding> aclBindings = new ArrayList<>();

    private final List<NewTopic> topics = new ArrayList<>();

    // when set, futures complete delayed on Threads created by this factory
    private ThreadFactory kafkaThreadFactory;

    // when set, describeCluster() returns a failing future
    private boolean failOnDescribeCluster;

    public List<AclBinding> getAclBindings() {
        return aclBindings;
    }

    public List<NewTopic> getTopics() {
        return topics;
    }

    public void setKafkaThreadFactory(ThreadFactory kafkaThreadFactory) {
        this.kafkaThreadFactory = kafkaThreadFactory;
    }

    public void setFailOnDescribeCluster(boolean failOnDescribeCluster) {
        this.failOnDescribeCluster = failOnDescribeCluster;
    }

    @Override
    public KafkaFuture<Void> createTopic(NewTopic topic) {
        topics.add(topic);
        return completedFuture(null);
    }

    @Override
    public KafkaFuture<Void> deleteTopic(String topicName) {
        topics.removeIf(t -> topicName.equals(t.name()));
        return completedFuture(null);
    }

    @Override
    public KafkaFuture<TopicDescription> describeTopic(String topicName) {
        if (topics.stream().anyMatch(t -> topicName.equals(t.name()))) {
            return completedFuture(new TopicDescription(topicName, false, List.of()));
        }
        return completedFuture(null);
    }

    @Override
    public KafkaFuture<Collection<Node>> describeCluster() {
        Node node = new Node(1, "localhost", 1);
        if (failOnDescribeCluster) {
            return failingFuture(new KafkaException("Kafka failed"));
        }
        return completedFuture(List.of(node));
    }

    @Override
    public KafkaFuture<Collection<AclBinding>> describeAcls(AclBindingFilter filter) {
        List<AclBinding> matches = this.aclBindings.stream().filter(filter::matches).toList();
        return completedFuture(matches);
    }

    @Override
    public KafkaFuture<Void> createAcls(Collection<AclBinding> bindings) {
        this.aclBindings.addAll(bindings);
        return completedFuture(null);
    }

    @Override
    public KafkaFuture<Collection<AclBinding>> deleteAcls(Collection<AclBindingFilter> filters) {
        Set<AclBinding> removes = new HashSet<>();
        filters.forEach(filter -> this.aclBindings.stream().filter(filter::matches).forEach(removes::add));
        this.aclBindings.removeAll(removes);
        return completedFuture(removes);
    }

    @Override
    public KafkaFuture<Config> describeConfigs(ConfigResource resource) {
        return completedFuture(null);
    }

    @Override
    public KafkaFuture<Void> incrementalAlterConfigs(ConfigResource resource, Map<String, String> configValues) {
        return completedFuture(null);
    }

    /**
     * Returns a future completing with the given value - synchronously if no Kafka thread
     * factory is set, otherwise delayed on a factory-created Thread.
     */
    private <T> KafkaFuture<T> completedFuture(T value) {
        if (kafkaThreadFactory == null) {
            return KafkaFuture.completedFuture(value);
        }
        return delayedFuture(value, null, kafkaThreadFactory);
    }

    /**
     * Returns a future failing with the given exception, delayed on a Thread from the Kafka
     * thread factory (or a plain Thread, if no factory is set).
     */
    private <T> KafkaFuture<T> failingFuture(Throwable ex) {
        ThreadFactory threadFactory = kafkaThreadFactory != null ? kafkaThreadFactory : Thread::new;
        return delayedFuture(null, ex, threadFactory);
    }

    /**
     * Creates a future which, after a short delay on a Thread from the given factory, either
     * fails with {@code ex} (if non-null) or completes with {@code value}.
     */
    private static <T> KafkaFuture<T> delayedFuture(T value, Throwable ex, ThreadFactory threadFactory) {
        KafkaFutureImpl<T> result = new KafkaFutureImpl<>();
        Runnable r = () -> {
            // to force that callers receive a non-completed future, we have to spend some time here
            try {
                Thread.sleep(100);
            }
            catch (InterruptedException e) {
                return;
            }
            if (ex != null) {
                result.completeExceptionally(ex);
            }
            else {
                result.complete(value);
            }
        };
        threadFactory.newThread(r).start();
        return result;
    }
}
| 4,721 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicBasedRepositoryMock.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/kafka/impl/TopicBasedRepositoryMock.java | package com.hermesworld.ais.galapagos.kafka.impl;
import java.time.Duration;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import javax.annotation.CheckReturnValue;
import com.hermesworld.ais.galapagos.kafka.util.TopicBasedRepository;
import com.hermesworld.ais.galapagos.util.HasKey;
/**
 * In-memory mock of {@code TopicBasedRepository} for unit tests. Objects are held in a simple
 * map keyed by {@code HasKey.key()}; save and delete complete immediately without any Kafka
 * interaction.
 */
public class TopicBasedRepositoryMock<T extends HasKey> implements TopicBasedRepository<T> {

    private final String topicName;

    private final Class<T> valueClass;

    private final Map<String, T> objectsByKey = new HashMap<>();

    public TopicBasedRepositoryMock() {
        this("unknown-topic", null);
    }

    public TopicBasedRepositoryMock(String topicName, Class<T> valueClass) {
        this.topicName = topicName;
        this.valueClass = valueClass;
    }

    @Override
    public String getTopicName() {
        return topicName;
    }

    @Override
    public Class<T> getValueClass() {
        return valueClass;
    }

    @Override
    public boolean containsObject(String id) {
        return objectsByKey.containsKey(id);
    }

    @Override
    public Optional<T> getObject(String id) {
        return Optional.ofNullable(objectsByKey.get(id));
    }

    @Override
    public Collection<T> getObjects() {
        return objectsByKey.values();
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> save(T value) {
        objectsByKey.put(value.key(), value);
        return completedVoid();
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> delete(T value) {
        objectsByKey.remove(value.key());
        return completedVoid();
    }

    @Override
    @CheckReturnValue
    public CompletableFuture<Void> waitForInitialization(Duration initialWaitTime, Duration idleTime,
            ScheduledExecutorService executorService) {
        // nothing to initialize in the in-memory mock
        return completedVoid();
    }

    // all mock operations are synchronous, so futures are always already completed
    private static CompletableFuture<Void> completedVoid() {
        return CompletableFuture.completedFuture(null);
    }
}
| 2,031 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ConnectedKafkaClusterTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/kafka/impl/ConnectedKafkaClusterTest.java | package com.hermesworld.ais.galapagos.kafka.impl;
import com.hermesworld.ais.galapagos.kafka.KafkaExecutorFactory;
import com.hermesworld.ais.galapagos.kafka.KafkaUser;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.acl.*;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Executors;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.mock;
/**
 * Unit test for {@code ConnectedKafkaCluster}, verifying that updating the ACLs of a user
 * removes obsolete bindings, keeps still-required ones, and creates missing ones.
 */
class ConnectedKafkaClusterTest {

    @Test
    void testUpdateAcls() throws Exception {
        List<AclBindingFilter> removedFilters = new ArrayList<>();
        List<AclBinding> addedBindings = new ArrayList<>();

        // admin client stub recording all ACL create / delete calls
        AdminClientStub adminClient = new AdminClientStub() {
            @Override
            public KafkaFuture<Void> createAcls(Collection<AclBinding> acls) {
                addedBindings.addAll(acls);
                return super.createAcls(acls);
            }

            @Override
            public KafkaFuture<Collection<AclBinding>> deleteAcls(Collection<AclBindingFilter> filters) {
                removedFilters.addAll(filters);
                return super.deleteAcls(filters);
            }
        };

        AclBinding obsoleteBinding = allowAllOnTopic("topic1");
        AclBinding existingBinding = allowAllOnTopic("topic2");
        AclBinding missingBinding = allowAllOnTopic("topic3");

        // cluster starts out with one obsolete and one still-required binding
        adminClient.getAclBindings().add(obsoleteBinding);
        adminClient.getAclBindings().add(existingBinding);

        KafkaExecutorFactory executorFactory = Executors::newSingleThreadExecutor;
        KafkaFutureDecoupler futureDecoupler = new KafkaFutureDecoupler(executorFactory);

        @SuppressWarnings("unchecked")
        ConnectedKafkaCluster cluster = new ConnectedKafkaCluster("_test", mock(KafkaRepositoryContainer.class),
                adminClient, mock(KafkaConsumerFactory.class), futureDecoupler);

        cluster.updateUserAcls(new KafkaUser() {
            @Override
            public Collection<AclBinding> getRequiredAclBindings() {
                return Set.of(existingBinding, missingBinding);
            }

            @Override
            public String getKafkaUserName() {
                return "User:CN=testuser";
            }
        }).get();

        // exactly the obsolete binding must have been deleted, not the still-required one
        assertEquals(1, removedFilters.size());
        assertTrue(removedFilters.get(0).matches(obsoleteBinding));
        assertFalse(removedFilters.get(0).matches(existingBinding));

        // exactly the missing binding must have been created
        assertEquals(1, addedBindings.size());
        assertTrue(addedBindings.contains(missingBinding));
    }

    // builds an ALLOW ALL binding for the test user on the given literal topic name
    private static AclBinding allowAllOnTopic(String topicName) {
        return new AclBinding(new ResourcePattern(ResourceType.TOPIC, topicName, PatternType.LITERAL),
                new AccessControlEntry("User:CN=testuser", "*", AclOperation.ALL, AclPermissionType.ALLOW));
    }
}
| 3,225 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
AclSupportTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/kafka/util/AclSupportTest.java | package com.hermesworld.ais.galapagos.kafka.util;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.kafka.config.DefaultAclConfig;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentsConfig;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import org.json.JSONObject;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.*;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
class AclSupportTest {
private static final List<AclOperationAndType> WRITE_TOPIC_OPERATIONS = Arrays.asList(
new AclOperationAndType(AclOperation.ALL, AclPermissionType.ALLOW),
new AclOperationAndType(AclOperation.DELETE, AclPermissionType.DENY));
private static final List<AclOperationAndType> READ_TOPIC_OPERATIONS = Arrays.asList(
new AclOperationAndType(AclOperation.READ, AclPermissionType.ALLOW),
new AclOperationAndType(AclOperation.DESCRIBE_CONFIGS, AclPermissionType.ALLOW));
@Mock
private KafkaEnvironmentsConfig kafkaConfig;
@Mock
private TopicService topicService;
@Mock
private SubscriptionService subscriptionService;
@BeforeEach
void initMocks() {
}
@Test
void testGetRequiredAclBindings_simple() {
ApplicationMetadata metadata = new ApplicationMetadata();
metadata.setApplicationId("app01");
metadata.setConsumerGroupPrefixes(List.of("group.myapp.", "group2.myapp."));
metadata.setInternalTopicPrefixes(List.of("de.myapp.", "de.myapp2."));
metadata.setTransactionIdPrefixes(List.of("de.myapp."));
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic1");
topic1.setType(TopicType.EVENTS);
topic1.setOwnerApplicationId("app01");
TopicMetadata topic2 = new TopicMetadata();
topic2.setName("topic2");
topic2.setType(TopicType.EVENTS);
topic2.setOwnerApplicationId("app02");
TopicMetadata topic3 = new TopicMetadata();
topic3.setName("topic3");
topic3.setType(TopicType.EVENTS);
topic3.setOwnerApplicationId("app02");
SubscriptionMetadata sub = new SubscriptionMetadata();
sub.setId("1");
sub.setClientApplicationId("app01");
sub.setTopicName("topic2");
when(topicService.listTopics("_test")).thenReturn(List.of(topic1, topic2));
when(topicService.getTopic("_test", "topic2")).thenReturn(Optional.of(topic2));
when(subscriptionService.getSubscriptionsOfApplication("_test", "app01", false)).thenReturn(List.of(sub));
AclSupport aclSupport = new AclSupport(kafkaConfig, topicService, subscriptionService);
Collection<AclBinding> acls = aclSupport.getRequiredAclBindings("_test", metadata, "User:CN=testapp", false);
assertEquals(9, acls.size());
// two ACL for groups and two for topic prefixes must have been created
assertNotNull(acls.stream()
.filter(binding -> binding.pattern().resourceType() == ResourceType.TOPIC
&& binding.pattern().patternType() == PatternType.PREFIXED
&& binding.pattern().name().equals("de.myapp.")
&& binding.entry().operation().equals(AclOperation.ALL))
.findAny().orElse(null));
assertNotNull(acls.stream()
.filter(binding -> binding.pattern().resourceType() == ResourceType.TOPIC
&& binding.pattern().patternType() == PatternType.PREFIXED
&& binding.pattern().name().equals("de.myapp2.")
&& binding.entry().operation().equals(AclOperation.ALL))
.findAny().orElse(null));
assertNotNull(acls.stream()
.filter(binding -> binding.pattern().resourceType() == ResourceType.GROUP
&& binding.pattern().patternType() == PatternType.PREFIXED
&& binding.pattern().name().equals("group.myapp."))
.findAny().orElse(null));
assertNotNull(acls.stream()
.filter(binding -> binding.pattern().resourceType() == ResourceType.GROUP
&& binding.pattern().patternType() == PatternType.PREFIXED
&& binding.pattern().name().equals("group2.myapp."))
.findAny().orElse(null));
// transaction ACLs must have been created
assertNotNull(acls.stream()
.filter(binding -> binding.pattern().resourceType() == ResourceType.TRANSACTIONAL_ID
&& binding.pattern().patternType() == PatternType.PREFIXED
&& binding.pattern().name().equals("de.myapp.")
&& binding.entry().operation() == AclOperation.ALL)
.findAny().orElse(null));
// Write rights for owned topic must also be present
WRITE_TOPIC_OPERATIONS.forEach(op -> assertNotNull(acls.stream()
.filter(binding -> binding.pattern().resourceType() == ResourceType.TOPIC
&& binding.pattern().patternType() == PatternType.LITERAL
&& binding.pattern().name().equals("topic1") && binding.entry().operation() == op.operation
&& binding.entry().permissionType() == op.permissionType)));
// and read rights for subscribed topic
READ_TOPIC_OPERATIONS.forEach(op -> assertNotNull(acls.stream()
.filter(binding -> binding.pattern().resourceType() == ResourceType.TOPIC
&& binding.pattern().patternType() == PatternType.LITERAL
&& binding.pattern().name().equals("topic2") && binding.entry().operation() == op.operation
&& binding.entry().permissionType() == op.permissionType)));
}
@Test
void testNoWriteAclsForInternalTopics() {
ApplicationMetadata app1 = new ApplicationMetadata();
app1.setApplicationId("app-1");
app1.setConsumerGroupPrefixes(List.of("groups."));
TopicMetadata topic = new TopicMetadata();
topic.setName("int1");
topic.setType(TopicType.INTERNAL);
topic.setOwnerApplicationId("app-1");
when(topicService.listTopics("_test")).thenReturn(List.of(topic));
AclSupport aclSupport = new AclSupport(kafkaConfig, topicService, subscriptionService);
Collection<AclBinding> bindings = aclSupport.getRequiredAclBindings("_test", app1, "User:CN=testapp", false);
assertEquals(1, bindings.size());
assertFalse(bindings.stream().anyMatch(binding -> binding.pattern().resourceType() == ResourceType.TOPIC));
}
@Test
void testAdditionalProducerWriteAccess() {
ApplicationMetadata app1 = new ApplicationMetadata();
app1.setApplicationId("app-1");
ApplicationMetadata producer1 = new ApplicationMetadata();
producer1.setApplicationId("producer1");
TopicMetadata topic = new TopicMetadata();
topic.setName("topic1");
topic.setType(TopicType.EVENTS);
topic.setOwnerApplicationId("app-1");
topic.setProducers(List.of("producer1"));
when(topicService.listTopics("_test")).thenReturn(List.of(topic));
AclSupport aclSupport = new AclSupport(kafkaConfig, topicService, subscriptionService);
Collection<AclBinding> bindings = aclSupport.getRequiredAclBindings("_test", producer1, "User:CN=producer1",
false);
WRITE_TOPIC_OPERATIONS.forEach(op -> assertNotNull(
bindings.stream().filter(binding -> binding.pattern().resourceType() == ResourceType.TOPIC
&& binding.pattern().patternType() == PatternType.LITERAL
&& binding.pattern().name().equals("topic1") && binding.entry().operation() == op.operation
&& binding.entry().permissionType() == op.permissionType
&& binding.entry().principal().equals("User:CN=producer1")).findAny().orElse(null),
"Did not find expected write ACL for topic (operation " + op.operation.name() + " is missing)"));
}
@Test
void testDefaultAcls() {
ApplicationMetadata app1 = new ApplicationMetadata();
app1.setApplicationId("app-1");
app1.setAuthenticationJson(new JSONObject(Map.of("dn", "CN=testapp")).toString());
app1.setConsumerGroupPrefixes(List.of("groups."));
List<DefaultAclConfig> defaultAcls = new ArrayList<>();
defaultAcls.add(defaultAclConfig("test-group", ResourceType.GROUP, PatternType.PREFIXED, AclOperation.READ));
defaultAcls.add(defaultAclConfig("test-topic", ResourceType.TOPIC, PatternType.LITERAL, AclOperation.CREATE));
when(kafkaConfig.getDefaultAcls()).thenReturn(defaultAcls);
AclSupport aclSupport = new AclSupport(kafkaConfig, topicService, subscriptionService);
Collection<AclBinding> bindings = aclSupport.getRequiredAclBindings("_test", app1, "User:CN=testapp", false);
assertTrue(bindings.stream()
.anyMatch(b -> b.pattern().patternType() == PatternType.PREFIXED
&& b.pattern().name().equals("test-group") && b.pattern().resourceType() == ResourceType.GROUP
&& b.entry().operation() == AclOperation.READ
&& b.entry().permissionType() == AclPermissionType.ALLOW && b.entry().host().equals("*")));
assertTrue(bindings.stream()
.anyMatch(b -> b.pattern().patternType() == PatternType.LITERAL
&& b.pattern().name().equals("test-topic") && b.pattern().resourceType() == ResourceType.TOPIC
&& b.entry().operation() == AclOperation.CREATE
&& b.entry().permissionType() == AclPermissionType.ALLOW && b.entry().host().equals("*")));
}
@Test
void testReadOnlyAcls() {
    // given: an application owning topic1, subscribed to the foreign topic2, with internal
    // topic, consumer group, and transaction ID prefixes configured.
    // (A previously declared topic3 was never stubbed nor asserted and has been removed.)
    ApplicationMetadata metadata = new ApplicationMetadata();
    metadata.setApplicationId("app01");
    metadata.setConsumerGroupPrefixes(List.of("group.myapp.", "group2.myapp."));
    metadata.setInternalTopicPrefixes(List.of("de.myapp.", "de.myapp2."));
    metadata.setTransactionIdPrefixes(List.of("de.myapp."));
    TopicMetadata topic1 = new TopicMetadata();
    topic1.setName("topic1");
    topic1.setType(TopicType.EVENTS);
    topic1.setOwnerApplicationId("app01");
    TopicMetadata topic2 = new TopicMetadata();
    topic2.setName("topic2");
    topic2.setType(TopicType.EVENTS);
    topic2.setOwnerApplicationId("app02");
    SubscriptionMetadata sub = new SubscriptionMetadata();
    sub.setId("1");
    sub.setClientApplicationId("app01");
    sub.setTopicName("topic2");
    when(topicService.listTopics("_test")).thenReturn(List.of(topic1, topic2));
    when(topicService.getTopic("_test", "topic2")).thenReturn(Optional.of(topic2));
    when(subscriptionService.getSubscriptionsOfApplication("_test", "app01", false)).thenReturn(List.of(sub));
    AclSupport aclSupport = new AclSupport(kafkaConfig, topicService, subscriptionService);

    // when: requesting READ-ONLY ACL bindings (last parameter true)
    Collection<AclBinding> acls = aclSupport.getRequiredAclBindings("_test", metadata, "User:CN=testapp", true);

    // then
    assertEquals(8, acls.size());
    // NO group ACLs must have been created
    assertEquals(List.of(), acls.stream().filter(binding -> binding.pattern().resourceType() == ResourceType.GROUP)
            .collect(Collectors.toList()));
    // NO transaction ACLs must have been created
    assertEquals(List.of(),
            acls.stream().filter(binding -> binding.pattern().resourceType() == ResourceType.TRANSACTIONAL_ID)
                    .collect(Collectors.toList()));
    // NO "ALL" and no WRITE rights must have been created
    assertEquals(List.of(), acls.stream().filter(binding -> binding.entry().operation() == AclOperation.ALL)
            .collect(Collectors.toList()));
    assertEquals(List.of(), acls.stream().filter(binding -> binding.entry().operation() == AclOperation.WRITE)
            .collect(Collectors.toList()));
    // for internal, owned, and subscribed topics, DESCRIBE_CONFIGS and READ must exist
    for (AclOperationAndType op : READ_TOPIC_OPERATIONS) {
        assertTrue(acls.stream().anyMatch(binding -> binding.pattern().resourceType() == ResourceType.TOPIC
                && binding.pattern().patternType() == PatternType.PREFIXED
                && binding.pattern().name().equals("de.myapp.") && binding.entry().operation() == op.operation
                && binding.entry().permissionType() == op.permissionType));
        assertTrue(acls.stream()
                .anyMatch(binding -> binding.pattern().resourceType() == ResourceType.TOPIC
                        && binding.pattern().patternType() == PatternType.LITERAL
                        && binding.pattern().name().equals("topic1") && binding.entry().operation() == op.operation
                        && binding.entry().permissionType() == op.permissionType));
        assertTrue(acls.stream()
                .anyMatch(binding -> binding.pattern().resourceType() == ResourceType.TOPIC
                        && binding.pattern().patternType() == PatternType.LITERAL
                        && binding.pattern().name().equals("topic2") && binding.entry().operation() == op.operation
                        && binding.entry().permissionType() == op.permissionType));
    }
}
@Test
void testSimplify() {
    AclSupport support = new AclSupport(kafkaConfig, topicService, subscriptionService);

    // READ on a prefix which also carries an ALL binding - simplify() must drop it.
    AclBinding superfluousBinding = topicBinding("test", PatternType.PREFIXED, AclOperation.READ);

    List<AclBinding> bindings = List.of(superfluousBinding,
            topicBinding("test", PatternType.PREFIXED, AclOperation.ALL),
            topicBinding("2test", PatternType.LITERAL, AclOperation.ALL),
            topicBinding("2test", PatternType.PREFIXED, AclOperation.READ),
            topicBinding("2test", PatternType.PREFIXED, AclOperation.CREATE));

    Collection<AclBinding> reducedBindings = support.simplify(bindings);

    assertEquals(4, reducedBindings.size());
    assertFalse(reducedBindings.contains(superfluousBinding));
}

// Builds a topic ACL binding for principal "me" from any host, with ALLOW permission.
private static AclBinding topicBinding(String name, PatternType patternType, AclOperation operation) {
    return new AclBinding(new ResourcePattern(ResourceType.TOPIC, name, patternType),
            new AccessControlEntry("me", "*", operation, AclPermissionType.ALLOW));
}
/**
 * Builds a {@code DefaultAclConfig} with the given resource name, type, pattern type, and
 * operation, for use as test fixture.
 */
private DefaultAclConfig defaultAclConfig(String name, ResourceType resourceType, PatternType patternType,
        AclOperation operation) {
    DefaultAclConfig result = new DefaultAclConfig();
    result.setOperation(operation);
    result.setPatternType(patternType);
    result.setResourceType(resourceType);
    result.setName(name);
    return result;
}
// Immutable value holder pairing an ACL operation with its permission type. Used by the
// operation lists (e.g. READ_TOPIC_OPERATIONS, WRITE_TOPIC_OPERATIONS) checked in the tests above.
private static class AclOperationAndType {
// the Kafka ACL operation (READ, WRITE, DESCRIBE_CONFIGS, ...)
private final AclOperation operation;
// ALLOW or DENY
private final AclPermissionType permissionType;
private AclOperationAndType(AclOperation operation, AclPermissionType permissionType) {
this.operation = operation;
this.permissionType = permissionType;
}
}
}
| 16,974 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DeveloperAuthenticationServiceImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/devauth/impl/DeveloperAuthenticationServiceImplTest.java | package com.hermesworld.ais.galapagos.devauth.impl;
import com.hermesworld.ais.galapagos.devauth.DevAuthenticationMetadata;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.auth.CreateAuthenticationResult;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import com.hermesworld.ais.galapagos.util.TimeService;
import org.json.JSONObject;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatcher;
import org.mockito.Captor;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.time.ZonedDateTime;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.*;
/**
 * Unit tests for DeveloperAuthenticationServiceImpl: creation, lookup, and expiry-based
 * cleanup of per-developer Kafka authentications across environments.
 */
@ExtendWith(MockitoExtension.class)
class DeveloperAuthenticationServiceImplTest {
// Collaborators of the service under test, all mocked.
@Mock
private KafkaClusters kafkaClusters;
@Mock
private KafkaCluster testCluster;
@Mock
private CurrentUserService userService;
@Mock
private DevUserAclListener aclUpdater;
@Mock
private TimeService timeService;
@Mock
private KafkaAuthenticationModule authenticationModule;
@Captor
private ArgumentCaptor<Set<DevAuthenticationMetadata>> testClusterDeletedMetas;
@Captor
private ArgumentCaptor<Set<DevAuthenticationMetadata>> test2ClusterDeletedMetas;
// In-memory stand-in for the "devauth" metadata repository of the "test" cluster.
private final TopicBasedRepositoryMock<DevAuthenticationMetadata> metaRepo = new TopicBasedRepositoryMock<>();
// Creating an authentication stores the metadata, writes the secret to the given stream,
// and updates the developer's ACLs - no deletion must occur when none existed before.
@Test
void testCreateDeveloperAuthentication_positive() throws Exception {
// given
when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
when(testCluster.getRepository("devauth", DevAuthenticationMetadata.class)).thenReturn(metaRepo);
when(kafkaClusters.getAuthenticationModule("test")).thenReturn(Optional.of(authenticationModule));
when(userService.getCurrentUserName()).thenReturn(Optional.of("testuser"));
when(aclUpdater.updateAcls(any(), any())).thenReturn(FutureUtil.noop());
CreateAuthenticationResult result = new CreateAuthenticationResult(new JSONObject(Map.of("field1", "testval")),
"topsecret".getBytes(StandardCharsets.UTF_8));
when(authenticationModule.createDeveloperAuthentication(any(), any()))
.thenReturn(CompletableFuture.completedFuture(result));
DeveloperAuthenticationServiceImpl service = new DeveloperAuthenticationServiceImpl(kafkaClusters, userService,
aclUpdater, timeService);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
// when
DevAuthenticationMetadata metadata = service.createDeveloperAuthenticationForCurrentUser("test", baos).get();
// then
verify(authenticationModule, times(1)).createDeveloperAuthentication(eq("testuser"), any());
verify(authenticationModule, times(0)).deleteDeveloperAuthentication(any(), any());
verify(aclUpdater, times(0)).removeAcls(any(), any());
verify(aclUpdater, times(1)).updateAcls(any(), eq(Set.of(metadata)));
assertEquals("testuser", metadata.getUserName());
assertEquals("testval", new JSONObject(metadata.getAuthenticationJson()).getString("field1"));
assertEquals("topsecret", baos.toString(StandardCharsets.UTF_8));
}
// Without a logged-in user, creation fails with an IllegalStateException (wrapped in
// ExecutionException by the returned future).
@Test
void testCreateDeveloperAuthentication_noUser() {
// given
when(userService.getCurrentUserName()).thenReturn(Optional.empty());
DeveloperAuthenticationServiceImpl service = new DeveloperAuthenticationServiceImpl(kafkaClusters, userService,
aclUpdater, timeService);
// when / then
ExecutionException exception = assertThrows(ExecutionException.class,
() -> service.createDeveloperAuthenticationForCurrentUser("test", new ByteArrayOutputStream()).get());
assertTrue(exception.getCause() instanceof IllegalStateException);
}
// An unknown environment ID fails with NoSuchElementException. The "test" stubs are
// lenient() because the unknown "test2" lookup short-circuits before they are used.
@Test
void testCreateDeveloperAuthentication_invalidEnvironmentId() {
// given
lenient().when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
lenient().when(testCluster.getRepository("devauth", DevAuthenticationMetadata.class)).thenReturn(metaRepo);
lenient().when(kafkaClusters.getAuthenticationModule("test")).thenReturn(Optional.of(authenticationModule));
when(userService.getCurrentUserName()).thenReturn(Optional.of("testuser"));
DeveloperAuthenticationServiceImpl service = new DeveloperAuthenticationServiceImpl(kafkaClusters, userService,
aclUpdater, timeService);
// when / then
ExecutionException exception = assertThrows(ExecutionException.class,
() -> service.createDeveloperAuthenticationForCurrentUser("test2", new ByteArrayOutputStream()).get());
assertTrue(exception.getCause() instanceof NoSuchElementException);
}
// An existing authentication of the same user is deleted (and its ACLs removed) before
// the new one is created.
@Test
void testCreateDeveloperAuthentication_deletePreviousAuth() throws Exception {
// given
when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
when(testCluster.getRepository("devauth", DevAuthenticationMetadata.class)).thenReturn(metaRepo);
when(kafkaClusters.getAuthenticationModule("test")).thenReturn(Optional.of(authenticationModule));
when(userService.getCurrentUserName()).thenReturn(Optional.of("testuser"));
CreateAuthenticationResult result = new CreateAuthenticationResult(new JSONObject(Map.of("field1", "testval")),
"topsecret".getBytes(StandardCharsets.UTF_8));
when(authenticationModule.createDeveloperAuthentication(any(), any()))
.thenReturn(CompletableFuture.completedFuture(result));
when(authenticationModule.deleteDeveloperAuthentication(any(), any())).thenReturn(FutureUtil.noop());
when(aclUpdater.updateAcls(any(), any())).thenReturn(FutureUtil.noop());
when(aclUpdater.removeAcls(any(), any())).thenReturn(FutureUtil.noop());
DevAuthenticationMetadata prevMeta = generateMetadata("testuser", "oldval");
metaRepo.save(prevMeta).get();
DeveloperAuthenticationServiceImpl service = new DeveloperAuthenticationServiceImpl(kafkaClusters, userService,
aclUpdater, timeService);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
// when
DevAuthenticationMetadata metadata = service.createDeveloperAuthenticationForCurrentUser("test", baos).get();
// then
ArgumentCaptor<JSONObject> captor = ArgumentCaptor.forClass(JSONObject.class);
verify(authenticationModule, times(1)).createDeveloperAuthentication(eq("testuser"), any());
verify(authenticationModule, times(1)).deleteDeveloperAuthentication(eq("testuser"), captor.capture());
verify(aclUpdater, times(1)).removeAcls(any(), any());
verify(aclUpdater, times(1)).updateAcls(any(), eq(Set.of(metadata)));
assertEquals("testuser", metadata.getUserName());
// the OLD authentication JSON must have been passed to the delete call
assertEquals("oldval", captor.getValue().getString("field1"));
assertEquals("testval", new JSONObject(metadata.getAuthenticationJson()).getString("field1"));
assertEquals("topsecret", baos.toString(StandardCharsets.UTF_8));
}
// Stored metadata is returned when it belongs to the current user and is not yet expired.
@Test
void testGetDeveloperAuthenticationForCurrentUser_positive() throws Exception {
// given
when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
when(testCluster.getRepository("devauth", DevAuthenticationMetadata.class)).thenReturn(metaRepo);
when(kafkaClusters.getAuthenticationModule("test")).thenReturn(Optional.of(authenticationModule));
when(userService.getCurrentUserName()).thenReturn(Optional.of("testuser"));
DevAuthenticationMetadata prevMeta = generateMetadata("testuser", "oldval");
metaRepo.save(prevMeta).get();
when(userService.getCurrentUserName()).thenReturn(Optional.of(prevMeta.getUserName()));
when(timeService.getTimestamp()).thenReturn(ZonedDateTime.now());
when(authenticationModule.extractExpiryDate(json(new JSONObject(prevMeta.getAuthenticationJson()))))
.thenReturn(Optional.of(ZonedDateTime.now().plusDays(10).toInstant()));
DeveloperAuthenticationServiceImpl service = new DeveloperAuthenticationServiceImpl(kafkaClusters, userService,
aclUpdater, timeService);
// when
DevAuthenticationMetadata metadata = service.getDeveloperAuthenticationOfCurrentUser("test").orElseThrow();
// then
assertEquals(prevMeta.getUserName(), metadata.getUserName());
assertEquals(prevMeta.getAuthenticationJson(), metadata.getAuthenticationJson());
}
// Metadata stored for a different user must not be returned for the current user.
@Test
void testGetDeveloperAuthenticationForCurrentUser_wrongUserName() throws Exception {
// given
when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
when(testCluster.getRepository("devauth", DevAuthenticationMetadata.class)).thenReturn(metaRepo);
when(kafkaClusters.getAuthenticationModule("test")).thenReturn(Optional.of(authenticationModule));
when(userService.getCurrentUserName()).thenReturn(Optional.of("testuser"));
DevAuthenticationMetadata prevMeta = generateMetadata("testuser", "oldval");
metaRepo.save(prevMeta).get();
when(userService.getCurrentUserName()).thenReturn(Optional.of("otheruser"));
DeveloperAuthenticationServiceImpl service = new DeveloperAuthenticationServiceImpl(kafkaClusters, userService,
aclUpdater, timeService);
// when
Optional<DevAuthenticationMetadata> opMeta = service.getDeveloperAuthenticationOfCurrentUser("test");
// then
assertTrue(opMeta.isEmpty());
}
// Without a logged-in user, no metadata is returned.
@Test
void testGetDeveloperAuthenticationForCurrentUser_noCurrentUser() throws Exception {
// given
when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
// assert that USER missing is tested, not cluster missing
lenient().when(testCluster.getRepository("devauth", DevAuthenticationMetadata.class)).thenReturn(metaRepo);
lenient().when(kafkaClusters.getAuthenticationModule("test")).thenReturn(Optional.of(authenticationModule));
when(userService.getCurrentUserName()).thenReturn(Optional.empty());
DevAuthenticationMetadata prevMeta = generateMetadata("testuser", "oldval");
metaRepo.save(prevMeta).get();
when(userService.getCurrentUserName()).thenReturn(Optional.empty());
DeveloperAuthenticationServiceImpl service = new DeveloperAuthenticationServiceImpl(kafkaClusters, userService,
aclUpdater, timeService);
// when
Optional<DevAuthenticationMetadata> opMeta = service.getDeveloperAuthenticationOfCurrentUser("test");
// then
assertTrue(opMeta.isEmpty());
}
// Authentications whose expiry date lies in the past are filtered out of the lookup.
@Test
void testGetDeveloperAuthenticationForCurrentUser_expired() throws Exception {
// given
when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
when(testCluster.getRepository("devauth", DevAuthenticationMetadata.class)).thenReturn(metaRepo);
when(kafkaClusters.getAuthenticationModule("test")).thenReturn(Optional.of(authenticationModule));
when(userService.getCurrentUserName()).thenReturn(Optional.of("testuser"));
DevAuthenticationMetadata prevMeta = generateMetadata("testuser", "oldval");
metaRepo.save(prevMeta).get();
when(userService.getCurrentUserName()).thenReturn(Optional.of(prevMeta.getUserName()));
when(timeService.getTimestamp()).thenReturn(ZonedDateTime.now());
when(authenticationModule.extractExpiryDate(json(new JSONObject(prevMeta.getAuthenticationJson()))))
.thenReturn(Optional.of(ZonedDateTime.now().minusHours(1).toInstant()));
DeveloperAuthenticationServiceImpl service = new DeveloperAuthenticationServiceImpl(kafkaClusters, userService,
aclUpdater, timeService);
// when
Optional<DevAuthenticationMetadata> opMeta = service.getDeveloperAuthenticationOfCurrentUser("test");
// then
assertTrue(opMeta.isEmpty(), "Metadata found although expired - should not have been returned.");
}
// Expired developer authentications are removed on ALL clusters: the authentication itself,
// its ACLs, and its repository entry. Entries without an extractable expiry date stay untouched.
@Test
void testClearExpiredDeveloperAuthenticationsOnAllClusters_positive() throws Exception {
// given
KafkaCluster cluster2 = mock(KafkaCluster.class);
TopicBasedRepositoryMock<DevAuthenticationMetadata> metaRepo2 = new TopicBasedRepositoryMock<>();
when(testCluster.getRepository("devauth", DevAuthenticationMetadata.class)).thenReturn(metaRepo);
when(cluster2.getRepository("devauth", DevAuthenticationMetadata.class)).thenReturn(metaRepo2);
lenient().when(kafkaClusters.getEnvironment("test")).thenReturn(Optional.of(testCluster));
lenient().when(kafkaClusters.getEnvironment("test2")).thenReturn(Optional.of(cluster2));
when(kafkaClusters.getEnvironments()).thenReturn(List.of(testCluster, cluster2));
when(authenticationModule.deleteDeveloperAuthentication(any(), any())).thenReturn(FutureUtil.noop());
when(kafkaClusters.getAuthenticationModule(any())).thenReturn(Optional.of(authenticationModule));
metaRepo.save(generateMetadata("user1", "user1_test")).get();
metaRepo.save(generateMetadata("user2", "user2_test")).get();
metaRepo.save(generateMetadata("user3", "user3_test")).get();
metaRepo2.save(generateMetadata("user1", "user1_test2")).get();
metaRepo2.save(generateMetadata("user2", "user2_test2")).get();
metaRepo2.save(generateMetadata("user3", "user3_test2")).get();
metaRepo2.save(generateMetadata("user4", "user4_test2")).get();
// user1 is expired on "test" environment, user2 on "test2", and user3 on both.
// For user4, we do not provide an expiry date - should be untouched.
ArgumentMatcher<JSONObject> expiredMatcher = (obj) -> {
if (obj == null) {
return false;
}
String val = obj.getString("field1");
return List.of("user1_test", "user2_test2", "user3_test", "user3_test2").contains(val);
};
ArgumentMatcher<JSONObject> noDateMatcher = (obj) -> obj != null
&& obj.getString("field1").equals("user4_test2");
// default: valid for 10 more days; specific entries expired or without expiry date
when(authenticationModule.extractExpiryDate(any()))
.thenReturn(Optional.of(ZonedDateTime.now().plusDays(10).toInstant()));
when(authenticationModule.extractExpiryDate(argThat(expiredMatcher)))
.thenReturn(Optional.of(ZonedDateTime.now().minusDays(10).toInstant()));
when(authenticationModule.extractExpiryDate(argThat(noDateMatcher))).thenReturn(Optional.empty());
when(timeService.getTimestamp()).thenReturn(ZonedDateTime.now());
when(aclUpdater.removeAcls(any(), any())).thenReturn(FutureUtil.noop());
DeveloperAuthenticationServiceImpl service = new DeveloperAuthenticationServiceImpl(kafkaClusters, userService,
aclUpdater, timeService);
// when
int cleared = service.clearExpiredDeveloperAuthenticationsOnAllClusters().get();
// then
assertEquals(4, cleared);
verify(authenticationModule, times(4)).deleteDeveloperAuthentication(any(), argThat(expiredMatcher));
verify(authenticationModule, times(0)).deleteDeveloperAuthentication(any(), argThat(noDateMatcher));
verify(aclUpdater).removeAcls(eq(testCluster), testClusterDeletedMetas.capture());
verify(aclUpdater).removeAcls(eq(cluster2), test2ClusterDeletedMetas.capture());
assertEquals(Set.of("user1", "user3"),
testClusterDeletedMetas.getValue().stream().map(m -> m.getUserName()).collect(Collectors.toSet()));
assertEquals(Set.of("user2", "user3"),
test2ClusterDeletedMetas.getValue().stream().map(m -> m.getUserName()).collect(Collectors.toSet()));
assertEquals(Set.of("user2"),
metaRepo.getObjects().stream().map(m -> m.getUserName()).collect(Collectors.toSet()));
assertEquals(Set.of("user1", "user4"),
metaRepo2.getObjects().stream().map(m -> m.getUserName()).collect(Collectors.toSet()));
}
// Builds test metadata for the given user with a single "field1" JSON property.
private static DevAuthenticationMetadata generateMetadata(String userName, String jsonField1Value) {
DevAuthenticationMetadata meta = new DevAuthenticationMetadata();
meta.setUserName(userName);
meta.setAuthenticationJson(new JSONObject(Map.of("field1", jsonField1Value)).toString());
return meta;
}
// Mockito argument shorthand: matches a JSONObject equal (by string form) to the expected one.
private static JSONObject json(JSONObject expected) {
return argThat(new JSONMatcher(expected));
}
// JSONObject does not implement equals(), so compare via toString() instead.
private static class JSONMatcher implements ArgumentMatcher<JSONObject> {
private final JSONObject expected;
private JSONMatcher(JSONObject expected) {
this.expected = expected;
}
@Override
public boolean matches(JSONObject argument) {
return argument != null && expected.toString().equals(argument.toString());
}
}
}
| 17,920 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
DevUserAclListenerTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/devauth/impl/DevUserAclListenerTest.java | package com.hermesworld.ais.galapagos.devauth.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.RequestState;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationConfig;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationModule;
import com.hermesworld.ais.galapagos.devauth.DevAuthenticationMetadata;
import com.hermesworld.ais.galapagos.events.ApplicationEvent;
import com.hermesworld.ais.galapagos.events.GalapagosEventContext;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.KafkaUser;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import com.hermesworld.ais.galapagos.kafka.util.AclSupport;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import com.hermesworld.ais.galapagos.util.TimeService;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link DevUserAclListener}, which updates developer Kafka ACLs in reaction
 * to application events and developer authentication changes.
 */
class DevUserAclListenerTest {

    @Mock
    private ApplicationsService applicationsService;

    @Mock
    private KafkaCluster cluster;

    @Mock
    private GalapagosEventContext context;

    private DevUserAclListener listener;

    private KafkaClusters clusters;

    private AclSupport aclSupport;

    private TopicBasedRepositoryMock<DevAuthenticationMetadata> repository;

    private ZonedDateTime timestamp;

    @BeforeEach
    void initMocks() {
        MockitoAnnotations.openMocks(this);
        SubscriptionService subscriptionService = mock(SubscriptionService.class);
        // fixed "now" so expiry comparisons in the tests are deterministic
        timestamp = ZonedDateTime.of(LocalDateTime.of(2020, 10, 5, 10, 0, 0), ZoneOffset.UTC);
        TimeService timeService = () -> timestamp;
        aclSupport = mock(AclSupport.class);
        clusters = mock(KafkaClusters.class);
        when(clusters.getAuthenticationModule(any())).thenReturn(Optional
                .of(new CertificatesAuthenticationModule("test", mock(CertificatesAuthenticationConfig.class))));
        listener = new DevUserAclListener(applicationsService, subscriptionService, timeService, aclSupport, clusters);
        repository = new TopicBasedRepositoryMock<>();
        when(cluster.getRepository("devauth", DevAuthenticationMetadata.class)).thenReturn(repository);
        // context is created by openMocks() via the @Mock annotation; no manual mock() needed here
        when(context.getKafkaCluster()).thenReturn(cluster);
        when(cluster.updateUserAcls(any())).thenReturn(FutureUtil.noop());
    }

    // A developer authentication whose certificate already expired must NOT receive ACLs
    // when an application is registered.
    @Test
    void testApplicationRegistered_invalidCertificate() throws Exception {
        DevAuthenticationMetadata devAuth = new DevAuthenticationMetadata();
        devAuth.setUserName("testuser");
        devAuth.setAuthenticationJson("{\"expiresAt\":\"2017-02-03T10:37:30Z\"}");
        repository.save(devAuth).get();

        List<ApplicationOwnerRequest> requests = new ArrayList<>();
        ApplicationOwnerRequest request = new ApplicationOwnerRequest();
        request.setId("1");
        request.setApplicationId("test123");
        request.setUserName("testuser");
        request.setState(RequestState.APPROVED);
        requests.add(request);
        when(applicationsService.getAllApplicationOwnerRequests()).thenReturn(requests);

        ApplicationMetadata metadata = new ApplicationMetadata();
        metadata.setApplicationId("test123");
        ApplicationEvent event = new ApplicationEvent(context, metadata);

        listener.handleApplicationRegistered(event).get();

        verify(cluster, times(0)).updateUserAcls(any());
    }

    // Only developers with an APPROVED owner request for the registered application (and a
    // valid certificate) receive updated ACLs.
    @Test
    void testApplicationRegistered() throws Exception {
        DevAuthenticationMetadata devAuth = new DevAuthenticationMetadata();
        devAuth.setUserName("testuser");
        devAuth.setAuthenticationJson(
                "{\"expiresAt\":\"" + timestamp.plusDays(10).toInstant().toString() + "\",\"dn\":\"CN=testuser\"}");
        repository.save(devAuth).get();
        // second developer without any owner request - must not get ACLs
        devAuth = new DevAuthenticationMetadata();
        devAuth.setUserName("testuser2");
        devAuth.setAuthenticationJson(
                "{\"expiresAt\":\"" + timestamp.plusDays(10).toInstant().toString() + "\",\"dn\":\"CN=testuser2\"}");
        repository.save(devAuth).get();

        List<ApplicationOwnerRequest> requests = new ArrayList<>();
        ApplicationOwnerRequest request = new ApplicationOwnerRequest();
        request.setId("1");
        request.setApplicationId("test123");
        request.setUserName("testuser");
        request.setState(RequestState.APPROVED);
        requests.add(request);
        when(applicationsService.getAllApplicationOwnerRequests()).thenReturn(requests);

        ApplicationMetadata metadata = new ApplicationMetadata();
        metadata.setApplicationId("test123");
        ApplicationEvent event = new ApplicationEvent(context, metadata);

        listener.handleApplicationRegistered(event).get();

        ArgumentCaptor<KafkaUser> userCaptor = ArgumentCaptor.forClass(KafkaUser.class);
        verify(cluster, times(1)).updateUserAcls(userCaptor.capture());
        KafkaUser user = userCaptor.getValue();
        assertEquals("User:CN=testuser", user.getKafkaUserName());
    }

    // On environments with developerWriteAccess enabled, ACLs are calculated with the
    // readOnly flag set to false - and only for APPROVED owner requests.
    @Test
    void testWriteAccessFlag() throws Exception {
        KafkaEnvironmentConfig config = mock(KafkaEnvironmentConfig.class);
        when(config.isDeveloperWriteAccess()).thenReturn(true);
        when(cluster.getId()).thenReturn("test");
        when(clusters.getEnvironmentMetadata("test")).thenReturn(Optional.of(config));

        DevAuthenticationMetadata metadata = new DevAuthenticationMetadata();
        metadata.setUserName("user123");
        metadata.setAuthenticationJson("{\"dn\":\"CN=testuser\"}");

        ApplicationOwnerRequest request1 = new ApplicationOwnerRequest();
        request1.setUserName("user123");
        request1.setState(RequestState.APPROVED);
        request1.setApplicationId("app-1");
        ApplicationOwnerRequest request2 = new ApplicationOwnerRequest();
        request2.setUserName("user123");
        request2.setState(RequestState.SUBMITTED);
        request2.setApplicationId("app-2");

        ApplicationMetadata app1 = new ApplicationMetadata();
        app1.setApplicationId("app-1");
        ApplicationMetadata app2 = new ApplicationMetadata();
        // FIX: was app1.setApplicationId("app-2"), which clobbered app1's ID and left app2 without one
        app2.setApplicationId("app-2");

        when(applicationsService.getAllApplicationOwnerRequests()).thenReturn(List.of(request1, request2));
        when(applicationsService.getApplicationMetadata("test", "app-1")).thenReturn(Optional.of(app1));
        when(applicationsService.getApplicationMetadata("test", "app-2")).thenReturn(Optional.of(app2));
        when(aclSupport.getRequiredAclBindings("test", app1, "User:CN=testuser", false)).thenReturn(List.of());
        // app-2's request is only SUBMITTED, so its ACLs must never be calculated
        when(aclSupport.getRequiredAclBindings("test", app2, "User:CN=testuser", false))
                .thenThrow(new RuntimeException("No ACLs for app2 should be assigned"));

        listener.updateAcls(cluster, Set.of(metadata)).get();

        ArgumentCaptor<KafkaUser> userCaptor = ArgumentCaptor.forClass(KafkaUser.class);
        verify(cluster, times(1)).updateUserAcls(userCaptor.capture());
        // trigger the (lazy) ACL calculation of the captured user
        userCaptor.getValue().getRequiredAclBindings();

        verify(aclSupport, times(1)).getRequiredAclBindings("test", app1, "User:CN=testuser", false);
        verify(aclSupport, times(0)).getRequiredAclBindings("test", app1, "User:CN=testuser", true);
    }
}
| 8,236 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
NotificationEventListenerTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/notifications/impl/NotificationEventListenerTest.java | package com.hermesworld.ais.galapagos.notifications.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.events.GalapagosEventContext;
import com.hermesworld.ais.galapagos.events.TopicEvent;
import com.hermesworld.ais.galapagos.events.TopicSchemaAddedEvent;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.notifications.NotificationParams;
import com.hermesworld.ais.galapagos.notifications.NotificationService;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.topics.SchemaMetadata;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import com.hermesworld.ais.galapagos.util.FutureUtil;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.*;
/**
 * Tests for {@link NotificationEventListener}: verifies that (un)deprecation mails are only
 * triggered for the production environment and that schema change descriptions are propagated
 * into the notification parameters.
 */
class NotificationEventListenerTest {

    private NotificationEventListener listener;

    private NotificationService notificationService;

    private GalapagosEventContext context;

    @BeforeEach
    void feedMocks() {
        // A plain mock already records interactions and supports stubbing;
        // the previous spy(mock(...)) wrapping was redundant.
        notificationService = mock(NotificationService.class);
        KafkaClusters kafkaClusters = mock(KafkaClusters.class);
        ApplicationsService applicationsService = mock(ApplicationsService.class);
        TopicService topicService = mock(TopicService.class);
        CurrentUserService userService = mock(CurrentUserService.class);
        when(kafkaClusters.getProductionEnvironmentId()).thenReturn("prod");
        when(userService.getCurrentUserName()).thenReturn(Optional.of("testuser"));
        listener = new NotificationEventListener(notificationService, applicationsService, topicService, userService,
                kafkaClusters);
        context = mock(GalapagosEventContext.class);
        KafkaCluster kafkaCluster = mock(KafkaCluster.class);
        when(kafkaCluster.getId()).thenReturn("test");
        when(context.getKafkaCluster()).thenReturn(kafkaCluster);
    }

    @Test
    void testHandleTopicDeprecated() {
        AtomicInteger sendCalled = new AtomicInteger();
        when(notificationService.notifySubscribers(any(), any(), any(), any())).then(inv -> {
            sendCalled.incrementAndGet();
            return FutureUtil.noop();
        });
        // two non-production environments plus production: only "prod" may trigger a mail
        listener.handleTopicDeprecated(buildTestEvent("test1"));
        listener.handleTopicDeprecated(buildTestEvent("test2"));
        listener.handleTopicDeprecated(buildTestEvent("prod"));
        assertEquals(1, sendCalled.get(), "Deprecation mail should only be sent for production environment");
    }

    @Test
    void testHandleTopicUndeprecated() {
        AtomicInteger sendCalled = new AtomicInteger();
        when(notificationService.notifySubscribers(any(), any(), any(), any())).then(inv -> {
            sendCalled.incrementAndGet();
            return FutureUtil.noop();
        });
        listener.handleTopicUndeprecated(buildTestEvent("test1"));
        listener.handleTopicUndeprecated(buildTestEvent("test2"));
        listener.handleTopicUndeprecated(buildTestEvent("prod"));
        assertEquals(1, sendCalled.get(), "Undeprecation mail should only be sent for production environment");
    }

    @Test
    void testHandleSchemaChangeDesc() throws ExecutionException, InterruptedException {
        TopicMetadata metadata = new TopicMetadata();
        metadata.setName("testtopic");
        metadata.setType(TopicType.EVENTS);
        SchemaMetadata schema = new SchemaMetadata();
        schema.setId("99");
        schema.setJsonSchema("{}");
        schema.setSchemaVersion(1);
        schema.setTopicName("testtopic");
        schema.setChangeDescription("some change description goes here");
        ArgumentCaptor<NotificationParams> captor = ArgumentCaptor.forClass(NotificationParams.class);
        when(notificationService.notifySubscribers(any(), any(), any(), any())).thenReturn(FutureUtil.noop());
        TopicSchemaAddedEvent event = new TopicSchemaAddedEvent(context, metadata, schema);
        listener.handleTopicSchemaAdded(event).get();
        verify(notificationService).notifySubscribers(eq("test"), eq("testtopic"), captor.capture(), any());
        NotificationParams params = captor.getValue();
        // the free-text change description must be part of the rendered action text
        assertTrue(params.getVariables().get("change_action_text").toString()
                .contains("some change description goes here"));
    }

    /**
     * Builds a {@link TopicEvent} whose context reports the given environment ID. Uses a local
     * context mock so the shared field (environment "test") is not affected.
     */
    private TopicEvent buildTestEvent(String envId) {
        GalapagosEventContext eventContext = mock(GalapagosEventContext.class);
        KafkaCluster cluster = mock(KafkaCluster.class);
        when(cluster.getId()).thenReturn(envId);
        when(eventContext.getKafkaCluster()).thenReturn(cluster);
        TopicMetadata metadata = new TopicMetadata();
        metadata.setName("topic-1");
        metadata.setType(TopicType.EVENTS);
        return new TopicEvent(eventContext, metadata);
    }
}
// dataset metadata: 5,413 bytes | Java | repo HermesGermany/galapagos | stars 81 | forks 22 | open issues 15 | created 2020-10-02 | pushed 2024-05-08

// ==== NotificationServiceImplTest.java ====
// path: src/test/java/com/hermesworld/ais/galapagos/notifications/impl/NotificationServiceImplTest.java
package com.hermesworld.ais.galapagos.notifications.impl;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import jakarta.mail.Address;
import jakarta.mail.MessagingException;
import jakarta.mail.internet.MimeMessage;
import jakarta.mail.internet.MimeMessage.RecipientType;
import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.RequestState;
import com.hermesworld.ais.galapagos.notifications.NotificationParams;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentMatchers;
import static org.mockito.Mockito.*;
import org.springframework.mail.MailSendException;
import org.springframework.mail.javamail.JavaMailSender;
import org.springframework.scheduling.concurrent.ConcurrentTaskExecutor;
import org.thymeleaf.ITemplateEngine;
/**
 * Tests for {@code NotificationServiceImpl}: mail-send failures must not propagate, the
 * triggering user is excluded from subscriber notifications, and only APPROVED application
 * owner requests receive mails (SUBMITTED ones are skipped).
 */
class NotificationServiceImplTest {

    private static final String TEST_ENV = "test-Environment";

    private static final String TEST_TOPIC = "test-Topic";

    private static final String TEST_USER = "test-User";

    private SubscriptionService subscriptionService;

    private ApplicationsService applicationService;

    private TopicService topicService;

    private JavaMailSender mailSender;

    private ITemplateEngine templateEngine;

    private ExecutorService executor;

    // collects all recipient addresses set on the (mocked) outgoing MIME message
    private MimeMessageHolder messageHolder;

    @BeforeEach
    void feedMocks() throws MessagingException {
        subscriptionService = mock(SubscriptionService.class);
        applicationService = mock(ApplicationsService.class);
        topicService = mock(TopicService.class);
        mailSender = mock(JavaMailSender.class);
        messageHolder = new MimeMessageHolder();
        when(mailSender.createMimeMessage()).thenReturn(messageHolder.mockMessage);
        templateEngine = mock(ITemplateEngine.class);
        executor = Executors.newSingleThreadExecutor();
    }

    @AfterEach
    void destroyMocks() {
        executor.shutdown();
    }

    @Test
    void testDoSendAsync_NoFailOnMailException() throws Exception {
        NotificationParams testNotificationParams = generateNotificationParams(TEST_USER, TEST_TOPIC);
        doThrow(new MailSendException("mail cannot be sent")).when(mailSender).send((MimeMessage) any());
        NotificationServiceImpl notificationServiceImpl = createService("test@abc.de", "Test@abc.de");
        // must complete normally although the mail sender throws
        notificationServiceImpl.notifyAdmins(testNotificationParams).get();
    }

    @Test
    void testNotifySubscribersWithExclusionUser() throws Exception {
        String testFromAddress = "test@abc.de";
        String testApplicationId = "42";
        NotificationParams testNotificationParams = generateNotificationParams(TEST_USER, TEST_TOPIC);
        List<SubscriptionMetadata> subscriptionMetadatas = generateSubscriptionMetadatas(testApplicationId);
        when(subscriptionService.getSubscriptionsForTopic(TEST_ENV, TEST_TOPIC, false))
                .thenReturn(subscriptionMetadatas);
        List<ApplicationOwnerRequest> applicationOwnerRequests = generateApplicationOwnerRequests(testFromAddress,
                testApplicationId);
        when(applicationService.getAllApplicationOwnerRequests()).thenReturn(applicationOwnerRequests);
        NotificationServiceImpl notificationServiceImpl = createService(testFromAddress, "Test@abc.de");
        // TEST_USER triggers the notification, so their address must not be a recipient
        notificationServiceImpl.notifySubscribers(TEST_ENV, TEST_TOPIC, testNotificationParams, TEST_USER).get();
        assertFalse(messageHolder.recipients.contains(testFromAddress));
    }

    @Test
    void testNotifyApplicationTopicOwners_noSubmittedIncluded() throws Exception {
        String applicationId = "1";
        NotificationParams testNotificationParams = generateNotificationParams(TEST_USER, TEST_TOPIC);
        ApplicationOwnerRequest requestSuccess1 = ownerRequest("1", "User1", RequestState.APPROVED, "my comment",
                "foo@bar.com");
        ApplicationOwnerRequest requestSuccess2 = ownerRequest("1", "User2", RequestState.APPROVED,
                "my awesome comment", "foo2@bar2.com");
        ApplicationOwnerRequest requestFail = ownerRequest("1", "User2", RequestState.SUBMITTED,
                "This user will get no email", "no@mail.com");
        when(applicationService.getAllApplicationOwnerRequests())
                .thenReturn(List.of(requestSuccess1, requestSuccess2, requestFail));
        NotificationServiceImpl notificationServiceImpl = createService("test@abc.de", "Test@abc.de");
        notificationServiceImpl.notifyApplicationTopicOwners(applicationId, testNotificationParams).get();
        assertTrue(messageHolder.recipients.contains("foo@bar.com"));
        assertTrue(messageHolder.recipients.contains("foo2@bar2.com"));
        // SUBMITTED (not yet approved) owner requests must not be notified
        assertFalse(messageHolder.recipients.contains("no@mail.com"));
    }

    @Test
    void testNotifySubscribers_noSubmittedIncluded() throws Exception {
        NotificationParams testNotificationParams = generateNotificationParams(TEST_USER, TEST_TOPIC);
        ApplicationOwnerRequest requestSuccess1 = ownerRequest("1", "User1", RequestState.APPROVED, "my comment",
                "foo@bar.com");
        ApplicationOwnerRequest requestSuccess2 = ownerRequest("1", "User2", RequestState.APPROVED,
                "my awesome comment", "foo2@bar2.com");
        ApplicationOwnerRequest requestFail = ownerRequest("1", "User3", RequestState.SUBMITTED,
                "This user will get no email", "no@mail.com");
        SubscriptionMetadata sub = new SubscriptionMetadata();
        sub.setId("123");
        sub.setClientApplicationId("1");
        sub.setTopicName("topic1");
        when(subscriptionService.getSubscriptionsForTopic(TEST_ENV, TEST_TOPIC, false)).thenReturn(List.of(sub));
        when(applicationService.getAllApplicationOwnerRequests())
                .thenReturn(List.of(requestSuccess1, requestSuccess2, requestFail));
        NotificationServiceImpl notificationServiceImpl = createService("test@abc.de", "Test@abc.de");
        notificationServiceImpl.notifySubscribers(TEST_ENV, TEST_TOPIC, testNotificationParams, TEST_USER).get();
        assertTrue(messageHolder.recipients.contains("foo@bar.com"));
        assertTrue(messageHolder.recipients.contains("foo2@bar2.com"));
        // SUBMITTED (not yet approved) owner requests must not be notified
        assertFalse(messageHolder.recipients.contains("no@mail.com"));
    }

    /**
     * Creates the service under test with a stubbed template engine and a task executor backed by
     * the single-threaded test executor.
     */
    private NotificationServiceImpl createService(String fromAddress, String adminMailRecipients) {
        String htmlCode = "<html><head><title>Testmail</title></head><body><p>Test</p></body></html>";
        when(templateEngine.process(ArgumentMatchers.<String>any(), ArgumentMatchers.any())).thenReturn(htmlCode);
        ConcurrentTaskExecutor exec = new ConcurrentTaskExecutor(executor);
        return new NotificationServiceImpl(subscriptionService, applicationService, topicService, mailSender, exec,
                templateEngine, fromAddress, adminMailRecipients);
    }

    /** Builds a minimal owner request with only the fields relevant for notification routing. */
    private ApplicationOwnerRequest ownerRequest(String applicationId, String userName, RequestState state,
            String comments, String notificationEmailAddress) {
        ApplicationOwnerRequest request = new ApplicationOwnerRequest();
        request.setApplicationId(applicationId);
        request.setUserName(userName);
        request.setState(state);
        request.setComments(comments);
        request.setNotificationEmailAddress(notificationEmailAddress);
        return request;
    }

    /** One matching subscription for the given application plus two unrelated ones. */
    private List<SubscriptionMetadata> generateSubscriptionMetadatas(String applicationId) {
        List<SubscriptionMetadata> subscriptionMetadatas = new ArrayList<>();
        subscriptionMetadatas.add(generateSubscriptionMetadata(applicationId, TEST_TOPIC));
        subscriptionMetadatas.add(generateSubscriptionMetadata("100", "moin"));
        subscriptionMetadatas.add(generateSubscriptionMetadata("", ""));
        return subscriptionMetadatas;
    }

    private SubscriptionMetadata generateSubscriptionMetadata(String applicationId, String topicName) {
        SubscriptionMetadata subscriptionMetadata = new SubscriptionMetadata();
        subscriptionMetadata.setClientApplicationId(applicationId);
        subscriptionMetadata.setId("1");
        subscriptionMetadata.setTopicName(topicName);
        return subscriptionMetadata;
    }

    /** One fully populated request for the given address/application plus two unrelated ones. */
    private List<ApplicationOwnerRequest> generateApplicationOwnerRequests(String notificationEmailAddress,
            String applicationId) {
        List<ApplicationOwnerRequest> requests = new ArrayList<>();
        requests.add(generateApplicationOwnerRequest(TEST_USER, notificationEmailAddress, applicationId));
        requests.add(generateApplicationOwnerRequest("Alice", "abc@abc.de", "100"));
        requests.add(generateApplicationOwnerRequest("Bob", "null@null.de", ""));
        return requests;
    }

    private ApplicationOwnerRequest generateApplicationOwnerRequest(String userName, String notificationEmailAddress,
            String applicationId) {
        ZonedDateTime past = ZonedDateTime.of(LocalDateTime.of(2020, 6, 19, 10, 0), ZoneOffset.UTC);
        ZonedDateTime now = ZonedDateTime.of(LocalDateTime.of(2020, 6, 20, 10, 0), ZoneOffset.UTC);
        ApplicationOwnerRequest applicationOwnerRequest = new ApplicationOwnerRequest();
        applicationOwnerRequest.setApplicationId(applicationId);
        applicationOwnerRequest.setComments(null);
        applicationOwnerRequest.setCreatedAt(now);
        applicationOwnerRequest.setId("1");
        applicationOwnerRequest.setLastStatusChangeAt(past);
        applicationOwnerRequest.setLastStatusChangeBy(null);
        applicationOwnerRequest.setNotificationEmailAddress(notificationEmailAddress);
        applicationOwnerRequest.setState(RequestState.APPROVED);
        applicationOwnerRequest.setUserName(userName);
        return applicationOwnerRequest;
    }

    private NotificationParams generateNotificationParams(String userName, String topicName) {
        NotificationParams notificationParams = new NotificationParams("topic-changed");
        notificationParams.addVariable("user_name", userName);
        notificationParams.addVariable("topic_name", topicName);
        notificationParams.addVariable("change_action_text", "ein neues JSON-Schema veröffentlicht");
        notificationParams.addVariable("galapagos_topic_url", "/some/url");
        return notificationParams;
    }

    /**
     * Wraps a mocked {@link MimeMessage} and records every recipient address that is set on it,
     * so tests can assert who would have received a mail.
     */
    private static class MimeMessageHolder {

        private final List<String> recipients = new ArrayList<>();

        private final MimeMessage mockMessage;

        public MimeMessageHolder() throws MessagingException {
            mockMessage = mock(MimeMessage.class);
            doAnswer(inv -> {
                Address[] addrs = inv.getArgument(1);
                recipients.addAll(Arrays.stream(addrs).map(Address::toString).collect(Collectors.toList()));
                return null;
            }).when(mockMessage).setRecipients(ArgumentMatchers.<RecipientType>any(),
                    ArgumentMatchers.<Address[]>any());
        }
    }
}
// dataset metadata: 14,475 bytes | Java | repo HermesGermany/galapagos | stars 81 | forks 22 | open issues 15 | created 2020-10-02 | pushed 2024-05-08

// ==== ConfluentCloudAuthenticationModuleTest.java ====
// path: src/test/java/com/hermesworld/ais/galapagos/ccloud/ConfluentCloudAuthenticationModuleTest.java
package com.hermesworld.ais.galapagos.ccloud;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ApiKeySpec;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ConfluentCloudApiClient;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ServiceAccountSpec;
import com.hermesworld.ais.galapagos.ccloud.auth.ConfluentCloudAuthConfig;
import com.hermesworld.ais.galapagos.ccloud.auth.ConfluentCloudAuthUtil;
import com.hermesworld.ais.galapagos.ccloud.auth.ConfluentCloudAuthenticationModule;
import com.hermesworld.ais.galapagos.kafka.auth.CreateAuthenticationResult;
import com.hermesworld.ais.galapagos.kafka.auth.KafkaAuthenticationModule;
import org.hamcrest.core.StringContains;
import org.json.JSONException;
import org.json.JSONObject;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import org.springframework.test.util.ReflectionTestUtils;
import reactor.core.publisher.Mono;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.nullable;
import static org.mockito.Mockito.*;
/**
 * Tests for {@code ConfluentCloudAuthenticationModule}: service account / API key lifecycle,
 * numeric-ID compatibility mode, developer authentications, and display name shortening.
 */
class ConfluentCloudAuthenticationModuleTest {

    private KafkaAuthenticationModule authenticationModule;

    private ConfluentCloudApiClient client;

    @BeforeEach
    void init() {
        authenticationModule = new ConfluentCloudAuthenticationModule(basicConfig());
        client = mock(ConfluentCloudApiClient.class);
        // the module builds its own client internally; replace it with the mock
        ReflectionTestUtils.setField(authenticationModule, "client", client);
    }

    /** Default configuration: ID compat mode off, no developer API key validity. */
    private ConfluentCloudAuthConfig basicConfig() {
        ConfluentCloudAuthConfig config = new ConfluentCloudAuthConfig();
        config.setEnvironmentId("testEnv");
        config.setClusterId("testCluster");
        config.setOrganizationApiKey("orgApiKey");
        config.setOrganizationApiSecret("orgApiTopSecret123");
        config.setServiceAccountIdCompatMode(false);
        return config;
    }

    /** Recreates the module with service account ID compatibility mode enabled. */
    private void useIdCompatMode() {
        ConfluentCloudAuthConfig config = basicConfig();
        config.setServiceAccountIdCompatMode(true);
        authenticationModule = new ConfluentCloudAuthenticationModule(config);
        ReflectionTestUtils.setField(authenticationModule, "client", client);
    }

    /** Recreates the module with developer API keys enabled (7 days validity). */
    private void enableDeveloperAuthentication() {
        ConfluentCloudAuthConfig config = basicConfig();
        config.setDeveloperApiKeyValidity("P7D");
        authenticationModule = new ConfluentCloudAuthenticationModule(config);
        ReflectionTestUtils.setField(authenticationModule, "client", client);
    }

    private ApiKeySpec newApiKey(String key, String secret, String serviceAccountResourceId) {
        ApiKeySpec spec = new ApiKeySpec();
        spec.setId(key);
        spec.setSecret(secret);
        spec.setCreatedAt(Instant.now());
        spec.setServiceAccountId(serviceAccountResourceId);
        return spec;
    }

    private String toAuthJson(Map<String, String> fields) {
        return new JSONObject(fields).toString();
    }

    @Test
    void extractKafkaUserNameTest_positive() {
        String kafkaUserName = authenticationModule.extractKafkaUserName(new JSONObject("{userId:1234}"));
        assertEquals("User:1234", kafkaUserName);
    }

    @Test
    void extractKafkaUserNameTest_negative() {
        // without a userId field, extraction must fail loudly
        assertThrows(JSONException.class, () -> authenticationModule.extractKafkaUserName(new JSONObject("{}")));
    }

    @Test
    void fillCorrectProps_positive() {
        ConfluentCloudAuthConfig config = basicConfig();
        config.setClusterApiSecret("secretPassword");
        config.setClusterApiKey("someApiKey");
        authenticationModule = new ConfluentCloudAuthenticationModule(config);
        Properties props = new Properties();
        authenticationModule.addRequiredKafkaProperties(props);
        assertEquals("PLAIN", props.getProperty("sasl.mechanism"));
        assertThat(props.getProperty("sasl.jaas.config"), StringContains.containsString("username='someApiKey'"));
        assertThat(props.getProperty("sasl.jaas.config"), StringContains.containsString("password='secretPassword'"));
    }

    @Test
    void fillCorrectProps_negative() {
        Properties props = new Properties();
        authenticationModule.addRequiredKafkaProperties(props);
        assertThat(props.getProperty("sasl.jaas.config"), StringContains.containsString("username='null'"));
        assertThat(props.getProperty("sasl.jaas.config"), StringContains.containsString("password='null'"));
    }

    @Test
    void testDeleteApplicationAuthentication_positive() throws ExecutionException, InterruptedException {
        ApiKeySpec apiKey1 = newApiKey("someKey1", "someSecret1", "sa-xy123");
        ApiKeySpec apiKey2 = newApiKey("someKey2", "someSecret2", "sa-xy124");
        when(client.listClusterApiKeys("testCluster")).thenReturn(Mono.just(List.of(apiKey1, apiKey2)));
        when(client.deleteApiKey(apiKey1)).thenReturn(Mono.just(true));
        authenticationModule.deleteApplicationAuthentication("quattro-1",
                new JSONObject(Map.of("userId", "sa-xy123", "apiKey", "someKey1"))).get();
        // only the key referenced by the auth JSON may be deleted; nothing is created
        verify(client).deleteApiKey(apiKey1);
        verify(client, times(0)).createApiKey(any(), any(), any(), any());
    }

    @Test
    void deleteApplicationAuthenticationTest_negativeNoApiKeyObjectInAuthJson()
            throws ExecutionException, InterruptedException {
        ApiKeySpec apiKey1 = newApiKey("someKey1", "someSecret1", "sa-xy123");
        ApiKeySpec apiKey2 = newApiKey("someKey2", "someSecret2", "sa-xy124");
        when(client.listClusterApiKeys("testCluster")).thenReturn(Mono.just(List.of(apiKey1, apiKey2)));
        // auth JSON without an apiKey entry -> no key deletion may happen
        authenticationModule.deleteApplicationAuthentication("quattro-1", new JSONObject(Map.of("userId", "sa-xy123")))
                .get();
        verify(client, times(0)).deleteApiKey(any());
    }

    @Test
    void createApplicationAuthenticationTest_createServiceAccForAppThatHasNoAcc()
            throws ExecutionException, InterruptedException {
        ApiKeySpec apiKey1 = newApiKey("someKey1", "someSecret1", "sa-xy123");
        ApiKeySpec apiKey2 = newApiKey("someKey2", "someSecret2", "sa-xy124");
        ApiKeySpec apiKey3 = newApiKey("someKey3", "someSecret3", "sa-xy125");
        ServiceAccountSpec testServiceAccount = new ServiceAccountSpec();
        testServiceAccount.setResourceId("sa-xy125");
        testServiceAccount.setDisplayName("testName");
        when(client.listClusterApiKeys("testCluster")).thenReturn(Mono.just(List.of(apiKey1, apiKey2)));
        when(client.listServiceAccounts()).thenReturn(Mono.just(List.of()));
        when(client.createServiceAccount("application-normalizedAppNameTest", "APP_quattro-1"))
                .thenReturn(Mono.just(testServiceAccount));
        when(client.createApiKey("testEnv", "testCluster", "Application normalizedAppNameTest", "sa-xy125"))
                .thenReturn(Mono.just(apiKey3));
        CreateAuthenticationResult result = authenticationModule
                .createApplicationAuthentication("quattro-1", "normalizedAppNameTest", new JSONObject()).get();
        assertEquals("sa-xy125", result.getPublicAuthenticationData().getString("userId"));
        // compat mode is off, so no numeric ID may be included
        assertFalse(result.getPublicAuthenticationData().has("numericId"));
        assertEquals("someKey3", result.getPublicAuthenticationData().getString("apiKey"));
        verify(client, times(1)).createServiceAccount("application-normalizedAppNameTest", "APP_quattro-1");
        verify(client, times(1)).createApiKey("testEnv", "testCluster", "Application normalizedAppNameTest",
                "sa-xy125");
    }

    @Test
    void createApplicationAuthenticationTest_reuseServiceAccIfExists() throws ExecutionException, InterruptedException {
        ApiKeySpec apiKey1 = newApiKey("someKey1", "someSecret1", "sa-xy123");
        ApiKeySpec apiKey2 = newApiKey("someKey2", "someSecret2", "sa-xy124");
        ApiKeySpec apiKey3 = newApiKey("someKey3", "someSecret3", "sa-xy125");
        ServiceAccountSpec testServiceAccount = new ServiceAccountSpec();
        testServiceAccount.setResourceId("sa-xy125");
        testServiceAccount.setDisplayName("testName");
        // the description marks this account as belonging to application quattro-1
        testServiceAccount.setDescription("APP_quattro-1");
        when(client.listClusterApiKeys("testCluster")).thenReturn(Mono.just(List.of(apiKey1, apiKey2)));
        when(client.listServiceAccounts()).thenReturn(Mono.just(List.of(testServiceAccount)));
        when(client.createApiKey("testEnv", "testCluster", "Application normalizedAppNameTest", "sa-xy125"))
                .thenReturn(Mono.just(apiKey3));
        authenticationModule.createApplicationAuthentication("quattro-1", "normalizedAppNameTest", new JSONObject())
                .get();
        verify(client, times(0)).createServiceAccount(nullable(String.class), nullable(String.class));
        verify(client, times(1)).createApiKey("testEnv", "testCluster", "Application normalizedAppNameTest",
                "sa-xy125");
    }

    @Test
    void createApplicationAuthenticationTest_queryNumericId() throws ExecutionException, InterruptedException {
        useIdCompatMode();
        ApiKeySpec apiKey1 = newApiKey("someKey1", "someSecret1", "sa-xy123");
        ServiceAccountSpec testServiceAccount = new ServiceAccountSpec();
        testServiceAccount.setResourceId("sa-xy123");
        testServiceAccount.setDisplayName("testName");
        testServiceAccount.setDescription("APP_quattro-1");
        when(client.listClusterApiKeys("testCluster")).thenReturn(Mono.just(List.of()));
        when(client.listServiceAccounts()).thenReturn(Mono.just(List.of(testServiceAccount)));
        when(client.createApiKey("testEnv", "testCluster", "Application normalizedAppNameTest", "sa-xy123"))
                .thenReturn(Mono.just(apiKey1));
        Map<String, String> internalIdMapping = Map.of("sa-xy123", "12345", "sa-xy125", "12346");
        when(client.getServiceAccountInternalIds()).thenReturn(Mono.just(internalIdMapping));
        CreateAuthenticationResult result = authenticationModule
                .createApplicationAuthentication("quattro-1", "normalizedAppNameTest", new JSONObject()).get();
        assertEquals("sa-xy123", result.getPublicAuthenticationData().getString("userId"));
        // in compat mode the numeric ID must be looked up and stored
        assertEquals("12345", result.getPublicAuthenticationData().getString("numericId"));
        verify(client, times(0)).createServiceAccount(nullable(String.class), nullable(String.class));
        verify(client, times(1)).createApiKey("testEnv", "testCluster", "Application normalizedAppNameTest",
                "sa-xy123");
        verify(client, times(1)).getServiceAccountInternalIds();
    }

    @Test
    void updateApplicationAuthenticationTest() throws ExecutionException, InterruptedException {
        ApiKeySpec apiKey1 = newApiKey("someKey1", "someSecret1", "sa-xy123");
        ApiKeySpec apiKey2 = newApiKey("someKey2", "someSecret2", "sa-xy124");
        ApiKeySpec apiKey3 = newApiKey("someKey3", "someSecret3", "sa-xy123");
        ApplicationMetadata app = new ApplicationMetadata();
        app.setApplicationId("quattro-1");
        app.setAuthenticationJson(toAuthJson(Map.of("userId", "sa-xy123", "apiKey", "someKey1")));
        ServiceAccountSpec testServiceAccount = new ServiceAccountSpec();
        testServiceAccount.setResourceId("sa-xy123");
        testServiceAccount.setDisplayName("testName");
        testServiceAccount.setDescription("APP_quattro-1");
        when(client.listClusterApiKeys("testCluster")).thenReturn(Mono.just(List.of(apiKey1, apiKey2)));
        when(client.listServiceAccounts()).thenReturn(Mono.just(List.of(testServiceAccount)));
        when(client.deleteApiKey(apiKey1)).thenReturn(Mono.just(true));
        when(client.createApiKey("testEnv", "testCluster", "Application normalizedAppNameTest", "sa-xy123"))
                .thenReturn(Mono.just(apiKey3));
        String auth = app.getAuthenticationJson();
        authenticationModule.updateApplicationAuthentication("quattro-1", "normalizedAppNameTest", new JSONObject(),
                new JSONObject(auth)).get();
        // update rotates the API key: old one deleted, new one created, account reused
        verify(client).deleteApiKey(apiKey1);
        verify(client, times(0)).createServiceAccount(nullable(String.class), nullable(String.class));
        verify(client, times(1)).createApiKey("testEnv", "testCluster", "Application normalizedAppNameTest",
                "sa-xy123");
    }

    @Test
    void testLookupNumericId_positive() {
        useIdCompatMode();
        JSONObject authData = new JSONObject(Map.of("userId", "sa-xy125", "apiKey", "ABC123"));
        Map<String, String> internalIdMapping = Map.of("sa-xy123", "12399", "sa-xy125", "12345");
        when(client.getServiceAccountInternalIds()).thenReturn(Mono.just(internalIdMapping));
        assertEquals("User:12345", authenticationModule.extractKafkaUserName(authData));
    }

    @Test
    void testLookupNumericId_noLookup_noCompatMode() {
        // idCompatMode is by default false in config!
        JSONObject authData = new JSONObject(Map.of("userId", "sa-xy125", "apiKey", "ABC123"));
        Map<String, String> internalIdMapping = Map.of("sa-xy123", "12399", "sa-xy125", "12345");
        when(client.getServiceAccountInternalIds()).thenReturn(Mono.just(internalIdMapping));
        assertEquals("User:sa-xy125", authenticationModule.extractKafkaUserName(authData));
        verify(client, times(0)).getServiceAccountInternalIds();
    }

    @Test
    void testLookupNumericId_noLookup_numericUserId() {
        useIdCompatMode();
        // userId is already numeric -> no remote lookup necessary
        JSONObject authData = new JSONObject(Map.of("userId", "12345", "apiKey", "ABC123"));
        Map<String, String> internalIdMapping = Map.of("sa-xy123", "12399", "sa-xy125", "12346");
        when(client.getServiceAccountInternalIds()).thenReturn(Mono.just(internalIdMapping));
        assertEquals("User:12345", authenticationModule.extractKafkaUserName(authData));
        verify(client, times(0)).getServiceAccountInternalIds();
    }

    @Test
    void testLookupNumericId_noLookup_explicitNumericId() {
        useIdCompatMode();
        // numericId already present in auth data -> no remote lookup necessary
        JSONObject authData = new JSONObject(Map.of("userId", "sa-xy123", "apiKey", "ABC123", "numericId", "12345"));
        Map<String, String> internalIdMapping = Map.of("sa-xy123", "12399", "sa-xy125", "12346");
        when(client.getServiceAccountInternalIds()).thenReturn(Mono.just(internalIdMapping));
        assertEquals("User:12345", authenticationModule.extractKafkaUserName(authData));
        verify(client, times(0)).getServiceAccountInternalIds();
    }

    @Test
    void testLookupNumericId_noUseOfNumericId() {
        // idCompatMode is by default false in config!
        JSONObject authData = new JSONObject(Map.of("userId", "sa-xy123", "apiKey", "ABC123", "numericId", "12345"));
        Map<String, String> internalIdMapping = Map.of("sa-xy123", "12399", "sa-xy125", "12346");
        when(client.getServiceAccountInternalIds()).thenReturn(Mono.just(internalIdMapping));
        assertEquals("User:sa-xy123", authenticationModule.extractKafkaUserName(authData));
        verify(client, times(0)).getServiceAccountInternalIds();
    }

    @Test
    void testDevAuth_positive() throws Exception {
        Instant now = Instant.now();
        // make sure that there really is slight delay
        Thread.sleep(100);
        enableDeveloperAuthentication();
        ServiceAccountSpec testServiceAccount = new ServiceAccountSpec();
        testServiceAccount.setDisplayName("Test Display Name");
        testServiceAccount.setResourceId("sa-xy123");
        testServiceAccount.setDescription("Test description");
        ApiKeySpec apiKey = newApiKey("TESTKEY", "testSecret", "sa-xy123");
        when(client.listServiceAccounts()).thenReturn(Mono.just(List.of()));
        when(client.createServiceAccount("developer-test-user", "DEV_test-user@test.demo"))
                .thenReturn(Mono.just(testServiceAccount));
        when(client.createApiKey("testEnv", "testCluster", "Developer test-user@test.demo", "sa-xy123"))
                .thenReturn(Mono.just(apiKey));
        CreateAuthenticationResult result = authenticationModule
                .createDeveloperAuthentication("test-user@test.demo", new JSONObject()).get();
        JSONObject authData = result.getPublicAuthenticationData();
        Instant expiresAt = ConfluentCloudAuthUtil.getExpiresAt(authData.toString());
        if (expiresAt == null) {
            fail("No expiry date found in created developer authentication");
            return;
        }
        // configured validity is P7D, so expiry must lie strictly between +7d and +8d
        assertTrue(now.plus(7, ChronoUnit.DAYS).isBefore(expiresAt));
        assertTrue(now.plus(8, ChronoUnit.DAYS).isAfter(expiresAt));
        assertEquals("sa-xy123", authData.getString("userId"));
        assertEquals("TESTKEY", authData.getString("apiKey"));
        verify(client, times(1)).createServiceAccount("developer-test-user", "DEV_test-user@test.demo");
        verify(client, times(1)).createApiKey("testEnv", "testCluster", "Developer test-user@test.demo", "sa-xy123");
    }

    @Test
    void testDevAuth_notEnabled() throws Exception {
        ServiceAccountSpec testServiceAccount = new ServiceAccountSpec();
        testServiceAccount.setDisplayName("Test Display Name");
        testServiceAccount.setResourceId("sa-xy123");
        testServiceAccount.setDescription("Test description");
        ApiKeySpec apiKey = newApiKey("TESTKEY", "testSecret", "sa-xy123");
        when(client.listServiceAccounts()).thenReturn(Mono.just(List.of()));
        when(client.createServiceAccount("developer-test-user", "DEV_test-user@test.demo"))
                .thenReturn(Mono.just(testServiceAccount));
        when(client.createApiKey("testEnv", "testCluster", "Developer test-user@test.demo", "sa-xy123"))
                .thenReturn(Mono.just(apiKey));
        // basicConfig() does not configure a developer API key validity, so creation must fail
        ExecutionException e = assertThrows(ExecutionException.class,
                () -> authenticationModule.createDeveloperAuthentication("test-user@test.demo", new JSONObject())
                        .get());
        assertTrue(e.getCause() instanceof IllegalStateException);
    }

    @Test
    void testShortenAppNames() throws Exception {
        ServiceAccountSpec testServiceAccount = new ServiceAccountSpec();
        testServiceAccount.setDisplayName("Test Display Name");
        testServiceAccount.setResourceId("sa-xy123");
        testServiceAccount.setDescription("Test description");
        ApiKeySpec apiKey = newApiKey("TESTKEY", "testSecret", "sa-xy123");
        when(client.listServiceAccounts()).thenReturn(Mono.just(List.of()));
        ArgumentCaptor<String> displayNameCaptor = ArgumentCaptor.forClass(String.class);
        when(client.createServiceAccount(displayNameCaptor.capture(), anyString()))
                .thenReturn(Mono.just(testServiceAccount));
        when(client.createApiKey(anyString(), anyString(), anyString(), anyString())).thenReturn(Mono.just(apiKey));
        authenticationModule.createApplicationAuthentication("app-1",
                "A_very_long_strange_application_name_which_likely_exceeds_50_characters_whoever_names_such_applications",
                new JSONObject()).get();
        // Confluent Cloud limits service account display names; module must shorten to <= 64 chars
        assertTrue(displayNameCaptor.getValue().length() <= 64);
    }
}
| 19,669 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
ConfluentCloudApiClientTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/ccloud/ConfluentCloudApiClientTest.java | package com.hermesworld.ais.galapagos.ccloud;
import com.github.tomakehurst.wiremock.client.MappingBuilder;
import com.github.tomakehurst.wiremock.client.ResponseDefinitionBuilder;
import com.github.tomakehurst.wiremock.client.WireMock;
import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo;
import com.github.tomakehurst.wiremock.junit5.WireMockTest;
import com.github.tomakehurst.wiremock.matching.*;
import com.github.tomakehurst.wiremock.verification.NearMiss;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ApiKeySpec;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ConfluentApiException;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ConfluentCloudApiClient;
import com.hermesworld.ais.galapagos.ccloud.apiclient.ServiceAccountSpec;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.util.StreamUtils;
import reactor.test.StepVerifier;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.function.Consumer;
import static com.github.tomakehurst.wiremock.client.WireMock.*;
import static org.junit.jupiter.api.Assertions.*;
/**
 * Tests for the Confluent Cloud REST API client, using WireMock to stub the Confluent Cloud IAM v2 API.
 * All stubs require HTTP Basic authentication with the test credentials "myKey" / "mySecret"; JSON response
 * bodies are loaded from classpath resources under {@code ccloud/}.
 */
@WireMockTest
class ConfluentCloudApiClientTest {
// REST paths of the Confluent Cloud IAM v2 API endpoints stubbed in these tests.
private static final String SERVICE_ACCOUNTS_ENDPOINT = "/iam/v2/service-accounts";
private static final String API_KEYS_ENDPOINT = "/iam/v2/api-keys";
// Base URL of the per-test WireMock server (port is assigned dynamically by WireMockTest).
private String baseUrl;
private WireMock wireMock;
/** Captures the WireMock handle and computes the base URL of the per-test WireMock server. */
@BeforeEach
void init(WireMockRuntimeInfo info) {
wireMock = info.getWireMock();
baseUrl = "http://localhost:%s".formatted(info.getHttpPort());
}
/** Diagnostic aid: after each test, dumps all requests which did not match any registered stub (incl. near misses). */
@AfterEach
void checkWireMockStatus(WireMockRuntimeInfo info) {
WireMock wireMock = info.getWireMock();
wireMock.findAllUnmatchedRequests().forEach(req -> {
System.err.println("Unmatched request: " + req.getAbsoluteUrl());
List<NearMiss> nearMisses = WireMock.findNearMissesFor(req);
nearMisses.forEach(miss -> System.err.println("Potential near miss:" + miss.getDiff()));
});
}
/** Reads the given classpath test resource as a UTF-8 string; fails if the resource does not exist. */
private String readTestResource(String resourceName) throws IOException {
try (InputStream in = ConfluentCloudApiClientTest.class.getClassLoader().getResourceAsStream(resourceName)) {
if (in == null) {
throw new NoSuchElementException("Could not find test resource: " + resourceName);
}
return StreamUtils.copyToString(in, StandardCharsets.UTF_8);
}
}
/** Builds a stub mapping for the given path and HTTP method which requires the test Basic Auth credentials. */
private static MappingBuilder authenticatedEndpoint(String path, HttpMethod method) {
UrlPathPattern urlPattern = urlPathEqualTo(path);
// Map the Spring HttpMethod onto the corresponding WireMock builder; GET is the fallback.
MappingBuilder builder = switch (method.name()) {
case "POST" -> post(urlPattern);
case "PUT" -> put(urlPattern);
case "DELETE" -> delete(urlPattern);
case "PATCH" -> patch(urlPattern);
default -> get(urlPattern);
};
return builder.withBasicAuth("myKey", "mySecret");
}
/** Shorthand for an authenticated GET endpoint. */
private static MappingBuilder authenticatedEndpoint(String path) {
return authenticatedEndpoint(path, HttpMethod.GET);
}
/** Authenticated stub for the service accounts endpoint with the given HTTP method. */
private static MappingBuilder serviceAccountsEndpoint(HttpMethod method) {
return authenticatedEndpoint(SERVICE_ACCOUNTS_ENDPOINT, method);
}
/** Authenticated GET stub for the service accounts endpoint; expects a numeric "page_size" query parameter. */
private static MappingBuilder serviceAccountsEndpoint() {
return serviceAccountsEndpoint(HttpMethod.GET).withQueryParam("page_size", new RegexPattern("[0-9]+"));
}
/** Builds a 200 OK response definition with the given JSON body and an application/json content type. */
private static ResponseDefinitionBuilder okForPlainJson(String jsonSource) {
return ResponseDefinitionBuilder.responseDefinition().withStatus(HttpStatus.OK.value()).withBody(jsonSource)
.withHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE);
}
/** listServiceAccounts() must map the JSON response to ServiceAccountSpec objects. */
@Test
void testListServiceAccounts() throws Exception {
wireMock.register(
serviceAccountsEndpoint().willReturn(okForPlainJson(readTestResource("ccloud/service-accounts.json"))));
ConfluentCloudApiClient apiClient = new ConfluentCloudApiClient(baseUrl, "myKey", "mySecret", false);
List<ServiceAccountSpec> accounts = apiClient.listServiceAccounts().block();
assertNotNull(accounts);
assertEquals(2, accounts.size());
assertEquals("service-account-one", accounts.get(0).getDisplayName());
assertEquals("sa-xy123", accounts.get(0).getResourceId());
assertEquals("service-account-two", accounts.get(1).getDisplayName());
assertEquals("sa-xy124", accounts.get(1).getResourceId());
wireMock.verifyThat(1, requestedFor(HttpMethod.GET.name(), urlPathEqualTo(SERVICE_ACCOUNTS_ENDPOINT)));
}
/**
 * The client must follow the "next page" link of a paged response (page 1 points to /next_page with a page_token)
 * and merge all pages into a single result list.
 */
@Test
void testPagination() throws Exception {
// The page-1 resource contains a ${baseurl} placeholder pointing to the /next_page stub below.
wireMock.register(serviceAccountsEndpoint().willReturn(
okForPlainJson(readTestResource("ccloud/service-accounts-page1.json").replace("${baseurl}", baseUrl))));
wireMock.register(get(urlPathEqualTo("/next_page")).withBasicAuth("myKey", "mySecret")
.withQueryParam("page_token", new EqualToPattern("ABC"))
.willReturn(okForPlainJson(readTestResource("ccloud/service-accounts-page2.json"))));
ConfluentCloudApiClient apiClient = new ConfluentCloudApiClient(baseUrl, "myKey", "mySecret", false);
Consumer<List<ServiceAccountSpec>> verifyAccounts = accounts -> {
assertEquals(3, accounts.size());
assertEquals("service-account-one", accounts.get(0).getDisplayName());
assertEquals("sa-xy123", accounts.get(0).getResourceId());
assertEquals("service-account-two", accounts.get(1).getDisplayName());
assertEquals("sa-xy124", accounts.get(1).getResourceId());
assertEquals("service-account-three", accounts.get(2).getDisplayName());
assertEquals("sa-xy125", accounts.get(2).getResourceId());
};
StepVerifier.create(apiClient.listServiceAccounts()).assertNext(verifyAccounts).verifyComplete();
// Exactly one request per page.
wireMock.verifyThat(1, requestedFor(HttpMethod.GET.name(), urlPathEqualTo(SERVICE_ACCOUNTS_ENDPOINT)));
wireMock.verifyThat(1, requestedFor(HttpMethod.GET.name(), urlPathEqualTo("/next_page")));
}
/** listClusterApiKeys() must query with the cluster as spec.resource and map the response to ApiKeySpec objects. */
@Test
void testListApiKeys() throws Exception {
wireMock.register(authenticatedEndpoint(API_KEYS_ENDPOINT)
.withQueryParam("spec.resource", new EqualToPattern("lkc-mycluster"))
.willReturn(okForPlainJson(readTestResource("ccloud/api-keys.json"))));
ConfluentCloudApiClient apiClient = new ConfluentCloudApiClient(baseUrl, "myKey", "mySecret", false);
List<ApiKeySpec> apiKeys = apiClient.listClusterApiKeys("lkc-mycluster").block();
assertNotNull(apiKeys);
assertEquals(1, apiKeys.size());
assertEquals("ABCDEFG123456", apiKeys.get(0).getId());
assertEquals("My API Key", apiKeys.get(0).getDescription());
assertEquals("sa-xy123", apiKeys.get(0).getServiceAccountId());
assertEquals("2022-09-16T11:45:01.722675Z", apiKeys.get(0).getCreatedAt().toString());
wireMock.verifyThat(1, requestedFor(HttpMethod.GET.name(), urlPathEqualTo(API_KEYS_ENDPOINT)));
}
/** createServiceAccount() must POST display_name / description and map the 201 response body. */
@Test
void testCreateServiceAccount() throws Exception {
wireMock.register(serviceAccountsEndpoint(HttpMethod.POST)
.withRequestBody(
new JsonWithPropertiesPattern(Map.of("display_name", "myaccount", "description", "mydesc")))
.willReturn(okForPlainJson(readTestResource("ccloud/service-account.json"))
.withStatus(HttpStatus.CREATED.value())));
ConfluentCloudApiClient apiClient = new ConfluentCloudApiClient(baseUrl, "myKey", "mySecret", false);
ServiceAccountSpec spec = apiClient.createServiceAccount("myaccount", "mydesc").block();
assertNotNull(spec);
assertEquals("Created Service Account.", spec.getDescription());
assertEquals("CREATED_service_account", spec.getDisplayName());
assertEquals("sa-xy123", spec.getResourceId());
// numeric ID lookup is disabled (constructor flag false), so no numeric ID is resolved
assertNull(spec.getNumericId());
wireMock.verifyThat(1, requestedFor(HttpMethod.POST.name(), urlPathEqualTo(SERVICE_ACCOUNTS_ENDPOINT)));
}
/** With the numeric-ID flag enabled, the client must additionally resolve the legacy numeric account ID. */
@Test
void testCreateServiceAccount_withNumericId() throws Exception {
wireMock.register(serviceAccountsEndpoint(HttpMethod.POST)
.willReturn(okForPlainJson(readTestResource("ccloud/service-account.json"))
.withStatus(HttpStatus.CREATED.value())));
// legacy endpoint providing the resource ID -> numeric ID mapping
wireMock.register(authenticatedEndpoint("/service_accounts")
.willReturn(okForPlainJson(readTestResource("ccloud/service-account-mapping.json"))));
ConfluentCloudApiClient apiClient = new ConfluentCloudApiClient(baseUrl, "myKey", "mySecret", true);
ServiceAccountSpec spec = apiClient.createServiceAccount("myaccount", "mydesc").block();
assertNotNull(spec);
assertEquals("123456", spec.getNumericId());
wireMock.verifyThat(1, requestedFor(HttpMethod.POST.name(), urlPathEqualTo(SERVICE_ACCOUNTS_ENDPOINT)));
wireMock.verifyThat(1, requestedFor(HttpMethod.GET.name(), urlPathEqualTo("/service_accounts")));
}
/** createApiKey() must POST the full key spec (owner, resource, environment) and map the 202 response body. */
@Test
void testCreateApiKey() throws Exception {
// nested JSON properties expected in the request body (dot notation, see JsonWithPropertiesPattern)
Map<String, String> expectedJsonProperties = Map.of("spec.display_name", "", "spec.description",
"description param", "spec.owner.id", "sa-xy123", "spec.owner.environment", "env-ab123",
"spec.resource.id", "lkc-abc123", "spec.resource.environment", "env-ab123");
wireMock.register(authenticatedEndpoint(API_KEYS_ENDPOINT, HttpMethod.POST)
.withRequestBody(new JsonWithPropertiesPattern(expectedJsonProperties))
.willReturn(okForPlainJson(readTestResource("ccloud/api-key.json"))
.withStatus(HttpStatus.ACCEPTED.value())));
ConfluentCloudApiClient apiClient = new ConfluentCloudApiClient(baseUrl, "myKey", "mySecret", false);
ApiKeySpec spec = apiClient.createApiKey("env-ab123", "lkc-abc123", "description param", "sa-xy123").block();
assertNotNull(spec);
assertEquals("ABCDEF123456", spec.getId());
assertEquals("2022-07-22T14:48:41.966079Z", spec.getCreatedAt().toString());
assertEquals("API Key Description", spec.getDescription());
assertEquals("sa-xy123", spec.getServiceAccountId());
wireMock.verifyThat(1, requestedFor(HttpMethod.POST.name(), urlPathEqualTo(API_KEYS_ENDPOINT)));
}
/** deleteApiKey() must issue a DELETE on the key's resource path. */
@Test
void testDeleteApiKey() {
wireMock.register(
authenticatedEndpoint(API_KEYS_ENDPOINT + "/ABCDEF123456", HttpMethod.DELETE).willReturn(noContent()));
ConfluentCloudApiClient apiClient = new ConfluentCloudApiClient(baseUrl, "myKey", "mySecret", false);
ApiKeySpec spec = new ApiKeySpec();
spec.setId("ABCDEF123456");
apiClient.deleteApiKey(spec).block();
wireMock.verifyThat(1,
requestedFor(HttpMethod.DELETE.name(), urlPathEqualTo(API_KEYS_ENDPOINT + "/ABCDEF123456")));
}
/** Without any stub registered, the DELETE gets a 404, which must surface as a ConfluentApiException. */
@Test
void testErrorStatusCode() {
ConfluentCloudApiClient apiClient = new ConfluentCloudApiClient(baseUrl, "myKey", "mySecret", false);
ApiKeySpec spec = new ApiKeySpec();
spec.setId("ABCDEF123456");
StepVerifier.create(apiClient.deleteApiKey(spec)).expectErrorMatches(t -> (t instanceof ConfluentApiException)
&& t.getMessage().startsWith("Could not delete API key: Server returned 404 for ")).verify();
// the intentionally unmatched request would otherwise be reported by checkWireMockStatus()
wireMock.resetRequests();
}
/** A JSON error body with a single "error" property must be used as the exception message detail. */
@Test
void testErrorMessage_singleError() {
JSONObject errorObj = new JSONObject(Map.of("error", "something went wrong"));
wireMock.register(authenticatedEndpoint(API_KEYS_ENDPOINT + "/ABCDEF123456", HttpMethod.DELETE)
.willReturn(badRequest().withBody(errorObj.toString()).withHeader(HttpHeaders.CONTENT_TYPE,
MediaType.APPLICATION_JSON_VALUE)));
ConfluentCloudApiClient apiClient = new ConfluentCloudApiClient(baseUrl, "myKey", "mySecret", false);
ApiKeySpec spec = new ApiKeySpec();
spec.setId("ABCDEF123456");
StepVerifier.create(apiClient.deleteApiKey(spec)).expectErrorMatches(t -> (t instanceof ConfluentApiException)
&& t.getMessage().equals("Could not delete API key: something went wrong")).verify();
}
/** For an "errors" array in the error body, the detail of the FIRST entry must be used as the message. */
@Test
void testErrorMessage_errorsArray() {
JSONObject errorObj = new JSONObject(Map.of("detail", "something went wrong"));
JSONObject errorObj2 = new JSONObject(Map.of("detail", "all is broken"));
JSONArray errors = new JSONArray();
errors.put(errorObj);
errors.put(errorObj2);
JSONObject body = new JSONObject(Map.of("errors", errors));
wireMock.register(authenticatedEndpoint(API_KEYS_ENDPOINT + "/ABCDEF123456", HttpMethod.DELETE)
.willReturn(badRequest().withBody(body.toString()).withHeader(HttpHeaders.CONTENT_TYPE,
MediaType.APPLICATION_JSON_VALUE)));
ConfluentCloudApiClient apiClient = new ConfluentCloudApiClient(baseUrl, "myKey", "mySecret", false);
ApiKeySpec spec = new ApiKeySpec();
spec.setId("ABCDEF123456");
StepVerifier.create(apiClient.deleteApiKey(spec)).expectErrorMatches(t -> (t instanceof ConfluentApiException)
&& t.getMessage().equals("Could not delete API key: something went wrong")).verify();
}
/** A non-JSON (text/plain) error body must fall back to the generic "Server returned <status>" message. */
@Test
void testError_textOnlyResponse() {
wireMock.register(authenticatedEndpoint(API_KEYS_ENDPOINT + "/ABCDEF123456", HttpMethod.DELETE)
.willReturn(badRequest().withBody("This is your friendly error message in text only.")
.withHeader(HttpHeaders.CONTENT_TYPE, MediaType.TEXT_PLAIN_VALUE)));
ConfluentCloudApiClient apiClient = new ConfluentCloudApiClient(baseUrl, "myKey", "mySecret", false);
ApiKeySpec spec = new ApiKeySpec();
spec.setId("ABCDEF123456");
StepVerifier.create(apiClient.deleteApiKey(spec)).expectErrorMatches(t -> (t instanceof ConfluentApiException)
&& t.getMessage().startsWith("Could not delete API key: Server returned 400 for ")).verify();
}
/**
 * WireMock content pattern which matches a JSON request body if it contains all expected properties with the
 * expected string values. Property names may use dot notation ("spec.owner.id") to address nested JSON objects.
 */
private static class JsonWithPropertiesPattern extends ContentPattern<byte[]> {
private final Map<String, String> propertiesAndValues;
public JsonWithPropertiesPattern(Map<String, String> propertiesAndValues) {
super(propertiesAndValues.toString().getBytes(StandardCharsets.UTF_8));
this.propertiesAndValues = propertiesAndValues;
}
@Override
public String getName() {
return "json-with-properties";
}
@Override
public String getExpected() {
return propertiesAndValues.toString();
}
/** Recursively checks for a (possibly dot-nested) string property with the expected value. */
private boolean containsProperty(JSONObject obj, String propertyName, String expectedValue) {
if (propertyName.contains(".")) {
// descend into the first path segment, then recurse with the remainder of the path
String pn = propertyName.split("\\.")[0];
if (!obj.has(pn)) {
return false;
}
return containsProperty(obj.getJSONObject(pn), propertyName.substring(propertyName.indexOf('.') + 1),
expectedValue);
}
return obj.has(propertyName) && Objects.equals(obj.getString(propertyName), expectedValue);
}
@Override
public MatchResult match(byte[] value) {
try {
JSONObject obj = new JSONObject(new String(value, StandardCharsets.UTF_8));
// count how many of the expected properties are present with the expected value
int matchCounter = 0;
for (Map.Entry<String, String> pairs : propertiesAndValues.entrySet()) {
if (containsProperty(obj, pairs.getKey(), pairs.getValue())) {
matchCounter++;
}
}
if (matchCounter == propertiesAndValues.size()) {
return MatchResult.exactMatch();
}
if (matchCounter == 0) {
return MatchResult.noMatch();
}
// partial match: distance = number of missing/mismatching properties (used for near-miss reporting)
return MatchResult.partialMatch(propertiesAndValues.size() - matchCounter);
}
catch (JSONException e) {
// body is not valid JSON at all
return MatchResult.noMatch();
}
}
}
}
| 16,877 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
StagingImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/staging/impl/StagingImplTest.java | package com.hermesworld.ais.galapagos.staging.impl;
import com.hermesworld.ais.galapagos.changes.ApplicableChange;
import com.hermesworld.ais.galapagos.changes.Change;
import com.hermesworld.ais.galapagos.changes.ChangeType;
import com.hermesworld.ais.galapagos.kafka.TopicCreateParams;
import com.hermesworld.ais.galapagos.subscriptions.SubscriptionMetadata;
import com.hermesworld.ais.galapagos.subscriptions.service.SubscriptionService;
import com.hermesworld.ais.galapagos.topics.SchemaMetadata;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.service.TopicService;
import com.hermesworld.ais.galapagos.util.JsonUtil;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link StagingImpl}. A staging is built from a source environment ("dev") to a target environment
 * ("int") and yields the list of changes required to align the target environment with the source.
 */
class StagingImplTest {
/** Subscriptions must be compared by topic/application, NOT by their (environment-specific) ID. */
@Test
void testSubscriptionIdentity() throws Exception {
TopicService topicService = mock(TopicService.class);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-2");
topic1.setType(TopicType.EVENTS);
when(topicService.listTopics("dev")).thenReturn(List.of(topic1));
when(topicService.listTopics("int")).thenReturn(List.of(topic1));
SubscriptionService subscriptionService = mock(SubscriptionService.class);
SubscriptionMetadata sub1 = new SubscriptionMetadata();
sub1.setClientApplicationId("app-1");
sub1.setId("123");
sub1.setTopicName("topic-1");
when(subscriptionService.getSubscriptionsOfApplication("dev", "app-1", false)).thenReturn(List.of(sub1));
StagingImpl staging = StagingImpl.build("app-1", "dev", "int", null, topicService, subscriptionService).get();
// subscription must be staged
List<? extends Change> changes = staging.getChanges();
assertEquals(1, changes.size());
Change change = changes.get(0);
assertEquals(ChangeType.TOPIC_SUBSCRIBED, change.getChangeType());
// now, let's assume subscription exists on INT, then it must not be staged again
// note that the ID may be different on different environments!
SubscriptionMetadata sub2 = new SubscriptionMetadata();
sub2.setClientApplicationId("app-1");
sub2.setId("456");
sub2.setTopicName("topic-1");
when(subscriptionService.getSubscriptionsOfApplication("int", "app-1", false)).thenReturn(List.of(sub2));
staging = StagingImpl.build("app-1", "dev", "int", null, topicService, subscriptionService).get();
changes = staging.getChanges();
assertEquals(0, changes.size());
}
@Test
@DisplayName("should stage new added producer to next stage")
void testProducerAddStaging() throws Exception {
TopicService topicService = mock(TopicService.class);
// same topic on both stages, but the producer is only present on "dev"
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topic1.setProducers(List.of("producer1"));
TopicMetadata topic2 = new TopicMetadata();
topic2.setName("topic-1");
topic2.setOwnerApplicationId("app-1");
topic2.setType(TopicType.EVENTS);
when(topicService.listTopics("dev")).thenReturn(List.of(topic1));
when(topicService.listTopics("int")).thenReturn(List.of(topic2));
StagingImpl staging = StagingImpl
.build("app-1", "dev", "int", null, topicService, mock(SubscriptionService.class)).get();
List<? extends Change> changes = staging.getChanges();
assertEquals(1, staging.getChanges().size());
Change change = changes.get(0);
assertEquals(ChangeType.TOPIC_PRODUCER_APPLICATION_ADDED, change.getChangeType());
// once the producer also exists on "int", nothing is left to stage
topic2.setProducers(List.of("producer1"));
staging = StagingImpl.build("app-1", "dev", "int", null, topicService, mock(SubscriptionService.class)).get();
changes = staging.getChanges();
assertEquals(0, changes.size());
}
@Test
@DisplayName("should stage removed producer to next stage")
void testProducerRemoveStaging() throws Exception {
TopicService topicService = mock(TopicService.class);
// producer initially present on both stages
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topic1.setProducers(List.of("producer1"));
TopicMetadata topic2 = new TopicMetadata();
topic2.setName("topic-1");
topic2.setOwnerApplicationId("app-1");
topic2.setType(TopicType.EVENTS);
topic2.setProducers(List.of("producer1"));
when(topicService.listTopics("dev")).thenReturn(List.of(topic1));
when(topicService.listTopics("int")).thenReturn(List.of(topic2));
// remove the producer on "dev" only; staging must propagate the removal to "int"
List<String> producers = new ArrayList<>(topic1.getProducers());
producers.remove("producer1");
topic1.setProducers(producers);
StagingImpl staging = StagingImpl
.build("app-1", "dev", "int", null, topicService, mock(SubscriptionService.class)).get();
List<? extends Change> changes = staging.getChanges();
assertEquals(1, staging.getChanges().size());
Change change = changes.get(0);
assertEquals(ChangeType.TOPIC_PRODUCER_APPLICATION_REMOVED, change.getChangeType());
}
/** Schemas must be compared by schema version, NOT by their (environment-specific) ID. */
@Test
void testSchemaIdentity() throws Exception {
TopicService topicService = mock(TopicService.class);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
when(topicService.listTopics("dev")).thenReturn(List.of(topic1));
when(topicService.listTopics("int")).thenReturn(List.of(topic1));
SchemaMetadata schema1 = new SchemaMetadata();
schema1.setId("999");
schema1.setSchemaVersion(1);
schema1.setCreatedBy("test");
schema1.setTopicName("topic-1");
when(topicService.getTopicSchemaVersions("dev", "topic-1")).thenReturn(List.of(schema1));
SubscriptionService subscriptionService = mock(SubscriptionService.class);
StagingImpl staging = StagingImpl.build("app-1", "dev", "int", null, topicService, subscriptionService).get();
// schema must be staged
List<? extends Change> changes = staging.getChanges();
assertEquals(1, changes.size());
Change change = changes.get(0);
assertEquals(ChangeType.TOPIC_SCHEMA_VERSION_PUBLISHED, change.getChangeType());
// now, let's assume schema exists on INT, then it must not be staged again
// note that the ID may be different on different environments!
SchemaMetadata schema2 = new SchemaMetadata();
schema2.setId("000");
schema2.setCreatedBy("test");
schema2.setTopicName("topic-1");
schema2.setSchemaVersion(1);
when(topicService.getTopicSchemaVersions("int", "topic-1")).thenReturn(List.of(schema2));
staging = StagingImpl.build("app-1", "dev", "int", null, topicService, subscriptionService).get();
changes = staging.getChanges();
assertEquals(0, changes.size());
}
/** Creating a topic with its first schema must be staged as ONE compound change. */
@Test
void testCompoundChangeForApiTopicCreation() throws Exception {
TopicService topicService = mock(TopicService.class);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
when(topicService.listTopics("dev")).thenReturn(List.of(topic1));
when(topicService.buildTopicCreateParams("dev", "topic-1"))
.thenReturn(CompletableFuture.completedFuture(new TopicCreateParams(2, 2)));
// two schema versions exist on "dev"
SchemaMetadata schema1 = new SchemaMetadata();
schema1.setId("999");
schema1.setSchemaVersion(1);
schema1.setCreatedBy("test");
schema1.setTopicName("topic-1");
SchemaMetadata schema2 = new SchemaMetadata();
schema2.setId("000");
schema2.setSchemaVersion(2);
schema2.setCreatedBy("test");
schema2.setTopicName("topic-1");
when(topicService.getTopicSchemaVersions("dev", "topic-1")).thenReturn(List.of(schema1, schema2));
SubscriptionService subscriptionService = mock(SubscriptionService.class);
StagingImpl staging = StagingImpl.build("app-1", "dev", "int", null, topicService, subscriptionService).get();
// Must contain TWO changes: One compound change for creating the topic and the first schema, and another one
// for
// creating second schema
List<? extends Change> changes = staging.getChanges();
assertEquals(2, changes.size());
assertEquals(ChangeType.COMPOUND_CHANGE, changes.get(0).getChangeType());
assertEquals(ChangeType.TOPIC_SCHEMA_VERSION_PUBLISHED, changes.get(1).getChangeType());
// the compound change must serialize the first schema (ID 999) but not the second (ID 000)
String firstChangeJson = JsonUtil.newObjectMapper().writeValueAsString(changes.get(0));
assertTrue(firstChangeJson.contains("app-1"));
assertTrue(firstChangeJson.contains("999"));
assertFalse(firstChangeJson.contains("000"));
}
/** Applying a "create topic" change for an API topic without any published schema must fail. */
@Test
void testApiTopicWithoutSchema_fail() throws Exception {
TopicService topicService = mock(TopicService.class);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
when(topicService.listTopics("dev")).thenReturn(List.of(topic1));
SubscriptionService subscriptionService = mock(SubscriptionService.class);
StagingImpl staging = StagingImpl.build("app-1", "dev", "int", null, topicService, subscriptionService).get();
List<? extends Change> changes = staging.getChanges();
assertEquals(1, changes.size());
try {
((ApplicableChange) changes.get(0)).applyTo(null).get();
fail("Applying create topic change expected to fail because no JSON schema published");
}
catch (ExecutionException e) {
assertTrue(e.getCause() instanceof IllegalStateException);
assertTrue(e.getCause().getMessage().contains("schema"));
}
}
/** Applying a "create topic" change for a deprecated topic must fail. */
@Test
void testStageDeprecatedTopic_fail() throws Exception {
TopicService topicService = mock(TopicService.class);
TopicMetadata topic1 = new TopicMetadata();
topic1.setName("topic-1");
topic1.setOwnerApplicationId("app-1");
topic1.setType(TopicType.EVENTS);
topic1.setDeprecated(true);
when(topicService.listTopics("dev")).thenReturn(List.of(topic1));
SubscriptionService subscriptionService = mock(SubscriptionService.class);
StagingImpl staging = StagingImpl.build("app-1", "dev", "int", null, topicService, subscriptionService).get();
List<? extends Change> changes = staging.getChanges();
assertEquals(1, changes.size());
try {
((ApplicableChange) changes.get(0)).applyTo(null).get();
fail("Applying create topic change expected to fail because topic is deprecated, but succeeded.");
}
catch (ExecutionException e) {
assertTrue(e.getCause() instanceof IllegalStateException);
assertTrue(e.getCause().getMessage().toLowerCase(Locale.US).contains("deprecated"));
}
}
}
| 11,853 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CertificateExpiryReminderRunnerIntegrationTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/certificates/reminders/impl/CertificateExpiryReminderRunnerIntegrationTest.java | package com.hermesworld.ais.galapagos.certificates.reminders.impl;
import com.hermesworld.ais.galapagos.GalapagosTestConfig;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationOwnerRequest;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.RequestState;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationConfig;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationModule;
import com.hermesworld.ais.galapagos.certificates.reminders.CertificateExpiryReminder;
import com.hermesworld.ais.galapagos.certificates.reminders.CertificateExpiryReminderService;
import com.hermesworld.ais.galapagos.certificates.reminders.ReminderType;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import jakarta.mail.MessagingException;
import jakarta.mail.internet.MimeMessage;
import org.json.JSONObject;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.actuate.autoconfigure.mail.MailHealthContributorAutoConfiguration;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.context.annotation.Import;
import org.springframework.mail.javamail.JavaMailSender;
import java.io.ByteArrayInputStream;
import java.util.*;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.when;
/**
 * This test mainly focuses on the "notification integration" part, i.e., that the mail templates do render correctly
 * and the correct e-mail recipients are determined and passed to the mail engine.
 */
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.MOCK)
@EnableAutoConfiguration(exclude = { MailHealthContributorAutoConfiguration.class })
@Import(GalapagosTestConfig.class)
class CertificateExpiryReminderRunnerIntegrationTest {

    @MockBean
    private KafkaClusters kafkaClusters;

    @MockBean
    private CertificateExpiryReminderService reminderService;

    @MockBean
    private ApplicationsService applicationsService;

    @MockBean
    private JavaMailSender mailSender;

    @SuppressWarnings("SpringJavaInjectionPointsAutowiringInspection")
    @Autowired
    private CertificateExpiryReminderRunner runner;

    // Collects every message "sent" through the mocked mail sender (see doAnswer() below).
    private final List<MimeMessage> sentMessages = new ArrayList<>();

    /**
     * Sets up a single test environment with one approved application owner request (notification address
     * test@test.com) and one application ("123") whose certificate has an expiry date.
     */
    @BeforeEach
    void initMocks() throws MessagingException {
        when(kafkaClusters.getEnvironmentIds()).thenReturn(List.of("test"));
        // capture sent mails instead of actually sending them
        doAnswer(inv -> sentMessages.add(inv.getArgument(0))).when(mailSender).send((MimeMessage) any());
        when(mailSender.createMimeMessage()).thenReturn(new MimeMessage(null, new ByteArrayInputStream(new byte[0])));
        ApplicationOwnerRequest request = new ApplicationOwnerRequest();
        request.setApplicationId("123");
        request.setNotificationEmailAddress("test@test.com");
        request.setState(RequestState.APPROVED);
        when(applicationsService.getAllApplicationOwnerRequests()).thenReturn(List.of(request));
        ApplicationMetadata metadata = new ApplicationMetadata();
        metadata.setApplicationId("123");
        metadata.setAuthenticationJson(new JSONObject(Map.of("expiresAt", "2020-11-10T10:20:00.000Z")).toString());
        when(applicationsService.getApplicationMetadata("test", "123")).thenReturn(Optional.of(metadata));
        when(kafkaClusters.getAuthenticationModule("test")).thenReturn(
                Optional.of(new CertificatesAuthenticationModule("test", new CertificatesAuthenticationConfig())));
    }

    @Test
    void testSendNotification_threeMonths_success() throws Exception {
        assertReminderMailSent(ReminderType.THREE_MONTHS);
    }

    @Test
    void testSendNotification_oneMonth_success() throws Exception {
        assertReminderMailSent(ReminderType.ONE_MONTH);
    }

    @Test
    void testSendNotification_oneWeek_success() throws Exception {
        assertReminderMailSent(ReminderType.ONE_WEEK);
    }

    /**
     * Shared test body for all reminder types: runs the expiry check for a single due reminder of the given type and
     * asserts that exactly one mail is sent, addressed solely to the registered owner address (test@test.com).
     *
     * @param reminderType the reminder stage to simulate (three months / one month / one week before expiry)
     */
    private void assertReminderMailSent(ReminderType reminderType) throws Exception {
        CertificateExpiryReminder reminder = new CertificateExpiryReminder("123", "test", reminderType);
        when(reminderService.calculateDueCertificateReminders()).thenReturn(List.of(reminder));
        runner.checkCertificatesForExpiration();
        assertEquals(1, sentMessages.size());
        MimeMessage msg = sentMessages.get(0);
        assertTrue(Arrays.stream(msg.getAllRecipients()).allMatch(addr -> "test@test.com".equals(addr.toString())));
    }
}
| 5,677 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CertificateExpiryReminderServiceTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/certificates/reminders/impl/CertificateExpiryReminderServiceTest.java | package com.hermesworld.ais.galapagos.certificates.reminders.impl;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationConfig;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationModule;
import com.hermesworld.ais.galapagos.certificates.reminders.CertificateExpiryReminder;
import com.hermesworld.ais.galapagos.certificates.reminders.ReminderType;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.kafka.impl.TopicBasedRepositoryMock;
import org.json.JSONObject;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.*;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code CertificateExpiryReminderServiceImpl}. The Kafka cluster abstraction and the
 * applications service are Mockito mocks; the "reminders" repository of the mock environment "test"
 * is backed by an in-memory {@code TopicBasedRepositoryMock}.
 */
class CertificateExpiryReminderServiceTest {

    private KafkaClusters clusters;

    private ApplicationsService applicationsService;

    // Service under test; created fresh for every test in initClusters().
    private CertificateExpiryReminderServiceImpl service;

    // In-memory stand-in for the topic-based "reminders" repository of environment "test".
    private final TopicBasedRepositoryMock<ReminderMetadata> reminderRepository = new TopicBasedRepositoryMock<>();

    /**
     * Wires one mock environment "test" using certificates-based authentication and creates the
     * service under test.
     */
    @BeforeEach
    void initClusters() {
        clusters = mock(KafkaClusters.class);
        KafkaCluster cluster = mock(KafkaCluster.class);
        applicationsService = mock(ApplicationsService.class);
        when(cluster.getId()).thenReturn("test");
        KafkaEnvironmentConfig envMeta = mock(KafkaEnvironmentConfig.class);
        when(envMeta.getAuthenticationMode()).thenReturn("certificates");
        when(clusters.getEnvironmentIds()).thenReturn(List.of("test"));
        when(clusters.getEnvironment("test")).thenReturn(Optional.of(cluster));
        when(clusters.getEnvironments()).thenCallRealMethod();
        when(clusters.getEnvironmentMetadata("test")).thenReturn(Optional.of(envMeta));
        CertificatesAuthenticationConfig certConfig = new CertificatesAuthenticationConfig();
        when(clusters.getAuthenticationModule("test"))
                .thenReturn(Optional.of(new CertificatesAuthenticationModule("test", certConfig)));
        when(cluster.getRepository("reminders", ReminderMetadata.class)).thenReturn(reminderRepository);
        service = new CertificateExpiryReminderServiceImpl(clusters, applicationsService);
    }

    /** Certificate still valid for a full year: no reminder may be due. */
    @Test
    void testNoReminder() {
        ApplicationMetadata metadata = new ApplicationMetadata();
        metadata.setApplicationId("123");
        metadata.setAuthenticationJson(authJson("CN=abc", 365));
        when(applicationsService.getAllApplicationMetadata("test")).thenReturn(List.of(metadata));
        List<CertificateExpiryReminder> reminders = service.calculateDueCertificateReminders();
        assertTrue(reminders.isEmpty());
    }

    /** Certificate expiring in 40 days: a THREE_MONTHS reminder must be due. */
    @Test
    void testSimpleCase() {
        ApplicationMetadata metadata = new ApplicationMetadata();
        metadata.setApplicationId("123");
        metadata.setAuthenticationJson(authJson("CN=abc", 40));
        when(applicationsService.getAllApplicationMetadata("test")).thenReturn(List.of(metadata));
        List<CertificateExpiryReminder> reminders = service.calculateDueCertificateReminders();
        assertEquals(1, reminders.size());
        assertEquals("123", reminders.get(0).getApplicationId());
        assertEquals("test", reminders.get(0).getEnvironmentId());
        assertEquals(ReminderType.THREE_MONTHS, reminders.get(0).getReminderType());
    }

    /** A reminder must stay due until it is explicitly marked as sent out. */
    @Test
    void testMultipleCallsWithoutMarkMustReturnSameReminders() {
        ApplicationMetadata metadata = new ApplicationMetadata();
        metadata.setApplicationId("123");
        metadata.setAuthenticationJson(authJson("CN=abc", 40));
        when(applicationsService.getAllApplicationMetadata("test")).thenReturn(List.of(metadata));
        List<CertificateExpiryReminder> reminders = service.calculateDueCertificateReminders();
        assertEquals(1, reminders.size());
        // no call to markReminderSentOut - same reminder must still be contained
        reminders = service.calculateDueCertificateReminders();
        assertEquals(1, reminders.size());
    }

    /** Marking one of two due reminders as sent must remove exactly that reminder. */
    @Test
    void testSimpleMark() {
        List<ApplicationMetadata> applications = new ArrayList<>();
        ApplicationMetadata metadata = new ApplicationMetadata();
        metadata.setApplicationId("123");
        metadata.setAuthenticationJson(authJson("CN=abc", 40));
        applications.add(metadata);
        metadata = new ApplicationMetadata();
        metadata.setApplicationId("456");
        metadata.setAuthenticationJson(authJson("CN=def", 10));
        applications.add(metadata);
        when(applicationsService.getAllApplicationMetadata("test")).thenReturn(applications);
        List<CertificateExpiryReminder> reminders = service.calculateDueCertificateReminders();
        assertEquals(2, reminders.size());
        CertificateExpiryReminder reminder = reminders.get(0);
        service.markReminderSentOut(reminder);
        reminders = service.calculateDueCertificateReminders();
        assertEquals(1, reminders.size());
        assertNotEquals(reminder.getApplicationId(), reminders.get(0).getApplicationId());
    }

    /** Once a ONE_WEEK reminder has been sent, no "longer distance" reminder may be generated. */
    @Test
    void testShortTimeAlreadySentShouldNotSendLongerTimeReminder() throws Exception {
        List<ApplicationMetadata> applications = new ArrayList<>();
        ApplicationMetadata metadata = new ApplicationMetadata();
        metadata.setApplicationId("123");
        metadata.setAuthenticationJson(authJson("CN=abc", 5));
        applications.add(metadata);
        // pre-populate the reminders repository with an already sent ONE_WEEK reminder
        ReminderMetadata shortReminder = new ReminderMetadata();
        shortReminder.setApplicationId("123");
        shortReminder.setReminderType(ReminderType.ONE_WEEK);
        shortReminder.setReminderId("1");
        reminderRepository.save(shortReminder).get();
        when(applicationsService.getAllApplicationMetadata("test")).thenReturn(applications);
        // no reminder for one month or three months should be sent if one_week has already been sent...
        List<CertificateExpiryReminder> reminders = service.calculateDueCertificateReminders();
        assertEquals(0, reminders.size());
    }

    /** Expiring certificates on two environments must produce one (correctly typed) reminder each. */
    @Test
    void testMultipleEnvironmentsWithExpiredEach() {
        ApplicationMetadata metadata = new ApplicationMetadata();
        metadata.setApplicationId("123");
        metadata.setAuthenticationJson(authJson("CN=abc", 5));
        when(applicationsService.getAllApplicationMetadata("test")).thenReturn(List.of(metadata));
        // wire a second mock environment "test2" with its own empty reminders repository
        KafkaCluster env2 = mock(KafkaCluster.class);
        when(env2.getId()).thenReturn("test2");
        when(env2.getRepository("reminders", ReminderMetadata.class)).thenReturn(new TopicBasedRepositoryMock<>());
        when(clusters.getEnvironment("test2")).thenReturn(Optional.of(env2));
        when(clusters.getEnvironmentIds()).thenReturn(Arrays.asList("test", "test2"));
        KafkaEnvironmentConfig envMeta = mock(KafkaEnvironmentConfig.class);
        when(envMeta.getAuthenticationMode()).thenReturn("certificates");
        when(clusters.getEnvironmentMetadata("test2")).thenReturn(Optional.of(envMeta));
        CertificatesAuthenticationConfig certConfig = new CertificatesAuthenticationConfig();
        when(clusters.getAuthenticationModule("test2"))
                .thenReturn(Optional.of(new CertificatesAuthenticationModule("test2", certConfig)));
        metadata = new ApplicationMetadata();
        metadata.setApplicationId("123");
        metadata.setAuthenticationJson(authJson("CN=abc", 40));
        when(applicationsService.getAllApplicationMetadata("test2")).thenReturn(List.of(metadata));
        List<CertificateExpiryReminder> reminders = service.calculateDueCertificateReminders();
        assertEquals(2, reminders.size());
        // order of reminders is not defined; normalize so rem1 belongs to "test"
        CertificateExpiryReminder rem1 = reminders.get(0).getEnvironmentId().equals("test") ? reminders.get(0)
                : reminders.get(1);
        CertificateExpiryReminder rem2 = reminders.get(0).getEnvironmentId().equals("test") ? reminders.get(1)
                : reminders.get(0);
        assertEquals("test", rem1.getEnvironmentId());
        assertEquals(ReminderType.ONE_WEEK, rem1.getReminderType());
        assertEquals("test2", rem2.getEnvironmentId());
        assertEquals(ReminderType.THREE_MONTHS, rem2.getReminderType());
    }

    /** Only the environment with an expiring certificate (120 days is not "due") may yield a reminder. */
    @Test
    void testMultipleEnvironmentsWithOnlyOneExpired() {
        List<ApplicationMetadata> applications = new ArrayList<>();
        ApplicationMetadata metadata = new ApplicationMetadata();
        metadata.setApplicationId("123");
        metadata.setAuthenticationJson(authJson("CN=abc", 5));
        applications.add(metadata);
        when(applicationsService.getAllApplicationMetadata("test")).thenReturn(applications);
        KafkaCluster env2 = mock(KafkaCluster.class);
        when(env2.getId()).thenReturn("test2");
        when(env2.getRepository("reminders", ReminderMetadata.class)).thenReturn(new TopicBasedRepositoryMock<>());
        when(clusters.getEnvironment("test2")).thenReturn(Optional.of(env2));
        when(clusters.getEnvironmentIds()).thenReturn(Arrays.asList("test", "test2"));
        applications = new ArrayList<>();
        metadata = new ApplicationMetadata();
        metadata.setApplicationId("123");
        metadata.setAuthenticationJson(authJson("CN=abc", 120));
        applications.add(metadata);
        when(applicationsService.getAllApplicationMetadata("test2")).thenReturn(applications);
        List<CertificateExpiryReminder> reminders = service.calculateDueCertificateReminders();
        assertEquals(1, reminders.size());
        CertificateExpiryReminder rem = reminders.get(0);
        assertEquals("test", rem.getEnvironmentId());
        assertEquals(ReminderType.ONE_WEEK, rem.getReminderType());
    }

    /**
     * Builds the authentication JSON for a test application: the given DN, and an expiry timestamp
     * {@code daysFromNow} days from now.
     */
    private static String authJson(String dn, int daysFromNow) {
        return new JSONObject(
                Map.of("dn", dn, "expiresAt", Instant.now().plus(daysFromNow, ChronoUnit.DAYS).toString())).toString();
    }
}
| 10,336 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
CaManagerImplTest.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/test/java/com/hermesworld/ais/galapagos/certificates/impl/CaManagerImplTest.java | package com.hermesworld.ais.galapagos.certificates.impl;
import com.hermesworld.ais.galapagos.certificates.auth.CertificatesAuthenticationConfig;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.core.io.ClassPathResource;
import org.springframework.util.StreamUtils;
import java.nio.charset.StandardCharsets;
import java.security.Security;
import java.security.cert.CertificateException;
import java.security.cert.CertificateParsingException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import static org.junit.jupiter.api.Assertions.*;
/**
 * Tests for {@code CaManagerImpl}: signing application certificates from CSRs and extending existing
 * certificates. Test CSRs and the test CA key/certificate are read from the classpath.
 */
class CaManagerImplTest {

    private final static String testAppId = "four";

    private final static String testAppName = "Quattro";

    // working directory for generated certificate artifacts
    private final static String workdir = "target/temp-certificates";

    private CertificatesAuthenticationConfig authConfig;

    @BeforeEach
    void init() {
        // BouncyCastle provider is required for CSR parsing and certificate signing.
        Security.addProvider(new BouncyCastleProvider());
        authConfig = mockAuthConfig();
    }

    /** A CSR with a valid CN must be signed successfully and yield a DN in the result. */
    @Test
    void testCreateApplicationFromCsrWithValidCn() throws Exception {
        String testCsrData = StreamUtils.copyToString(
                new ClassPathResource("/certificates/test_quattroValidCn.csr").getInputStream(),
                StandardCharsets.UTF_8);
        CaManagerImpl testCaManagerImpl = new CaManagerImpl("test", authConfig);
        CompletableFuture<CertificateSignResult> future = testCaManagerImpl
                .createApplicationCertificateFromCsr(testAppId, testCsrData, testAppName);
        CertificateSignResult result = future.get();
        assertFalse(future.isCompletedExceptionally());
        assertFalse(future.isCancelled());
        assertNotNull(result.getDn());
    }

    /** A CSR whose CN does not match the application must be rejected. */
    @Test
    void testCreateApplicationFromCsrWithInvalidCn() throws Exception {
        String testCsrData = StreamUtils.copyToString(
                new ClassPathResource("/certificates/test_quattroInvalidCn.csr").getInputStream(),
                StandardCharsets.UTF_8);
        CaManagerImpl testCaManagerImpl = new CaManagerImpl("test", authConfig);
        // assertThrows replaces the manual try / fail / catch pattern
        ExecutionException e = assertThrows(ExecutionException.class,
                () -> testCaManagerImpl.createApplicationCertificateFromCsr(testAppId, testCsrData, testAppName)
                        .get());
        assertInstanceOf(CertificateParsingException.class, e.getCause());
    }

    /** A CSR carrying a wrong application ID must be rejected. */
    @Test
    void testCreateApplicationFromCsrWithInvalidAppId() throws Exception {
        String testCsrData = StreamUtils.copyToString(
                new ClassPathResource("/certificates/test_quattroInvalidAppId.csr").getInputStream(),
                StandardCharsets.UTF_8);
        CaManagerImpl testCaManagerImpl = new CaManagerImpl("test", authConfig);
        ExecutionException e = assertThrows(ExecutionException.class,
                () -> testCaManagerImpl.createApplicationCertificateFromCsr(testAppId, testCsrData, testAppName)
                        .get());
        assertInstanceOf(CertificateParsingException.class, e.getCause());
    }

    /** Data which is no CSR at all must fail with a CertificateException. */
    @Test
    void testCreateApplicationFromInvalidCsr() throws Exception {
        CaManagerImpl testCaManagerImpl = new CaManagerImpl("test", authConfig);
        ExecutionException e = assertThrows(ExecutionException.class,
                () -> testCaManagerImpl.createApplicationCertificateFromCsr(testAppId, "testCsrData", testAppName)
                        .get());
        assertInstanceOf(CertificateException.class, e.getCause());
    }

    /** Extending a certificate must keep the existing DN, both in the result and in the certificate. */
    @Test
    void testExtendCertificate() throws Exception {
        CaManagerImpl testCaManagerImpl = new CaManagerImpl("test", authConfig);
        String testCsrData = StreamUtils.copyToString(
                new ClassPathResource("/certificates/test_quattroExtend.csr").getInputStream(), StandardCharsets.UTF_8);
        CompletableFuture<CertificateSignResult> future = testCaManagerImpl
                .extendApplicationCertificate("CN=quattro,OU=certification_12345", testCsrData);
        CertificateSignResult result = future.get();
        assertEquals("CN=quattro,OU=certification_12345", result.getDn());
        // to be VERY sure, also inspect certificate (note that toString() output is slightly different)
        assertEquals("CN=quattro, OU=certification_12345",
                result.getCertificate().getSubjectX500Principal().toString());
    }

    /** Extension requests whose DN does not match the CSR must be rejected. */
    @Test
    void testExtendCertificate_wrongDn() throws Exception {
        CaManagerImpl testCaManagerImpl = new CaManagerImpl("test", authConfig);
        String testCsrData = StreamUtils.copyToString(
                new ClassPathResource("/certificates/test_quattroExtend.csr").getInputStream(), StandardCharsets.UTF_8);
        ExecutionException e = assertThrows(ExecutionException.class,
                () -> testCaManagerImpl.extendApplicationCertificate("CN=quattro", testCsrData).get());
        assertInstanceOf(CertificateException.class, e.getCause());
    }

    /** Builds the authentication config pointing at the test CA resources on the classpath. */
    private CertificatesAuthenticationConfig mockAuthConfig() {
        CertificatesAuthenticationConfig config = new CertificatesAuthenticationConfig();
        config.setClientDn("CN=KafkaAdmin");
        config.setCaCertificateFile(new ClassPathResource("/certificates/ca.cer"));
        config.setCaKeyFile(new ClassPathResource("/certificates/ca.key"));
        config.setCertificatesWorkdir(workdir);
        return config;
    }
}
| 5,631 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
HomeController.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/HomeController.java | package com.hermesworld.ais.galapagos;
import jakarta.servlet.http.HttpServletRequest;
import org.springframework.stereotype.Controller;
import org.springframework.util.ObjectUtils;
import org.springframework.web.bind.annotation.GetMapping;
/**
 * Controller serving the Angular frontend. All "virtual" paths (routes) of the Angular application
 * are forwarded to the <code>index.html</code> page, so deep links into the single page application
 * work.
 *
 * @author AlbrechtFlo
 *
 */
@Controller
public class HomeController {

    /**
     * Forwards all known Angular routes to the <code>index.html</code> page, preserving any query
     * string. Note: If new routes are added to the frontend, they must also be added to the mapping
     * of this method.
     *
     * @return Forward command to the <code>index.html</code> page.
     */
    @GetMapping({ "/app/applications", "/app/admin", "/app/topics", "/app/topics/**", "/app/dashboard",
            "/app/createtopic", "/app/user-settings" })
    public String app(HttpServletRequest request) {
        String queryString = request.getQueryString();
        if (ObjectUtils.isEmpty(queryString)) {
            return "forward:/app/index.html";
        }
        return "forward:/app/index.html?" + queryString;
    }

    /** Redirects the application base path to the dashboard route. */
    @GetMapping("/app")
    public String appRoot() {
        return "redirect:/app/dashboard";
    }

    /** Redirects the server root to the dashboard route. */
    @GetMapping("/")
    public String root() {
        return "redirect:/app/dashboard";
    }
}
| 1,380 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
GalapagosApplication.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/GalapagosApplication.java | package com.hermesworld.ais.galapagos;
import java.security.Security;
import java.util.List;
import com.hermesworld.ais.galapagos.adminjobs.AdminJob;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.web.servlet.ServletComponentScan;
import org.springframework.context.ApplicationContext;
import org.springframework.scheduling.annotation.EnableScheduling;
@ServletComponentScan
@SpringBootApplication
@EnableScheduling
public class GalapagosApplication implements ApplicationRunner {

    // command line options of the form --galapagos.jobs.<jobName> select admin jobs to run
    private static final String ADMIN_JOB_OPTION_PREFIX = "galapagos.jobs.";

    @Autowired
    private List<AdminJob> adminJobs;

    @Autowired
    private ApplicationContext applicationContext;

    public static void main(String[] args) {
        Security.setProperty("crypto.policy", "unlimited");
        Security.addProvider(new BouncyCastleProvider());
        SpringApplication.run(GalapagosApplication.class, args);
    }

    /**
     * Runs any admin jobs requested via <code>--galapagos.jobs.&lt;jobName&gt;</code> command line
     * options. If at least one job ran, the application exits with code 0 afterwards; if a job fails,
     * the application exits with code 1 immediately, without running further requested jobs.
     *
     * @param args Application arguments, scanned for admin job options.
     *
     * @throws IllegalArgumentException If an unknown job name was given on the command line.
     */
    @Override
    public void run(ApplicationArguments args) throws Exception {
        boolean exit = false;
        for (String optionName : args.getOptionNames()) {
            if (optionName.startsWith(ADMIN_JOB_OPTION_PREFIX)) {
                String jobName = optionName.substring(ADMIN_JOB_OPTION_PREFIX.length());
                AdminJob job = adminJobs.stream().filter(j -> jobName.equals(j.getJobName())).findFirst().orElseThrow(
                        () -> new IllegalArgumentException("Unknown Galapagos Admin job type: " + optionName));
                try {
                    job.run(args);
                    exit = true;
                }
                catch (Throwable t) {
                    // admin jobs are invoked from the console, so print the trace directly
                    t.printStackTrace();
                    SpringApplication.exit(applicationContext, () -> 1);
                    // FIX: stop here. Previously, the loop continued after a failed job, and a
                    // subsequent successful job would have triggered an additional exit with
                    // code 0, masking the failure exit code.
                    return;
                }
            }
        }
        if (exit) {
            SpringApplication.exit(applicationContext, () -> 0);
        }
    }
}
| 2,213 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicInUseException.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/TopicInUseException.java | package com.hermesworld.ais.galapagos.topics;
/**
 * Checked exception indicating that an operation on a topic cannot be performed because the topic is
 * currently in use.
 */
public class TopicInUseException extends Exception {

    private static final long serialVersionUID = 6487252851931266201L;

    /**
     * Creates the exception with a detail message only.
     *
     * @param message Detail message describing why the topic is considered in use.
     */
    public TopicInUseException(String message) {
        super(message);
    }

    /**
     * Creates the exception with a detail message and an underlying cause.
     *
     * @param message Detail message describing why the topic is considered in use.
     * @param cause   Underlying cause of this exception.
     */
    public TopicInUseException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Creates the exception with an underlying cause only.
     *
     * @param cause Underlying cause of this exception.
     */
    public TopicInUseException(Throwable cause) {
        super(cause);
    }
}
| 438 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
SchemaMetadata.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/SchemaMetadata.java | package com.hermesworld.ais.galapagos.topics;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.util.HasKey;
import lombok.Getter;
import lombok.Setter;
import java.time.ZonedDateTime;
/**
 * Metadata of a single JSON schema version published for a topic. Instances are stored in
 * topic-based repositories (Jackson-serialized), keyed by their {@link #key()} (the schema ID).
 * Getters and setters are generated by Lombok.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonSerialize
@Getter
@Setter
public class SchemaMetadata implements HasKey {

    // unique ID of this schema entry; used as repository key
    private String id;

    // name of the topic this schema belongs to
    private String topicName;

    // version number of the schema for this topic
    private int schemaVersion;

    // the JSON schema itself, as a String
    private String jsonSchema;

    private ZonedDateTime createdAt;

    private String createdBy;

    // free-text description of what changed compared to the previous schema version
    private String changeDescription;

    /** Creates an empty instance, e.g. for Jackson deserialization. */
    public SchemaMetadata() {
    }

    /** Copy constructor: copies all fields of the given original (shallow copy). */
    public SchemaMetadata(SchemaMetadata original) {
        this.id = original.id;
        this.topicName = original.topicName;
        this.schemaVersion = original.schemaVersion;
        this.jsonSchema = original.jsonSchema;
        this.createdAt = original.createdAt;
        this.createdBy = original.createdBy;
        this.changeDescription = original.changeDescription;
    }

    /** Returns the repository key for this object: the schema ID. */
    @Override
    public String key() {
        return id;
    }
}
| 1,144 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicType.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/TopicType.java | package com.hermesworld.ais.galapagos.topics;
/**
 * The type of a Kafka topic as managed by Galapagos. {@code INTERNAL} topics are treated specially,
 * e.g. they are only listed for members of the owning application.
 */
public enum TopicType {
    EVENTS,
    DATA,
    COMMANDS,
    INTERNAL
}
| 113 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
MessagesPerDay.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/MessagesPerDay.java | package com.hermesworld.ais.galapagos.topics;
import lombok.Getter;
/**
 * Rough classification of the expected number of messages per day on a topic. Each constant carries
 * the lower boundary (inclusive) of its message count range; the Lombok-generated getter exposes it.
 */
@Getter
public enum MessagesPerDay {

    FEW(0), NORMAL(1000), MANY(100000), VERY_MANY(1000000);

    // lower boundary of the messages-per-day range represented by this constant
    private final int lowerBoundary;

    MessagesPerDay(int lowerBoundary) {
        this.lowerBoundary = lowerBoundary;
    }
}
| 300 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicMetadata.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/TopicMetadata.java | package com.hermesworld.ais.galapagos.topics;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.annotation.JsonFormat.Shape;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.hermesworld.ais.galapagos.util.HasKey;
import lombok.Getter;
import lombok.Setter;
import java.time.LocalDate;
import java.util.List;
/**
 * Metadata of a single Kafka topic managed by Galapagos. Instances are stored in topic-based
 * repositories (Jackson-serialized), keyed by their {@link #key()} (the topic name). Getters and
 * setters are generated by Lombok.
 */
@JsonSerialize
@JsonIgnoreProperties(ignoreUnknown = true)
@Getter
@Setter
public class TopicMetadata implements HasKey {

    // topic name; also used as repository key
    private String name;

    // the type of the topic (EVENTS, DATA, COMMANDS, or INTERNAL)
    private TopicType type;

    private String description;

    /**
     * Can be set from external tooling via Kafka Topic galapagos.internal.topics
     */
    private String infoUrl;

    // ID of the application owning this topic
    private String ownerApplicationId;

    private boolean isDeprecated;

    // deprecation message shown to consumers; only meaningful when isDeprecated is true
    private String deprecationText;

    // end-of-life date of a deprecated topic; serialized as an ISO date string
    @JsonFormat(shape = Shape.STRING)
    private LocalDate eolDate;

    private boolean subscriptionApprovalRequired;

    private long compactionTimeMillis;

    private long retentionTimeMillis;

    private Criticality criticality;

    private MessagesPerDay messagesPerDay;

    private MessagesSize messagesSize;

    // IDs of applications registered as (additional) producers; never null, empty by default
    private List<String> producers = List.of();

    /** Creates an empty instance, e.g. for Jackson deserialization. */
    public TopicMetadata() {
    }

    /** Copy constructor: copies all fields of the given original (shallow copy). */
    public TopicMetadata(TopicMetadata original) {
        this.name = original.name;
        this.type = original.type;
        this.description = original.description;
        this.infoUrl = original.infoUrl;
        this.ownerApplicationId = original.ownerApplicationId;
        this.isDeprecated = original.isDeprecated;
        this.deprecationText = original.deprecationText;
        this.eolDate = original.eolDate;
        this.subscriptionApprovalRequired = original.subscriptionApprovalRequired;
        this.compactionTimeMillis = original.compactionTimeMillis;
        this.retentionTimeMillis = original.retentionTimeMillis;
        this.criticality = original.criticality;
        this.messagesPerDay = original.messagesPerDay;
        this.messagesSize = original.messagesSize;
        this.producers = original.producers;
    }

    /** Returns the repository key for this object: the topic name. */
    @Override
    public String key() {
        return name;
    }
}
| 2,202 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
MessagesSize.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/MessagesSize.java | package com.hermesworld.ais.galapagos.topics;
import lombok.Getter;
/**
 * Rough classification of the expected size of single messages on a topic. Each constant carries the
 * lower boundary (inclusive) of its size range; the Lombok-generated getter exposes it.
 */
@Getter
public enum MessagesSize {

    VERY_SMALL(0), SMALL(1000), NORMAL(10000), LARGE(100000), VERY_LARGE(1000000);

    // lower boundary of the message size range represented by this constant
    private final int lowerBoundary;

    MessagesSize(int lowerBoundary) {
        this.lowerBoundary = lowerBoundary;
    }
}
| 318 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
GalapagosTopicConfig.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/config/GalapagosTopicConfig.java | package com.hermesworld.ais.galapagos.topics.config;
import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import java.time.Period;
/**
* Represents all (technical) configuration elements related to Topics. <br>
* Default values can be found in resource file <code>application.properties</code>. <br>
* For Naming Rules, see {@link com.hermesworld.ais.galapagos.naming.config.NamingConfig}.
*/
/**
 * Represents all (technical) configuration elements related to Topics. <br>
 * Default values can be found in resource file <code>application.properties</code>. <br>
 * For Naming Rules, see {@link com.hermesworld.ais.galapagos.naming.config.NamingConfig}.
 */
@Configuration
@ConfigurationProperties("galapagos.topics")
@Getter
@Setter
public class GalapagosTopicConfig {

    // maximum number of partitions allowed per topic (TODO confirm where this is enforced)
    private int maxPartitionCount;

    // number of partitions used when the topic creator does not specify one
    private int defaultPartitionCount;

    // minimum period between deprecating a topic and its earliest allowed EOL date
    private Period minDeprecationTime;

    // replication factor for topics of standard criticality
    private int standardReplicationFactor;

    // replication factor for topics of critical criticality
    private int criticalReplicationFactor;

    // nested configuration for JSON schema compatibility handling
    private TopicSchemaConfig schemas = new TopicSchemaConfig();
}
| 913 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicSchemaConfig.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/config/TopicSchemaConfig.java | package com.hermesworld.ais.galapagos.topics.config;
import lombok.Getter;
import lombok.Setter;
/**
 * Configuration flags for JSON schema compatibility checks of topics (bound as part of
 * {@code galapagos.topics.schemas}). Getters and setters are generated by Lombok.
 */
@Getter
@Setter
public class TopicSchemaConfig {

    // if true, new schema versions on command topics may add properties (TODO confirm consumer semantics)
    private boolean allowAddedPropertiesOnCommandTopics;

    // if true, new schema versions may remove optional properties
    private boolean allowRemovedOptionalProperties;
}
| 262 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
UpdateTopicConfigEntryDto.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/UpdateTopicConfigEntryDto.java | package com.hermesworld.ais.galapagos.topics.controller;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import lombok.Getter;
import lombok.Setter;
/**
 * JSON transfer object for a single Kafka topic configuration entry (name/value pair) in topic
 * config update requests. Getters and setters are generated by Lombok.
 */
@JsonSerialize
@Getter
@Setter
public class UpdateTopicConfigEntryDto {

    // name of the Kafka topic config property
    private String name;

    // value to set for the property
    private String value;
}
| 295 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
TopicController.java | /FileExtraction/Java_unseen/HermesGermany_galapagos/src/main/java/com/hermesworld/ais/galapagos/topics/controller/TopicController.java | package com.hermesworld.ais.galapagos.topics.controller;
import com.hermesworld.ais.galapagos.applications.ApplicationMetadata;
import com.hermesworld.ais.galapagos.applications.ApplicationsService;
import com.hermesworld.ais.galapagos.applications.BusinessCapability;
import com.hermesworld.ais.galapagos.applications.KnownApplication;
import com.hermesworld.ais.galapagos.kafka.KafkaCluster;
import com.hermesworld.ais.galapagos.kafka.KafkaClusters;
import com.hermesworld.ais.galapagos.kafka.TopicConfigEntry;
import com.hermesworld.ais.galapagos.kafka.config.KafkaEnvironmentConfig;
import com.hermesworld.ais.galapagos.naming.InvalidTopicNameException;
import com.hermesworld.ais.galapagos.naming.NamingService;
import com.hermesworld.ais.galapagos.schemas.IncompatibleSchemaException;
import com.hermesworld.ais.galapagos.security.CurrentUserService;
import com.hermesworld.ais.galapagos.topics.SchemaCompatCheckMode;
import com.hermesworld.ais.galapagos.topics.SchemaMetadata;
import com.hermesworld.ais.galapagos.topics.TopicMetadata;
import com.hermesworld.ais.galapagos.topics.TopicType;
import com.hermesworld.ais.galapagos.topics.service.ValidatingTopicService;
import jakarta.validation.Valid;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.header.Header;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.server.ResponseStatusException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
@RestController
@Slf4j
public class TopicController {
private final ValidatingTopicService topicService;
private final KafkaClusters kafkaEnvironments;
private final ApplicationsService applicationsService;
private final NamingService namingService;
private final CurrentUserService userService;
private static final Supplier<ResponseStatusException> badRequest = () -> new ResponseStatusException(
HttpStatus.BAD_REQUEST);
private static final Supplier<ResponseStatusException> notFound = () -> new ResponseStatusException(
HttpStatus.NOT_FOUND);
private static final int PEEK_LIMIT = 100;
    /**
     * Creates the controller with all required collaborating services (constructor injection).
     *
     * @param topicService        Topic service performing the actual (validated) topic operations.
     * @param kafkaEnvironments   Access to the configured Kafka environments.
     * @param applicationsService Service providing application metadata and user authorization checks.
     * @param namingService       Service for topic name handling.
     * @param userService         Access to the current user.
     */
    public TopicController(ValidatingTopicService topicService, KafkaClusters kafkaEnvironments,
            ApplicationsService applicationsService, NamingService namingService, CurrentUserService userService) {
        this.topicService = topicService;
        this.kafkaEnvironments = kafkaEnvironments;
        this.applicationsService = applicationsService;
        this.namingService = namingService;
        this.userService = userService;
    }
@GetMapping(value = "/api/topics/{environmentId}", produces = MediaType.APPLICATION_JSON_VALUE)
public List<TopicDto> listTopics(@PathVariable String environmentId,
@RequestParam(required = false, defaultValue = "true") boolean includeInternal) {
kafkaEnvironments.getEnvironmentMetadata(environmentId).orElseThrow(notFound);
List<String> userAppIds = !includeInternal ? Collections.emptyList()
: applicationsService.getUserApplications().stream().map(KnownApplication::getId)
.collect(Collectors.toList());
return topicService.listTopics(environmentId).stream()
.filter(t -> t.getType() != TopicType.INTERNAL || userAppIds.contains(t.getOwnerApplicationId()))
.map(t -> toDto(environmentId, t, topicService.canDeleteTopic(environmentId, t.getName())))
.collect(Collectors.toList());
}
@GetMapping(value = "/api/topicconfigs/{environmentId}/{topicName}", produces = MediaType.APPLICATION_JSON_VALUE)
public List<TopicConfigEntryDto> getTopicConfig(@PathVariable String environmentId,
@PathVariable String topicName) {
KafkaCluster cluster = kafkaEnvironments.getEnvironment(environmentId).orElseThrow(notFound);
topicService.listTopics(environmentId).stream().filter(topic -> topicName.equals(topic.getName())).findAny()
.orElseThrow(notFound);
try {
return cluster.getTopicConfig(topicName)
.thenApply(set -> set.stream().map(this::toConfigEntryDto).collect(Collectors.toList())).get();
}
catch (ExecutionException e) {
throw handleExecutionException(e);
}
catch (InterruptedException e) {
return null;
}
}
@PostMapping(value = "/api/producers/{environmentId}/{topicName}", consumes = MediaType.APPLICATION_JSON_VALUE)
public void addProducerToTopic(@PathVariable String environmentId, @PathVariable String topicName,
@RequestBody AddProducerDto producer) {
TopicMetadata topic = topicService.getTopic(environmentId, topicName).orElseThrow(notFound);
if (!applicationsService.isUserAuthorizedFor(topic.getOwnerApplicationId())) {
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
if (!StringUtils.hasLength(producer.getProducerApplicationId())) {
throw new ResponseStatusException(HttpStatus.BAD_REQUEST);
}
try {
topicService.addTopicProducer(environmentId, topicName, producer.getProducerApplicationId()).get();
}
catch (ExecutionException e) {
throw handleExecutionException(e);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
@DeleteMapping(value = "/api/producers/{envId}/{topicName}/{producerApplicationId}")
public ResponseEntity<Void> removeProducerFromTopic(@PathVariable String envId, @PathVariable String topicName,
@PathVariable String producerApplicationId) {
if (envId.isEmpty() || topicName.isEmpty() || producerApplicationId.isEmpty()) {
throw new ResponseStatusException(HttpStatus.BAD_REQUEST);
}
TopicMetadata topic = topicService.getTopic(envId, topicName).orElseThrow(notFound);
if (!applicationsService.isUserAuthorizedFor(topic.getOwnerApplicationId())) {
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
try {
topicService.removeTopicProducer(envId, topicName, producerApplicationId).get();
return ResponseEntity.noContent().build();
}
catch (ExecutionException e) {
throw handleExecutionException(e);
}
catch (InterruptedException e) {
return null;
}
}
@PostMapping(value = "/api/change-owner/{envId}/{topicName}")
public void changeTopicOwner(@PathVariable String envId, @PathVariable String topicName,
@RequestBody @Valid ChangeTopicOwnerDto request) {
if (envId.isEmpty() || topicName.isEmpty()) {
throw new ResponseStatusException(HttpStatus.BAD_REQUEST);
}
TopicMetadata topic = topicService.getTopic(envId, topicName).orElseThrow(notFound);
if (!applicationsService.isUserAuthorizedFor(topic.getOwnerApplicationId())) {
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
try {
topicService.changeTopicOwner(envId, topicName, request.getProducerApplicationId()).get();
}
catch (ExecutionException e) {
throw handleExecutionException(e);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
@PostMapping(value = "/api/topics/{environmentId}/{topicName}", consumes = MediaType.APPLICATION_JSON_VALUE)
public void updateTopic(@PathVariable String environmentId, @PathVariable String topicName,
@RequestBody UpdateTopicDto request) {
TopicMetadata topic = topicService.getTopic(environmentId, topicName).orElseThrow(notFound);
if (!applicationsService.isUserAuthorizedFor(topic.getOwnerApplicationId())) {
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
try {
if (request.isUpdateDescription()) {
topicService.updateTopicDescription(environmentId, topicName, request.getDescription()).get();
return;
}
if (StringUtils.hasLength(request.getDeprecationText())) {
if (request.getEolDate() == null) {
throw new ResponseStatusException(HttpStatus.BAD_REQUEST,
"eolDate must be set for Topic deprecation");
}
topicService.markTopicDeprecated(topicName, request.getDeprecationText(), request.getEolDate()).get();
}
else {
if (!topic.isDeprecated()) {
throw new ResponseStatusException(HttpStatus.BAD_REQUEST,
"Cannot remove deprecation from a topic that was not deprecated");
}
topicService.unmarkTopicDeprecated(topicName).get();
}
}
catch (ExecutionException e) {
throw handleExecutionException(e);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
@PostMapping(value = "/api/topicconfigs/{environmentId}/{topicName}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE)
public void updateTopicConfig(@PathVariable String environmentId, @PathVariable String topicName,
@RequestBody List<UpdateTopicConfigEntryDto> configs) throws InterruptedException {
KafkaCluster cluster = kafkaEnvironments.getEnvironment(environmentId).orElseThrow(notFound);
TopicMetadata metadata = topicService.listTopics(environmentId).stream()
.filter(topic -> topicName.equals(topic.getName())).findAny().orElseThrow(notFound);
if (!applicationsService.isUserAuthorizedFor(metadata.getOwnerApplicationId())) {
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
for (UpdateTopicConfigEntryDto config : configs) {
if (!StringUtils.hasLength(config.getName()) || !StringUtils.hasLength(config.getValue())) {
throw badRequest.get();
}
}
try {
cluster.setTopicConfig(topicName,
configs.stream().collect(
Collectors.toMap(UpdateTopicConfigEntryDto::getName, UpdateTopicConfigEntryDto::getValue)))
.get();
}
catch (ExecutionException e) {
throw handleExecutionException(e);
}
}
@PostMapping(value = "/api/util/topicname", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE)
public TopicNameDto getTopicNameSuggestion(@RequestBody TopicNameSuggestionQueryDto query) {
if (!StringUtils.hasLength(query.getApplicationId()) || !StringUtils.hasLength(query.getEnvironmentId())
|| query.getTopicType() == null) {
throw badRequest.get();
}
// TODO should go into TopicService
KnownApplication app = applicationsService.getKnownApplication(query.getApplicationId())
.orElseThrow(badRequest);
BusinessCapability cap = app.getBusinessCapabilities().stream()
.filter(bc -> bc.getId().equals(query.getBusinessCapabilityId())).findFirst().orElse(null);
ApplicationMetadata metadata = applicationsService
.getApplicationMetadata(query.getEnvironmentId(), query.getApplicationId()).orElse(null);
if (metadata == null) {
throw badRequest.get();
}
String name = namingService.getTopicNameSuggestion(query.getTopicType(), app, cap);
if (name == null) {
throw badRequest.get();
}
return new TopicNameDto(name);
}
@PutMapping(value = "/api/topics/{environmentId}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE)
public TopicDto createTopic(@PathVariable String environmentId, @RequestBody CreateTopicDto topicData) {
if (!applicationsService.isUserAuthorizedFor(topicData.getOwnerApplicationId())) {
// TODO Security Audit log?
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
KafkaEnvironmentConfig envMeta = kafkaEnvironments.getEnvironmentMetadata(environmentId)
.orElseThrow(() -> new ResponseStatusException(HttpStatus.NOT_FOUND));
if (envMeta.isStagingOnly()) {
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
if (topicData.getTopicType() == null) {
throw new ResponseStatusException(HttpStatus.BAD_REQUEST, "Missing topic type");
}
if (!StringUtils.hasLength(topicData.getName())) {
throw new ResponseStatusException(HttpStatus.BAD_REQUEST, "Missing topic name");
}
try {
return toDto(environmentId,
topicService
.createTopic(environmentId, toMetadata(topicData), topicData.getPartitionCount(),
Optional.ofNullable(topicData.getTopicConfig()).orElse(Collections.emptyMap()))
.get(),
true);
}
catch (ExecutionException e) {
throw handleExecutionException(e);
}
catch (InterruptedException e) {
return null;
}
}
@DeleteMapping(value = "/api/topics/{environmentId}/{topicName}")
public ResponseEntity<Void> deleteTopic(@PathVariable String environmentId, @PathVariable String topicName) {
TopicMetadata metadata = topicService.listTopics(environmentId).stream()
.filter(topic -> topicName.equals(topic.getName())).findAny().orElseThrow(notFound);
kafkaEnvironments.getEnvironmentMetadata(environmentId).orElseThrow(notFound);
if (!applicationsService.isUserAuthorizedFor(metadata.getOwnerApplicationId())) {
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
if (!topicService.canDeleteTopic(environmentId, topicName)) {
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
try {
topicService.deleteTopic(environmentId, topicName).get();
}
catch (InterruptedException e) {
return null;
}
catch (ExecutionException e) {
throw handleExecutionException(e);
}
return ResponseEntity.noContent().build();
}
@GetMapping(value = "/api/schemas/{environmentId}/{topicName}")
public List<SchemaMetadata> getTopicSchemas(@PathVariable String environmentId, @PathVariable String topicName) {
if (topicService.getTopic(environmentId, topicName).isEmpty()) {
throw notFound.get();
}
return topicService.getTopicSchemaVersions(environmentId, topicName);
}
// intentionally no /api - unprotected resource!
@GetMapping(value = "/schema/{schemaId}", produces = MediaType.APPLICATION_JSON_VALUE)
public String getSchema(@PathVariable String schemaId) {
if ("empty".equals(schemaId)) {
return "{}";
}
for (String id : kafkaEnvironments.getEnvironmentIds()) {
Optional<SchemaMetadata> schema = topicService.getSchemaById(id, schemaId);
if (schema.isPresent()) {
return schema.get().getJsonSchema();
}
}
throw notFound.get();
}
@PutMapping(value = "/api/schemas/{environmentId}/{topicName}", consumes = MediaType.APPLICATION_JSON_VALUE)
public ResponseEntity<String> addTopicSchemaVersion(@PathVariable String environmentId,
@PathVariable String topicName, @RequestParam(defaultValue = "false") boolean skipCompatCheck,
@RequestBody AddSchemaVersionDto schemaVersionDto) {
TopicMetadata topic = topicService.listTopics(environmentId).stream().filter(t -> topicName.equals(t.getName()))
.findAny().orElseThrow(notFound);
if (!applicationsService.isUserAuthorizedFor(topic.getOwnerApplicationId())) {
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
if (schemaVersionDto == null || !StringUtils.hasLength(schemaVersionDto.getJsonSchema())) {
throw new ResponseStatusException(HttpStatus.BAD_REQUEST,
"JSON Schema (jsonSchema property) is missing from request body");
}
SchemaCompatCheckMode checkMode = skipCompatCheck ? SchemaCompatCheckMode.SKIP_SCHEMA_CHECK
: SchemaCompatCheckMode.CHECK_SCHEMA;
if (skipCompatCheck && !userService.isAdmin()) {
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
try {
SchemaMetadata metadata = topicService.addTopicSchemaVersion(environmentId, topicName,
schemaVersionDto.getJsonSchema(), schemaVersionDto.getChangeDescription(), checkMode).get();
return ResponseEntity.created(new URI("/schema/" + metadata.getId())).build();
}
catch (InterruptedException e) {
return null;
}
catch (ExecutionException e) {
throw handleExecutionException(e);
}
catch (URISyntaxException e) {
// should not occur for /schema/ + UUID
throw new RuntimeException(e);
}
}
@DeleteMapping(value = "/api/schemas/{environmentId}/{topicName}", consumes = MediaType.APPLICATION_JSON_VALUE)
public ResponseEntity<Void> deleteLatestTopicSchemaVersion(@PathVariable String environmentId,
@PathVariable String topicName) {
TopicMetadata topic = topicService.listTopics(environmentId).stream().filter(t -> topicName.equals(t.getName()))
.findAny().orElseThrow(notFound);
if (!applicationsService.isUserAuthorizedFor(topic.getOwnerApplicationId())) {
throw new ResponseStatusException(HttpStatus.FORBIDDEN);
}
try {
topicService.deleteLatestTopicSchemaVersion(environmentId, topicName).get();
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
catch (ExecutionException e) {
throw handleExecutionException(e);
}
return ResponseEntity.noContent().build();
}
@GetMapping("/api/util/peek-data/{environmentId}/{topicName}")
public List<ConsumerRecordDto> peekTopicData(@PathVariable String environmentId, @PathVariable String topicName) {
try {
return topicService.peekTopicData(environmentId, topicName, PEEK_LIMIT).get().stream()
.map(this::toRecordDto).collect(Collectors.toList());
}
catch (InterruptedException e) {
return Collections.emptyList();
}
catch (ExecutionException e) {
throw handleExecutionException(e);
}
}
private TopicDto toDto(String environmentId, TopicMetadata topic, boolean canDelete) {
return new TopicDto(topic.getName(), topic.getType().toString(), environmentId, topic.getDescription(),
topic.getInfoUrl(), topic.getOwnerApplicationId(), topic.isDeprecated(), topic.getDeprecationText(),
topic.getEolDate() == null ? null : topic.getEolDate().toString(),
topic.isSubscriptionApprovalRequired(), canDelete, topic.getCompactionTimeMillis(),
topic.getRetentionTimeMillis(), topic.getCriticality(), topic.getMessagesPerDay(),
topic.getMessagesSize(), topic.getProducers());
}
private TopicConfigEntryDto toConfigEntryDto(TopicConfigEntry configEntry) {
return new TopicConfigEntryDto(configEntry.getName(), configEntry.getValue(), configEntry.isDefault(),
configEntry.isReadOnly(), configEntry.isSensitive());
}
private TopicMetadata toMetadata(CreateTopicDto dto) {
TopicMetadata topic = new TopicMetadata();
topic.setName(dto.getName());
topic.setDescription(dto.getDescription());
topic.setOwnerApplicationId(dto.getOwnerApplicationId());
topic.setType(dto.getTopicType());
topic.setSubscriptionApprovalRequired(dto.isSubscriptionApprovalRequired());
topic.setCompactionTimeMillis(dto.getCompactionTimeMillis());
topic.setRetentionTimeMillis(dto.getRetentionTimeMillis());
topic.setCriticality(dto.getCriticality());
topic.setMessagesPerDay(dto.getMessagesPerDay());
topic.setMessagesSize(dto.getMessagesSize());
return topic;
}
private ConsumerRecordDto toRecordDto(ConsumerRecord<String, String> record) {
Map<String, String> headers = StreamSupport.stream(record.headers().spliterator(), false)
.collect(Collectors.toMap(Header::key, h -> new String(h.value(), StandardCharsets.UTF_8)));
return new ConsumerRecordDto(record.key(), record.value(), record.offset(), record.timestamp(),
record.partition(), headers);
}
private ResponseStatusException handleExecutionException(ExecutionException e) {
Throwable t = e.getCause();
if (t instanceof IllegalArgumentException || t instanceof IllegalStateException
|| t instanceof InvalidTopicNameException || t instanceof IncompatibleSchemaException) {
return new ResponseStatusException(HttpStatus.BAD_REQUEST, e.getMessage());
}
if (t instanceof KafkaException) {
log.error("Unexpected Kafka exception during handling Topic REST call", t);
return new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, e.getMessage());
}
if (t instanceof NoSuchElementException) {
return new ResponseStatusException(HttpStatus.NOT_FOUND);
}
log.error("Unexpected exception during request handling: ", t);
return new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
| 22,568 | Java | .java | HermesGermany/galapagos | 81 | 22 | 15 | 2020-10-02T09:40:40Z | 2024-05-08T13:11:33Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.